Diffstat (limited to 'drivers/net/wireless/ath/ath10k')
-rw-r--r--  drivers/net/wireless/ath/ath10k/Kconfig | 88
-rw-r--r--  drivers/net/wireless/ath/ath10k/Makefile | 42
-rw-r--r--  drivers/net/wireless/ath/ath10k/ahb.c | 896
-rw-r--r--  drivers/net/wireless/ath/ath10k/ahb.h | 87
-rw-r--r--  drivers/net/wireless/ath/ath10k/bmi.c | 461
-rw-r--r--  drivers/net/wireless/ath/ath10k/bmi.h | 242
-rw-r--r--  drivers/net/wireless/ath/ath10k/ce.c | 1933
-rw-r--r--  drivers/net/wireless/ath/ath10k/ce.h | 428
-rw-r--r--  drivers/net/wireless/ath/ath10k/core.c | 2919
-rw-r--r--  drivers/net/wireless/ath/ath10k/core.h | 1174
-rw-r--r--  drivers/net/wireless/ath/ath10k/coredump.c | 1187
-rw-r--r--  drivers/net/wireless/ath/ath10k/coredump.h | 227
-rw-r--r--  drivers/net/wireless/ath/ath10k/debug.c | 2542
-rw-r--r--  drivers/net/wireless/ath/ath10k/debug.h | 267
-rw-r--r--  drivers/net/wireless/ath/ath10k/debugfs_sta.c | 640
-rw-r--r--  drivers/net/wireless/ath/ath10k/hif.h | 234
-rw-r--r--  drivers/net/wireless/ath/ath10k/htc.c | 910
-rw-r--r--  drivers/net/wireless/ath/ath10k/htc.h | 396
-rw-r--r--  drivers/net/wireless/ath/ath10k/htt.c | 281
-rw-r--r--  drivers/net/wireless/ath/ath10k/htt.h | 2075
-rw-r--r--  drivers/net/wireless/ath/ath10k/htt_rx.c | 3064
-rw-r--r--  drivers/net/wireless/ath/ath10k/htt_tx.c | 1579
-rw-r--r--  drivers/net/wireless/ath/ath10k/hw.c | 940
-rw-r--r--  drivers/net/wireless/ath/ath10k/hw.h | 1132
-rw-r--r--  drivers/net/wireless/ath/ath10k/mac.c | 8622
-rw-r--r--  drivers/net/wireless/ath/ath10k/mac.h | 104
-rw-r--r--  drivers/net/wireless/ath/ath10k/p2p.c | 156
-rw-r--r--  drivers/net/wireless/ath/ath10k/p2p.h | 28
-rw-r--r--  drivers/net/wireless/ath/ath10k/pci.c | 3798
-rw-r--r--  drivers/net/wireless/ath/ath10k/pci.h | 260
-rw-r--r--  drivers/net/wireless/ath/ath10k/rx_desc.h | 1280
-rw-r--r--  drivers/net/wireless/ath/ath10k/sdio.c | 2147
-rw-r--r--  drivers/net/wireless/ath/ath10k/sdio.h | 227
-rw-r--r--  drivers/net/wireless/ath/ath10k/snoc.c | 1397
-rw-r--r--  drivers/net/wireless/ath/ath10k/snoc.h | 94
-rw-r--r--  drivers/net/wireless/ath/ath10k/spectral.c | 568
-rw-r--r--  drivers/net/wireless/ath/ath10k/spectral.h | 90
-rw-r--r--  drivers/net/wireless/ath/ath10k/swap.c | 208
-rw-r--r--  drivers/net/wireless/ath/ath10k/swap.h | 70
-rw-r--r--  drivers/net/wireless/ath/ath10k/targaddrs.h | 490
-rw-r--r--  drivers/net/wireless/ath/ath10k/testmode.c | 465
-rw-r--r--  drivers/net/wireless/ath/ath10k/testmode.h | 46
-rw-r--r--  drivers/net/wireless/ath/ath10k/testmode_i.h | 71
-rw-r--r--  drivers/net/wireless/ath/ath10k/thermal.c | 221
-rw-r--r--  drivers/net/wireless/ath/ath10k/thermal.h | 64
-rw-r--r--  drivers/net/wireless/ath/ath10k/trace.c | 20
-rw-r--r--  drivers/net/wireless/ath/ath10k/trace.h | 544
-rw-r--r--  drivers/net/wireless/ath/ath10k/txrx.c | 274
-rw-r--r--  drivers/net/wireless/ath/ath10k/txrx.h | 38
-rw-r--r--  drivers/net/wireless/ath/ath10k/usb.c | 1125
-rw-r--r--  drivers/net/wireless/ath/ath10k/usb.h | 128
-rw-r--r--  drivers/net/wireless/ath/ath10k/wmi-ops.h | 1568
-rw-r--r--  drivers/net/wireless/ath/ath10k/wmi-tlv.c | 4034
-rw-r--r--  drivers/net/wireless/ath/ath10k/wmi-tlv.h | 2157
-rw-r--r--  drivers/net/wireless/ath/ath10k/wmi.c | 9235
-rw-r--r--  drivers/net/wireless/ath/ath10k/wmi.h | 7167
-rw-r--r--  drivers/net/wireless/ath/ath10k/wow.c | 483
-rw-r--r--  drivers/net/wireless/ath/ath10k/wow.h | 41
58 files changed, 70964 insertions, 0 deletions
diff --git a/drivers/net/wireless/ath/ath10k/Kconfig b/drivers/net/wireless/ath/ath10k/Kconfig
new file mode 100644
index 000000000..54ff59301
--- /dev/null
+++ b/drivers/net/wireless/ath/ath10k/Kconfig
@@ -0,0 +1,88 @@
+config ATH10K
+ tristate "Atheros 802.11ac wireless cards support"
+ depends on MAC80211 && HAS_DMA
+ select ATH_COMMON
+ select CRC32
+ select WANT_DEV_COREDUMP
+ select ATH10K_CE
+ ---help---
+ This module adds support for wireless adapters based on the
+ Atheros IEEE 802.11ac family of chipsets.
+
+ If you choose to build a module, it'll be called ath10k.
+
+config ATH10K_CE
+ bool
+
+config ATH10K_PCI
+ tristate "Atheros ath10k PCI support"
+ depends on ATH10K && PCI
+ ---help---
+ This module adds support for the PCIe bus
+
+config ATH10K_AHB
+ bool "Atheros ath10k AHB support"
+ depends on ATH10K_PCI && OF && RESET_CONTROLLER
+ ---help---
+ This module adds support for the AHB bus
+
+config ATH10K_SDIO
+ tristate "Atheros ath10k SDIO support (EXPERIMENTAL)"
+ depends on ATH10K && MMC
+ ---help---
+ This module adds experimental support for the SDIO/MMC bus. It is
+ currently a work in progress and will not fully work.
+
+config ATH10K_USB
+ tristate "Atheros ath10k USB support (EXPERIMENTAL)"
+ depends on ATH10K && USB
+ ---help---
+ This module adds experimental support for the USB bus. It is
+ currently a work in progress and will not fully work.
+
+config ATH10K_SNOC
+ tristate "Qualcomm ath10k SNOC support (EXPERIMENTAL)"
+ depends on ATH10K && ARCH_QCOM
+ ---help---
+ This module adds support for the integrated WCN3990 chip connected
+ to the system NoC (SNOC). It is currently a work in progress and
+ will not fully work.
+
+config ATH10K_DEBUG
+ bool "Atheros ath10k debugging"
+ depends on ATH10K
+ ---help---
+ Enables debug support
+
+ If unsure, say Y to make it easier to debug problems.
+
+config ATH10K_DEBUGFS
+ bool "Atheros ath10k debugfs support"
+ depends on ATH10K && DEBUG_FS
+ ---help---
+ Enables debugfs support
+
+ If unsure, say Y to make it easier to debug problems.
+
+config ATH10K_SPECTRAL
+ bool "Atheros ath10k spectral scan support"
+ depends on ATH10K_DEBUGFS
+ select RELAY
+ default n
+ ---help---
+ Say Y to enable access to the FFT/spectral data via debugfs.
+
+config ATH10K_TRACING
+ bool "Atheros ath10k tracing support"
+ depends on ATH10K
+ depends on EVENT_TRACING
+ ---help---
+ Select this to let ath10k use the tracing infrastructure.
+
+config ATH10K_DFS_CERTIFIED
+ bool "Atheros DFS support for certified platforms"
+ depends on ATH10K && CFG80211_CERTIFICATION_ONUS
+ default n
+ ---help---
+ This option enables DFS support for initiating radiation on
+ ath10k.
diff --git a/drivers/net/wireless/ath/ath10k/Makefile b/drivers/net/wireless/ath/ath10k/Makefile
new file mode 100644
index 000000000..44d60a61b
--- /dev/null
+++ b/drivers/net/wireless/ath/ath10k/Makefile
@@ -0,0 +1,42 @@
+# SPDX-License-Identifier: GPL-2.0
+obj-$(CONFIG_ATH10K) += ath10k_core.o
+ath10k_core-y += mac.o \
+ debug.o \
+ core.o \
+ htc.o \
+ htt.o \
+ htt_rx.o \
+ htt_tx.o \
+ txrx.o \
+ wmi.o \
+ wmi-tlv.o \
+ bmi.o \
+ hw.o \
+ p2p.o \
+ swap.o
+
+ath10k_core-$(CONFIG_ATH10K_SPECTRAL) += spectral.o
+ath10k_core-$(CONFIG_NL80211_TESTMODE) += testmode.o
+ath10k_core-$(CONFIG_ATH10K_TRACING) += trace.o
+ath10k_core-$(CONFIG_THERMAL) += thermal.o
+ath10k_core-$(CONFIG_MAC80211_DEBUGFS) += debugfs_sta.o
+ath10k_core-$(CONFIG_PM) += wow.o
+ath10k_core-$(CONFIG_DEV_COREDUMP) += coredump.o
+ath10k_core-$(CONFIG_ATH10K_CE) += ce.o
+
+obj-$(CONFIG_ATH10K_PCI) += ath10k_pci.o
+ath10k_pci-y += pci.o
+
+ath10k_pci-$(CONFIG_ATH10K_AHB) += ahb.o
+
+obj-$(CONFIG_ATH10K_SDIO) += ath10k_sdio.o
+ath10k_sdio-y += sdio.o
+
+obj-$(CONFIG_ATH10K_USB) += ath10k_usb.o
+ath10k_usb-y += usb.o
+
+obj-$(CONFIG_ATH10K_SNOC) += ath10k_snoc.o
+ath10k_snoc-y += snoc.o
+
+# for tracing framework to find trace.h
+CFLAGS_trace.o := -I$(src)
diff --git a/drivers/net/wireless/ath/ath10k/ahb.c b/drivers/net/wireless/ath/ath10k/ahb.c
new file mode 100644
index 000000000..be90c9e9e
--- /dev/null
+++ b/drivers/net/wireless/ath/ath10k/ahb.c
@@ -0,0 +1,896 @@
+/*
+ * Copyright (c) 2016-2017 Qualcomm Atheros, Inc. All rights reserved.
+ * Copyright (c) 2015 The Linux Foundation. All rights reserved.
+ *
+ * Permission to use, copy, modify, and/or distribute this software for any
+ * purpose with or without fee is hereby granted, provided that the above
+ * copyright notice and this permission notice appear in all copies.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
+ * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
+ * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+ * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+ * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
+ * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+ */
+#include <linux/module.h>
+#include <linux/of.h>
+#include <linux/of_device.h>
+#include <linux/clk.h>
+#include <linux/reset.h>
+#include "core.h"
+#include "debug.h"
+#include "pci.h"
+#include "ahb.h"
+
+static const struct of_device_id ath10k_ahb_of_match[] = {
+ { .compatible = "qcom,ipq4019-wifi",
+ .data = (void *)ATH10K_HW_QCA4019
+ },
+ { }
+};
+
+MODULE_DEVICE_TABLE(of, ath10k_ahb_of_match);
+
+#define QCA4019_SRAM_ADDR 0x000C0000
+#define QCA4019_SRAM_LEN 0x00040000 /* 256 KiB */
+
+static inline struct ath10k_ahb *ath10k_ahb_priv(struct ath10k *ar)
+{
+ return &((struct ath10k_pci *)ar->drv_priv)->ahb[0];
+}
+
+static void ath10k_ahb_write32(struct ath10k *ar, u32 offset, u32 value)
+{
+ struct ath10k_ahb *ar_ahb = ath10k_ahb_priv(ar);
+
+ iowrite32(value, ar_ahb->mem + offset);
+}
+
+static u32 ath10k_ahb_read32(struct ath10k *ar, u32 offset)
+{
+ struct ath10k_ahb *ar_ahb = ath10k_ahb_priv(ar);
+
+ return ioread32(ar_ahb->mem + offset);
+}
+
+static u32 ath10k_ahb_gcc_read32(struct ath10k *ar, u32 offset)
+{
+ struct ath10k_ahb *ar_ahb = ath10k_ahb_priv(ar);
+
+ return ioread32(ar_ahb->gcc_mem + offset);
+}
+
+static void ath10k_ahb_tcsr_write32(struct ath10k *ar, u32 offset, u32 value)
+{
+ struct ath10k_ahb *ar_ahb = ath10k_ahb_priv(ar);
+
+ iowrite32(value, ar_ahb->tcsr_mem + offset);
+}
+
+static u32 ath10k_ahb_tcsr_read32(struct ath10k *ar, u32 offset)
+{
+ struct ath10k_ahb *ar_ahb = ath10k_ahb_priv(ar);
+
+ return ioread32(ar_ahb->tcsr_mem + offset);
+}
+
+static u32 ath10k_ahb_soc_read32(struct ath10k *ar, u32 addr)
+{
+ return ath10k_ahb_read32(ar, RTC_SOC_BASE_ADDRESS + addr);
+}
+
+static int ath10k_ahb_get_num_banks(struct ath10k *ar)
+{
+ if (ar->hw_rev == ATH10K_HW_QCA4019)
+ return 1;
+
+ ath10k_warn(ar, "unknown number of banks, assuming 1\n");
+ return 1;
+}
+
+static int ath10k_ahb_clock_init(struct ath10k *ar)
+{
+ struct ath10k_ahb *ar_ahb = ath10k_ahb_priv(ar);
+ struct device *dev;
+
+ dev = &ar_ahb->pdev->dev;
+
+ ar_ahb->cmd_clk = devm_clk_get(dev, "wifi_wcss_cmd");
+ if (IS_ERR_OR_NULL(ar_ahb->cmd_clk)) {
+ ath10k_err(ar, "failed to get cmd clk: %ld\n",
+ PTR_ERR(ar_ahb->cmd_clk));
+ return ar_ahb->cmd_clk ? PTR_ERR(ar_ahb->cmd_clk) : -ENODEV;
+ }
+
+ ar_ahb->ref_clk = devm_clk_get(dev, "wifi_wcss_ref");
+ if (IS_ERR_OR_NULL(ar_ahb->ref_clk)) {
+ ath10k_err(ar, "failed to get ref clk: %ld\n",
+ PTR_ERR(ar_ahb->ref_clk));
+ return ar_ahb->ref_clk ? PTR_ERR(ar_ahb->ref_clk) : -ENODEV;
+ }
+
+ ar_ahb->rtc_clk = devm_clk_get(dev, "wifi_wcss_rtc");
+ if (IS_ERR_OR_NULL(ar_ahb->rtc_clk)) {
+ ath10k_err(ar, "failed to get rtc clk: %ld\n",
+ PTR_ERR(ar_ahb->rtc_clk));
+ return ar_ahb->rtc_clk ? PTR_ERR(ar_ahb->rtc_clk) : -ENODEV;
+ }
+
+ return 0;
+}
+
+static void ath10k_ahb_clock_deinit(struct ath10k *ar)
+{
+ struct ath10k_ahb *ar_ahb = ath10k_ahb_priv(ar);
+
+ ar_ahb->cmd_clk = NULL;
+ ar_ahb->ref_clk = NULL;
+ ar_ahb->rtc_clk = NULL;
+}
+
+static int ath10k_ahb_clock_enable(struct ath10k *ar)
+{
+ struct ath10k_ahb *ar_ahb = ath10k_ahb_priv(ar);
+ int ret;
+
+ if (IS_ERR_OR_NULL(ar_ahb->cmd_clk) ||
+ IS_ERR_OR_NULL(ar_ahb->ref_clk) ||
+ IS_ERR_OR_NULL(ar_ahb->rtc_clk)) {
+ ath10k_err(ar, "clock(s) is/are not initialized\n");
+ ret = -EIO;
+ goto out;
+ }
+
+ ret = clk_prepare_enable(ar_ahb->cmd_clk);
+ if (ret) {
+ ath10k_err(ar, "failed to enable cmd clk: %d\n", ret);
+ goto out;
+ }
+
+ ret = clk_prepare_enable(ar_ahb->ref_clk);
+ if (ret) {
+ ath10k_err(ar, "failed to enable ref clk: %d\n", ret);
+ goto err_cmd_clk_disable;
+ }
+
+ ret = clk_prepare_enable(ar_ahb->rtc_clk);
+ if (ret) {
+ ath10k_err(ar, "failed to enable rtc clk: %d\n", ret);
+ goto err_ref_clk_disable;
+ }
+
+ return 0;
+
+err_ref_clk_disable:
+ clk_disable_unprepare(ar_ahb->ref_clk);
+
+err_cmd_clk_disable:
+ clk_disable_unprepare(ar_ahb->cmd_clk);
+
+out:
+ return ret;
+}
+
+static void ath10k_ahb_clock_disable(struct ath10k *ar)
+{
+ struct ath10k_ahb *ar_ahb = ath10k_ahb_priv(ar);
+
+ clk_disable_unprepare(ar_ahb->cmd_clk);
+
+ clk_disable_unprepare(ar_ahb->ref_clk);
+
+ clk_disable_unprepare(ar_ahb->rtc_clk);
+}
+
+static int ath10k_ahb_rst_ctrl_init(struct ath10k *ar)
+{
+ struct ath10k_ahb *ar_ahb = ath10k_ahb_priv(ar);
+ struct device *dev;
+
+ dev = &ar_ahb->pdev->dev;
+
+ ar_ahb->core_cold_rst = devm_reset_control_get_exclusive(dev,
+ "wifi_core_cold");
+ if (IS_ERR(ar_ahb->core_cold_rst)) {
+ ath10k_err(ar, "failed to get core cold rst ctrl: %ld\n",
+ PTR_ERR(ar_ahb->core_cold_rst));
+ return PTR_ERR(ar_ahb->core_cold_rst);
+ }
+
+ ar_ahb->radio_cold_rst = devm_reset_control_get_exclusive(dev,
+ "wifi_radio_cold");
+ if (IS_ERR(ar_ahb->radio_cold_rst)) {
+ ath10k_err(ar, "failed to get radio cold rst ctrl: %ld\n",
+ PTR_ERR(ar_ahb->radio_cold_rst));
+ return PTR_ERR(ar_ahb->radio_cold_rst);
+ }
+
+ ar_ahb->radio_warm_rst = devm_reset_control_get_exclusive(dev,
+ "wifi_radio_warm");
+ if (IS_ERR(ar_ahb->radio_warm_rst)) {
+ ath10k_err(ar, "failed to get radio warm rst ctrl: %ld\n",
+ PTR_ERR(ar_ahb->radio_warm_rst));
+ return PTR_ERR(ar_ahb->radio_warm_rst);
+ }
+
+ ar_ahb->radio_srif_rst = devm_reset_control_get_exclusive(dev,
+ "wifi_radio_srif");
+ if (IS_ERR(ar_ahb->radio_srif_rst)) {
+ ath10k_err(ar, "failed to get radio srif rst ctrl: %ld\n",
+ PTR_ERR(ar_ahb->radio_srif_rst));
+ return PTR_ERR(ar_ahb->radio_srif_rst);
+ }
+
+ ar_ahb->cpu_init_rst = devm_reset_control_get_exclusive(dev,
+ "wifi_cpu_init");
+ if (IS_ERR(ar_ahb->cpu_init_rst)) {
+ ath10k_err(ar, "failed to get cpu init rst ctrl: %ld\n",
+ PTR_ERR(ar_ahb->cpu_init_rst));
+ return PTR_ERR(ar_ahb->cpu_init_rst);
+ }
+
+ return 0;
+}
+
+static void ath10k_ahb_rst_ctrl_deinit(struct ath10k *ar)
+{
+ struct ath10k_ahb *ar_ahb = ath10k_ahb_priv(ar);
+
+ ar_ahb->core_cold_rst = NULL;
+ ar_ahb->radio_cold_rst = NULL;
+ ar_ahb->radio_warm_rst = NULL;
+ ar_ahb->radio_srif_rst = NULL;
+ ar_ahb->cpu_init_rst = NULL;
+}
+
+static int ath10k_ahb_release_reset(struct ath10k *ar)
+{
+ struct ath10k_ahb *ar_ahb = ath10k_ahb_priv(ar);
+ int ret;
+
+ if (IS_ERR_OR_NULL(ar_ahb->radio_cold_rst) ||
+ IS_ERR_OR_NULL(ar_ahb->radio_warm_rst) ||
+ IS_ERR_OR_NULL(ar_ahb->radio_srif_rst) ||
+ IS_ERR_OR_NULL(ar_ahb->cpu_init_rst)) {
+ ath10k_err(ar, "rst ctrl(s) is/are not initialized\n");
+ return -EINVAL;
+ }
+
+ ret = reset_control_deassert(ar_ahb->radio_cold_rst);
+ if (ret) {
+ ath10k_err(ar, "failed to deassert radio cold rst: %d\n", ret);
+ return ret;
+ }
+
+ ret = reset_control_deassert(ar_ahb->radio_warm_rst);
+ if (ret) {
+ ath10k_err(ar, "failed to deassert radio warm rst: %d\n", ret);
+ return ret;
+ }
+
+ ret = reset_control_deassert(ar_ahb->radio_srif_rst);
+ if (ret) {
+ ath10k_err(ar, "failed to deassert radio srif rst: %d\n", ret);
+ return ret;
+ }
+
+ ret = reset_control_deassert(ar_ahb->cpu_init_rst);
+ if (ret) {
+ ath10k_err(ar, "failed to deassert cpu init rst: %d\n", ret);
+ return ret;
+ }
+
+ return 0;
+}
+
+static void ath10k_ahb_halt_axi_bus(struct ath10k *ar, u32 haltreq_reg,
+ u32 haltack_reg)
+{
+ unsigned long timeout;
+ u32 val;
+
+ /* Issue halt axi bus request */
+ val = ath10k_ahb_tcsr_read32(ar, haltreq_reg);
+ val |= AHB_AXI_BUS_HALT_REQ;
+ ath10k_ahb_tcsr_write32(ar, haltreq_reg, val);
+
+ /* Wait for axi bus halted ack */
+ timeout = jiffies + msecs_to_jiffies(ATH10K_AHB_AXI_BUS_HALT_TIMEOUT);
+ do {
+ val = ath10k_ahb_tcsr_read32(ar, haltack_reg);
+ if (val & AHB_AXI_BUS_HALT_ACK)
+ break;
+
+ mdelay(1);
+ } while (time_before(jiffies, timeout));
+
+ if (!(val & AHB_AXI_BUS_HALT_ACK)) {
+ ath10k_err(ar, "failed to halt axi bus: %d\n", val);
+ return;
+ }
+
+ ath10k_dbg(ar, ATH10K_DBG_AHB, "axi bus halted\n");
+}
+
+static void ath10k_ahb_halt_chip(struct ath10k *ar)
+{
+ struct ath10k_ahb *ar_ahb = ath10k_ahb_priv(ar);
+ u32 core_id, glb_cfg_reg, haltreq_reg, haltack_reg;
+ u32 val;
+ int ret;
+
+ if (IS_ERR_OR_NULL(ar_ahb->core_cold_rst) ||
+ IS_ERR_OR_NULL(ar_ahb->radio_cold_rst) ||
+ IS_ERR_OR_NULL(ar_ahb->radio_warm_rst) ||
+ IS_ERR_OR_NULL(ar_ahb->radio_srif_rst) ||
+ IS_ERR_OR_NULL(ar_ahb->cpu_init_rst)) {
+ ath10k_err(ar, "rst ctrl(s) is/are not initialized\n");
+ return;
+ }
+
+ core_id = ath10k_ahb_read32(ar, ATH10K_AHB_WLAN_CORE_ID_REG);
+
+ switch (core_id) {
+ case 0:
+ glb_cfg_reg = ATH10K_AHB_TCSR_WIFI0_GLB_CFG;
+ haltreq_reg = ATH10K_AHB_TCSR_WCSS0_HALTREQ;
+ haltack_reg = ATH10K_AHB_TCSR_WCSS0_HALTACK;
+ break;
+ case 1:
+ glb_cfg_reg = ATH10K_AHB_TCSR_WIFI1_GLB_CFG;
+ haltreq_reg = ATH10K_AHB_TCSR_WCSS1_HALTREQ;
+ haltack_reg = ATH10K_AHB_TCSR_WCSS1_HALTACK;
+ break;
+ default:
+ ath10k_err(ar, "invalid core id %d found, skipping reset sequence\n",
+ core_id);
+ return;
+ }
+
+ ath10k_ahb_halt_axi_bus(ar, haltreq_reg, haltack_reg);
+
+ val = ath10k_ahb_tcsr_read32(ar, glb_cfg_reg);
+ val |= TCSR_WIFIX_GLB_CFG_DISABLE_CORE_CLK;
+ ath10k_ahb_tcsr_write32(ar, glb_cfg_reg, val);
+
+ ret = reset_control_assert(ar_ahb->core_cold_rst);
+ if (ret)
+ ath10k_err(ar, "failed to assert core cold rst: %d\n", ret);
+ msleep(1);
+
+ ret = reset_control_assert(ar_ahb->radio_cold_rst);
+ if (ret)
+ ath10k_err(ar, "failed to assert radio cold rst: %d\n", ret);
+ msleep(1);
+
+ ret = reset_control_assert(ar_ahb->radio_warm_rst);
+ if (ret)
+ ath10k_err(ar, "failed to assert radio warm rst: %d\n", ret);
+ msleep(1);
+
+ ret = reset_control_assert(ar_ahb->radio_srif_rst);
+ if (ret)
+ ath10k_err(ar, "failed to assert radio srif rst: %d\n", ret);
+ msleep(1);
+
+ ret = reset_control_assert(ar_ahb->cpu_init_rst);
+ if (ret)
+ ath10k_err(ar, "failed to assert cpu init rst: %d\n", ret);
+ msleep(10);
+
+ /* Clear halt req and core clock disable req before
+ * deasserting wifi core reset.
+ */
+ val = ath10k_ahb_tcsr_read32(ar, haltreq_reg);
+ val &= ~AHB_AXI_BUS_HALT_REQ;
+ ath10k_ahb_tcsr_write32(ar, haltreq_reg, val);
+
+ val = ath10k_ahb_tcsr_read32(ar, glb_cfg_reg);
+ val &= ~TCSR_WIFIX_GLB_CFG_DISABLE_CORE_CLK;
+ ath10k_ahb_tcsr_write32(ar, glb_cfg_reg, val);
+
+ ret = reset_control_deassert(ar_ahb->core_cold_rst);
+ if (ret)
+ ath10k_err(ar, "failed to deassert core cold rst: %d\n", ret);
+
+ ath10k_dbg(ar, ATH10K_DBG_AHB, "core %d reset done\n", core_id);
+}
+
+static irqreturn_t ath10k_ahb_interrupt_handler(int irq, void *arg)
+{
+ struct ath10k *ar = arg;
+
+ if (!ath10k_pci_irq_pending(ar))
+ return IRQ_NONE;
+
+ ath10k_pci_disable_and_clear_legacy_irq(ar);
+ ath10k_pci_irq_msi_fw_mask(ar);
+ napi_schedule(&ar->napi);
+
+ return IRQ_HANDLED;
+}
+
+static int ath10k_ahb_request_irq_legacy(struct ath10k *ar)
+{
+ struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
+ struct ath10k_ahb *ar_ahb = ath10k_ahb_priv(ar);
+ int ret;
+
+ ret = request_irq(ar_ahb->irq,
+ ath10k_ahb_interrupt_handler,
+ IRQF_SHARED, "ath10k_ahb", ar);
+ if (ret) {
+ ath10k_warn(ar, "failed to request legacy irq %d: %d\n",
+ ar_ahb->irq, ret);
+ return ret;
+ }
+ ar_pci->oper_irq_mode = ATH10K_PCI_IRQ_LEGACY;
+
+ return 0;
+}
+
+static void ath10k_ahb_release_irq_legacy(struct ath10k *ar)
+{
+ struct ath10k_ahb *ar_ahb = ath10k_ahb_priv(ar);
+
+ free_irq(ar_ahb->irq, ar);
+}
+
+static void ath10k_ahb_irq_disable(struct ath10k *ar)
+{
+ ath10k_ce_disable_interrupts(ar);
+ ath10k_pci_disable_and_clear_legacy_irq(ar);
+}
+
+static int ath10k_ahb_resource_init(struct ath10k *ar)
+{
+ struct ath10k_ahb *ar_ahb = ath10k_ahb_priv(ar);
+ struct platform_device *pdev;
+ struct resource *res;
+ int ret;
+
+ pdev = ar_ahb->pdev;
+
+ res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ if (!res) {
+ ath10k_err(ar, "failed to get memory resource\n");
+ ret = -ENXIO;
+ goto out;
+ }
+
+ ar_ahb->mem = devm_ioremap_resource(&pdev->dev, res);
+ if (IS_ERR(ar_ahb->mem)) {
+ ath10k_err(ar, "mem ioremap error\n");
+ ret = PTR_ERR(ar_ahb->mem);
+ goto out;
+ }
+
+ ar_ahb->mem_len = resource_size(res);
+
+ ar_ahb->gcc_mem = ioremap_nocache(ATH10K_GCC_REG_BASE,
+ ATH10K_GCC_REG_SIZE);
+ if (!ar_ahb->gcc_mem) {
+ ath10k_err(ar, "gcc mem ioremap error\n");
+ ret = -ENOMEM;
+ goto err_mem_unmap;
+ }
+
+ ar_ahb->tcsr_mem = ioremap_nocache(ATH10K_TCSR_REG_BASE,
+ ATH10K_TCSR_REG_SIZE);
+ if (!ar_ahb->tcsr_mem) {
+ ath10k_err(ar, "tcsr mem ioremap error\n");
+ ret = -ENOMEM;
+ goto err_gcc_mem_unmap;
+ }
+
+ ret = dma_set_mask(&pdev->dev, DMA_BIT_MASK(32));
+ if (ret) {
+ ath10k_err(ar, "failed to set 32-bit dma mask: %d\n", ret);
+ goto err_tcsr_mem_unmap;
+ }
+
+ ret = dma_set_coherent_mask(&pdev->dev, DMA_BIT_MASK(32));
+ if (ret) {
+ ath10k_err(ar, "failed to set 32-bit consistent dma: %d\n",
+ ret);
+ goto err_tcsr_mem_unmap;
+ }
+
+ ret = ath10k_ahb_clock_init(ar);
+ if (ret)
+ goto err_tcsr_mem_unmap;
+
+ ret = ath10k_ahb_rst_ctrl_init(ar);
+ if (ret)
+ goto err_clock_deinit;
+
+ ar_ahb->irq = platform_get_irq_byname(pdev, "legacy");
+ if (ar_ahb->irq < 0) {
+ ath10k_err(ar, "failed to get irq number: %d\n", ar_ahb->irq);
+ ret = ar_ahb->irq;
+ goto err_clock_deinit;
+ }
+
+ ath10k_dbg(ar, ATH10K_DBG_BOOT, "irq: %d\n", ar_ahb->irq);
+
+ ath10k_dbg(ar, ATH10K_DBG_BOOT, "mem: 0x%pK mem_len: %lu gcc mem: 0x%pK tcsr_mem: 0x%pK\n",
+ ar_ahb->mem, ar_ahb->mem_len,
+ ar_ahb->gcc_mem, ar_ahb->tcsr_mem);
+ return 0;
+
+err_clock_deinit:
+ ath10k_ahb_clock_deinit(ar);
+
+err_tcsr_mem_unmap:
+ iounmap(ar_ahb->tcsr_mem);
+
+err_gcc_mem_unmap:
+ ar_ahb->tcsr_mem = NULL;
+ iounmap(ar_ahb->gcc_mem);
+
+err_mem_unmap:
+ ar_ahb->gcc_mem = NULL;
+ devm_iounmap(&pdev->dev, ar_ahb->mem);
+
+out:
+ ar_ahb->mem = NULL;
+ return ret;
+}
+
+static void ath10k_ahb_resource_deinit(struct ath10k *ar)
+{
+ struct ath10k_ahb *ar_ahb = ath10k_ahb_priv(ar);
+ struct device *dev;
+
+ dev = &ar_ahb->pdev->dev;
+
+ if (ar_ahb->mem)
+ devm_iounmap(dev, ar_ahb->mem);
+
+ if (ar_ahb->gcc_mem)
+ iounmap(ar_ahb->gcc_mem);
+
+ if (ar_ahb->tcsr_mem)
+ iounmap(ar_ahb->tcsr_mem);
+
+ ar_ahb->mem = NULL;
+ ar_ahb->gcc_mem = NULL;
+ ar_ahb->tcsr_mem = NULL;
+
+ ath10k_ahb_clock_deinit(ar);
+ ath10k_ahb_rst_ctrl_deinit(ar);
+}
+
+static int ath10k_ahb_prepare_device(struct ath10k *ar)
+{
+ u32 val;
+ int ret;
+
+ ret = ath10k_ahb_clock_enable(ar);
+ if (ret) {
+ ath10k_err(ar, "failed to enable clocks\n");
+ return ret;
+ }
+
+ /* The clock for the target is supplied from outside the target
+ * (i.e. an external clock module controlled by the host). The
+ * target needs to know the frequency at which its CPU is
+ * configured, for target-internal use. Read the target CPU
+ * frequency info from the GCC register and write it into the
+ * target's scratch register, where the target expects it.
+ */
+ val = ath10k_ahb_gcc_read32(ar, ATH10K_AHB_GCC_FEPLL_PLL_DIV);
+ ath10k_ahb_write32(ar, ATH10K_AHB_WIFI_SCRATCH_5_REG, val);
+
+ ret = ath10k_ahb_release_reset(ar);
+ if (ret)
+ goto err_clk_disable;
+
+ ath10k_ahb_irq_disable(ar);
+
+ ath10k_ahb_write32(ar, FW_INDICATOR_ADDRESS, FW_IND_HOST_READY);
+
+ ret = ath10k_pci_wait_for_target_init(ar);
+ if (ret)
+ goto err_halt_chip;
+
+ return 0;
+
+err_halt_chip:
+ ath10k_ahb_halt_chip(ar);
+
+err_clk_disable:
+ ath10k_ahb_clock_disable(ar);
+
+ return ret;
+}
+
+static int ath10k_ahb_chip_reset(struct ath10k *ar)
+{
+ int ret;
+
+ ath10k_ahb_halt_chip(ar);
+ ath10k_ahb_clock_disable(ar);
+
+ ret = ath10k_ahb_prepare_device(ar);
+ if (ret)
+ return ret;
+
+ return 0;
+}
+
+static int ath10k_ahb_wake_target_cpu(struct ath10k *ar)
+{
+ u32 addr, val;
+
+ addr = SOC_CORE_BASE_ADDRESS | CORE_CTRL_ADDRESS;
+ val = ath10k_ahb_read32(ar, addr);
+ val |= ATH10K_AHB_CORE_CTRL_CPU_INTR_MASK;
+ ath10k_ahb_write32(ar, addr, val);
+
+ return 0;
+}
+
+static int ath10k_ahb_hif_start(struct ath10k *ar)
+{
+ ath10k_dbg(ar, ATH10K_DBG_BOOT, "boot ahb hif start\n");
+
+ napi_enable(&ar->napi);
+ ath10k_ce_enable_interrupts(ar);
+ ath10k_pci_enable_legacy_irq(ar);
+
+ ath10k_pci_rx_post(ar);
+
+ return 0;
+}
+
+static void ath10k_ahb_hif_stop(struct ath10k *ar)
+{
+ struct ath10k_ahb *ar_ahb = ath10k_ahb_priv(ar);
+
+ ath10k_dbg(ar, ATH10K_DBG_BOOT, "boot ahb hif stop\n");
+
+ ath10k_ahb_irq_disable(ar);
+ synchronize_irq(ar_ahb->irq);
+
+ napi_synchronize(&ar->napi);
+ napi_disable(&ar->napi);
+
+ ath10k_pci_flush(ar);
+}
+
+static int ath10k_ahb_hif_power_up(struct ath10k *ar)
+{
+ int ret;
+
+ ath10k_dbg(ar, ATH10K_DBG_BOOT, "boot ahb hif power up\n");
+
+ ret = ath10k_ahb_chip_reset(ar);
+ if (ret) {
+ ath10k_err(ar, "failed to reset chip: %d\n", ret);
+ goto out;
+ }
+
+ ret = ath10k_pci_init_pipes(ar);
+ if (ret) {
+ ath10k_err(ar, "failed to initialize CE: %d\n", ret);
+ goto out;
+ }
+
+ ret = ath10k_pci_init_config(ar);
+ if (ret) {
+ ath10k_err(ar, "failed to setup init config: %d\n", ret);
+ goto err_ce_deinit;
+ }
+
+ ret = ath10k_ahb_wake_target_cpu(ar);
+ if (ret) {
+ ath10k_err(ar, "could not wake up target CPU: %d\n", ret);
+ goto err_ce_deinit;
+ }
+
+ return 0;
+
+err_ce_deinit:
+ ath10k_pci_ce_deinit(ar);
+out:
+ return ret;
+}
+
+static u32 ath10k_ahb_qca4019_targ_cpu_to_ce_addr(struct ath10k *ar, u32 addr)
+{
+ u32 val = 0, region = addr & 0xfffff;
+
+ val = ath10k_pci_read32(ar, PCIE_BAR_REG_ADDRESS);
+
+ if (region >= QCA4019_SRAM_ADDR && region <=
+ (QCA4019_SRAM_ADDR + QCA4019_SRAM_LEN)) {
+ /* SRAM contents for QCA4019 can be directly accessed and
+ * no conversions are required
+ */
+ val |= region;
+ } else {
+ val |= 0x100000 | region;
+ }
+
+ return val;
+}
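+
+/*
+ * Worked example (editorial note, not part of the original patch): with
+ * addr = 0x000c4000 the masked region 0xc4000 lies inside the QCA4019
+ * SRAM window and is passed through unchanged (BAR | 0xc4000), while a
+ * target register address such as 0x00030000 falls outside it and is
+ * translated to BAR | 0x130000.
+ */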
+
+static const struct ath10k_hif_ops ath10k_ahb_hif_ops = {
+ .tx_sg = ath10k_pci_hif_tx_sg,
+ .diag_read = ath10k_pci_hif_diag_read,
+ .diag_write = ath10k_pci_diag_write_mem,
+ .exchange_bmi_msg = ath10k_pci_hif_exchange_bmi_msg,
+ .start = ath10k_ahb_hif_start,
+ .stop = ath10k_ahb_hif_stop,
+ .map_service_to_pipe = ath10k_pci_hif_map_service_to_pipe,
+ .get_default_pipe = ath10k_pci_hif_get_default_pipe,
+ .send_complete_check = ath10k_pci_hif_send_complete_check,
+ .get_free_queue_number = ath10k_pci_hif_get_free_queue_number,
+ .power_up = ath10k_ahb_hif_power_up,
+ .power_down = ath10k_pci_hif_power_down,
+ .read32 = ath10k_ahb_read32,
+ .write32 = ath10k_ahb_write32,
+};
+
+static const struct ath10k_bus_ops ath10k_ahb_bus_ops = {
+ .read32 = ath10k_ahb_read32,
+ .write32 = ath10k_ahb_write32,
+ .get_num_banks = ath10k_ahb_get_num_banks,
+};
+
+static int ath10k_ahb_probe(struct platform_device *pdev)
+{
+ struct ath10k *ar;
+ struct ath10k_ahb *ar_ahb;
+ struct ath10k_pci *ar_pci;
+ const struct of_device_id *of_id;
+ enum ath10k_hw_rev hw_rev;
+ size_t size;
+ int ret;
+ u32 chip_id;
+
+ of_id = of_match_device(ath10k_ahb_of_match, &pdev->dev);
+ if (!of_id) {
+ dev_err(&pdev->dev, "failed to find matching device tree id\n");
+ return -EINVAL;
+ }
+
+ hw_rev = (enum ath10k_hw_rev)of_id->data;
+
+ size = sizeof(*ar_pci) + sizeof(*ar_ahb);
+ ar = ath10k_core_create(size, &pdev->dev, ATH10K_BUS_AHB,
+ hw_rev, &ath10k_ahb_hif_ops);
+ if (!ar) {
+ dev_err(&pdev->dev, "failed to allocate core\n");
+ return -ENOMEM;
+ }
+
+ ath10k_dbg(ar, ATH10K_DBG_BOOT, "ahb probe\n");
+
+ ar_pci = ath10k_pci_priv(ar);
+ ar_ahb = ath10k_ahb_priv(ar);
+
+ ar_ahb->pdev = pdev;
+ platform_set_drvdata(pdev, ar);
+
+ ret = ath10k_ahb_resource_init(ar);
+ if (ret)
+ goto err_core_destroy;
+
+ ar->dev_id = 0;
+ ar_pci->mem = ar_ahb->mem;
+ ar_pci->mem_len = ar_ahb->mem_len;
+ ar_pci->ar = ar;
+ ar_pci->ce.bus_ops = &ath10k_ahb_bus_ops;
+ ar_pci->targ_cpu_to_ce_addr = ath10k_ahb_qca4019_targ_cpu_to_ce_addr;
+ ar->ce_priv = &ar_pci->ce;
+
+ ret = ath10k_pci_setup_resource(ar);
+ if (ret) {
+ ath10k_err(ar, "failed to setup resource: %d\n", ret);
+ goto err_resource_deinit;
+ }
+
+ ath10k_pci_init_napi(ar);
+
+ ret = ath10k_ahb_request_irq_legacy(ar);
+ if (ret)
+ goto err_free_pipes;
+
+ ret = ath10k_ahb_prepare_device(ar);
+ if (ret)
+ goto err_free_irq;
+
+ ath10k_pci_ce_deinit(ar);
+
+ chip_id = ath10k_ahb_soc_read32(ar, SOC_CHIP_ID_ADDRESS);
+ if (chip_id == 0xffffffff) {
+ ath10k_err(ar, "failed to get chip id\n");
+ ret = -ENODEV;
+ goto err_halt_device;
+ }
+
+ ret = ath10k_core_register(ar, chip_id);
+ if (ret) {
+ ath10k_err(ar, "failed to register driver core: %d\n", ret);
+ goto err_halt_device;
+ }
+
+ return 0;
+
+err_halt_device:
+ ath10k_ahb_halt_chip(ar);
+ ath10k_ahb_clock_disable(ar);
+
+err_free_irq:
+ ath10k_ahb_release_irq_legacy(ar);
+
+err_free_pipes:
+ ath10k_pci_free_pipes(ar);
+
+err_resource_deinit:
+ ath10k_ahb_resource_deinit(ar);
+
+err_core_destroy:
+ ath10k_core_destroy(ar);
+ platform_set_drvdata(pdev, NULL);
+
+ return ret;
+}
+
+static int ath10k_ahb_remove(struct platform_device *pdev)
+{
+ struct ath10k *ar = platform_get_drvdata(pdev);
+ struct ath10k_ahb *ar_ahb;
+
+ if (!ar)
+ return -EINVAL;
+
+ ar_ahb = ath10k_ahb_priv(ar);
+
+ if (!ar_ahb)
+ return -EINVAL;
+
+ ath10k_dbg(ar, ATH10K_DBG_AHB, "ahb remove\n");
+
+ ath10k_core_unregister(ar);
+ ath10k_ahb_irq_disable(ar);
+ ath10k_ahb_release_irq_legacy(ar);
+ ath10k_pci_release_resource(ar);
+ ath10k_ahb_halt_chip(ar);
+ ath10k_ahb_clock_disable(ar);
+ ath10k_ahb_resource_deinit(ar);
+ ath10k_core_destroy(ar);
+
+ platform_set_drvdata(pdev, NULL);
+
+ return 0;
+}
+
+static struct platform_driver ath10k_ahb_driver = {
+ .driver = {
+ .name = "ath10k_ahb",
+ .of_match_table = ath10k_ahb_of_match,
+ },
+ .probe = ath10k_ahb_probe,
+ .remove = ath10k_ahb_remove,
+};
+
+int ath10k_ahb_init(void)
+{
+ int ret;
+
+ ret = platform_driver_register(&ath10k_ahb_driver);
+ if (ret)
+ printk(KERN_ERR "failed to register ath10k ahb driver: %d\n",
+ ret);
+ return ret;
+}
+
+void ath10k_ahb_exit(void)
+{
+ platform_driver_unregister(&ath10k_ahb_driver);
+}
diff --git a/drivers/net/wireless/ath/ath10k/ahb.h b/drivers/net/wireless/ath/ath10k/ahb.h
new file mode 100644
index 000000000..d43e37521
--- /dev/null
+++ b/drivers/net/wireless/ath/ath10k/ahb.h
@@ -0,0 +1,87 @@
+/*
+ * Copyright (c) 2016 Qualcomm Atheros, Inc. All rights reserved.
+ * Copyright (c) 2015 The Linux Foundation. All rights reserved.
+ *
+ * Permission to use, copy, modify, and/or distribute this software for any
+ * purpose with or without fee is hereby granted, provided that the above
+ * copyright notice and this permission notice appear in all copies.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
+ * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
+ * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+ * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+ * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
+ * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+ */
+
+#ifndef _AHB_H_
+#define _AHB_H_
+
+#include <linux/platform_device.h>
+
+struct ath10k_ahb {
+ struct platform_device *pdev;
+ void __iomem *mem;
+ unsigned long mem_len;
+ void __iomem *gcc_mem;
+ void __iomem *tcsr_mem;
+
+ int irq;
+
+ struct clk *cmd_clk;
+ struct clk *ref_clk;
+ struct clk *rtc_clk;
+
+ struct reset_control *core_cold_rst;
+ struct reset_control *radio_cold_rst;
+ struct reset_control *radio_warm_rst;
+ struct reset_control *radio_srif_rst;
+ struct reset_control *cpu_init_rst;
+};
+
+#ifdef CONFIG_ATH10K_AHB
+
+#define ATH10K_GCC_REG_BASE 0x1800000
+#define ATH10K_GCC_REG_SIZE 0x60000
+
+#define ATH10K_TCSR_REG_BASE 0x1900000
+#define ATH10K_TCSR_REG_SIZE 0x80000
+
+#define ATH10K_AHB_GCC_FEPLL_PLL_DIV 0x2f020
+#define ATH10K_AHB_WIFI_SCRATCH_5_REG 0x4f014
+
+#define ATH10K_AHB_WLAN_CORE_ID_REG 0x82030
+
+#define ATH10K_AHB_TCSR_WIFI0_GLB_CFG 0x49000
+#define ATH10K_AHB_TCSR_WIFI1_GLB_CFG 0x49004
+#define TCSR_WIFIX_GLB_CFG_DISABLE_CORE_CLK BIT(25)
+
+#define ATH10K_AHB_TCSR_WCSS0_HALTREQ 0x52000
+#define ATH10K_AHB_TCSR_WCSS1_HALTREQ 0x52010
+#define ATH10K_AHB_TCSR_WCSS0_HALTACK 0x52004
+#define ATH10K_AHB_TCSR_WCSS1_HALTACK 0x52014
+
+#define ATH10K_AHB_AXI_BUS_HALT_TIMEOUT 10 /* msec */
+#define AHB_AXI_BUS_HALT_REQ 1
+#define AHB_AXI_BUS_HALT_ACK 1
+
+#define ATH10K_AHB_CORE_CTRL_CPU_INTR_MASK 1
+
+int ath10k_ahb_init(void);
+void ath10k_ahb_exit(void);
+
+#else /* CONFIG_ATH10K_AHB */
+
+static inline int ath10k_ahb_init(void)
+{
+ return 0;
+}
+
+static inline void ath10k_ahb_exit(void)
+{
+}
+
+#endif /* CONFIG_ATH10K_AHB */
+
+#endif /* _AHB_H_ */
diff --git a/drivers/net/wireless/ath/ath10k/bmi.c b/drivers/net/wireless/ath/ath10k/bmi.c
new file mode 100644
index 000000000..af4978d6a
--- /dev/null
+++ b/drivers/net/wireless/ath/ath10k/bmi.c
@@ -0,0 +1,461 @@
+/*
+ * Copyright (c) 2005-2011 Atheros Communications Inc.
+ * Copyright (c) 2011-2014,2016-2017 Qualcomm Atheros, Inc.
+ *
+ * Permission to use, copy, modify, and/or distribute this software for any
+ * purpose with or without fee is hereby granted, provided that the above
+ * copyright notice and this permission notice appear in all copies.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
+ * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
+ * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+ * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+ * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
+ * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+ */
+
+#include "bmi.h"
+#include "hif.h"
+#include "debug.h"
+#include "htc.h"
+#include "hw.h"
+
+void ath10k_bmi_start(struct ath10k *ar)
+{
+ int ret;
+
+ ath10k_dbg(ar, ATH10K_DBG_BMI, "bmi start\n");
+
+ ar->bmi.done_sent = false;
+
+ /* Enable hardware clock to speed up firmware download */
+ if (ar->hw_params.hw_ops->enable_pll_clk) {
+ ret = ar->hw_params.hw_ops->enable_pll_clk(ar);
+ ath10k_dbg(ar, ATH10K_DBG_BMI, "bmi enable pll ret %d\n", ret);
+ }
+}
+
+int ath10k_bmi_done(struct ath10k *ar)
+{
+ struct bmi_cmd cmd;
+ u32 cmdlen = sizeof(cmd.id) + sizeof(cmd.done);
+ int ret;
+
+ ath10k_dbg(ar, ATH10K_DBG_BMI, "bmi done\n");
+
+ if (ar->bmi.done_sent) {
+ ath10k_dbg(ar, ATH10K_DBG_BMI, "bmi skipped\n");
+ return 0;
+ }
+
+ ar->bmi.done_sent = true;
+ cmd.id = __cpu_to_le32(BMI_DONE);
+
+ ret = ath10k_hif_exchange_bmi_msg(ar, &cmd, cmdlen, NULL, NULL);
+ if (ret) {
+ ath10k_warn(ar, "unable to write to the device: %d\n", ret);
+ return ret;
+ }
+
+ return 0;
+}
+
+int ath10k_bmi_get_target_info(struct ath10k *ar,
+ struct bmi_target_info *target_info)
+{
+ struct bmi_cmd cmd;
+ union bmi_resp resp;
+ u32 cmdlen = sizeof(cmd.id) + sizeof(cmd.get_target_info);
+ u32 resplen = sizeof(resp.get_target_info);
+ int ret;
+
+ ath10k_dbg(ar, ATH10K_DBG_BMI, "bmi get target info\n");
+
+ if (ar->bmi.done_sent) {
+ ath10k_warn(ar, "BMI Get Target Info Command disallowed\n");
+ return -EBUSY;
+ }
+
+ cmd.id = __cpu_to_le32(BMI_GET_TARGET_INFO);
+
+ ret = ath10k_hif_exchange_bmi_msg(ar, &cmd, cmdlen, &resp, &resplen);
+ if (ret) {
+ ath10k_warn(ar, "unable to get target info from device\n");
+ return ret;
+ }
+
+ if (resplen < sizeof(resp.get_target_info)) {
+ ath10k_warn(ar, "invalid get_target_info response length (%d)\n",
+ resplen);
+ return -EIO;
+ }
+
+ target_info->version = __le32_to_cpu(resp.get_target_info.version);
+ target_info->type = __le32_to_cpu(resp.get_target_info.type);
+
+ return 0;
+}
+
+#define TARGET_VERSION_SENTINAL 0xffffffffu
+
+int ath10k_bmi_get_target_info_sdio(struct ath10k *ar,
+ struct bmi_target_info *target_info)
+{
+ struct bmi_cmd cmd;
+ union bmi_resp resp;
+ u32 cmdlen = sizeof(cmd.id) + sizeof(cmd.get_target_info);
+ u32 resplen, ver_len;
+ __le32 tmp;
+ int ret;
+
+ ath10k_dbg(ar, ATH10K_DBG_BMI, "bmi get target info SDIO\n");
+
+ if (ar->bmi.done_sent) {
+ ath10k_warn(ar, "BMI Get Target Info Command disallowed\n");
+ return -EBUSY;
+ }
+
+ cmd.id = __cpu_to_le32(BMI_GET_TARGET_INFO);
+
+ /* Step 1: Read 4 bytes of the target info and check if it is
+ * the special sentinel version word or the first word in the
+ * version response.
+ */
+ resplen = sizeof(u32);
+ ret = ath10k_hif_exchange_bmi_msg(ar, &cmd, cmdlen, &tmp, &resplen);
+ if (ret) {
+ ath10k_warn(ar, "unable to read from device\n");
+ return ret;
+ }
+
+ /* Some SDIO boards have a special sentinel byte before the real
+ * version response.
+ */
+ if (__le32_to_cpu(tmp) == TARGET_VERSION_SENTINAL) {
+ /* Step 1b: Read the version length */
+ resplen = sizeof(u32);
+ ret = ath10k_hif_exchange_bmi_msg(ar, NULL, 0, &tmp,
+ &resplen);
+ if (ret) {
+ ath10k_warn(ar, "unable to read from device\n");
+ return ret;
+ }
+ }
+
+ ver_len = __le32_to_cpu(tmp);
+
+ /* Step 2: Check the target info length */
+ if (ver_len != sizeof(resp.get_target_info)) {
+ ath10k_warn(ar, "Unexpected target info len: %u. Expected: %zu\n",
+ ver_len, sizeof(resp.get_target_info));
+ return -EINVAL;
+ }
+
+ /* Step 3: Read the rest of the version response */
+ resplen = sizeof(resp.get_target_info) - sizeof(u32);
+ ret = ath10k_hif_exchange_bmi_msg(ar, NULL, 0,
+ &resp.get_target_info.version,
+ &resplen);
+ if (ret) {
+ ath10k_warn(ar, "unable to read from device\n");
+ return ret;
+ }
+
+ target_info->version = __le32_to_cpu(resp.get_target_info.version);
+ target_info->type = __le32_to_cpu(resp.get_target_info.type);
+
+ return 0;
+}
+
+int ath10k_bmi_read_memory(struct ath10k *ar,
+ u32 address, void *buffer, u32 length)
+{
+ struct bmi_cmd cmd;
+ union bmi_resp resp;
+ u32 cmdlen = sizeof(cmd.id) + sizeof(cmd.read_mem);
+ u32 rxlen;
+ int ret;
+
+ ath10k_dbg(ar, ATH10K_DBG_BMI, "bmi read address 0x%x length %d\n",
+ address, length);
+
+ if (ar->bmi.done_sent) {
+ ath10k_warn(ar, "command disallowed\n");
+ return -EBUSY;
+ }
+
+ while (length) {
+ rxlen = min_t(u32, length, BMI_MAX_DATA_SIZE);
+
+ cmd.id = __cpu_to_le32(BMI_READ_MEMORY);
+ cmd.read_mem.addr = __cpu_to_le32(address);
+ cmd.read_mem.len = __cpu_to_le32(rxlen);
+
+ ret = ath10k_hif_exchange_bmi_msg(ar, &cmd, cmdlen,
+ &resp, &rxlen);
+ if (ret) {
+ ath10k_warn(ar, "unable to read from the device (%d)\n",
+ ret);
+ return ret;
+ }
+
+ memcpy(buffer, resp.read_mem.payload, rxlen);
+ address += rxlen;
+ buffer += rxlen;
+ length -= rxlen;
+ }
+
+ return 0;
+}
+
+int ath10k_bmi_write_soc_reg(struct ath10k *ar, u32 address, u32 reg_val)
+{
+ struct bmi_cmd cmd;
+ u32 cmdlen = sizeof(cmd.id) + sizeof(cmd.write_soc_reg);
+ int ret;
+
+ ath10k_dbg(ar, ATH10K_DBG_BMI,
+ "bmi write soc register 0x%08x val 0x%08x\n",
+ address, reg_val);
+
+ if (ar->bmi.done_sent) {
+ ath10k_warn(ar, "bmi write soc register command in progress\n");
+ return -EBUSY;
+ }
+
+ cmd.id = __cpu_to_le32(BMI_WRITE_SOC_REGISTER);
+ cmd.write_soc_reg.addr = __cpu_to_le32(address);
+ cmd.write_soc_reg.value = __cpu_to_le32(reg_val);
+
+ ret = ath10k_hif_exchange_bmi_msg(ar, &cmd, cmdlen, NULL, NULL);
+ if (ret) {
+ ath10k_warn(ar, "Unable to write soc register to device: %d\n",
+ ret);
+ return ret;
+ }
+
+ return 0;
+}
+
+int ath10k_bmi_read_soc_reg(struct ath10k *ar, u32 address, u32 *reg_val)
+{
+ struct bmi_cmd cmd;
+ union bmi_resp resp;
+ u32 cmdlen = sizeof(cmd.id) + sizeof(cmd.read_soc_reg);
+ u32 resplen = sizeof(resp.read_soc_reg);
+ int ret;
+
+ ath10k_dbg(ar, ATH10K_DBG_BMI, "bmi read soc register 0x%08x\n",
+ address);
+
+ if (ar->bmi.done_sent) {
+ ath10k_warn(ar, "bmi read soc register command in progress\n");
+ return -EBUSY;
+ }
+
+ cmd.id = __cpu_to_le32(BMI_READ_SOC_REGISTER);
+ cmd.read_soc_reg.addr = __cpu_to_le32(address);
+
+ ret = ath10k_hif_exchange_bmi_msg(ar, &cmd, cmdlen, &resp, &resplen);
+ if (ret) {
+ ath10k_warn(ar, "Unable to read soc register from device: %d\n",
+ ret);
+ return ret;
+ }
+
+ *reg_val = __le32_to_cpu(resp.read_soc_reg.value);
+
+ ath10k_dbg(ar, ATH10K_DBG_BMI, "bmi read soc register value 0x%08x\n",
+ *reg_val);
+
+ return 0;
+}
+
+int ath10k_bmi_write_memory(struct ath10k *ar,
+ u32 address, const void *buffer, u32 length)
+{
+ struct bmi_cmd cmd;
+ u32 hdrlen = sizeof(cmd.id) + sizeof(cmd.write_mem);
+ u32 txlen;
+ int ret;
+
+ ath10k_dbg(ar, ATH10K_DBG_BMI, "bmi write address 0x%x length %d\n",
+ address, length);
+
+ if (ar->bmi.done_sent) {
+ ath10k_warn(ar, "command disallowed\n");
+ return -EBUSY;
+ }
+
+ while (length) {
+ txlen = min(length, BMI_MAX_DATA_SIZE - hdrlen);
+
+ /* copy before roundup to avoid reading beyond the buffer */
+ memcpy(cmd.write_mem.payload, buffer, txlen);
+ txlen = roundup(txlen, 4);
+
+ cmd.id = __cpu_to_le32(BMI_WRITE_MEMORY);
+ cmd.write_mem.addr = __cpu_to_le32(address);
+ cmd.write_mem.len = __cpu_to_le32(txlen);
+
+ ret = ath10k_hif_exchange_bmi_msg(ar, &cmd, hdrlen + txlen,
+ NULL, NULL);
+ if (ret) {
+ ath10k_warn(ar, "unable to write to the device (%d)\n",
+ ret);
+ return ret;
+ }
+
+ /* fixup roundup() so `length` zeroes out for last chunk */
+ txlen = min(txlen, length);
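+ /* Worked example (editorial note): for a final chunk with length = 5,
+ * roundup(5, 4) = 8 bytes are sent above, and the min() here clamps
+ * txlen back to 5 so that length reaches exactly zero and the loop
+ * terminates.
+ */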
+
+ address += txlen;
+ buffer += txlen;
+ length -= txlen;
+ }
+
+ return 0;
+}
+
+int ath10k_bmi_execute(struct ath10k *ar, u32 address, u32 param, u32 *result)
+{
+ struct bmi_cmd cmd;
+ union bmi_resp resp;
+ u32 cmdlen = sizeof(cmd.id) + sizeof(cmd.execute);
+ u32 resplen = sizeof(resp.execute);
+ int ret;
+
+ ath10k_dbg(ar, ATH10K_DBG_BMI, "bmi execute address 0x%x param 0x%x\n",
+ address, param);
+
+ if (ar->bmi.done_sent) {
+ ath10k_warn(ar, "command disallowed\n");
+ return -EBUSY;
+ }
+
+ cmd.id = __cpu_to_le32(BMI_EXECUTE);
+ cmd.execute.addr = __cpu_to_le32(address);
+ cmd.execute.param = __cpu_to_le32(param);
+
+ ret = ath10k_hif_exchange_bmi_msg(ar, &cmd, cmdlen, &resp, &resplen);
+ if (ret) {
+ ath10k_warn(ar, "unable to read from the device\n");
+ return ret;
+ }
+
+ if (resplen < sizeof(resp.execute)) {
+ ath10k_warn(ar, "invalid execute response length (%d)\n",
+ resplen);
+ return -EIO;
+ }
+
+ *result = __le32_to_cpu(resp.execute.result);
+
+ ath10k_dbg(ar, ATH10K_DBG_BMI, "bmi execute result 0x%x\n", *result);
+
+ return 0;
+}
+
+int ath10k_bmi_lz_data(struct ath10k *ar, const void *buffer, u32 length)
+{
+ struct bmi_cmd cmd;
+ u32 hdrlen = sizeof(cmd.id) + sizeof(cmd.lz_data);
+ u32 txlen;
+ int ret;
+
+ ath10k_dbg(ar, ATH10K_DBG_BMI, "bmi lz data buffer 0x%pK length %d\n",
+ buffer, length);
+
+ if (ar->bmi.done_sent) {
+ ath10k_warn(ar, "command disallowed\n");
+ return -EBUSY;
+ }
+
+ while (length) {
+ txlen = min(length, BMI_MAX_DATA_SIZE - hdrlen);
+
+ WARN_ON_ONCE(txlen & 3);
+
+ cmd.id = __cpu_to_le32(BMI_LZ_DATA);
+ cmd.lz_data.len = __cpu_to_le32(txlen);
+ memcpy(cmd.lz_data.payload, buffer, txlen);
+
+ ret = ath10k_hif_exchange_bmi_msg(ar, &cmd, hdrlen + txlen,
+ NULL, NULL);
+ if (ret) {
+ ath10k_warn(ar, "unable to write to the device\n");
+ return ret;
+ }
+
+ buffer += txlen;
+ length -= txlen;
+ }
+
+ return 0;
+}
+
+int ath10k_bmi_lz_stream_start(struct ath10k *ar, u32 address)
+{
+ struct bmi_cmd cmd;
+ u32 cmdlen = sizeof(cmd.id) + sizeof(cmd.lz_start);
+ int ret;
+
+ ath10k_dbg(ar, ATH10K_DBG_BMI, "bmi lz stream start address 0x%x\n",
+ address);
+
+ if (ar->bmi.done_sent) {
+ ath10k_warn(ar, "command disallowed\n");
+ return -EBUSY;
+ }
+
+ cmd.id = __cpu_to_le32(BMI_LZ_STREAM_START);
+ cmd.lz_start.addr = __cpu_to_le32(address);
+
+ ret = ath10k_hif_exchange_bmi_msg(ar, &cmd, cmdlen, NULL, NULL);
+ if (ret) {
+ ath10k_warn(ar, "unable to Start LZ Stream to the device\n");
+ return ret;
+ }
+
+ return 0;
+}
+
+int ath10k_bmi_fast_download(struct ath10k *ar,
+ u32 address, const void *buffer, u32 length)
+{
+ u8 trailer[4] = {};
+ u32 head_len = rounddown(length, 4);
+ u32 trailer_len = length - head_len;
+ int ret;
+
+ ath10k_dbg(ar, ATH10K_DBG_BMI,
+ "bmi fast download address 0x%x buffer 0x%pK length %d\n",
+ address, buffer, length);
+
+ ret = ath10k_bmi_lz_stream_start(ar, address);
+ if (ret)
+ return ret;
+
+ /* copy the last word into a zero padded buffer */
+ if (trailer_len > 0)
+ memcpy(trailer, buffer + head_len, trailer_len);
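+ /* Worked example (editorial note): length = 10 gives head_len = 8 and
+ * trailer_len = 2; the last two bytes are copied into trailer[] and
+ * zero-padded so a full 4-byte word can be streamed after the head.
+ */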
+
+ ret = ath10k_bmi_lz_data(ar, buffer, head_len);
+ if (ret)
+ return ret;
+
+ if (trailer_len > 0)
+ ret = ath10k_bmi_lz_data(ar, trailer, 4);
+
+ if (ret != 0)
+ return ret;
+
+ /*
+ * Close compressed stream and open a new (fake) one.
+ * This serves mainly to flush Target caches.
+ */
+ ret = ath10k_bmi_lz_stream_start(ar, 0x00);
+
+ return ret;
+}
diff --git a/drivers/net/wireless/ath/ath10k/bmi.h b/drivers/net/wireless/ath/ath10k/bmi.h
new file mode 100644
index 000000000..9a396817a
--- /dev/null
+++ b/drivers/net/wireless/ath/ath10k/bmi.h
@@ -0,0 +1,242 @@
+/*
+ * Copyright (c) 2005-2011 Atheros Communications Inc.
+ * Copyright (c) 2011-2015,2017 Qualcomm Atheros, Inc.
+ *
+ * Permission to use, copy, modify, and/or distribute this software for any
+ * purpose with or without fee is hereby granted, provided that the above
+ * copyright notice and this permission notice appear in all copies.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
+ * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
+ * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+ * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+ * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
+ * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+ */
+
+#ifndef _BMI_H_
+#define _BMI_H_
+
+#include "core.h"
+
+/*
+ * Bootloader Messaging Interface (BMI)
+ *
+ * BMI is a very simple messaging interface used during initialization
+ * to read memory, write memory, execute code, and to define an
+ * application entry PC.
+ *
+ * It is used to download an application to QCA988x, to provide
+ * patches to code that is already resident on QCA988x, and generally
+ * to examine and modify state. The Host has an opportunity to use
+ * BMI only once during bootup. Once the Host issues a BMI_DONE
+ * command, this opportunity ends.
+ *
+ * The Host writes BMI requests to mailbox0, and reads BMI responses
+ * from mailbox0. BMI requests all begin with a command
+ * (see below for specific commands), and are followed by
+ * command-specific data.
+ *
+ * Flow control:
+ * The Host can only issue a command once the Target gives it a
+ * "BMI Command Credit", using AR8K Counter #4. As soon as the
+ * Target has completed a command, it issues another BMI Command
+ * Credit (so the Host can issue the next command).
+ *
+ * BMI handles all required Target-side cache flushing.
+ */
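+
+/*
+ * Illustrative host-side BMI session (editorial sketch, not part of the
+ * original patch), using the helpers declared at the end of this header;
+ * fw_addr, fw_data and fw_len stand in for a real firmware image:
+ *
+ *     struct bmi_target_info info;
+ *     int ret;
+ *
+ *     ath10k_bmi_start(ar);
+ *     ret = ath10k_bmi_get_target_info(ar, &info);
+ *     if (!ret)
+ *             ret = ath10k_bmi_fast_download(ar, fw_addr, fw_data, fw_len);
+ *     if (!ret)
+ *             ret = ath10k_bmi_done(ar);
+ *
+ * After BMI_DONE the target refuses further BMI commands.
+ */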
+
+/* Maximum data size used for BMI transfers */
+#define BMI_MAX_DATA_SIZE 256
+
+/* len = cmd + addr + length */
+#define BMI_MAX_CMDBUF_SIZE (BMI_MAX_DATA_SIZE + \
+ sizeof(u32) + \
+ sizeof(u32) + \
+ sizeof(u32))
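+/* i.e. 256 + 3 * sizeof(u32) = 268 bytes (editorial note) */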
+
+/* BMI Commands */
+
+enum bmi_cmd_id {
+ BMI_NO_COMMAND = 0,
+ BMI_DONE = 1,
+ BMI_READ_MEMORY = 2,
+ BMI_WRITE_MEMORY = 3,
+ BMI_EXECUTE = 4,
+ BMI_SET_APP_START = 5,
+ BMI_READ_SOC_REGISTER = 6,
+ BMI_READ_SOC_WORD = 6,
+ BMI_WRITE_SOC_REGISTER = 7,
+ BMI_WRITE_SOC_WORD = 7,
+ BMI_GET_TARGET_ID = 8,
+ BMI_GET_TARGET_INFO = 8,
+ BMI_ROMPATCH_INSTALL = 9,
+ BMI_ROMPATCH_UNINSTALL = 10,
+ BMI_ROMPATCH_ACTIVATE = 11,
+ BMI_ROMPATCH_DEACTIVATE = 12,
+ BMI_LZ_STREAM_START = 13, /* should be followed by LZ_DATA */
+ BMI_LZ_DATA = 14,
+ BMI_NVRAM_PROCESS = 15,
+};
+
+#define BMI_NVRAM_SEG_NAME_SZ 16
+
+#define BMI_PARAM_GET_EEPROM_BOARD_ID 0x10
+#define BMI_PARAM_GET_FLASH_BOARD_ID 0x8000
+#define BMI_PARAM_FLASH_SECTION_ALL 0x10000
+
+#define ATH10K_BMI_BOARD_ID_FROM_OTP_MASK 0x7c00
+#define ATH10K_BMI_BOARD_ID_FROM_OTP_LSB 10
+
+#define ATH10K_BMI_CHIP_ID_FROM_OTP_MASK 0x18000
+#define ATH10K_BMI_CHIP_ID_FROM_OTP_LSB 15
+
+#define ATH10K_BMI_BOARD_ID_STATUS_MASK 0xff
+
+struct bmi_cmd {
+ __le32 id; /* enum bmi_cmd_id */
+ union {
+ struct {
+ } done;
+ struct {
+ __le32 addr;
+ __le32 len;
+ } read_mem;
+ struct {
+ __le32 addr;
+ __le32 len;
+ u8 payload[0];
+ } write_mem;
+ struct {
+ __le32 addr;
+ __le32 param;
+ } execute;
+ struct {
+ __le32 addr;
+ } set_app_start;
+ struct {
+ __le32 addr;
+ } read_soc_reg;
+ struct {
+ __le32 addr;
+ __le32 value;
+ } write_soc_reg;
+ struct {
+ } get_target_info;
+ struct {
+ __le32 rom_addr;
+ __le32 ram_addr; /* or value */
+ __le32 size;
+ __le32 activate; /* 0 = install, but don't activate */
+ } rompatch_install;
+ struct {
+ __le32 patch_id;
+ } rompatch_uninstall;
+ struct {
+ __le32 count;
+ __le32 patch_ids[0]; /* length of @count */
+ } rompatch_activate;
+ struct {
+ __le32 count;
+ __le32 patch_ids[0]; /* length of @count */
+ } rompatch_deactivate;
+ struct {
+ __le32 addr;
+ } lz_start;
+ struct {
+ __le32 len; /* max BMI_MAX_DATA_SIZE */
+ u8 payload[0]; /* length of @len */
+ } lz_data;
+ struct {
+ u8 name[BMI_NVRAM_SEG_NAME_SZ];
+ } nvram_process;
+ u8 payload[BMI_MAX_CMDBUF_SIZE];
+ };
+} __packed;
+
+union bmi_resp {
+ struct {
+ u8 payload[0];
+ } read_mem;
+ struct {
+ __le32 result;
+ } execute;
+ struct {
+ __le32 value;
+ } read_soc_reg;
+ struct {
+ __le32 len;
+ __le32 version;
+ __le32 type;
+ } get_target_info;
+ struct {
+ __le32 patch_id;
+ } rompatch_install;
+ struct {
+ __le32 patch_id;
+ } rompatch_uninstall;
+ struct {
+ /* 0 = nothing executed
+ * otherwise = NVRAM segment return value
+ */
+ __le32 result;
+ } nvram_process;
+ u8 payload[BMI_MAX_CMDBUF_SIZE];
+} __packed;
+
+struct bmi_target_info {
+ u32 version;
+ u32 type;
+};
+
+/* in jiffies */
+#define BMI_COMMUNICATION_TIMEOUT_HZ (3 * HZ)
+
+#define BMI_CE_NUM_TO_TARG 0
+#define BMI_CE_NUM_TO_HOST 1
+
+void ath10k_bmi_start(struct ath10k *ar);
+int ath10k_bmi_done(struct ath10k *ar);
+int ath10k_bmi_get_target_info(struct ath10k *ar,
+ struct bmi_target_info *target_info);
+int ath10k_bmi_get_target_info_sdio(struct ath10k *ar,
+ struct bmi_target_info *target_info);
+int ath10k_bmi_read_memory(struct ath10k *ar, u32 address,
+ void *buffer, u32 length);
+int ath10k_bmi_write_memory(struct ath10k *ar, u32 address,
+ const void *buffer, u32 length);
+
+#define ath10k_bmi_read32(ar, item, val) \
+ ({ \
+ int ret; \
+ u32 addr; \
+ __le32 tmp; \
+ \
+ addr = host_interest_item_address(HI_ITEM(item)); \
+ ret = ath10k_bmi_read_memory(ar, addr, (u8 *)&tmp, 4); \
+ if (!ret) \
+ *val = __le32_to_cpu(tmp); \
+ ret; \
+ })
+
+#define ath10k_bmi_write32(ar, item, val) \
+ ({ \
+ int ret; \
+ u32 address; \
+ __le32 v = __cpu_to_le32(val); \
+ \
+ address = host_interest_item_address(HI_ITEM(item)); \
+ ret = ath10k_bmi_write_memory(ar, address, \
+ (u8 *)&v, sizeof(v)); \
+ ret; \
+ })
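+
+/*
+ * Illustrative use of the accessors above (editorial sketch; hi_board_data
+ * and hi_board_data_initialized are host_interest items assumed to be
+ * defined in targaddrs.h):
+ *
+ *     u32 board_data_addr;
+ *     int ret;
+ *
+ *     ret = ath10k_bmi_read32(ar, hi_board_data, &board_data_addr);
+ *     if (!ret)
+ *             ret = ath10k_bmi_write32(ar, hi_board_data_initialized, 1);
+ */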
+
+int ath10k_bmi_execute(struct ath10k *ar, u32 address, u32 param, u32 *result);
+int ath10k_bmi_lz_stream_start(struct ath10k *ar, u32 address);
+int ath10k_bmi_lz_data(struct ath10k *ar, const void *buffer, u32 length);
+int ath10k_bmi_fast_download(struct ath10k *ar, u32 address,
+ const void *buffer, u32 length);
+int ath10k_bmi_read_soc_reg(struct ath10k *ar, u32 address, u32 *reg_val);
+int ath10k_bmi_write_soc_reg(struct ath10k *ar, u32 address, u32 reg_val);
+#endif /* _BMI_H_ */
diff --git a/drivers/net/wireless/ath/ath10k/ce.c b/drivers/net/wireless/ath/ath10k/ce.c
new file mode 100644
index 000000000..2276d608b
--- /dev/null
+++ b/drivers/net/wireless/ath/ath10k/ce.c
@@ -0,0 +1,1933 @@
+/*
+ * Copyright (c) 2005-2011 Atheros Communications Inc.
+ * Copyright (c) 2011-2017 Qualcomm Atheros, Inc.
+ * Copyright (c) 2018 The Linux Foundation. All rights reserved.
+ *
+ * Permission to use, copy, modify, and/or distribute this software for any
+ * purpose with or without fee is hereby granted, provided that the above
+ * copyright notice and this permission notice appear in all copies.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
+ * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
+ * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+ * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+ * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
+ * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+ */
+
+#include "hif.h"
+#include "ce.h"
+#include "debug.h"
+
+/*
+ * Support for Copy Engine hardware, which is mainly used for
+ * communication between Host and Target over a PCIe interconnect.
+ */
+
+/*
+ * A single CopyEngine (CE) comprises two "rings":
+ * a source ring
+ * a destination ring
+ *
+ * Each ring consists of a number of descriptors which specify
+ * an address, length, and meta-data.
+ *
+ * Typically, one side of the PCIe/AHB/SNOC interconnect (Host or Target)
+ * controls one ring and the other side controls the other ring.
+ * The source side chooses when to initiate a transfer and it
+ * chooses what to send (buffer address, length). The destination
+ * side keeps a supply of "anonymous receive buffers" available and
+ * it handles incoming data as it arrives (when the destination
+ * receives an interrupt).
+ *
+ * The sender may send a simple buffer (address/length) or it may
+ * send a small list of buffers. When a small list is sent, hardware
+ * "gathers" these and they end up in a single destination buffer
+ * with a single interrupt.
+ *
+ * There are several "contexts" managed by this layer -- more, it
+ * may seem, than should be needed. These are provided mainly for
+ * maximum flexibility and especially to facilitate a simpler HIF
+ * implementation. There are per-CopyEngine recv, send, and watermark
+ * contexts. These are supplied by the caller when a recv, send,
+ * or watermark handler is established and they are echoed back to
+ * the caller when the respective callbacks are invoked. There is
+ * also a per-transfer context supplied by the caller when a buffer
+ * (or sendlist) is sent and when a buffer is enqueued for recv.
+ * These per-transfer contexts are echoed back to the caller when
+ * the buffer is sent/received.
+ */
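+
+/*
+ * Editorial sketch of the ring index arithmetic this file relies on: ring
+ * sizes are powers of two, so occupancy and free space are computed as
+ * masked index deltas (cf. the CE_RING_DELTA() helper in ce.h):
+ *
+ *     used = (write_index - sw_index) & nentries_mask;
+ *     free = (sw_index - write_index - 1) & nentries_mask;
+ */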
+
+static inline u32 shadow_sr_wr_ind_addr(struct ath10k *ar,
+ struct ath10k_ce_pipe *ce_state)
+{
+ u32 ce_id = ce_state->id;
+ u32 addr = 0;
+
+ switch (ce_id) {
+ case 0:
+ addr = 0x00032000;
+ break;
+ case 3:
+ addr = 0x0003200C;
+ break;
+ case 4:
+ addr = 0x00032010;
+ break;
+ case 5:
+ addr = 0x00032014;
+ break;
+ case 7:
+ addr = 0x0003201C;
+ break;
+ default:
+ ath10k_warn(ar, "invalid CE id: %d", ce_id);
+ break;
+ }
+ return addr;
+}
+
+static inline u32 shadow_dst_wr_ind_addr(struct ath10k *ar,
+ struct ath10k_ce_pipe *ce_state)
+{
+ u32 ce_id = ce_state->id;
+ u32 addr = 0;
+
+ switch (ce_id) {
+ case 1:
+ addr = 0x00032034;
+ break;
+ case 2:
+ addr = 0x00032038;
+ break;
+ case 5:
+ addr = 0x00032044;
+ break;
+ case 7:
+ addr = 0x0003204C;
+ break;
+ case 8:
+ addr = 0x00032050;
+ break;
+ case 9:
+ addr = 0x00032054;
+ break;
+ case 10:
+ addr = 0x00032058;
+ break;
+ case 11:
+ addr = 0x0003205C;
+ break;
+ default:
+ ath10k_warn(ar, "invalid CE id: %d", ce_id);
+ break;
+ }
+
+ return addr;
+}
+
+static inline unsigned int
+ath10k_set_ring_byte(unsigned int offset,
+ struct ath10k_hw_ce_regs_addr_map *addr_map)
+{
+ return ((offset << addr_map->lsb) & addr_map->mask);
+}
+
+static inline unsigned int
+ath10k_get_ring_byte(unsigned int offset,
+ struct ath10k_hw_ce_regs_addr_map *addr_map)
+{
+ return ((offset & addr_map->mask) >> (addr_map->lsb));
+}
+
+static inline u32 ath10k_ce_read32(struct ath10k *ar, u32 offset)
+{
+ struct ath10k_ce *ce = ath10k_ce_priv(ar);
+
+ return ce->bus_ops->read32(ar, offset);
+}
+
+static inline void ath10k_ce_write32(struct ath10k *ar, u32 offset, u32 value)
+{
+ struct ath10k_ce *ce = ath10k_ce_priv(ar);
+
+ ce->bus_ops->write32(ar, offset, value);
+}
+
+static inline void ath10k_ce_dest_ring_write_index_set(struct ath10k *ar,
+ u32 ce_ctrl_addr,
+ unsigned int n)
+{
+ ath10k_ce_write32(ar, ce_ctrl_addr +
+ ar->hw_ce_regs->dst_wr_index_addr, n);
+}
+
+static inline u32 ath10k_ce_dest_ring_write_index_get(struct ath10k *ar,
+ u32 ce_ctrl_addr)
+{
+ return ath10k_ce_read32(ar, ce_ctrl_addr +
+ ar->hw_ce_regs->dst_wr_index_addr);
+}
+
+static inline void ath10k_ce_src_ring_write_index_set(struct ath10k *ar,
+ u32 ce_ctrl_addr,
+ unsigned int n)
+{
+ ath10k_ce_write32(ar, ce_ctrl_addr +
+ ar->hw_ce_regs->sr_wr_index_addr, n);
+}
+
+static inline u32 ath10k_ce_src_ring_write_index_get(struct ath10k *ar,
+ u32 ce_ctrl_addr)
+{
+ return ath10k_ce_read32(ar, ce_ctrl_addr +
+ ar->hw_ce_regs->sr_wr_index_addr);
+}
+
+static inline u32 ath10k_ce_src_ring_read_index_from_ddr(struct ath10k *ar,
+ u32 ce_id)
+{
+ struct ath10k_ce *ce = ath10k_ce_priv(ar);
+
+ return ce->vaddr_rri[ce_id] & CE_DDR_RRI_MASK;
+}
+
+static inline u32 ath10k_ce_src_ring_read_index_get(struct ath10k *ar,
+ u32 ce_ctrl_addr)
+{
+ struct ath10k_ce *ce = ath10k_ce_priv(ar);
+ u32 ce_id = COPY_ENGINE_ID(ce_ctrl_addr);
+ struct ath10k_ce_pipe *ce_state = &ce->ce_states[ce_id];
+ u32 index;
+
+ if (ar->hw_params.rri_on_ddr &&
+ (ce_state->attr_flags & CE_ATTR_DIS_INTR))
+ index = ath10k_ce_src_ring_read_index_from_ddr(ar, ce_id);
+ else
+ index = ath10k_ce_read32(ar, ce_ctrl_addr +
+ ar->hw_ce_regs->current_srri_addr);
+
+ return index;
+}
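+
+/*
+ * When hw_params.rri_on_ddr is set, the target mirrors each CE's read
+ * ring index into a per-CE u32 in host memory (ce->vaddr_rri, set up
+ * by ath10k_ce_alloc_rri()), letting interrupt-disabled pipes poll
+ * the index without a register read across the bus.
+ */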
+
+static inline void
+ath10k_ce_shadow_src_ring_write_index_set(struct ath10k *ar,
+ struct ath10k_ce_pipe *ce_state,
+ unsigned int value)
+{
+ ath10k_ce_write32(ar, shadow_sr_wr_ind_addr(ar, ce_state), value);
+}
+
+static inline void
+ath10k_ce_shadow_dest_ring_write_index_set(struct ath10k *ar,
+ struct ath10k_ce_pipe *ce_state,
+ unsigned int value)
+{
+ ath10k_ce_write32(ar, shadow_dst_wr_ind_addr(ar, ce_state), value);
+}
+
+static inline void ath10k_ce_src_ring_base_addr_set(struct ath10k *ar,
+ u32 ce_ctrl_addr,
+ unsigned int addr)
+{
+ ath10k_ce_write32(ar, ce_ctrl_addr +
+ ar->hw_ce_regs->sr_base_addr, addr);
+}
+
+static inline void ath10k_ce_src_ring_size_set(struct ath10k *ar,
+ u32 ce_ctrl_addr,
+ unsigned int n)
+{
+ ath10k_ce_write32(ar, ce_ctrl_addr +
+ ar->hw_ce_regs->sr_size_addr, n);
+}
+
+static inline void ath10k_ce_src_ring_dmax_set(struct ath10k *ar,
+ u32 ce_ctrl_addr,
+ unsigned int n)
+{
+ struct ath10k_hw_ce_ctrl1 *ctrl_regs = ar->hw_ce_regs->ctrl1_regs;
+
+ u32 ctrl1_addr = ath10k_ce_read32(ar, ce_ctrl_addr +
+ ctrl_regs->addr);
+
+ ath10k_ce_write32(ar, ce_ctrl_addr + ctrl_regs->addr,
+ (ctrl1_addr & ~(ctrl_regs->dmax->mask)) |
+ ath10k_set_ring_byte(n, ctrl_regs->dmax));
+}
+
+static inline void ath10k_ce_src_ring_byte_swap_set(struct ath10k *ar,
+ u32 ce_ctrl_addr,
+ unsigned int n)
+{
+ struct ath10k_hw_ce_ctrl1 *ctrl_regs = ar->hw_ce_regs->ctrl1_regs;
+
+ u32 ctrl1_addr = ath10k_ce_read32(ar, ce_ctrl_addr +
+ ctrl_regs->addr);
+
+ ath10k_ce_write32(ar, ce_ctrl_addr + ctrl_regs->addr,
+ (ctrl1_addr & ~(ctrl_regs->src_ring->mask)) |
+ ath10k_set_ring_byte(n, ctrl_regs->src_ring));
+}
+
+static inline void ath10k_ce_dest_ring_byte_swap_set(struct ath10k *ar,
+ u32 ce_ctrl_addr,
+ unsigned int n)
+{
+ struct ath10k_hw_ce_ctrl1 *ctrl_regs = ar->hw_ce_regs->ctrl1_regs;
+
+ u32 ctrl1_addr = ath10k_ce_read32(ar, ce_ctrl_addr +
+ ctrl_regs->addr);
+
+ ath10k_ce_write32(ar, ce_ctrl_addr + ctrl_regs->addr,
+ (ctrl1_addr & ~(ctrl_regs->dst_ring->mask)) |
+ ath10k_set_ring_byte(n, ctrl_regs->dst_ring));
+}
+
+static inline u32
+ath10k_ce_dest_ring_read_index_from_ddr(struct ath10k *ar, u32 ce_id)
+{
+ struct ath10k_ce *ce = ath10k_ce_priv(ar);
+
+ return (ce->vaddr_rri[ce_id] >> CE_DDR_DRRI_SHIFT) &
+ CE_DDR_RRI_MASK;
+}
+
+static inline u32 ath10k_ce_dest_ring_read_index_get(struct ath10k *ar,
+ u32 ce_ctrl_addr)
+{
+ struct ath10k_ce *ce = ath10k_ce_priv(ar);
+ u32 ce_id = COPY_ENGINE_ID(ce_ctrl_addr);
+ struct ath10k_ce_pipe *ce_state = &ce->ce_states[ce_id];
+ u32 index;
+
+ if (ar->hw_params.rri_on_ddr &&
+ (ce_state->attr_flags & CE_ATTR_DIS_INTR))
+ index = ath10k_ce_dest_ring_read_index_from_ddr(ar, ce_id);
+ else
+ index = ath10k_ce_read32(ar, ce_ctrl_addr +
+ ar->hw_ce_regs->current_drri_addr);
+
+ return index;
+}
+
+static inline void ath10k_ce_dest_ring_base_addr_set(struct ath10k *ar,
+ u32 ce_ctrl_addr,
+ u32 addr)
+{
+ ath10k_ce_write32(ar, ce_ctrl_addr +
+ ar->hw_ce_regs->dr_base_addr, addr);
+}
+
+static inline void ath10k_ce_dest_ring_size_set(struct ath10k *ar,
+ u32 ce_ctrl_addr,
+ unsigned int n)
+{
+ ath10k_ce_write32(ar, ce_ctrl_addr +
+ ar->hw_ce_regs->dr_size_addr, n);
+}
+
+static inline void ath10k_ce_src_ring_highmark_set(struct ath10k *ar,
+ u32 ce_ctrl_addr,
+ unsigned int n)
+{
+ struct ath10k_hw_ce_dst_src_wm_regs *srcr_wm = ar->hw_ce_regs->wm_srcr;
+ u32 addr = ath10k_ce_read32(ar, ce_ctrl_addr + srcr_wm->addr);
+
+ ath10k_ce_write32(ar, ce_ctrl_addr + srcr_wm->addr,
+ (addr & ~(srcr_wm->wm_high->mask)) |
+ (ath10k_set_ring_byte(n, srcr_wm->wm_high)));
+}
+
+static inline void ath10k_ce_src_ring_lowmark_set(struct ath10k *ar,
+ u32 ce_ctrl_addr,
+ unsigned int n)
+{
+ struct ath10k_hw_ce_dst_src_wm_regs *srcr_wm = ar->hw_ce_regs->wm_srcr;
+ u32 addr = ath10k_ce_read32(ar, ce_ctrl_addr + srcr_wm->addr);
+
+ ath10k_ce_write32(ar, ce_ctrl_addr + srcr_wm->addr,
+ (addr & ~(srcr_wm->wm_low->mask)) |
+ (ath10k_set_ring_byte(n, srcr_wm->wm_low)));
+}
+
+static inline void ath10k_ce_dest_ring_highmark_set(struct ath10k *ar,
+ u32 ce_ctrl_addr,
+ unsigned int n)
+{
+ struct ath10k_hw_ce_dst_src_wm_regs *dstr_wm = ar->hw_ce_regs->wm_dstr;
+ u32 addr = ath10k_ce_read32(ar, ce_ctrl_addr + dstr_wm->addr);
+
+ ath10k_ce_write32(ar, ce_ctrl_addr + dstr_wm->addr,
+ (addr & ~(dstr_wm->wm_high->mask)) |
+ (ath10k_set_ring_byte(n, dstr_wm->wm_high)));
+}
+
+static inline void ath10k_ce_dest_ring_lowmark_set(struct ath10k *ar,
+ u32 ce_ctrl_addr,
+ unsigned int n)
+{
+ struct ath10k_hw_ce_dst_src_wm_regs *dstr_wm = ar->hw_ce_regs->wm_dstr;
+ u32 addr = ath10k_ce_read32(ar, ce_ctrl_addr + dstr_wm->addr);
+
+ ath10k_ce_write32(ar, ce_ctrl_addr + dstr_wm->addr,
+ (addr & ~(dstr_wm->wm_low->mask)) |
+ (ath10k_set_ring_byte(n, dstr_wm->wm_low)));
+}
+
+static inline void ath10k_ce_copy_complete_inter_enable(struct ath10k *ar,
+ u32 ce_ctrl_addr)
+{
+ struct ath10k_hw_ce_host_ie *host_ie = ar->hw_ce_regs->host_ie;
+
+ u32 host_ie_addr = ath10k_ce_read32(ar, ce_ctrl_addr +
+ ar->hw_ce_regs->host_ie_addr);
+
+ ath10k_ce_write32(ar, ce_ctrl_addr + ar->hw_ce_regs->host_ie_addr,
+ host_ie_addr | host_ie->copy_complete->mask);
+}
+
+static inline void ath10k_ce_copy_complete_intr_disable(struct ath10k *ar,
+ u32 ce_ctrl_addr)
+{
+ struct ath10k_hw_ce_host_ie *host_ie = ar->hw_ce_regs->host_ie;
+
+ u32 host_ie_addr = ath10k_ce_read32(ar, ce_ctrl_addr +
+ ar->hw_ce_regs->host_ie_addr);
+
+ ath10k_ce_write32(ar, ce_ctrl_addr + ar->hw_ce_regs->host_ie_addr,
+ host_ie_addr & ~(host_ie->copy_complete->mask));
+}
+
+static inline void ath10k_ce_watermark_intr_disable(struct ath10k *ar,
+ u32 ce_ctrl_addr)
+{
+ struct ath10k_hw_ce_host_wm_regs *wm_regs = ar->hw_ce_regs->wm_regs;
+
+ u32 host_ie_addr = ath10k_ce_read32(ar, ce_ctrl_addr +
+ ar->hw_ce_regs->host_ie_addr);
+
+ ath10k_ce_write32(ar, ce_ctrl_addr + ar->hw_ce_regs->host_ie_addr,
+ host_ie_addr & ~(wm_regs->wm_mask));
+}
+
+static inline void ath10k_ce_error_intr_enable(struct ath10k *ar,
+ u32 ce_ctrl_addr)
+{
+ struct ath10k_hw_ce_misc_regs *misc_regs = ar->hw_ce_regs->misc_regs;
+
+ u32 misc_ie_addr = ath10k_ce_read32(ar, ce_ctrl_addr +
+ ar->hw_ce_regs->misc_ie_addr);
+
+ ath10k_ce_write32(ar,
+ ce_ctrl_addr + ar->hw_ce_regs->misc_ie_addr,
+ misc_ie_addr | misc_regs->err_mask);
+}
+
+static inline void ath10k_ce_error_intr_disable(struct ath10k *ar,
+ u32 ce_ctrl_addr)
+{
+ struct ath10k_hw_ce_misc_regs *misc_regs = ar->hw_ce_regs->misc_regs;
+
+ u32 misc_ie_addr = ath10k_ce_read32(ar,
+ ce_ctrl_addr + ar->hw_ce_regs->misc_ie_addr);
+
+ ath10k_ce_write32(ar,
+ ce_ctrl_addr + ar->hw_ce_regs->misc_ie_addr,
+ misc_ie_addr & ~(misc_regs->err_mask));
+}
+
+static inline void ath10k_ce_engine_int_status_clear(struct ath10k *ar,
+ u32 ce_ctrl_addr,
+ unsigned int mask)
+{
+ struct ath10k_hw_ce_host_wm_regs *wm_regs = ar->hw_ce_regs->wm_regs;
+
+ ath10k_ce_write32(ar, ce_ctrl_addr + wm_regs->addr, mask);
+}
+
+/*
+ * Guts of ath10k_ce_send.
+ * The caller takes responsibility for any needed locking.
+ */
+static int _ath10k_ce_send_nolock(struct ath10k_ce_pipe *ce_state,
+ void *per_transfer_context,
+ dma_addr_t buffer,
+ unsigned int nbytes,
+ unsigned int transfer_id,
+ unsigned int flags)
+{
+ struct ath10k *ar = ce_state->ar;
+ struct ath10k_ce_ring *src_ring = ce_state->src_ring;
+ struct ce_desc *desc, sdesc;
+ unsigned int nentries_mask = src_ring->nentries_mask;
+ unsigned int sw_index = src_ring->sw_index;
+ unsigned int write_index = src_ring->write_index;
+ u32 ctrl_addr = ce_state->ctrl_addr;
+ u32 desc_flags = 0;
+ int ret = 0;
+
+ if (nbytes > ce_state->src_sz_max)
+ ath10k_warn(ar, "%s: send more we can (nbytes: %d, max: %d)\n",
+ __func__, nbytes, ce_state->src_sz_max);
+
+ if (unlikely(CE_RING_DELTA(nentries_mask,
+ write_index, sw_index - 1) <= 0)) {
+ ret = -ENOSR;
+ goto exit;
+ }
+
+ desc = CE_SRC_RING_TO_DESC(src_ring->base_addr_owner_space,
+ write_index);
+
+ desc_flags |= SM(transfer_id, CE_DESC_FLAGS_META_DATA);
+
+ if (flags & CE_SEND_FLAG_GATHER)
+ desc_flags |= CE_DESC_FLAGS_GATHER;
+ if (flags & CE_SEND_FLAG_BYTE_SWAP)
+ desc_flags |= CE_DESC_FLAGS_BYTE_SWAP;
+
+ sdesc.addr = __cpu_to_le32(buffer);
+ sdesc.nbytes = __cpu_to_le16(nbytes);
+ sdesc.flags = __cpu_to_le16(desc_flags);
+
+ *desc = sdesc;
+
+ src_ring->per_transfer_context[write_index] = per_transfer_context;
+
+ /* Update Source Ring Write Index */
+ write_index = CE_RING_IDX_INCR(nentries_mask, write_index);
+
+ /* WORKAROUND: for gather fragments the hardware write index is
+ * intentionally left stale; it is only published when the final
+ * fragment of the scatter-gather transfer (sent without
+ * CE_SEND_FLAG_GATHER) is queued.
+ */
+ if (!(flags & CE_SEND_FLAG_GATHER))
+ ath10k_ce_src_ring_write_index_set(ar, ctrl_addr, write_index);
+
+ src_ring->write_index = write_index;
+exit:
+ return ret;
+}
+
+static int _ath10k_ce_send_nolock_64(struct ath10k_ce_pipe *ce_state,
+ void *per_transfer_context,
+ dma_addr_t buffer,
+ unsigned int nbytes,
+ unsigned int transfer_id,
+ unsigned int flags)
+{
+ struct ath10k *ar = ce_state->ar;
+ struct ath10k_ce_ring *src_ring = ce_state->src_ring;
+ struct ce_desc_64 *desc, sdesc;
+ unsigned int nentries_mask = src_ring->nentries_mask;
+ unsigned int sw_index;
+ unsigned int write_index = src_ring->write_index;
+ u32 ctrl_addr = ce_state->ctrl_addr;
+ __le32 *addr;
+ u32 desc_flags = 0;
+ int ret = 0;
+
+ if (test_bit(ATH10K_FLAG_CRASH_FLUSH, &ar->dev_flags))
+ return -ESHUTDOWN;
+
+ if (nbytes > ce_state->src_sz_max)
+ ath10k_warn(ar, "%s: send more we can (nbytes: %d, max: %d)\n",
+ __func__, nbytes, ce_state->src_sz_max);
+
+ if (ar->hw_params.rri_on_ddr)
+ sw_index = ath10k_ce_src_ring_read_index_from_ddr(ar, ce_state->id);
+ else
+ sw_index = src_ring->sw_index;
+
+ if (unlikely(CE_RING_DELTA(nentries_mask,
+ write_index, sw_index - 1) <= 0)) {
+ ret = -ENOSR;
+ goto exit;
+ }
+
+ desc = CE_SRC_RING_TO_DESC_64(src_ring->base_addr_owner_space,
+ write_index);
+
+ desc_flags |= SM(transfer_id, CE_DESC_FLAGS_META_DATA);
+
+ if (flags & CE_SEND_FLAG_GATHER)
+ desc_flags |= CE_DESC_FLAGS_GATHER;
+
+ if (flags & CE_SEND_FLAG_BYTE_SWAP)
+ desc_flags |= CE_DESC_FLAGS_BYTE_SWAP;
+
+ addr = (__le32 *)&sdesc.addr;
+
+ flags |= upper_32_bits(buffer) & CE_DESC_FLAGS_GET_MASK;
+ addr[0] = __cpu_to_le32(buffer);
+ addr[1] = __cpu_to_le32(flags);
+ if (flags & CE_SEND_FLAG_GATHER)
+ addr[1] |= __cpu_to_le32(CE_WCN3990_DESC_FLAGS_GATHER);
+ else
+ addr[1] &= ~(__cpu_to_le32(CE_WCN3990_DESC_FLAGS_GATHER));
+
+ sdesc.nbytes = __cpu_to_le16(nbytes);
+ sdesc.flags = __cpu_to_le16(desc_flags);
+
+ *desc = sdesc;
+
+ src_ring->per_transfer_context[write_index] = per_transfer_context;
+
+ /* Update Source Ring Write Index */
+ write_index = CE_RING_IDX_INCR(nentries_mask, write_index);
+
+ if (!(flags & CE_SEND_FLAG_GATHER)) {
+ if (ar->hw_params.shadow_reg_support)
+ ath10k_ce_shadow_src_ring_write_index_set(ar, ce_state,
+ write_index);
+ else
+ ath10k_ce_src_ring_write_index_set(ar, ctrl_addr,
+ write_index);
+ }
+
+ src_ring->write_index = write_index;
+exit:
+ return ret;
+}
+
+int ath10k_ce_send_nolock(struct ath10k_ce_pipe *ce_state,
+ void *per_transfer_context,
+ dma_addr_t buffer,
+ unsigned int nbytes,
+ unsigned int transfer_id,
+ unsigned int flags)
+{
+ return ce_state->ops->ce_send_nolock(ce_state, per_transfer_context,
+ buffer, nbytes, transfer_id, flags);
+}
+EXPORT_SYMBOL(ath10k_ce_send_nolock);
+
+void __ath10k_ce_send_revert(struct ath10k_ce_pipe *pipe)
+{
+ struct ath10k *ar = pipe->ar;
+ struct ath10k_ce *ce = ath10k_ce_priv(ar);
+ struct ath10k_ce_ring *src_ring = pipe->src_ring;
+ u32 ctrl_addr = pipe->ctrl_addr;
+
+ lockdep_assert_held(&ce->ce_lock);
+
+ /*
+ * This function must be called only if there is an incomplete
+ * scatter-gather transfer (before index register is updated)
+ * that needs to be cleaned up.
+ */
+ if (WARN_ON_ONCE(src_ring->write_index == src_ring->sw_index))
+ return;
+
+ if (WARN_ON_ONCE(src_ring->write_index ==
+ ath10k_ce_src_ring_write_index_get(ar, ctrl_addr)))
+ return;
+
+ src_ring->write_index--;
+ src_ring->write_index &= src_ring->nentries_mask;
+
+ src_ring->per_transfer_context[src_ring->write_index] = NULL;
+}
+EXPORT_SYMBOL(__ath10k_ce_send_revert);
+
+int ath10k_ce_send(struct ath10k_ce_pipe *ce_state,
+ void *per_transfer_context,
+ dma_addr_t buffer,
+ unsigned int nbytes,
+ unsigned int transfer_id,
+ unsigned int flags)
+{
+ struct ath10k *ar = ce_state->ar;
+ struct ath10k_ce *ce = ath10k_ce_priv(ar);
+ int ret;
+
+ spin_lock_bh(&ce->ce_lock);
+ ret = ath10k_ce_send_nolock(ce_state, per_transfer_context,
+ buffer, nbytes, transfer_id, flags);
+ spin_unlock_bh(&ce->ce_lock);
+
+ return ret;
+}
+EXPORT_SYMBOL(ath10k_ce_send);
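+
+/*
+ * A minimal usage sketch for ath10k_ce_send(); the pipe, buffer and
+ * transfer id below are hypothetical placeholders, not driver code:
+ *
+ * ret = ath10k_ce_send(ce_pipe, skb, skb_paddr, skb->len,
+ * transfer_id, 0);
+ * if (ret == -ENOSR)
+ * ... source ring full; retry after a send completion ...
+ *
+ * Callers that already hold ce_lock use ath10k_ce_send_nolock()
+ * instead.
+ */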
+
+int ath10k_ce_num_free_src_entries(struct ath10k_ce_pipe *pipe)
+{
+ struct ath10k *ar = pipe->ar;
+ struct ath10k_ce *ce = ath10k_ce_priv(ar);
+ int delta;
+
+ spin_lock_bh(&ce->ce_lock);
+ delta = CE_RING_DELTA(pipe->src_ring->nentries_mask,
+ pipe->src_ring->write_index,
+ pipe->src_ring->sw_index - 1);
+ spin_unlock_bh(&ce->ce_lock);
+
+ return delta;
+}
+EXPORT_SYMBOL(ath10k_ce_num_free_src_entries);
+
+int __ath10k_ce_rx_num_free_bufs(struct ath10k_ce_pipe *pipe)
+{
+ struct ath10k *ar = pipe->ar;
+ struct ath10k_ce *ce = ath10k_ce_priv(ar);
+ struct ath10k_ce_ring *dest_ring = pipe->dest_ring;
+ unsigned int nentries_mask = dest_ring->nentries_mask;
+ unsigned int write_index = dest_ring->write_index;
+ unsigned int sw_index = dest_ring->sw_index;
+
+ lockdep_assert_held(&ce->ce_lock);
+
+ return CE_RING_DELTA(nentries_mask, write_index, sw_index - 1);
+}
+EXPORT_SYMBOL(__ath10k_ce_rx_num_free_bufs);
+
+static int __ath10k_ce_rx_post_buf(struct ath10k_ce_pipe *pipe, void *ctx,
+ dma_addr_t paddr)
+{
+ struct ath10k *ar = pipe->ar;
+ struct ath10k_ce *ce = ath10k_ce_priv(ar);
+ struct ath10k_ce_ring *dest_ring = pipe->dest_ring;
+ unsigned int nentries_mask = dest_ring->nentries_mask;
+ unsigned int write_index = dest_ring->write_index;
+ unsigned int sw_index = dest_ring->sw_index;
+ struct ce_desc *base = dest_ring->base_addr_owner_space;
+ struct ce_desc *desc = CE_DEST_RING_TO_DESC(base, write_index);
+ u32 ctrl_addr = pipe->ctrl_addr;
+
+ lockdep_assert_held(&ce->ce_lock);
+
+ if ((pipe->id != 5) &&
+ CE_RING_DELTA(nentries_mask, write_index, sw_index - 1) == 0)
+ return -ENOSPC;
+
+ desc->addr = __cpu_to_le32(paddr);
+ desc->nbytes = 0;
+
+ dest_ring->per_transfer_context[write_index] = ctx;
+ write_index = CE_RING_IDX_INCR(nentries_mask, write_index);
+ ath10k_ce_dest_ring_write_index_set(ar, ctrl_addr, write_index);
+ dest_ring->write_index = write_index;
+
+ return 0;
+}
+
+static int __ath10k_ce_rx_post_buf_64(struct ath10k_ce_pipe *pipe,
+ void *ctx,
+ dma_addr_t paddr)
+{
+ struct ath10k *ar = pipe->ar;
+ struct ath10k_ce *ce = ath10k_ce_priv(ar);
+ struct ath10k_ce_ring *dest_ring = pipe->dest_ring;
+ unsigned int nentries_mask = dest_ring->nentries_mask;
+ unsigned int write_index = dest_ring->write_index;
+ unsigned int sw_index = dest_ring->sw_index;
+ struct ce_desc_64 *base = dest_ring->base_addr_owner_space;
+ struct ce_desc_64 *desc =
+ CE_DEST_RING_TO_DESC_64(base, write_index);
+ u32 ctrl_addr = pipe->ctrl_addr;
+
+ lockdep_assert_held(&ce->ce_lock);
+
+ if (CE_RING_DELTA(nentries_mask, write_index, sw_index - 1) == 0)
+ return -ENOSPC;
+
+ desc->addr = __cpu_to_le64(paddr);
+ desc->addr &= __cpu_to_le64(CE_DESC_37BIT_ADDR_MASK);
+
+ desc->nbytes = 0;
+
+ dest_ring->per_transfer_context[write_index] = ctx;
+ write_index = CE_RING_IDX_INCR(nentries_mask, write_index);
+ ath10k_ce_dest_ring_write_index_set(ar, ctrl_addr, write_index);
+ dest_ring->write_index = write_index;
+
+ return 0;
+}
+
+void ath10k_ce_rx_update_write_idx(struct ath10k_ce_pipe *pipe, u32 nentries)
+{
+ struct ath10k *ar = pipe->ar;
+ struct ath10k_ce_ring *dest_ring = pipe->dest_ring;
+ unsigned int nentries_mask = dest_ring->nentries_mask;
+ unsigned int write_index = dest_ring->write_index;
+ u32 ctrl_addr = pipe->ctrl_addr;
+ u32 cur_write_idx = ath10k_ce_dest_ring_write_index_get(ar, ctrl_addr);
+
+ /* Prevent CE ring stuck issue that will occur when ring is full.
+ * Make sure that write index is 1 less than read index.
+ */
+ if (((cur_write_idx + nentries) & nentries_mask) == dest_ring->sw_index)
+ nentries -= 1;
+
+ write_index = CE_RING_IDX_ADD(nentries_mask, write_index, nentries);
+ ath10k_ce_dest_ring_write_index_set(ar, ctrl_addr, write_index);
+ dest_ring->write_index = write_index;
+}
+EXPORT_SYMBOL(ath10k_ce_rx_update_write_idx);
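+
+/*
+ * Worked example of the guard above: with 512 entries
+ * (nentries_mask = 511), cur_write_idx = 508, nentries = 4 and
+ * sw_index = 0, (508 + 4) & 511 == 0 would make the write index catch
+ * up with the read index, so one entry is held back and only three
+ * are published.
+ */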
+
+int ath10k_ce_rx_post_buf(struct ath10k_ce_pipe *pipe, void *ctx,
+ dma_addr_t paddr)
+{
+ struct ath10k *ar = pipe->ar;
+ struct ath10k_ce *ce = ath10k_ce_priv(ar);
+ int ret;
+
+ spin_lock_bh(&ce->ce_lock);
+ ret = pipe->ops->ce_rx_post_buf(pipe, ctx, paddr);
+ spin_unlock_bh(&ce->ce_lock);
+
+ return ret;
+}
+EXPORT_SYMBOL(ath10k_ce_rx_post_buf);
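+
+/*
+ * A sketch of the expected refill pattern (the buffer allocation
+ * helper is a hypothetical placeholder): bus code typically takes
+ * ce_lock, asks __ath10k_ce_rx_num_free_bufs() how many buffers fit
+ * and posts that many:
+ *
+ * spin_lock_bh(&ce->ce_lock);
+ * num = __ath10k_ce_rx_num_free_bufs(pipe);
+ * while (num--) {
+ * paddr = alloc_and_map_rx_buffer(ar, &ctx); (placeholder helper)
+ * if (pipe->ops->ce_rx_post_buf(pipe, ctx, paddr))
+ * break;
+ * }
+ * spin_unlock_bh(&ce->ce_lock);
+ */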
+
+/*
+ * Guts of ath10k_ce_completed_recv_next.
+ * The caller takes responsibility for any necessary locking.
+ */
+static int
+_ath10k_ce_completed_recv_next_nolock(struct ath10k_ce_pipe *ce_state,
+ void **per_transfer_contextp,
+ unsigned int *nbytesp)
+{
+ struct ath10k_ce_ring *dest_ring = ce_state->dest_ring;
+ unsigned int nentries_mask = dest_ring->nentries_mask;
+ unsigned int sw_index = dest_ring->sw_index;
+
+ struct ce_desc *base = dest_ring->base_addr_owner_space;
+ struct ce_desc *desc = CE_DEST_RING_TO_DESC(base, sw_index);
+ struct ce_desc sdesc;
+ u16 nbytes;
+
+ /* Copy in one go for performance reasons */
+ sdesc = *desc;
+
+ nbytes = __le16_to_cpu(sdesc.nbytes);
+ if (nbytes == 0) {
+ /*
+ * This closes a relatively unusual race where the Host
+ * sees the updated DRRI before the update to the
+ * corresponding descriptor has completed. We treat this
+ * as a descriptor that is not yet done.
+ */
+ return -EIO;
+ }
+
+ desc->nbytes = 0;
+
+ /* Return data from completed destination descriptor */
+ *nbytesp = nbytes;
+
+ if (per_transfer_contextp)
+ *per_transfer_contextp =
+ dest_ring->per_transfer_context[sw_index];
+
+ /* Copy engine 5 (HTT Rx) will reuse the same transfer context.
+ * So update the transfer context for all CEs except CE5.
+ */
+ if (ce_state->id != 5)
+ dest_ring->per_transfer_context[sw_index] = NULL;
+
+ /* Update sw_index */
+ sw_index = CE_RING_IDX_INCR(nentries_mask, sw_index);
+ dest_ring->sw_index = sw_index;
+
+ return 0;
+}
+
+static int
+_ath10k_ce_completed_recv_next_nolock_64(struct ath10k_ce_pipe *ce_state,
+ void **per_transfer_contextp,
+ unsigned int *nbytesp)
+{
+ struct ath10k_ce_ring *dest_ring = ce_state->dest_ring;
+ unsigned int nentries_mask = dest_ring->nentries_mask;
+ unsigned int sw_index = dest_ring->sw_index;
+ struct ce_desc_64 *base = dest_ring->base_addr_owner_space;
+ struct ce_desc_64 *desc =
+ CE_DEST_RING_TO_DESC_64(base, sw_index);
+ struct ce_desc_64 sdesc;
+ u16 nbytes;
+
+ /* Copy in one go for performance reasons */
+ sdesc = *desc;
+
+ nbytes = __le16_to_cpu(sdesc.nbytes);
+ if (nbytes == 0) {
+ /* This closes a relatively unusual race where the Host
+ * sees the updated DRRI before the update to the
+ * corresponding descriptor has completed. We treat this
+ * as a descriptor that is not yet done.
+ */
+ return -EIO;
+ }
+
+ desc->nbytes = 0;
+
+ /* Return data from completed destination descriptor */
+ *nbytesp = nbytes;
+
+ if (per_transfer_contextp)
+ *per_transfer_contextp =
+ dest_ring->per_transfer_context[sw_index];
+
+ /* Copy engine 5 (HTT Rx) will reuse the same transfer context.
+ * So update the transfer context for all CEs except CE5.
+ */
+ if (ce_state->id != 5)
+ dest_ring->per_transfer_context[sw_index] = NULL;
+
+ /* Update sw_index */
+ sw_index = CE_RING_IDX_INCR(nentries_mask, sw_index);
+ dest_ring->sw_index = sw_index;
+
+ return 0;
+}
+
+int ath10k_ce_completed_recv_next_nolock(struct ath10k_ce_pipe *ce_state,
+ void **per_transfer_ctx,
+ unsigned int *nbytesp)
+{
+ return ce_state->ops->ce_completed_recv_next_nolock(ce_state,
+ per_transfer_ctx,
+ nbytesp);
+}
+EXPORT_SYMBOL(ath10k_ce_completed_recv_next_nolock);
+
+int ath10k_ce_completed_recv_next(struct ath10k_ce_pipe *ce_state,
+ void **per_transfer_contextp,
+ unsigned int *nbytesp)
+{
+ struct ath10k *ar = ce_state->ar;
+ struct ath10k_ce *ce = ath10k_ce_priv(ar);
+ int ret;
+
+ spin_lock_bh(&ce->ce_lock);
+ ret = ce_state->ops->ce_completed_recv_next_nolock(ce_state,
+ per_transfer_contextp,
+ nbytesp);
+
+ spin_unlock_bh(&ce->ce_lock);
+
+ return ret;
+}
+EXPORT_SYMBOL(ath10k_ce_completed_recv_next);
+
+static int _ath10k_ce_revoke_recv_next(struct ath10k_ce_pipe *ce_state,
+ void **per_transfer_contextp,
+ dma_addr_t *bufferp)
+{
+ struct ath10k_ce_ring *dest_ring;
+ unsigned int nentries_mask;
+ unsigned int sw_index;
+ unsigned int write_index;
+ int ret;
+ struct ath10k *ar;
+ struct ath10k_ce *ce;
+
+ dest_ring = ce_state->dest_ring;
+
+ if (!dest_ring)
+ return -EIO;
+
+ ar = ce_state->ar;
+ ce = ath10k_ce_priv(ar);
+
+ spin_lock_bh(&ce->ce_lock);
+
+ nentries_mask = dest_ring->nentries_mask;
+ sw_index = dest_ring->sw_index;
+ write_index = dest_ring->write_index;
+ if (write_index != sw_index) {
+ struct ce_desc *base = dest_ring->base_addr_owner_space;
+ struct ce_desc *desc = CE_DEST_RING_TO_DESC(base, sw_index);
+
+ /* Return data from completed destination descriptor */
+ *bufferp = __le32_to_cpu(desc->addr);
+
+ if (per_transfer_contextp)
+ *per_transfer_contextp =
+ dest_ring->per_transfer_context[sw_index];
+
+ /* sanity */
+ dest_ring->per_transfer_context[sw_index] = NULL;
+ desc->nbytes = 0;
+
+ /* Update sw_index */
+ sw_index = CE_RING_IDX_INCR(nentries_mask, sw_index);
+ dest_ring->sw_index = sw_index;
+ ret = 0;
+ } else {
+ ret = -EIO;
+ }
+
+ spin_unlock_bh(&ce->ce_lock);
+
+ return ret;
+}
+
+static int _ath10k_ce_revoke_recv_next_64(struct ath10k_ce_pipe *ce_state,
+ void **per_transfer_contextp,
+ dma_addr_t *bufferp)
+{
+ struct ath10k_ce_ring *dest_ring;
+ unsigned int nentries_mask;
+ unsigned int sw_index;
+ unsigned int write_index;
+ int ret;
+ struct ath10k *ar;
+ struct ath10k_ce *ce;
+
+ dest_ring = ce_state->dest_ring;
+
+ if (!dest_ring)
+ return -EIO;
+
+ ar = ce_state->ar;
+ ce = ath10k_ce_priv(ar);
+
+ spin_lock_bh(&ce->ce_lock);
+
+ nentries_mask = dest_ring->nentries_mask;
+ sw_index = dest_ring->sw_index;
+ write_index = dest_ring->write_index;
+ if (write_index != sw_index) {
+ struct ce_desc_64 *base = dest_ring->base_addr_owner_space;
+ struct ce_desc_64 *desc =
+ CE_DEST_RING_TO_DESC_64(base, sw_index);
+
+ /* Return data from completed destination descriptor */
+ *bufferp = __le64_to_cpu(desc->addr);
+
+ if (per_transfer_contextp)
+ *per_transfer_contextp =
+ dest_ring->per_transfer_context[sw_index];
+
+ /* sanity */
+ dest_ring->per_transfer_context[sw_index] = NULL;
+ desc->nbytes = 0;
+
+ /* Update sw_index */
+ sw_index = CE_RING_IDX_INCR(nentries_mask, sw_index);
+ dest_ring->sw_index = sw_index;
+ ret = 0;
+ } else {
+ ret = -EIO;
+ }
+
+ spin_unlock_bh(&ce->ce_lock);
+
+ return ret;
+}
+
+int ath10k_ce_revoke_recv_next(struct ath10k_ce_pipe *ce_state,
+ void **per_transfer_contextp,
+ dma_addr_t *bufferp)
+{
+ return ce_state->ops->ce_revoke_recv_next(ce_state,
+ per_transfer_contextp,
+ bufferp);
+}
+EXPORT_SYMBOL(ath10k_ce_revoke_recv_next);
+
+/*
+ * Guts of ath10k_ce_completed_send_next.
+ * The caller takes responsibility for any necessary locking.
+ */
+int ath10k_ce_completed_send_next_nolock(struct ath10k_ce_pipe *ce_state,
+ void **per_transfer_contextp)
+{
+ struct ath10k_ce_ring *src_ring = ce_state->src_ring;
+ u32 ctrl_addr = ce_state->ctrl_addr;
+ struct ath10k *ar = ce_state->ar;
+ unsigned int nentries_mask = src_ring->nentries_mask;
+ unsigned int sw_index = src_ring->sw_index;
+ unsigned int read_index;
+ struct ce_desc *desc;
+
+ if (src_ring->hw_index == sw_index) {
+ /*
+ * The SW completion index has caught up with the cached
+ * version of the HW completion index.
+ * Update the cached HW completion index to see whether
+ * the SW has really caught up to the HW, or if the cached
+ * value of the HW index has become stale.
+ */
+
+ read_index = ath10k_ce_src_ring_read_index_get(ar, ctrl_addr);
+ if (read_index == 0xffffffff)
+ return -ENODEV;
+
+ read_index &= nentries_mask;
+ src_ring->hw_index = read_index;
+ }
+
+ if (ar->hw_params.rri_on_ddr)
+ read_index = ath10k_ce_src_ring_read_index_get(ar, ctrl_addr);
+ else
+ read_index = src_ring->hw_index;
+
+ if (read_index == sw_index)
+ return -EIO;
+
+ if (per_transfer_contextp)
+ *per_transfer_contextp =
+ src_ring->per_transfer_context[sw_index];
+
+ /* sanity */
+ src_ring->per_transfer_context[sw_index] = NULL;
+ desc = CE_SRC_RING_TO_DESC(src_ring->base_addr_owner_space,
+ sw_index);
+ desc->nbytes = 0;
+
+ /* Update sw_index */
+ sw_index = CE_RING_IDX_INCR(nentries_mask, sw_index);
+ src_ring->sw_index = sw_index;
+
+ return 0;
+}
+EXPORT_SYMBOL(ath10k_ce_completed_send_next_nolock);
+
+static void ath10k_ce_extract_desc_data(struct ath10k *ar,
+ struct ath10k_ce_ring *src_ring,
+ u32 sw_index,
+ dma_addr_t *bufferp,
+ u32 *nbytesp,
+ u32 *transfer_idp)
+{
+ struct ce_desc *base = src_ring->base_addr_owner_space;
+ struct ce_desc *desc = CE_SRC_RING_TO_DESC(base, sw_index);
+
+ /* Return data from completed source descriptor */
+ *bufferp = __le32_to_cpu(desc->addr);
+ *nbytesp = __le16_to_cpu(desc->nbytes);
+ *transfer_idp = MS(__le16_to_cpu(desc->flags),
+ CE_DESC_FLAGS_META_DATA);
+}
+
+static void ath10k_ce_extract_desc_data_64(struct ath10k *ar,
+ struct ath10k_ce_ring *src_ring,
+ u32 sw_index,
+ dma_addr_t *bufferp,
+ u32 *nbytesp,
+ u32 *transfer_idp)
+{
+ struct ce_desc_64 *base = src_ring->base_addr_owner_space;
+ struct ce_desc_64 *desc =
+ CE_SRC_RING_TO_DESC_64(base, sw_index);
+
+ /* Return data from completed source descriptor */
+ *bufferp = __le64_to_cpu(desc->addr);
+ *nbytesp = __le16_to_cpu(desc->nbytes);
+ *transfer_idp = MS(__le16_to_cpu(desc->flags),
+ CE_DESC_FLAGS_META_DATA);
+}
+
+/* NB: Modeled after ath10k_ce_completed_send_next */
+int ath10k_ce_cancel_send_next(struct ath10k_ce_pipe *ce_state,
+ void **per_transfer_contextp,
+ dma_addr_t *bufferp,
+ unsigned int *nbytesp,
+ unsigned int *transfer_idp)
+{
+ struct ath10k_ce_ring *src_ring;
+ unsigned int nentries_mask;
+ unsigned int sw_index;
+ unsigned int write_index;
+ int ret;
+ struct ath10k *ar;
+ struct ath10k_ce *ce;
+
+ src_ring = ce_state->src_ring;
+
+ if (!src_ring)
+ return -EIO;
+
+ ar = ce_state->ar;
+ ce = ath10k_ce_priv(ar);
+
+ spin_lock_bh(&ce->ce_lock);
+
+ nentries_mask = src_ring->nentries_mask;
+ sw_index = src_ring->sw_index;
+ write_index = src_ring->write_index;
+
+ if (write_index != sw_index) {
+ ce_state->ops->ce_extract_desc_data(ar, src_ring, sw_index,
+ bufferp, nbytesp,
+ transfer_idp);
+
+ if (per_transfer_contextp)
+ *per_transfer_contextp =
+ src_ring->per_transfer_context[sw_index];
+
+ /* sanity */
+ src_ring->per_transfer_context[sw_index] = NULL;
+
+ /* Update sw_index */
+ sw_index = CE_RING_IDX_INCR(nentries_mask, sw_index);
+ src_ring->sw_index = sw_index;
+ ret = 0;
+ } else {
+ ret = -EIO;
+ }
+
+ spin_unlock_bh(&ce->ce_lock);
+
+ return ret;
+}
+EXPORT_SYMBOL(ath10k_ce_cancel_send_next);
+
+int ath10k_ce_completed_send_next(struct ath10k_ce_pipe *ce_state,
+ void **per_transfer_contextp)
+{
+ struct ath10k *ar = ce_state->ar;
+ struct ath10k_ce *ce = ath10k_ce_priv(ar);
+ int ret;
+
+ spin_lock_bh(&ce->ce_lock);
+ ret = ath10k_ce_completed_send_next_nolock(ce_state,
+ per_transfer_contextp);
+ spin_unlock_bh(&ce->ce_lock);
+
+ return ret;
+}
+EXPORT_SYMBOL(ath10k_ce_completed_send_next);
+
+/*
+ * Guts of interrupt handler for per-engine interrupts on a particular CE.
+ *
+ * Invokes registered callbacks for recv_complete,
+ * send_complete, and watermarks.
+ */
+void ath10k_ce_per_engine_service(struct ath10k *ar, unsigned int ce_id)
+{
+ struct ath10k_ce *ce = ath10k_ce_priv(ar);
+ struct ath10k_ce_pipe *ce_state = &ce->ce_states[ce_id];
+ struct ath10k_hw_ce_host_wm_regs *wm_regs = ar->hw_ce_regs->wm_regs;
+ u32 ctrl_addr = ce_state->ctrl_addr;
+
+ spin_lock_bh(&ce->ce_lock);
+
+ /* Clear the copy-complete interrupts that will be handled here. */
+ ath10k_ce_engine_int_status_clear(ar, ctrl_addr,
+ wm_regs->cc_mask);
+
+ spin_unlock_bh(&ce->ce_lock);
+
+ if (ce_state->recv_cb)
+ ce_state->recv_cb(ce_state);
+
+ if (ce_state->send_cb)
+ ce_state->send_cb(ce_state);
+
+ spin_lock_bh(&ce->ce_lock);
+
+ /*
+ * Misc CE interrupts are not being handled, but still need
+ * to be cleared.
+ */
+ ath10k_ce_engine_int_status_clear(ar, ctrl_addr, wm_regs->wm_mask);
+
+ spin_unlock_bh(&ce->ce_lock);
+}
+EXPORT_SYMBOL(ath10k_ce_per_engine_service);
+
+/*
+ * Handler for per-engine interrupts on ALL active CEs.
+ * This is used in cases where the system is sharing a
+ * single interrupt for all CEs.
+ */
+
+void ath10k_ce_per_engine_service_any(struct ath10k *ar)
+{
+ int ce_id;
+ u32 intr_summary;
+
+ intr_summary = ath10k_ce_interrupt_summary(ar);
+
+ for (ce_id = 0; intr_summary && (ce_id < CE_COUNT); ce_id++) {
+ if (intr_summary & (1 << ce_id))
+ intr_summary &= ~(1 << ce_id);
+ else
+ /* no intr pending on this CE */
+ continue;
+
+ ath10k_ce_per_engine_service(ar, ce_id);
+ }
+}
+EXPORT_SYMBOL(ath10k_ce_per_engine_service_any);
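+
+/*
+ * Example: an interrupt summary of 0x5 services CE0 and CE2; the loop
+ * exits as soon as the summary mask is exhausted, so CEs above the
+ * highest pending bit are never touched.
+ */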
+
+/*
+ * Adjust interrupts for the copy complete handler.
+ * If it's needed for either send or recv, then unmask
+ * this interrupt; otherwise, mask it.
+ *
+ * Called with ce_lock held.
+ */
+static void ath10k_ce_per_engine_handler_adjust(struct ath10k_ce_pipe *ce_state)
+{
+ u32 ctrl_addr = ce_state->ctrl_addr;
+ struct ath10k *ar = ce_state->ar;
+ bool disable_copy_compl_intr = ce_state->attr_flags & CE_ATTR_DIS_INTR;
+
+ if ((!disable_copy_compl_intr) &&
+ (ce_state->send_cb || ce_state->recv_cb))
+ ath10k_ce_copy_complete_inter_enable(ar, ctrl_addr);
+ else
+ ath10k_ce_copy_complete_intr_disable(ar, ctrl_addr);
+
+ ath10k_ce_watermark_intr_disable(ar, ctrl_addr);
+}
+
+int ath10k_ce_disable_interrupts(struct ath10k *ar)
+{
+ int ce_id;
+
+ for (ce_id = 0; ce_id < CE_COUNT; ce_id++) {
+ u32 ctrl_addr = ath10k_ce_base_address(ar, ce_id);
+
+ ath10k_ce_copy_complete_intr_disable(ar, ctrl_addr);
+ ath10k_ce_error_intr_disable(ar, ctrl_addr);
+ ath10k_ce_watermark_intr_disable(ar, ctrl_addr);
+ }
+
+ return 0;
+}
+EXPORT_SYMBOL(ath10k_ce_disable_interrupts);
+
+void ath10k_ce_enable_interrupts(struct ath10k *ar)
+{
+ struct ath10k_ce *ce = ath10k_ce_priv(ar);
+ int ce_id;
+ struct ath10k_ce_pipe *ce_state;
+
+ /* Skip the last copy engine, CE7, the diagnostic window: it
+ * uses polling and isn't initialized for interrupts.
+ */
+ for (ce_id = 0; ce_id < CE_COUNT - 1; ce_id++) {
+ ce_state = &ce->ce_states[ce_id];
+ ath10k_ce_per_engine_handler_adjust(ce_state);
+ }
+}
+EXPORT_SYMBOL(ath10k_ce_enable_interrupts);
+
+static int ath10k_ce_init_src_ring(struct ath10k *ar,
+ unsigned int ce_id,
+ const struct ce_attr *attr)
+{
+ struct ath10k_ce *ce = ath10k_ce_priv(ar);
+ struct ath10k_ce_pipe *ce_state = &ce->ce_states[ce_id];
+ struct ath10k_ce_ring *src_ring = ce_state->src_ring;
+ u32 nentries, ctrl_addr = ath10k_ce_base_address(ar, ce_id);
+
+ nentries = roundup_pow_of_two(attr->src_nentries);
+
+ if (ar->hw_params.target_64bit)
+ memset(src_ring->base_addr_owner_space, 0,
+ nentries * sizeof(struct ce_desc_64));
+ else
+ memset(src_ring->base_addr_owner_space, 0,
+ nentries * sizeof(struct ce_desc));
+
+ src_ring->sw_index = ath10k_ce_src_ring_read_index_get(ar, ctrl_addr);
+ src_ring->sw_index &= src_ring->nentries_mask;
+ src_ring->hw_index = src_ring->sw_index;
+
+ src_ring->write_index =
+ ath10k_ce_src_ring_write_index_get(ar, ctrl_addr);
+ src_ring->write_index &= src_ring->nentries_mask;
+
+ ath10k_ce_src_ring_base_addr_set(ar, ctrl_addr,
+ src_ring->base_addr_ce_space);
+ ath10k_ce_src_ring_size_set(ar, ctrl_addr, nentries);
+ ath10k_ce_src_ring_dmax_set(ar, ctrl_addr, attr->src_sz_max);
+ ath10k_ce_src_ring_byte_swap_set(ar, ctrl_addr, 0);
+ ath10k_ce_src_ring_lowmark_set(ar, ctrl_addr, 0);
+ ath10k_ce_src_ring_highmark_set(ar, ctrl_addr, nentries);
+
+ ath10k_dbg(ar, ATH10K_DBG_BOOT,
+ "boot init ce src ring id %d entries %d base_addr %pK\n",
+ ce_id, nentries, src_ring->base_addr_owner_space);
+
+ return 0;
+}
+
+static int ath10k_ce_init_dest_ring(struct ath10k *ar,
+ unsigned int ce_id,
+ const struct ce_attr *attr)
+{
+ struct ath10k_ce *ce = ath10k_ce_priv(ar);
+ struct ath10k_ce_pipe *ce_state = &ce->ce_states[ce_id];
+ struct ath10k_ce_ring *dest_ring = ce_state->dest_ring;
+ u32 nentries, ctrl_addr = ath10k_ce_base_address(ar, ce_id);
+
+ nentries = roundup_pow_of_two(attr->dest_nentries);
+
+ if (ar->hw_params.target_64bit)
+ memset(dest_ring->base_addr_owner_space, 0,
+ nentries * sizeof(struct ce_desc_64));
+ else
+ memset(dest_ring->base_addr_owner_space, 0,
+ nentries * sizeof(struct ce_desc));
+
+ dest_ring->sw_index = ath10k_ce_dest_ring_read_index_get(ar, ctrl_addr);
+ dest_ring->sw_index &= dest_ring->nentries_mask;
+ dest_ring->write_index =
+ ath10k_ce_dest_ring_write_index_get(ar, ctrl_addr);
+ dest_ring->write_index &= dest_ring->nentries_mask;
+
+ ath10k_ce_dest_ring_base_addr_set(ar, ctrl_addr,
+ dest_ring->base_addr_ce_space);
+ ath10k_ce_dest_ring_size_set(ar, ctrl_addr, nentries);
+ ath10k_ce_dest_ring_byte_swap_set(ar, ctrl_addr, 0);
+ ath10k_ce_dest_ring_lowmark_set(ar, ctrl_addr, 0);
+ ath10k_ce_dest_ring_highmark_set(ar, ctrl_addr, nentries);
+
+ ath10k_dbg(ar, ATH10K_DBG_BOOT,
+ "boot ce dest ring id %d entries %d base_addr %pK\n",
+ ce_id, nentries, dest_ring->base_addr_owner_space);
+
+ return 0;
+}
+
+static int ath10k_ce_alloc_shadow_base(struct ath10k *ar,
+ struct ath10k_ce_ring *src_ring,
+ u32 nentries)
+{
+ src_ring->shadow_base_unaligned = kcalloc(nentries,
+ sizeof(struct ce_desc_64),
+ GFP_KERNEL);
+ if (!src_ring->shadow_base_unaligned)
+ return -ENOMEM;
+
+ src_ring->shadow_base = (struct ce_desc_64 *)
+ PTR_ALIGN(src_ring->shadow_base_unaligned,
+ CE_DESC_RING_ALIGN);
+ return 0;
+}
+
+static struct ath10k_ce_ring *
+ath10k_ce_alloc_src_ring(struct ath10k *ar, unsigned int ce_id,
+ const struct ce_attr *attr)
+{
+ struct ath10k_ce_ring *src_ring;
+ u32 nentries = attr->src_nentries;
+ dma_addr_t base_addr;
+ int ret;
+
+ nentries = roundup_pow_of_two(nentries);
+
+ src_ring = kzalloc(sizeof(*src_ring) +
+ (nentries *
+ sizeof(*src_ring->per_transfer_context)),
+ GFP_KERNEL);
+ if (src_ring == NULL)
+ return ERR_PTR(-ENOMEM);
+
+ src_ring->nentries = nentries;
+ src_ring->nentries_mask = nentries - 1;
+
+ /*
+ * Legacy platforms that do not support cache
+ * coherent DMA are unsupported
+ */
+ src_ring->base_addr_owner_space_unaligned =
+ dma_alloc_coherent(ar->dev,
+ (nentries * sizeof(struct ce_desc) +
+ CE_DESC_RING_ALIGN),
+ &base_addr, GFP_KERNEL);
+ if (!src_ring->base_addr_owner_space_unaligned) {
+ kfree(src_ring);
+ return ERR_PTR(-ENOMEM);
+ }
+
+ src_ring->base_addr_ce_space_unaligned = base_addr;
+
+ src_ring->base_addr_owner_space =
+ PTR_ALIGN(src_ring->base_addr_owner_space_unaligned,
+ CE_DESC_RING_ALIGN);
+ src_ring->base_addr_ce_space =
+ ALIGN(src_ring->base_addr_ce_space_unaligned,
+ CE_DESC_RING_ALIGN);
+
+ if (ar->hw_params.shadow_reg_support) {
+ ret = ath10k_ce_alloc_shadow_base(ar, src_ring, nentries);
+ if (ret) {
+ dma_free_coherent(ar->dev,
+ (nentries * sizeof(struct ce_desc) +
+ CE_DESC_RING_ALIGN),
+ src_ring->base_addr_owner_space_unaligned,
+ base_addr);
+ kfree(src_ring);
+ return ERR_PTR(ret);
+ }
+ }
+
+ return src_ring;
+}
+
+static struct ath10k_ce_ring *
+ath10k_ce_alloc_src_ring_64(struct ath10k *ar, unsigned int ce_id,
+ const struct ce_attr *attr)
+{
+ struct ath10k_ce_ring *src_ring;
+ u32 nentries = attr->src_nentries;
+ dma_addr_t base_addr;
+ int ret;
+
+ nentries = roundup_pow_of_two(nentries);
+
+ src_ring = kzalloc(sizeof(*src_ring) +
+ (nentries *
+ sizeof(*src_ring->per_transfer_context)),
+ GFP_KERNEL);
+ if (!src_ring)
+ return ERR_PTR(-ENOMEM);
+
+ src_ring->nentries = nentries;
+ src_ring->nentries_mask = nentries - 1;
+
+ /* Legacy platforms that do not support cache
+ * coherent DMA are unsupported
+ */
+ src_ring->base_addr_owner_space_unaligned =
+ dma_alloc_coherent(ar->dev,
+ (nentries * sizeof(struct ce_desc_64) +
+ CE_DESC_RING_ALIGN),
+ &base_addr, GFP_KERNEL);
+ if (!src_ring->base_addr_owner_space_unaligned) {
+ kfree(src_ring);
+ return ERR_PTR(-ENOMEM);
+ }
+
+ src_ring->base_addr_ce_space_unaligned = base_addr;
+
+ src_ring->base_addr_owner_space =
+ PTR_ALIGN(src_ring->base_addr_owner_space_unaligned,
+ CE_DESC_RING_ALIGN);
+ src_ring->base_addr_ce_space =
+ ALIGN(src_ring->base_addr_ce_space_unaligned,
+ CE_DESC_RING_ALIGN);
+
+ if (ar->hw_params.shadow_reg_support) {
+ ret = ath10k_ce_alloc_shadow_base(ar, src_ring, nentries);
+ if (ret) {
+ dma_free_coherent(ar->dev,
+ (nentries * sizeof(struct ce_desc_64) +
+ CE_DESC_RING_ALIGN),
+ src_ring->base_addr_owner_space_unaligned,
+ base_addr);
+ kfree(src_ring);
+ return ERR_PTR(ret);
+ }
+ }
+
+ return src_ring;
+}
+
+static struct ath10k_ce_ring *
+ath10k_ce_alloc_dest_ring(struct ath10k *ar, unsigned int ce_id,
+ const struct ce_attr *attr)
+{
+ struct ath10k_ce_ring *dest_ring;
+ u32 nentries;
+ dma_addr_t base_addr;
+
+ nentries = roundup_pow_of_two(attr->dest_nentries);
+
+ dest_ring = kzalloc(sizeof(*dest_ring) +
+ (nentries *
+ sizeof(*dest_ring->per_transfer_context)),
+ GFP_KERNEL);
+ if (dest_ring == NULL)
+ return ERR_PTR(-ENOMEM);
+
+ dest_ring->nentries = nentries;
+ dest_ring->nentries_mask = nentries - 1;
+
+ /*
+ * Legacy platforms that do not support cache
+ * coherent DMA are unsupported
+ */
+ dest_ring->base_addr_owner_space_unaligned =
+ dma_zalloc_coherent(ar->dev,
+ (nentries * sizeof(struct ce_desc) +
+ CE_DESC_RING_ALIGN),
+ &base_addr, GFP_KERNEL);
+ if (!dest_ring->base_addr_owner_space_unaligned) {
+ kfree(dest_ring);
+ return ERR_PTR(-ENOMEM);
+ }
+
+ dest_ring->base_addr_ce_space_unaligned = base_addr;
+
+ dest_ring->base_addr_owner_space =
+ PTR_ALIGN(dest_ring->base_addr_owner_space_unaligned,
+ CE_DESC_RING_ALIGN);
+ dest_ring->base_addr_ce_space =
+ ALIGN(dest_ring->base_addr_ce_space_unaligned,
+ CE_DESC_RING_ALIGN);
+
+ return dest_ring;
+}
+
+static struct ath10k_ce_ring *
+ath10k_ce_alloc_dest_ring_64(struct ath10k *ar, unsigned int ce_id,
+ const struct ce_attr *attr)
+{
+ struct ath10k_ce_ring *dest_ring;
+ u32 nentries;
+ dma_addr_t base_addr;
+
+ nentries = roundup_pow_of_two(attr->dest_nentries);
+
+ dest_ring = kzalloc(sizeof(*dest_ring) +
+ (nentries *
+ sizeof(*dest_ring->per_transfer_context)),
+ GFP_KERNEL);
+ if (!dest_ring)
+ return ERR_PTR(-ENOMEM);
+
+ dest_ring->nentries = nentries;
+ dest_ring->nentries_mask = nentries - 1;
+
+ /* Legacy platforms that do not support cache
+ * coherent DMA are unsupported
+ */
+ dest_ring->base_addr_owner_space_unaligned =
+ dma_alloc_coherent(ar->dev,
+ (nentries * sizeof(struct ce_desc_64) +
+ CE_DESC_RING_ALIGN),
+ &base_addr, GFP_KERNEL);
+ if (!dest_ring->base_addr_owner_space_unaligned) {
+ kfree(dest_ring);
+ return ERR_PTR(-ENOMEM);
+ }
+
+ dest_ring->base_addr_ce_space_unaligned = base_addr;
+
+ /* Zero-initialize the memory to prevent garbage data from
+ * crashing the system during firmware download.
+ */
+ memset(dest_ring->base_addr_owner_space_unaligned, 0,
+ nentries * sizeof(struct ce_desc_64) + CE_DESC_RING_ALIGN);
+
+ dest_ring->base_addr_owner_space =
+ PTR_ALIGN(dest_ring->base_addr_owner_space_unaligned,
+ CE_DESC_RING_ALIGN);
+ dest_ring->base_addr_ce_space =
+ ALIGN(dest_ring->base_addr_ce_space_unaligned,
+ CE_DESC_RING_ALIGN);
+
+ return dest_ring;
+}
+
+/*
+ * Initialize a Copy Engine based on caller-supplied attributes.
+ * This may be called once to initialize both source and destination
+ * rings or it may be called twice for separate source and destination
+ * initialization. It may be that only one side or the other is
+ * initialized by software/firmware.
+ */
+int ath10k_ce_init_pipe(struct ath10k *ar, unsigned int ce_id,
+ const struct ce_attr *attr)
+{
+ int ret;
+
+ if (attr->src_nentries) {
+ ret = ath10k_ce_init_src_ring(ar, ce_id, attr);
+ if (ret) {
+ ath10k_err(ar, "Failed to initialize CE src ring for ID: %d (%d)\n",
+ ce_id, ret);
+ return ret;
+ }
+ }
+
+ if (attr->dest_nentries) {
+ ret = ath10k_ce_init_dest_ring(ar, ce_id, attr);
+ if (ret) {
+ ath10k_err(ar, "Failed to initialize CE dest ring for ID: %d (%d)\n",
+ ce_id, ret);
+ return ret;
+ }
+ }
+
+ return 0;
+}
+EXPORT_SYMBOL(ath10k_ce_init_pipe);
+
+static void ath10k_ce_deinit_src_ring(struct ath10k *ar, unsigned int ce_id)
+{
+ u32 ctrl_addr = ath10k_ce_base_address(ar, ce_id);
+
+ ath10k_ce_src_ring_base_addr_set(ar, ctrl_addr, 0);
+ ath10k_ce_src_ring_size_set(ar, ctrl_addr, 0);
+ ath10k_ce_src_ring_dmax_set(ar, ctrl_addr, 0);
+ ath10k_ce_src_ring_highmark_set(ar, ctrl_addr, 0);
+}
+
+static void ath10k_ce_deinit_dest_ring(struct ath10k *ar, unsigned int ce_id)
+{
+ u32 ctrl_addr = ath10k_ce_base_address(ar, ce_id);
+
+ ath10k_ce_dest_ring_base_addr_set(ar, ctrl_addr, 0);
+ ath10k_ce_dest_ring_size_set(ar, ctrl_addr, 0);
+ ath10k_ce_dest_ring_highmark_set(ar, ctrl_addr, 0);
+}
+
+void ath10k_ce_deinit_pipe(struct ath10k *ar, unsigned int ce_id)
+{
+ ath10k_ce_deinit_src_ring(ar, ce_id);
+ ath10k_ce_deinit_dest_ring(ar, ce_id);
+}
+EXPORT_SYMBOL(ath10k_ce_deinit_pipe);
+
+static void _ath10k_ce_free_pipe(struct ath10k *ar, int ce_id)
+{
+ struct ath10k_ce *ce = ath10k_ce_priv(ar);
+ struct ath10k_ce_pipe *ce_state = &ce->ce_states[ce_id];
+
+ if (ce_state->src_ring) {
+ if (ar->hw_params.shadow_reg_support)
+ kfree(ce_state->src_ring->shadow_base_unaligned);
+ dma_free_coherent(ar->dev,
+ (ce_state->src_ring->nentries *
+ sizeof(struct ce_desc) +
+ CE_DESC_RING_ALIGN),
+ ce_state->src_ring->base_addr_owner_space,
+ ce_state->src_ring->base_addr_ce_space);
+ kfree(ce_state->src_ring);
+ }
+
+ if (ce_state->dest_ring) {
+ dma_free_coherent(ar->dev,
+ (ce_state->dest_ring->nentries *
+ sizeof(struct ce_desc) +
+ CE_DESC_RING_ALIGN),
+ ce_state->dest_ring->base_addr_owner_space,
+ ce_state->dest_ring->base_addr_ce_space);
+ kfree(ce_state->dest_ring);
+ }
+
+ ce_state->src_ring = NULL;
+ ce_state->dest_ring = NULL;
+}
+
+static void _ath10k_ce_free_pipe_64(struct ath10k *ar, int ce_id)
+{
+ struct ath10k_ce *ce = ath10k_ce_priv(ar);
+ struct ath10k_ce_pipe *ce_state = &ce->ce_states[ce_id];
+
+ if (ce_state->src_ring) {
+ if (ar->hw_params.shadow_reg_support)
+ kfree(ce_state->src_ring->shadow_base_unaligned);
+ dma_free_coherent(ar->dev,
+ (ce_state->src_ring->nentries *
+ sizeof(struct ce_desc_64) +
+ CE_DESC_RING_ALIGN),
+ ce_state->src_ring->base_addr_owner_space,
+ ce_state->src_ring->base_addr_ce_space);
+ kfree(ce_state->src_ring);
+ }
+
+ if (ce_state->dest_ring) {
+ dma_free_coherent(ar->dev,
+ (ce_state->dest_ring->nentries *
+ sizeof(struct ce_desc_64) +
+ CE_DESC_RING_ALIGN),
+ ce_state->dest_ring->base_addr_owner_space,
+ ce_state->dest_ring->base_addr_ce_space);
+ kfree(ce_state->dest_ring);
+ }
+
+ ce_state->src_ring = NULL;
+ ce_state->dest_ring = NULL;
+}
+
+void ath10k_ce_free_pipe(struct ath10k *ar, int ce_id)
+{
+ struct ath10k_ce *ce = ath10k_ce_priv(ar);
+ struct ath10k_ce_pipe *ce_state = &ce->ce_states[ce_id];
+
+ ce_state->ops->ce_free_pipe(ar, ce_id);
+}
+EXPORT_SYMBOL(ath10k_ce_free_pipe);
+
+void ath10k_ce_dump_registers(struct ath10k *ar,
+ struct ath10k_fw_crash_data *crash_data)
+{
+ struct ath10k_ce *ce = ath10k_ce_priv(ar);
+ struct ath10k_ce_crash_data ce_data;
+ u32 addr, id;
+
+ lockdep_assert_held(&ar->data_lock);
+
+ ath10k_err(ar, "Copy Engine register dump:\n");
+
+ spin_lock_bh(&ce->ce_lock);
+ for (id = 0; id < CE_COUNT; id++) {
+ addr = ath10k_ce_base_address(ar, id);
+ ce_data.base_addr = cpu_to_le32(addr);
+
+ ce_data.src_wr_idx =
+ cpu_to_le32(ath10k_ce_src_ring_write_index_get(ar, addr));
+ ce_data.src_r_idx =
+ cpu_to_le32(ath10k_ce_src_ring_read_index_get(ar, addr));
+ ce_data.dst_wr_idx =
+ cpu_to_le32(ath10k_ce_dest_ring_write_index_get(ar, addr));
+ ce_data.dst_r_idx =
+ cpu_to_le32(ath10k_ce_dest_ring_read_index_get(ar, addr));
+
+ if (crash_data)
+ crash_data->ce_crash_data[id] = ce_data;
+
+ ath10k_err(ar, "[%02d]: 0x%08x %3u %3u %3u %3u", id,
+ le32_to_cpu(ce_data.base_addr),
+ le32_to_cpu(ce_data.src_wr_idx),
+ le32_to_cpu(ce_data.src_r_idx),
+ le32_to_cpu(ce_data.dst_wr_idx),
+ le32_to_cpu(ce_data.dst_r_idx));
+ }
+
+ spin_unlock_bh(&ce->ce_lock);
+}
+EXPORT_SYMBOL(ath10k_ce_dump_registers);
+
+static const struct ath10k_ce_ops ce_ops = {
+ .ce_alloc_src_ring = ath10k_ce_alloc_src_ring,
+ .ce_alloc_dst_ring = ath10k_ce_alloc_dest_ring,
+ .ce_rx_post_buf = __ath10k_ce_rx_post_buf,
+ .ce_completed_recv_next_nolock = _ath10k_ce_completed_recv_next_nolock,
+ .ce_revoke_recv_next = _ath10k_ce_revoke_recv_next,
+ .ce_extract_desc_data = ath10k_ce_extract_desc_data,
+ .ce_free_pipe = _ath10k_ce_free_pipe,
+ .ce_send_nolock = _ath10k_ce_send_nolock,
+};
+
+static const struct ath10k_ce_ops ce_64_ops = {
+ .ce_alloc_src_ring = ath10k_ce_alloc_src_ring_64,
+ .ce_alloc_dst_ring = ath10k_ce_alloc_dest_ring_64,
+ .ce_rx_post_buf = __ath10k_ce_rx_post_buf_64,
+ .ce_completed_recv_next_nolock =
+ _ath10k_ce_completed_recv_next_nolock_64,
+ .ce_revoke_recv_next = _ath10k_ce_revoke_recv_next_64,
+ .ce_extract_desc_data = ath10k_ce_extract_desc_data_64,
+ .ce_free_pipe = _ath10k_ce_free_pipe_64,
+ .ce_send_nolock = _ath10k_ce_send_nolock_64,
+};
+
+static void ath10k_ce_set_ops(struct ath10k *ar,
+ struct ath10k_ce_pipe *ce_state)
+{
+ switch (ar->hw_rev) {
+ case ATH10K_HW_WCN3990:
+ ce_state->ops = &ce_64_ops;
+ break;
+ default:
+ ce_state->ops = &ce_ops;
+ break;
+ }
+}
+
+int ath10k_ce_alloc_pipe(struct ath10k *ar, int ce_id,
+ const struct ce_attr *attr)
+{
+ struct ath10k_ce *ce = ath10k_ce_priv(ar);
+ struct ath10k_ce_pipe *ce_state = &ce->ce_states[ce_id];
+ int ret;
+
+ ath10k_ce_set_ops(ar, ce_state);
+ /* Make sure there are enough CE ring buffer entries for HTT TX to
+ * avoid additional TX locking checks.
+ *
+ * For lack of a better place, do the check here.
+ */
+ BUILD_BUG_ON(2 * TARGET_NUM_MSDU_DESC >
+ (CE_HTT_H2T_MSG_SRC_NENTRIES - 1));
+ BUILD_BUG_ON(2 * TARGET_10_4_NUM_MSDU_DESC_PFC >
+ (CE_HTT_H2T_MSG_SRC_NENTRIES - 1));
+ BUILD_BUG_ON(2 * TARGET_TLV_NUM_MSDU_DESC >
+ (CE_HTT_H2T_MSG_SRC_NENTRIES - 1));
+
+ ce_state->ar = ar;
+ ce_state->id = ce_id;
+ ce_state->ctrl_addr = ath10k_ce_base_address(ar, ce_id);
+ ce_state->attr_flags = attr->flags;
+ ce_state->src_sz_max = attr->src_sz_max;
+
+ if (attr->src_nentries)
+ ce_state->send_cb = attr->send_cb;
+
+ if (attr->dest_nentries)
+ ce_state->recv_cb = attr->recv_cb;
+
+ if (attr->src_nentries) {
+ ce_state->src_ring =
+ ce_state->ops->ce_alloc_src_ring(ar, ce_id, attr);
+ if (IS_ERR(ce_state->src_ring)) {
+ ret = PTR_ERR(ce_state->src_ring);
+ ath10k_err(ar, "failed to alloc CE src ring %d: %d\n",
+ ce_id, ret);
+ ce_state->src_ring = NULL;
+ return ret;
+ }
+ }
+
+ if (attr->dest_nentries) {
+ ce_state->dest_ring = ce_state->ops->ce_alloc_dst_ring(ar,
+ ce_id,
+ attr);
+ if (IS_ERR(ce_state->dest_ring)) {
+ ret = PTR_ERR(ce_state->dest_ring);
+ ath10k_err(ar, "failed to alloc CE dest ring %d: %d\n",
+ ce_id, ret);
+ ce_state->dest_ring = NULL;
+ return ret;
+ }
+ }
+
+ return 0;
+}
+EXPORT_SYMBOL(ath10k_ce_alloc_pipe);
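+
+/*
+ * Typical pipe lifecycle, pieced together from the exported API (a
+ * sketch, not a verbatim copy of any bus backend):
+ *
+ * ath10k_ce_alloc_pipe(ar, id, attr); allocate rings
+ * ath10k_ce_init_pipe(ar, id, attr); program ring registers
+ * ... ath10k_ce_send() / ath10k_ce_rx_post_buf() ...
+ * ath10k_ce_deinit_pipe(ar, id); clear ring registers
+ * ath10k_ce_free_pipe(ar, id); release ring memory
+ */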
+
+void ath10k_ce_alloc_rri(struct ath10k *ar)
+{
+ int i;
+ u32 value;
+ u32 ctrl1_regs;
+ u32 ce_base_addr;
+ struct ath10k_ce *ce = ath10k_ce_priv(ar);
+
+ ce->vaddr_rri = dma_alloc_coherent(ar->dev,
+ (CE_COUNT * sizeof(u32)),
+ &ce->paddr_rri, GFP_KERNEL);
+
+ if (!ce->vaddr_rri)
+ return;
+
+ ath10k_ce_write32(ar, ar->hw_ce_regs->ce_rri_low,
+ lower_32_bits(ce->paddr_rri));
+ ath10k_ce_write32(ar, ar->hw_ce_regs->ce_rri_high,
+ (upper_32_bits(ce->paddr_rri) &
+ CE_DESC_FLAGS_GET_MASK));
+
+ for (i = 0; i < CE_COUNT; i++) {
+ ctrl1_regs = ar->hw_ce_regs->ctrl1_regs->addr;
+ ce_base_addr = ath10k_ce_base_address(ar, i);
+ value = ath10k_ce_read32(ar, ce_base_addr + ctrl1_regs);
+ value |= ar->hw_ce_regs->upd->mask;
+ ath10k_ce_write32(ar, ce_base_addr + ctrl1_regs, value);
+ }
+
+ memset(ce->vaddr_rri, 0, CE_COUNT * sizeof(u32));
+}
+EXPORT_SYMBOL(ath10k_ce_alloc_rri);
+
+void ath10k_ce_free_rri(struct ath10k *ar)
+{
+ struct ath10k_ce *ce = ath10k_ce_priv(ar);
+
+ dma_free_coherent(ar->dev, (CE_COUNT * sizeof(u32)),
+ ce->vaddr_rri,
+ ce->paddr_rri);
+}
+EXPORT_SYMBOL(ath10k_ce_free_rri);
diff --git a/drivers/net/wireless/ath/ath10k/ce.h b/drivers/net/wireless/ath/ath10k/ce.h
new file mode 100644
index 000000000..8088f7a66
--- /dev/null
+++ b/drivers/net/wireless/ath/ath10k/ce.h
@@ -0,0 +1,428 @@
+/*
+ * Copyright (c) 2005-2011 Atheros Communications Inc.
+ * Copyright (c) 2011-2017 Qualcomm Atheros, Inc.
+ * Copyright (c) 2018 The Linux Foundation. All rights reserved.
+ *
+ * Permission to use, copy, modify, and/or distribute this software for any
+ * purpose with or without fee is hereby granted, provided that the above
+ * copyright notice and this permission notice appear in all copies.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
+ * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
+ * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+ * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+ * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
+ * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+ */
+
+#ifndef _CE_H_
+#define _CE_H_
+
+#include "hif.h"
+
+#define CE_HTT_H2T_MSG_SRC_NENTRIES 8192
+
+/* Descriptor rings must be aligned to this boundary */
+#define CE_DESC_RING_ALIGN 8
+#define CE_SEND_FLAG_GATHER 0x00010000
+
+/*
+ * Copy Engine support: low-level Target-side Copy Engine API.
+ * This is a hardware access layer used by code that understands
+ * how to use copy engines.
+ */
+
+struct ath10k_ce_pipe;
+
+#define CE_DESC_FLAGS_GATHER (1 << 0)
+#define CE_DESC_FLAGS_BYTE_SWAP (1 << 1)
+#define CE_WCN3990_DESC_FLAGS_GATHER BIT(31)
+
+#define CE_DESC_FLAGS_GET_MASK GENMASK(4, 0)
+#define CE_DESC_37BIT_ADDR_MASK GENMASK_ULL(37, 0)
+
+/* Following desc flags are used in QCA99X0 */
+#define CE_DESC_FLAGS_HOST_INT_DIS (1 << 2)
+#define CE_DESC_FLAGS_TGT_INT_DIS (1 << 3)
+
+#define CE_DESC_FLAGS_META_DATA_MASK ar->hw_values->ce_desc_meta_data_mask
+#define CE_DESC_FLAGS_META_DATA_LSB ar->hw_values->ce_desc_meta_data_lsb
+
+#define CE_DDR_RRI_MASK GENMASK(15, 0)
+#define CE_DDR_DRRI_SHIFT 16
+
+struct ce_desc {
+ __le32 addr;
+ __le16 nbytes;
+ __le16 flags; /* %CE_DESC_FLAGS_ */
+};
+
+struct ce_desc_64 {
+ __le64 addr;
+ __le16 nbytes; /* length in register map */
+ __le16 flags; /* fw_metadata_high */
+ __le32 toeplitz_hash_result;
+};
+
+#define CE_DESC_SIZE sizeof(struct ce_desc)
+#define CE_DESC_SIZE_64 sizeof(struct ce_desc_64)
+
+struct ath10k_ce_ring {
+ /* Number of entries in this ring; must be power of 2 */
+ unsigned int nentries;
+ unsigned int nentries_mask;
+
+ /*
+ * For the dest ring, this is the next index software will
+ * process after data has been received into it.
+ *
+ * For the src ring, this is the last descriptor that was sent
+ * and whose completion has been processed by software.
+ *
+ * Regardless of src or dest ring, this is an invariant
+ * (modulo ring size):
+ * write index >= read index >= sw_index
+ */
+ unsigned int sw_index;
+ /* cached copy */
+ unsigned int write_index;
+ /*
+ * For src ring, this is the next index not yet processed by HW.
+ * This is a cached copy of the real HW index (read index), used
+ * for avoiding reading the HW index register more often than
+ * necessary.
+ * This extends the invariant:
+ * write index >= read index >= hw_index >= sw_index
+ *
+ * For dest ring, this is currently unused.
+ */
+ /* cached copy */
+ unsigned int hw_index;
+
+ /* Start of DMA-coherent area reserved for descriptors */
+ /* Host address space */
+ void *base_addr_owner_space_unaligned;
+ /* CE address space */
+ u32 base_addr_ce_space_unaligned;
+
+ /*
+ * Actual start of descriptors.
+ * Aligned to descriptor-size boundary.
+ * Points into reserved DMA-coherent area, above.
+ */
+ /* Host address space */
+ void *base_addr_owner_space;
+
+ /* CE address space */
+ u32 base_addr_ce_space;
+
+ char *shadow_base_unaligned;
+ struct ce_desc_64 *shadow_base;
+
+ /* keep last */
+ void *per_transfer_context[0];
+};
+
+struct ath10k_ce_pipe {
+ struct ath10k *ar;
+ unsigned int id;
+
+ unsigned int attr_flags;
+
+ u32 ctrl_addr;
+
+ void (*send_cb)(struct ath10k_ce_pipe *);
+ void (*recv_cb)(struct ath10k_ce_pipe *);
+
+ unsigned int src_sz_max;
+ struct ath10k_ce_ring *src_ring;
+ struct ath10k_ce_ring *dest_ring;
+ const struct ath10k_ce_ops *ops;
+};
+
+/* Copy Engine settable attributes */
+struct ce_attr;
+
+struct ath10k_bus_ops {
+ u32 (*read32)(struct ath10k *ar, u32 offset);
+ void (*write32)(struct ath10k *ar, u32 offset, u32 value);
+ int (*get_num_banks)(struct ath10k *ar);
+};
+
+static inline struct ath10k_ce *ath10k_ce_priv(struct ath10k *ar)
+{
+ return (struct ath10k_ce *)ar->ce_priv;
+}
+
+struct ath10k_ce {
+ /* protects CE info */
+ spinlock_t ce_lock;
+ const struct ath10k_bus_ops *bus_ops;
+ struct ath10k_ce_pipe ce_states[CE_COUNT_MAX];
+ u32 *vaddr_rri;
+ dma_addr_t paddr_rri;
+};
+
+/*==================Send====================*/
+
+/* ath10k_ce_send flags */
+#define CE_SEND_FLAG_BYTE_SWAP 1
+
+/*
+ * Queue a source buffer to be sent to an anonymous destination buffer.
+ * ce - which copy engine to use
+ * buffer - address of buffer
+ * nbytes - number of bytes to send
+ * transfer_id - arbitrary ID; reflected to destination
+ * flags - CE_SEND_FLAG_* values
+ * Returns 0 on success; otherwise an error status.
+ *
+ * Note: If no flags are specified, use CE's default data swap mode.
+ *
+ * Implementation note: pushes 1 buffer to Source ring
+ */
+int ath10k_ce_send(struct ath10k_ce_pipe *ce_state,
+ void *per_transfer_send_context,
+ dma_addr_t buffer,
+ unsigned int nbytes,
+ /* 14 bits */
+ unsigned int transfer_id,
+ unsigned int flags);
+
+int ath10k_ce_send_nolock(struct ath10k_ce_pipe *ce_state,
+ void *per_transfer_context,
+ dma_addr_t buffer,
+ unsigned int nbytes,
+ unsigned int transfer_id,
+ unsigned int flags);
+
+void __ath10k_ce_send_revert(struct ath10k_ce_pipe *pipe);
+
+int ath10k_ce_num_free_src_entries(struct ath10k_ce_pipe *pipe);
+
+/*==================Recv=======================*/
+
+int __ath10k_ce_rx_num_free_bufs(struct ath10k_ce_pipe *pipe);
+int ath10k_ce_rx_post_buf(struct ath10k_ce_pipe *pipe, void *ctx,
+ dma_addr_t paddr);
+void ath10k_ce_rx_update_write_idx(struct ath10k_ce_pipe *pipe, u32 nentries);
+
+/* recv flags */
+/* Data is byte-swapped */
+#define CE_RECV_FLAG_SWAPPED 1
+
+/*
+ * Supply data for the next completed unprocessed receive descriptor.
+ * Pops buffer from Dest ring.
+ */
+int ath10k_ce_completed_recv_next(struct ath10k_ce_pipe *ce_state,
+ void **per_transfer_contextp,
+ unsigned int *nbytesp);
+/*
+ * Supply data for the next completed unprocessed send descriptor.
+ * Pops 1 completed send buffer from Source ring.
+ */
+int ath10k_ce_completed_send_next(struct ath10k_ce_pipe *ce_state,
+ void **per_transfer_contextp);
+
+int ath10k_ce_completed_send_next_nolock(struct ath10k_ce_pipe *ce_state,
+ void **per_transfer_contextp);
+
+/*==================CE Engine Initialization=======================*/
+
+int ath10k_ce_init_pipe(struct ath10k *ar, unsigned int ce_id,
+ const struct ce_attr *attr);
+void ath10k_ce_deinit_pipe(struct ath10k *ar, unsigned int ce_id);
+int ath10k_ce_alloc_pipe(struct ath10k *ar, int ce_id,
+ const struct ce_attr *attr);
+void ath10k_ce_free_pipe(struct ath10k *ar, int ce_id);
+
+/*==================CE Engine Shutdown=======================*/
+/*
+ * Support clean shutdown by allowing the caller to revoke
+ * receive buffers. Target DMA must be stopped before using
+ * this API.
+ */
+int ath10k_ce_revoke_recv_next(struct ath10k_ce_pipe *ce_state,
+ void **per_transfer_contextp,
+ dma_addr_t *bufferp);
+
+int ath10k_ce_completed_recv_next_nolock(struct ath10k_ce_pipe *ce_state,
+ void **per_transfer_contextp,
+ unsigned int *nbytesp);
+
+/*
+ * Support clean shutdown by allowing the caller to cancel
+ * pending sends. Target DMA must be stopped before using
+ * this API.
+ */
+int ath10k_ce_cancel_send_next(struct ath10k_ce_pipe *ce_state,
+ void **per_transfer_contextp,
+ dma_addr_t *bufferp,
+ unsigned int *nbytesp,
+ unsigned int *transfer_idp);
+
+/*==================CE Interrupt Handlers====================*/
+void ath10k_ce_per_engine_service_any(struct ath10k *ar);
+void ath10k_ce_per_engine_service(struct ath10k *ar, unsigned int ce_id);
+int ath10k_ce_disable_interrupts(struct ath10k *ar);
+void ath10k_ce_enable_interrupts(struct ath10k *ar);
+void ath10k_ce_dump_registers(struct ath10k *ar,
+ struct ath10k_fw_crash_data *crash_data);
+void ath10k_ce_alloc_rri(struct ath10k *ar);
+void ath10k_ce_free_rri(struct ath10k *ar);
+
+/* ce_attr.flags values */
+/* Use non-snooping PCIe accesses? */
+#define CE_ATTR_NO_SNOOP 1
+
+/* Byte swap data words */
+#define CE_ATTR_BYTE_SWAP_DATA 2
+
+/* Swizzle descriptors? */
+#define CE_ATTR_SWIZZLE_DESCRIPTORS 4
+
+/* no interrupt on copy completion */
+#define CE_ATTR_DIS_INTR 8
+
+/* Attributes of an instance of a Copy Engine */
+struct ce_attr {
+ /* CE_ATTR_* values */
+ unsigned int flags;
+
+ /* #entries in source ring - Must be a power of 2 */
+ unsigned int src_nentries;
+
+ /*
+ * Max source send size for this CE.
+ * This is also the minimum size of a destination buffer.
+ */
+ unsigned int src_sz_max;
+
+ /* #entries in destination ring - Must be a power of 2 */
+ unsigned int dest_nentries;
+
+ void (*send_cb)(struct ath10k_ce_pipe *);
+ void (*recv_cb)(struct ath10k_ce_pipe *);
+};
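+
+/*
+ * Illustrative initializer (values are hypothetical, not taken from a real
+ * pipe table): a host->target control pipe with a 16-entry source ring,
+ * 256-byte transfers and no destination ring could be described as:
+ *
+ *	static const struct ce_attr ctrl_attr = {
+ *		.flags = CE_ATTR_FLAGS,
+ *		.src_nentries = 16,
+ *		.src_sz_max = 256,
+ *		.dest_nentries = 0,
+ *	};
+ */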
+
+struct ath10k_ce_ops {
+ struct ath10k_ce_ring *(*ce_alloc_src_ring)(struct ath10k *ar,
+ u32 ce_id,
+ const struct ce_attr *attr);
+ struct ath10k_ce_ring *(*ce_alloc_dst_ring)(struct ath10k *ar,
+ u32 ce_id,
+ const struct ce_attr *attr);
+ int (*ce_rx_post_buf)(struct ath10k_ce_pipe *pipe, void *ctx,
+ dma_addr_t paddr);
+ int (*ce_completed_recv_next_nolock)(struct ath10k_ce_pipe *ce_state,
+ void **per_transfer_contextp,
+ u32 *nbytesp);
+ int (*ce_revoke_recv_next)(struct ath10k_ce_pipe *ce_state,
+ void **per_transfer_contextp,
+				   dma_addr_t *bufferp);
+ void (*ce_extract_desc_data)(struct ath10k *ar,
+ struct ath10k_ce_ring *src_ring,
+ u32 sw_index, dma_addr_t *bufferp,
+ u32 *nbytesp, u32 *transfer_idp);
+ void (*ce_free_pipe)(struct ath10k *ar, int ce_id);
+ int (*ce_send_nolock)(struct ath10k_ce_pipe *pipe,
+ void *per_transfer_context,
+ dma_addr_t buffer, u32 nbytes,
+ u32 transfer_id, u32 flags);
+};
+
+static inline u32 ath10k_ce_base_address(struct ath10k *ar, unsigned int ce_id)
+{
+ return CE0_BASE_ADDRESS + (CE1_BASE_ADDRESS - CE0_BASE_ADDRESS) * ce_id;
+}
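+
+/* For example, ce_id 2 maps to CE0_BASE_ADDRESS plus twice the per-engine
+ * register stride (CE1_BASE_ADDRESS - CE0_BASE_ADDRESS).
+ */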
+
+#define COPY_ENGINE_ID(COPY_ENGINE_BASE_ADDRESS) (((COPY_ENGINE_BASE_ADDRESS) \
+ - CE0_BASE_ADDRESS) / (CE1_BASE_ADDRESS - CE0_BASE_ADDRESS))
+
+#define CE_SRC_RING_TO_DESC(baddr, idx) \
+ (&(((struct ce_desc *)baddr)[idx]))
+
+#define CE_DEST_RING_TO_DESC(baddr, idx) \
+ (&(((struct ce_desc *)baddr)[idx]))
+
+#define CE_SRC_RING_TO_DESC_64(baddr, idx) \
+ (&(((struct ce_desc_64 *)baddr)[idx]))
+
+#define CE_DEST_RING_TO_DESC_64(baddr, idx) \
+ (&(((struct ce_desc_64 *)baddr)[idx]))
+
+/* Ring arithmetic (modulo the number of ring entries, which is a power of 2). */
+#define CE_RING_DELTA(nentries_mask, fromidx, toidx) \
+ (((int)(toidx) - (int)(fromidx)) & (nentries_mask))
+
+#define CE_RING_IDX_INCR(nentries_mask, idx) (((idx) + 1) & (nentries_mask))
+#define CE_RING_IDX_ADD(nentries_mask, idx, num) \
+ (((idx) + (num)) & (nentries_mask))
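+
+/* Worked example: with an 8-entry ring, nentries_mask is 0x7, so
+ * CE_RING_DELTA(0x7, 6, 2) == 4 (index 2 is four slots past index 6
+ * after wraparound) and CE_RING_IDX_INCR(0x7, 7) == 0.
+ */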
+
+#define CE_WRAPPER_INTERRUPT_SUMMARY_HOST_MSI_LSB \
+ ar->regs->ce_wrap_intr_sum_host_msi_lsb
+#define CE_WRAPPER_INTERRUPT_SUMMARY_HOST_MSI_MASK \
+ ar->regs->ce_wrap_intr_sum_host_msi_mask
+#define CE_WRAPPER_INTERRUPT_SUMMARY_HOST_MSI_GET(x) \
+ (((x) & CE_WRAPPER_INTERRUPT_SUMMARY_HOST_MSI_MASK) >> \
+ CE_WRAPPER_INTERRUPT_SUMMARY_HOST_MSI_LSB)
+#define CE_WRAPPER_INTERRUPT_SUMMARY_ADDRESS 0x0000
+#define CE_INTERRUPT_SUMMARY (GENMASK(CE_COUNT_MAX - 1, 0))
+
+static inline u32 ath10k_ce_interrupt_summary(struct ath10k *ar)
+{
+ struct ath10k_ce *ce = ath10k_ce_priv(ar);
+
+ if (!ar->hw_params.per_ce_irq)
+ return CE_WRAPPER_INTERRUPT_SUMMARY_HOST_MSI_GET(
+ ce->bus_ops->read32((ar), CE_WRAPPER_BASE_ADDRESS +
+ CE_WRAPPER_INTERRUPT_SUMMARY_ADDRESS));
+ else
+ return CE_INTERRUPT_SUMMARY;
+}
+
+/* Host software's Copy Engine configuration. */
+#define CE_ATTR_FLAGS 0
+
+/*
+ * Configuration information for a Copy Engine pipe.
+ * Passed from Host to Target during startup (one per CE).
+ *
+ * NOTE: Structure is shared between Host software and Target firmware!
+ */
+struct ce_pipe_config {
+ __le32 pipenum;
+ __le32 pipedir;
+ __le32 nentries;
+ __le32 nbytes_max;
+ __le32 flags;
+ __le32 reserved;
+};
+
+/*
+ * Directions for interconnect pipe configuration.
+ * These definitions may be used during configuration and are shared
+ * between Host and Target.
+ *
+ * Pipe Directions are relative to the Host, so PIPEDIR_IN means
+ * "coming IN over air through Target to Host" as with a WiFi Rx operation.
+ * Conversely, PIPEDIR_OUT means "going OUT from Host through Target over air"
+ * as with a WiFi Tx operation. This is somewhat awkward for the "middle-man"
+ * Target since things that are "PIPEDIR_OUT" are coming IN to the Target
+ * over the interconnect.
+ */
+#define PIPEDIR_NONE 0
+#define PIPEDIR_IN 1 /* Target-->Host, WiFi Rx direction */
+#define PIPEDIR_OUT 2 /* Host->Target, WiFi Tx direction */
+#define PIPEDIR_INOUT 3 /* bidirectional */
+
+/* Establish a mapping between a service/direction and a pipe. */
+struct service_to_pipe {
+ __le32 service_id;
+ __le32 pipedir;
+ __le32 pipenum;
+};
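+
+/*
+ * Illustrative entry (the real tables live in the bus-specific code,
+ * e.g. pci.c): mapping the WMI control service to pipe 3 in the
+ * host->target direction might look like:
+ *
+ *	{
+ *		__cpu_to_le32(ATH10K_HTC_SVC_ID_WMI_CONTROL),
+ *		__cpu_to_le32(PIPEDIR_OUT),
+ *		__cpu_to_le32(3),
+ *	},
+ */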
+
+#endif /* _CE_H_ */
diff --git a/drivers/net/wireless/ath/ath10k/core.c b/drivers/net/wireless/ath/ath10k/core.c
new file mode 100644
index 000000000..436eac342
--- /dev/null
+++ b/drivers/net/wireless/ath/ath10k/core.c
@@ -0,0 +1,2919 @@
+/*
+ * Copyright (c) 2005-2011 Atheros Communications Inc.
+ * Copyright (c) 2011-2017 Qualcomm Atheros, Inc.
+ * Copyright (c) 2018, The Linux Foundation. All rights reserved.
+ *
+ * Permission to use, copy, modify, and/or distribute this software for any
+ * purpose with or without fee is hereby granted, provided that the above
+ * copyright notice and this permission notice appear in all copies.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
+ * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
+ * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+ * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+ * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
+ * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+ */
+
+#include <linux/module.h>
+#include <linux/firmware.h>
+#include <linux/of.h>
+#include <linux/dmi.h>
+#include <linux/ctype.h>
+#include <asm/byteorder.h>
+
+#include "core.h"
+#include "mac.h"
+#include "htc.h"
+#include "hif.h"
+#include "wmi.h"
+#include "bmi.h"
+#include "debug.h"
+#include "htt.h"
+#include "testmode.h"
+#include "wmi-ops.h"
+#include "coredump.h"
+
+unsigned int ath10k_debug_mask;
+static unsigned int ath10k_cryptmode_param;
+static bool uart_print;
+static bool skip_otp;
+static bool rawmode;
+
+unsigned long ath10k_coredump_mask = BIT(ATH10K_FW_CRASH_DUMP_REGISTERS) |
+ BIT(ATH10K_FW_CRASH_DUMP_CE_DATA);
+
+/* FIXME: most of these should be readonly */
+module_param_named(debug_mask, ath10k_debug_mask, uint, 0644);
+module_param_named(cryptmode, ath10k_cryptmode_param, uint, 0644);
+module_param(uart_print, bool, 0644);
+module_param(skip_otp, bool, 0644);
+module_param(rawmode, bool, 0644);
+module_param_named(coredump_mask, ath10k_coredump_mask, ulong, 0444);
+
+MODULE_PARM_DESC(debug_mask, "Debugging mask");
+MODULE_PARM_DESC(uart_print, "UART target debugging");
+MODULE_PARM_DESC(skip_otp, "Skip otp failure for calibration in testmode");
+MODULE_PARM_DESC(cryptmode, "Crypto mode: 0-hardware, 1-software");
+MODULE_PARM_DESC(rawmode, "Use raw 802.11 frame datapath");
+MODULE_PARM_DESC(coredump_mask, "Bitfield of what to include in firmware crash file");
+
+static const struct ath10k_hw_params ath10k_hw_params_list[] = {
+ {
+ .id = QCA988X_HW_2_0_VERSION,
+ .dev_id = QCA988X_2_0_DEVICE_ID,
+ .name = "qca988x hw2.0",
+ .patch_load_addr = QCA988X_HW_2_0_PATCH_LOAD_ADDR,
+ .uart_pin = 7,
+ .cc_wraparound_type = ATH10K_HW_CC_WRAP_SHIFTED_ALL,
+ .otp_exe_param = 0,
+ .channel_counters_freq_hz = 88000,
+ .max_probe_resp_desc_thres = 0,
+ .cal_data_len = 2116,
+ .fw = {
+ .dir = QCA988X_HW_2_0_FW_DIR,
+ .board = QCA988X_HW_2_0_BOARD_DATA_FILE,
+ .board_size = QCA988X_BOARD_DATA_SZ,
+ .board_ext_size = QCA988X_BOARD_EXT_DATA_SZ,
+ },
+ .hw_ops = &qca988x_ops,
+ .decap_align_bytes = 4,
+ .spectral_bin_discard = 0,
+ .spectral_bin_offset = 0,
+ .vht160_mcs_rx_highest = 0,
+ .vht160_mcs_tx_highest = 0,
+ .n_cipher_suites = 8,
+ .num_peers = TARGET_TLV_NUM_PEERS,
+ .ast_skid_limit = 0x10,
+ .num_wds_entries = 0x20,
+ .target_64bit = false,
+		.rx_ring_fill_level = HTT_RX_RING_FILL_LEVEL,
+		.per_ce_irq = false,
+		.shadow_reg_support = false,
+ .rri_on_ddr = false,
+ .hw_filter_reset_required = true,
+ },
+ {
+ .id = QCA988X_HW_2_0_VERSION,
+ .dev_id = QCA988X_2_0_DEVICE_ID_UBNT,
+ .name = "qca988x hw2.0 ubiquiti",
+ .patch_load_addr = QCA988X_HW_2_0_PATCH_LOAD_ADDR,
+ .uart_pin = 7,
+ .cc_wraparound_type = ATH10K_HW_CC_WRAP_SHIFTED_ALL,
+ .otp_exe_param = 0,
+ .channel_counters_freq_hz = 88000,
+ .max_probe_resp_desc_thres = 0,
+ .cal_data_len = 2116,
+ .fw = {
+ .dir = QCA988X_HW_2_0_FW_DIR,
+ .board = QCA988X_HW_2_0_BOARD_DATA_FILE,
+ .board_size = QCA988X_BOARD_DATA_SZ,
+ .board_ext_size = QCA988X_BOARD_EXT_DATA_SZ,
+ },
+ .hw_ops = &qca988x_ops,
+ .decap_align_bytes = 4,
+ .spectral_bin_discard = 0,
+ .spectral_bin_offset = 0,
+ .vht160_mcs_rx_highest = 0,
+ .vht160_mcs_tx_highest = 0,
+ .n_cipher_suites = 8,
+ .num_peers = TARGET_TLV_NUM_PEERS,
+ .ast_skid_limit = 0x10,
+ .num_wds_entries = 0x20,
+ .target_64bit = false,
+ .rx_ring_fill_level = HTT_RX_RING_FILL_LEVEL,
+ .per_ce_irq = false,
+ .shadow_reg_support = false,
+ .rri_on_ddr = false,
+ .hw_filter_reset_required = true,
+ },
+ {
+ .id = QCA9887_HW_1_0_VERSION,
+ .dev_id = QCA9887_1_0_DEVICE_ID,
+ .name = "qca9887 hw1.0",
+ .patch_load_addr = QCA9887_HW_1_0_PATCH_LOAD_ADDR,
+ .uart_pin = 7,
+ .cc_wraparound_type = ATH10K_HW_CC_WRAP_SHIFTED_ALL,
+ .otp_exe_param = 0,
+ .channel_counters_freq_hz = 88000,
+ .max_probe_resp_desc_thres = 0,
+ .cal_data_len = 2116,
+ .fw = {
+ .dir = QCA9887_HW_1_0_FW_DIR,
+ .board = QCA9887_HW_1_0_BOARD_DATA_FILE,
+ .board_size = QCA9887_BOARD_DATA_SZ,
+ .board_ext_size = QCA9887_BOARD_EXT_DATA_SZ,
+ },
+ .hw_ops = &qca988x_ops,
+ .decap_align_bytes = 4,
+ .spectral_bin_discard = 0,
+ .spectral_bin_offset = 0,
+ .vht160_mcs_rx_highest = 0,
+ .vht160_mcs_tx_highest = 0,
+ .n_cipher_suites = 8,
+ .num_peers = TARGET_TLV_NUM_PEERS,
+ .ast_skid_limit = 0x10,
+ .num_wds_entries = 0x20,
+ .target_64bit = false,
+ .rx_ring_fill_level = HTT_RX_RING_FILL_LEVEL,
+ .per_ce_irq = false,
+ .shadow_reg_support = false,
+ .rri_on_ddr = false,
+ .hw_filter_reset_required = true,
+ },
+ {
+ .id = QCA6174_HW_2_1_VERSION,
+ .dev_id = QCA6164_2_1_DEVICE_ID,
+ .name = "qca6164 hw2.1",
+ .patch_load_addr = QCA6174_HW_2_1_PATCH_LOAD_ADDR,
+ .uart_pin = 6,
+ .otp_exe_param = 0,
+ .channel_counters_freq_hz = 88000,
+ .max_probe_resp_desc_thres = 0,
+ .cal_data_len = 8124,
+ .fw = {
+ .dir = QCA6174_HW_2_1_FW_DIR,
+ .board = QCA6174_HW_2_1_BOARD_DATA_FILE,
+ .board_size = QCA6174_BOARD_DATA_SZ,
+ .board_ext_size = QCA6174_BOARD_EXT_DATA_SZ,
+ },
+ .hw_ops = &qca988x_ops,
+ .decap_align_bytes = 4,
+ .spectral_bin_discard = 0,
+ .spectral_bin_offset = 0,
+ .vht160_mcs_rx_highest = 0,
+ .vht160_mcs_tx_highest = 0,
+ .n_cipher_suites = 8,
+ .num_peers = TARGET_TLV_NUM_PEERS,
+ .ast_skid_limit = 0x10,
+ .num_wds_entries = 0x20,
+ .target_64bit = false,
+ .rx_ring_fill_level = HTT_RX_RING_FILL_LEVEL,
+ .per_ce_irq = false,
+ .shadow_reg_support = false,
+ .rri_on_ddr = false,
+ .hw_filter_reset_required = true,
+ },
+ {
+ .id = QCA6174_HW_2_1_VERSION,
+ .dev_id = QCA6174_2_1_DEVICE_ID,
+ .name = "qca6174 hw2.1",
+ .patch_load_addr = QCA6174_HW_2_1_PATCH_LOAD_ADDR,
+ .uart_pin = 6,
+ .otp_exe_param = 0,
+ .channel_counters_freq_hz = 88000,
+ .max_probe_resp_desc_thres = 0,
+ .cal_data_len = 8124,
+ .fw = {
+ .dir = QCA6174_HW_2_1_FW_DIR,
+ .board = QCA6174_HW_2_1_BOARD_DATA_FILE,
+ .board_size = QCA6174_BOARD_DATA_SZ,
+ .board_ext_size = QCA6174_BOARD_EXT_DATA_SZ,
+ },
+ .hw_ops = &qca988x_ops,
+ .decap_align_bytes = 4,
+ .spectral_bin_discard = 0,
+ .spectral_bin_offset = 0,
+ .vht160_mcs_rx_highest = 0,
+ .vht160_mcs_tx_highest = 0,
+ .n_cipher_suites = 8,
+ .num_peers = TARGET_TLV_NUM_PEERS,
+ .ast_skid_limit = 0x10,
+ .num_wds_entries = 0x20,
+ .target_64bit = false,
+ .rx_ring_fill_level = HTT_RX_RING_FILL_LEVEL,
+ .per_ce_irq = false,
+ .shadow_reg_support = false,
+ .rri_on_ddr = false,
+ .hw_filter_reset_required = true,
+ },
+ {
+ .id = QCA6174_HW_3_0_VERSION,
+ .dev_id = QCA6174_2_1_DEVICE_ID,
+ .name = "qca6174 hw3.0",
+ .patch_load_addr = QCA6174_HW_3_0_PATCH_LOAD_ADDR,
+ .uart_pin = 6,
+ .otp_exe_param = 0,
+ .channel_counters_freq_hz = 88000,
+ .max_probe_resp_desc_thres = 0,
+ .cal_data_len = 8124,
+ .fw = {
+ .dir = QCA6174_HW_3_0_FW_DIR,
+ .board = QCA6174_HW_3_0_BOARD_DATA_FILE,
+ .board_size = QCA6174_BOARD_DATA_SZ,
+ .board_ext_size = QCA6174_BOARD_EXT_DATA_SZ,
+ },
+ .hw_ops = &qca988x_ops,
+ .decap_align_bytes = 4,
+ .spectral_bin_discard = 0,
+ .spectral_bin_offset = 0,
+ .vht160_mcs_rx_highest = 0,
+ .vht160_mcs_tx_highest = 0,
+ .n_cipher_suites = 8,
+ .num_peers = TARGET_TLV_NUM_PEERS,
+ .ast_skid_limit = 0x10,
+ .num_wds_entries = 0x20,
+ .target_64bit = false,
+ .rx_ring_fill_level = HTT_RX_RING_FILL_LEVEL,
+ .per_ce_irq = false,
+ .shadow_reg_support = false,
+ .rri_on_ddr = false,
+ .hw_filter_reset_required = true,
+ },
+ {
+ .id = QCA6174_HW_3_2_VERSION,
+ .dev_id = QCA6174_2_1_DEVICE_ID,
+ .name = "qca6174 hw3.2",
+ .patch_load_addr = QCA6174_HW_3_0_PATCH_LOAD_ADDR,
+ .uart_pin = 6,
+ .otp_exe_param = 0,
+ .channel_counters_freq_hz = 88000,
+ .max_probe_resp_desc_thres = 0,
+ .cal_data_len = 8124,
+ .fw = {
+ /* uses same binaries as hw3.0 */
+ .dir = QCA6174_HW_3_0_FW_DIR,
+ .board = QCA6174_HW_3_0_BOARD_DATA_FILE,
+ .board_size = QCA6174_BOARD_DATA_SZ,
+ .board_ext_size = QCA6174_BOARD_EXT_DATA_SZ,
+ },
+ .hw_ops = &qca6174_ops,
+ .hw_clk = qca6174_clk,
+ .target_cpu_freq = 176000000,
+ .decap_align_bytes = 4,
+ .spectral_bin_discard = 0,
+ .spectral_bin_offset = 0,
+ .vht160_mcs_rx_highest = 0,
+ .vht160_mcs_tx_highest = 0,
+ .n_cipher_suites = 8,
+ .num_peers = TARGET_TLV_NUM_PEERS,
+ .ast_skid_limit = 0x10,
+ .num_wds_entries = 0x20,
+ .target_64bit = false,
+ .rx_ring_fill_level = HTT_RX_RING_FILL_LEVEL,
+ .per_ce_irq = false,
+ .shadow_reg_support = false,
+ .rri_on_ddr = false,
+ .hw_filter_reset_required = true,
+ },
+ {
+ .id = QCA99X0_HW_2_0_DEV_VERSION,
+ .dev_id = QCA99X0_2_0_DEVICE_ID,
+ .name = "qca99x0 hw2.0",
+ .patch_load_addr = QCA99X0_HW_2_0_PATCH_LOAD_ADDR,
+ .uart_pin = 7,
+ .otp_exe_param = 0x00000700,
+ .continuous_frag_desc = true,
+ .cck_rate_map_rev2 = true,
+ .channel_counters_freq_hz = 150000,
+ .max_probe_resp_desc_thres = 24,
+ .tx_chain_mask = 0xf,
+ .rx_chain_mask = 0xf,
+ .max_spatial_stream = 4,
+ .cal_data_len = 12064,
+ .fw = {
+ .dir = QCA99X0_HW_2_0_FW_DIR,
+ .board = QCA99X0_HW_2_0_BOARD_DATA_FILE,
+ .board_size = QCA99X0_BOARD_DATA_SZ,
+ .board_ext_size = QCA99X0_BOARD_EXT_DATA_SZ,
+ },
+ .sw_decrypt_mcast_mgmt = true,
+ .hw_ops = &qca99x0_ops,
+ .decap_align_bytes = 1,
+ .spectral_bin_discard = 4,
+ .spectral_bin_offset = 0,
+ .vht160_mcs_rx_highest = 0,
+ .vht160_mcs_tx_highest = 0,
+ .n_cipher_suites = 11,
+ .num_peers = TARGET_TLV_NUM_PEERS,
+ .ast_skid_limit = 0x10,
+ .num_wds_entries = 0x20,
+ .target_64bit = false,
+ .rx_ring_fill_level = HTT_RX_RING_FILL_LEVEL,
+ .per_ce_irq = false,
+ .shadow_reg_support = false,
+ .rri_on_ddr = false,
+ .hw_filter_reset_required = true,
+ },
+ {
+ .id = QCA9984_HW_1_0_DEV_VERSION,
+ .dev_id = QCA9984_1_0_DEVICE_ID,
+ .name = "qca9984/qca9994 hw1.0",
+ .patch_load_addr = QCA9984_HW_1_0_PATCH_LOAD_ADDR,
+ .uart_pin = 7,
+ .cc_wraparound_type = ATH10K_HW_CC_WRAP_SHIFTED_EACH,
+ .otp_exe_param = 0x00000700,
+ .continuous_frag_desc = true,
+ .cck_rate_map_rev2 = true,
+ .channel_counters_freq_hz = 150000,
+ .max_probe_resp_desc_thres = 24,
+ .tx_chain_mask = 0xf,
+ .rx_chain_mask = 0xf,
+ .max_spatial_stream = 4,
+ .cal_data_len = 12064,
+ .fw = {
+ .dir = QCA9984_HW_1_0_FW_DIR,
+ .board = QCA9984_HW_1_0_BOARD_DATA_FILE,
+ .board_size = QCA99X0_BOARD_DATA_SZ,
+ .board_ext_size = QCA99X0_BOARD_EXT_DATA_SZ,
+ },
+ .sw_decrypt_mcast_mgmt = true,
+ .hw_ops = &qca99x0_ops,
+ .decap_align_bytes = 1,
+ .spectral_bin_discard = 12,
+ .spectral_bin_offset = 8,
+
+		/* Can do only 2x2 VHT160 or 80+80. 1560 Mbps is 4x4 80 MHz
+		 * or 2x2 160 MHz, long guard interval.
+		 */
+ .vht160_mcs_rx_highest = 1560,
+ .vht160_mcs_tx_highest = 1560,
+ .n_cipher_suites = 11,
+ .num_peers = TARGET_TLV_NUM_PEERS,
+ .ast_skid_limit = 0x10,
+ .num_wds_entries = 0x20,
+ .target_64bit = false,
+ .rx_ring_fill_level = HTT_RX_RING_FILL_LEVEL,
+ .per_ce_irq = false,
+ .shadow_reg_support = false,
+ .rri_on_ddr = false,
+ .hw_filter_reset_required = true,
+ },
+ {
+ .id = QCA9888_HW_2_0_DEV_VERSION,
+ .dev_id = QCA9888_2_0_DEVICE_ID,
+ .name = "qca9888 hw2.0",
+ .patch_load_addr = QCA9888_HW_2_0_PATCH_LOAD_ADDR,
+ .uart_pin = 7,
+ .cc_wraparound_type = ATH10K_HW_CC_WRAP_SHIFTED_EACH,
+ .otp_exe_param = 0x00000700,
+ .continuous_frag_desc = true,
+ .channel_counters_freq_hz = 150000,
+ .max_probe_resp_desc_thres = 24,
+ .tx_chain_mask = 3,
+ .rx_chain_mask = 3,
+ .max_spatial_stream = 2,
+ .cal_data_len = 12064,
+ .fw = {
+ .dir = QCA9888_HW_2_0_FW_DIR,
+ .board = QCA9888_HW_2_0_BOARD_DATA_FILE,
+ .board_size = QCA99X0_BOARD_DATA_SZ,
+ .board_ext_size = QCA99X0_BOARD_EXT_DATA_SZ,
+ },
+ .sw_decrypt_mcast_mgmt = true,
+ .hw_ops = &qca99x0_ops,
+ .decap_align_bytes = 1,
+ .spectral_bin_discard = 12,
+ .spectral_bin_offset = 8,
+
+		/* Can do only 1x1 VHT160 or 80+80. 780 Mbps is 2x2 80 MHz or
+		 * 1x1 160 MHz, long guard interval.
+		 */
+ .vht160_mcs_rx_highest = 780,
+ .vht160_mcs_tx_highest = 780,
+ .n_cipher_suites = 11,
+ .num_peers = TARGET_TLV_NUM_PEERS,
+ .ast_skid_limit = 0x10,
+ .num_wds_entries = 0x20,
+ .target_64bit = false,
+ .rx_ring_fill_level = HTT_RX_RING_FILL_LEVEL,
+ .per_ce_irq = false,
+ .shadow_reg_support = false,
+ .rri_on_ddr = false,
+ .hw_filter_reset_required = true,
+ },
+ {
+ .id = QCA9377_HW_1_0_DEV_VERSION,
+ .dev_id = QCA9377_1_0_DEVICE_ID,
+ .name = "qca9377 hw1.0",
+ .patch_load_addr = QCA9377_HW_1_0_PATCH_LOAD_ADDR,
+ .uart_pin = 6,
+ .otp_exe_param = 0,
+ .channel_counters_freq_hz = 88000,
+ .max_probe_resp_desc_thres = 0,
+ .cal_data_len = 8124,
+ .fw = {
+ .dir = QCA9377_HW_1_0_FW_DIR,
+ .board = QCA9377_HW_1_0_BOARD_DATA_FILE,
+ .board_size = QCA9377_BOARD_DATA_SZ,
+ .board_ext_size = QCA9377_BOARD_EXT_DATA_SZ,
+ },
+ .hw_ops = &qca988x_ops,
+ .decap_align_bytes = 4,
+ .spectral_bin_discard = 0,
+ .spectral_bin_offset = 0,
+ .vht160_mcs_rx_highest = 0,
+ .vht160_mcs_tx_highest = 0,
+ .n_cipher_suites = 8,
+ .num_peers = TARGET_TLV_NUM_PEERS,
+ .ast_skid_limit = 0x10,
+ .num_wds_entries = 0x20,
+ .target_64bit = false,
+ .rx_ring_fill_level = HTT_RX_RING_FILL_LEVEL,
+ .per_ce_irq = false,
+ .shadow_reg_support = false,
+ .rri_on_ddr = false,
+ .hw_filter_reset_required = true,
+ },
+ {
+ .id = QCA9377_HW_1_1_DEV_VERSION,
+ .dev_id = QCA9377_1_0_DEVICE_ID,
+ .name = "qca9377 hw1.1",
+ .patch_load_addr = QCA9377_HW_1_0_PATCH_LOAD_ADDR,
+ .uart_pin = 6,
+ .otp_exe_param = 0,
+ .channel_counters_freq_hz = 88000,
+ .max_probe_resp_desc_thres = 0,
+ .cal_data_len = 8124,
+ .fw = {
+ .dir = QCA9377_HW_1_0_FW_DIR,
+ .board = QCA9377_HW_1_0_BOARD_DATA_FILE,
+ .board_size = QCA9377_BOARD_DATA_SZ,
+ .board_ext_size = QCA9377_BOARD_EXT_DATA_SZ,
+ },
+ .hw_ops = &qca6174_ops,
+ .hw_clk = qca6174_clk,
+ .target_cpu_freq = 176000000,
+ .decap_align_bytes = 4,
+ .spectral_bin_discard = 0,
+ .spectral_bin_offset = 0,
+ .vht160_mcs_rx_highest = 0,
+ .vht160_mcs_tx_highest = 0,
+ .n_cipher_suites = 8,
+ .num_peers = TARGET_TLV_NUM_PEERS,
+ .ast_skid_limit = 0x10,
+ .num_wds_entries = 0x20,
+ .target_64bit = false,
+ .rx_ring_fill_level = HTT_RX_RING_FILL_LEVEL,
+ .per_ce_irq = false,
+ .shadow_reg_support = false,
+ .rri_on_ddr = false,
+ .hw_filter_reset_required = true,
+ },
+ {
+ .id = QCA4019_HW_1_0_DEV_VERSION,
+ .dev_id = 0,
+ .name = "qca4019 hw1.0",
+ .patch_load_addr = QCA4019_HW_1_0_PATCH_LOAD_ADDR,
+ .uart_pin = 7,
+ .cc_wraparound_type = ATH10K_HW_CC_WRAP_SHIFTED_EACH,
+ .otp_exe_param = 0x0010000,
+ .continuous_frag_desc = true,
+ .cck_rate_map_rev2 = true,
+ .channel_counters_freq_hz = 125000,
+ .max_probe_resp_desc_thres = 24,
+ .tx_chain_mask = 0x3,
+ .rx_chain_mask = 0x3,
+ .max_spatial_stream = 2,
+ .cal_data_len = 12064,
+ .fw = {
+ .dir = QCA4019_HW_1_0_FW_DIR,
+ .board = QCA4019_HW_1_0_BOARD_DATA_FILE,
+ .board_size = QCA4019_BOARD_DATA_SZ,
+ .board_ext_size = QCA4019_BOARD_EXT_DATA_SZ,
+ },
+ .sw_decrypt_mcast_mgmt = true,
+ .hw_ops = &qca99x0_ops,
+ .decap_align_bytes = 1,
+ .spectral_bin_discard = 4,
+ .spectral_bin_offset = 0,
+ .vht160_mcs_rx_highest = 0,
+ .vht160_mcs_tx_highest = 0,
+ .n_cipher_suites = 11,
+ .num_peers = TARGET_TLV_NUM_PEERS,
+ .ast_skid_limit = 0x10,
+ .num_wds_entries = 0x20,
+ .target_64bit = false,
+ .rx_ring_fill_level = HTT_RX_RING_FILL_LEVEL,
+ .per_ce_irq = false,
+ .shadow_reg_support = false,
+ .rri_on_ddr = false,
+ .hw_filter_reset_required = true,
+ },
+ {
+ .id = WCN3990_HW_1_0_DEV_VERSION,
+ .dev_id = 0,
+ .name = "wcn3990 hw1.0",
+ .continuous_frag_desc = true,
+ .tx_chain_mask = 0x7,
+ .rx_chain_mask = 0x7,
+ .max_spatial_stream = 4,
+ .fw = {
+ .dir = WCN3990_HW_1_0_FW_DIR,
+ },
+ .sw_decrypt_mcast_mgmt = true,
+ .hw_ops = &wcn3990_ops,
+ .decap_align_bytes = 1,
+ .num_peers = TARGET_HL_10_TLV_NUM_PEERS,
+ .n_cipher_suites = 11,
+ .ast_skid_limit = TARGET_HL_10_TLV_AST_SKID_LIMIT,
+ .num_wds_entries = TARGET_HL_10_TLV_NUM_WDS_ENTRIES,
+ .target_64bit = true,
+ .rx_ring_fill_level = HTT_RX_RING_FILL_LEVEL_DUAL_MAC,
+ .per_ce_irq = true,
+ .shadow_reg_support = true,
+ .rri_on_ddr = true,
+ .hw_filter_reset_required = false,
+ },
+};
+
+static const char *const ath10k_core_fw_feature_str[] = {
+ [ATH10K_FW_FEATURE_EXT_WMI_MGMT_RX] = "wmi-mgmt-rx",
+ [ATH10K_FW_FEATURE_WMI_10X] = "wmi-10.x",
+ [ATH10K_FW_FEATURE_HAS_WMI_MGMT_TX] = "has-wmi-mgmt-tx",
+ [ATH10K_FW_FEATURE_NO_P2P] = "no-p2p",
+ [ATH10K_FW_FEATURE_WMI_10_2] = "wmi-10.2",
+ [ATH10K_FW_FEATURE_MULTI_VIF_PS_SUPPORT] = "multi-vif-ps",
+ [ATH10K_FW_FEATURE_WOWLAN_SUPPORT] = "wowlan",
+ [ATH10K_FW_FEATURE_IGNORE_OTP_RESULT] = "ignore-otp",
+ [ATH10K_FW_FEATURE_NO_NWIFI_DECAP_4ADDR_PADDING] = "no-4addr-pad",
+ [ATH10K_FW_FEATURE_SUPPORTS_SKIP_CLOCK_INIT] = "skip-clock-init",
+ [ATH10K_FW_FEATURE_RAW_MODE_SUPPORT] = "raw-mode",
+ [ATH10K_FW_FEATURE_SUPPORTS_ADAPTIVE_CCA] = "adaptive-cca",
+ [ATH10K_FW_FEATURE_MFP_SUPPORT] = "mfp",
+ [ATH10K_FW_FEATURE_PEER_FLOW_CONTROL] = "peer-flow-ctrl",
+ [ATH10K_FW_FEATURE_BTCOEX_PARAM] = "btcoex-param",
+ [ATH10K_FW_FEATURE_SKIP_NULL_FUNC_WAR] = "skip-null-func-war",
+ [ATH10K_FW_FEATURE_ALLOWS_MESH_BCAST] = "allows-mesh-bcast",
+ [ATH10K_FW_FEATURE_NO_PS] = "no-ps",
+ [ATH10K_FW_FEATURE_MGMT_TX_BY_REF] = "mgmt-tx-by-reference",
+ [ATH10K_FW_FEATURE_NON_BMI] = "non-bmi",
+};
+
+static unsigned int ath10k_core_get_fw_feature_str(char *buf,
+ size_t buf_len,
+ enum ath10k_fw_features feat)
+{
+ /* make sure that ath10k_core_fw_feature_str[] gets updated */
+ BUILD_BUG_ON(ARRAY_SIZE(ath10k_core_fw_feature_str) !=
+ ATH10K_FW_FEATURE_COUNT);
+
+ if (feat >= ARRAY_SIZE(ath10k_core_fw_feature_str) ||
+ WARN_ON(!ath10k_core_fw_feature_str[feat])) {
+ return scnprintf(buf, buf_len, "bit%d", feat);
+ }
+
+ return scnprintf(buf, buf_len, "%s", ath10k_core_fw_feature_str[feat]);
+}
+
+void ath10k_core_get_fw_features_str(struct ath10k *ar,
+ char *buf,
+ size_t buf_len)
+{
+ size_t len = 0;
+ int i;
+
+ for (i = 0; i < ATH10K_FW_FEATURE_COUNT; i++) {
+ if (test_bit(i, ar->normal_mode_fw.fw_file.fw_features)) {
+ if (len > 0)
+ len += scnprintf(buf + len, buf_len - len, ",");
+
+ len += ath10k_core_get_fw_feature_str(buf + len,
+ buf_len - len,
+ i);
+ }
+ }
+}
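+
+/* The buffer ends up holding a comma separated list such as
+ * "wmi-10.x,has-wmi-mgmt-tx" (names taken from
+ * ath10k_core_fw_feature_str[] above).
+ */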
+
+static void ath10k_send_suspend_complete(struct ath10k *ar)
+{
+ ath10k_dbg(ar, ATH10K_DBG_BOOT, "boot suspend complete\n");
+
+ complete(&ar->target_suspend);
+}
+
+static void ath10k_init_sdio(struct ath10k *ar)
+{
+ u32 param = 0;
+
+ ath10k_bmi_write32(ar, hi_mbox_io_block_sz, 256);
+ ath10k_bmi_write32(ar, hi_mbox_isr_yield_limit, 99);
+ ath10k_bmi_read32(ar, hi_acs_flags, &param);
+
+ param |= (HI_ACS_FLAGS_SDIO_SWAP_MAILBOX_SET |
+ HI_ACS_FLAGS_SDIO_REDUCE_TX_COMPL_SET |
+ HI_ACS_FLAGS_ALT_DATA_CREDIT_SIZE);
+
+ ath10k_bmi_write32(ar, hi_acs_flags, param);
+}
+
+static int ath10k_init_configure_target(struct ath10k *ar)
+{
+ u32 param_host;
+ int ret;
+
+	/* tell the target which HTC version to use */
+ ret = ath10k_bmi_write32(ar, hi_app_host_interest,
+ HTC_PROTOCOL_VERSION);
+ if (ret) {
+		ath10k_err(ar, "setting HTC version failed\n");
+ return ret;
+ }
+
+ /* set the firmware mode to STA/IBSS/AP */
+ ret = ath10k_bmi_read32(ar, hi_option_flag, &param_host);
+ if (ret) {
+ ath10k_err(ar, "setting firmware mode (1/2) failed\n");
+ return ret;
+ }
+
+	/* TODO: the following parameters need to be revisited */
+	/* num_device */
+	param_host |= (1 << HI_OPTION_NUM_DEV_SHIFT);
+	/* Firmware mode */
+	/* FIXME: why FW_MODE_AP? */
+	param_host |= (HI_OPTION_FW_MODE_AP << HI_OPTION_FW_MODE_SHIFT);
+ /* mac_addr_method */
+ param_host |= (1 << HI_OPTION_MAC_ADDR_METHOD_SHIFT);
+ /* firmware_bridge */
+ param_host |= (0 << HI_OPTION_FW_BRIDGE_SHIFT);
+ /* fwsubmode */
+ param_host |= (0 << HI_OPTION_FW_SUBMODE_SHIFT);
+
+ ret = ath10k_bmi_write32(ar, hi_option_flag, param_host);
+ if (ret) {
+ ath10k_err(ar, "setting firmware mode (2/2) failed\n");
+ return ret;
+ }
+
+ /* We do all byte-swapping on the host */
+ ret = ath10k_bmi_write32(ar, hi_be, 0);
+ if (ret) {
+ ath10k_err(ar, "setting host CPU BE mode failed\n");
+ return ret;
+ }
+
+ /* FW descriptor/Data swap flags */
+ ret = ath10k_bmi_write32(ar, hi_fw_swap, 0);
+
+ if (ret) {
+ ath10k_err(ar, "setting FW data/desc swap flags failed\n");
+ return ret;
+ }
+
+ /* Some devices have a special sanity check that verifies the PCI
+ * Device ID is written to this host interest var. It is known to be
+ * required to boot QCA6164.
+ */
+ ret = ath10k_bmi_write32(ar, hi_hci_uart_pwr_mgmt_params_ext,
+ ar->dev_id);
+ if (ret) {
+ ath10k_err(ar, "failed to set pwr_mgmt_params: %d\n", ret);
+ return ret;
+ }
+
+ return 0;
+}
+
+static const struct firmware *ath10k_fetch_fw_file(struct ath10k *ar,
+ const char *dir,
+ const char *file)
+{
+ char filename[100];
+ const struct firmware *fw;
+ int ret;
+
+ if (file == NULL)
+ return ERR_PTR(-ENOENT);
+
+ if (dir == NULL)
+ dir = ".";
+
+ snprintf(filename, sizeof(filename), "%s/%s", dir, file);
+ ret = firmware_request_nowarn(&fw, filename, ar->dev);
+ ath10k_dbg(ar, ATH10K_DBG_BOOT, "boot fw request '%s': %d\n",
+ filename, ret);
+
+ if (ret)
+ return ERR_PTR(ret);
+
+ return fw;
+}
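+
+/* For example, dir "ath10k/QCA988X/hw2.0" with file "firmware-5.bin"
+ * resolves to the request "ath10k/QCA988X/hw2.0/firmware-5.bin".
+ */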
+
+static int ath10k_push_board_ext_data(struct ath10k *ar, const void *data,
+ size_t data_len)
+{
+ u32 board_data_size = ar->hw_params.fw.board_size;
+ u32 board_ext_data_size = ar->hw_params.fw.board_ext_size;
+ u32 board_ext_data_addr;
+ int ret;
+
+ ret = ath10k_bmi_read32(ar, hi_board_ext_data, &board_ext_data_addr);
+ if (ret) {
+ ath10k_err(ar, "could not read board ext data addr (%d)\n",
+ ret);
+ return ret;
+ }
+
+ ath10k_dbg(ar, ATH10K_DBG_BOOT,
+ "boot push board extended data addr 0x%x\n",
+ board_ext_data_addr);
+
+ if (board_ext_data_addr == 0)
+ return 0;
+
+ if (data_len != (board_data_size + board_ext_data_size)) {
+ ath10k_err(ar, "invalid board (ext) data sizes %zu != %d+%d\n",
+ data_len, board_data_size, board_ext_data_size);
+ return -EINVAL;
+ }
+
+ ret = ath10k_bmi_write_memory(ar, board_ext_data_addr,
+ data + board_data_size,
+ board_ext_data_size);
+ if (ret) {
+ ath10k_err(ar, "could not write board ext data (%d)\n", ret);
+ return ret;
+ }
+
+ ret = ath10k_bmi_write32(ar, hi_board_ext_data_config,
+ (board_ext_data_size << 16) | 1);
+ if (ret) {
+ ath10k_err(ar, "could not write board ext data bit (%d)\n",
+ ret);
+ return ret;
+ }
+
+ return 0;
+}
+
+static int ath10k_download_board_data(struct ath10k *ar, const void *data,
+ size_t data_len)
+{
+ u32 board_data_size = ar->hw_params.fw.board_size;
+ u32 address;
+ int ret;
+
+ ret = ath10k_push_board_ext_data(ar, data, data_len);
+ if (ret) {
+ ath10k_err(ar, "could not push board ext data (%d)\n", ret);
+ goto exit;
+ }
+
+ ret = ath10k_bmi_read32(ar, hi_board_data, &address);
+ if (ret) {
+ ath10k_err(ar, "could not read board data addr (%d)\n", ret);
+ goto exit;
+ }
+
+ ret = ath10k_bmi_write_memory(ar, address, data,
+ min_t(u32, board_data_size,
+ data_len));
+ if (ret) {
+ ath10k_err(ar, "could not write board data (%d)\n", ret);
+ goto exit;
+ }
+
+ ret = ath10k_bmi_write32(ar, hi_board_data_initialized, 1);
+ if (ret) {
+ ath10k_err(ar, "could not write board data bit (%d)\n", ret);
+ goto exit;
+ }
+
+exit:
+ return ret;
+}
+
+static int ath10k_download_cal_file(struct ath10k *ar,
+ const struct firmware *file)
+{
+ int ret;
+
+ if (!file)
+ return -ENOENT;
+
+ if (IS_ERR(file))
+ return PTR_ERR(file);
+
+ ret = ath10k_download_board_data(ar, file->data, file->size);
+ if (ret) {
+ ath10k_err(ar, "failed to download cal_file data: %d\n", ret);
+ return ret;
+ }
+
+ ath10k_dbg(ar, ATH10K_DBG_BOOT, "boot cal file downloaded\n");
+
+ return 0;
+}
+
+static int ath10k_download_cal_dt(struct ath10k *ar, const char *dt_name)
+{
+ struct device_node *node;
+ int data_len;
+ void *data;
+ int ret;
+
+ node = ar->dev->of_node;
+ if (!node)
+ /* Device Tree is optional, don't print any warnings if
+ * there's no node for ath10k.
+ */
+ return -ENOENT;
+
+ if (!of_get_property(node, dt_name, &data_len)) {
+ /* The calibration data node is optional */
+ return -ENOENT;
+ }
+
+ if (data_len != ar->hw_params.cal_data_len) {
+ ath10k_warn(ar, "invalid calibration data length in DT: %d\n",
+ data_len);
+ ret = -EMSGSIZE;
+ goto out;
+ }
+
+ data = kmalloc(data_len, GFP_KERNEL);
+ if (!data) {
+ ret = -ENOMEM;
+ goto out;
+ }
+
+ ret = of_property_read_u8_array(node, dt_name, data, data_len);
+ if (ret) {
+ ath10k_warn(ar, "failed to read calibration data from DT: %d\n",
+ ret);
+ goto out_free;
+ }
+
+ ret = ath10k_download_board_data(ar, data, data_len);
+ if (ret) {
+ ath10k_warn(ar, "failed to download calibration data from Device Tree: %d\n",
+ ret);
+ goto out_free;
+ }
+
+ ret = 0;
+
+out_free:
+ kfree(data);
+
+out:
+ return ret;
+}
+
+static int ath10k_download_cal_eeprom(struct ath10k *ar)
+{
+ size_t data_len;
+ void *data = NULL;
+ int ret;
+
+ ret = ath10k_hif_fetch_cal_eeprom(ar, &data, &data_len);
+ if (ret) {
+ if (ret != -EOPNOTSUPP)
+ ath10k_warn(ar, "failed to read calibration data from EEPROM: %d\n",
+ ret);
+ goto out_free;
+ }
+
+ ret = ath10k_download_board_data(ar, data, data_len);
+ if (ret) {
+ ath10k_warn(ar, "failed to download calibration data from EEPROM: %d\n",
+ ret);
+ goto out_free;
+ }
+
+ ret = 0;
+
+out_free:
+ kfree(data);
+
+ return ret;
+}
+
+static int ath10k_core_get_board_id_from_otp(struct ath10k *ar)
+{
+ u32 result, address;
+ u8 board_id, chip_id;
+ int ret, bmi_board_id_param;
+
+ address = ar->hw_params.patch_load_addr;
+
+ if (!ar->normal_mode_fw.fw_file.otp_data ||
+ !ar->normal_mode_fw.fw_file.otp_len) {
+ ath10k_warn(ar,
+ "failed to retrieve board id because of invalid otp\n");
+ return -ENODATA;
+ }
+
+ ath10k_dbg(ar, ATH10K_DBG_BOOT,
+ "boot upload otp to 0x%x len %zd for board id\n",
+ address, ar->normal_mode_fw.fw_file.otp_len);
+
+ ret = ath10k_bmi_fast_download(ar, address,
+ ar->normal_mode_fw.fw_file.otp_data,
+ ar->normal_mode_fw.fw_file.otp_len);
+ if (ret) {
+ ath10k_err(ar, "could not write otp for board id check: %d\n",
+ ret);
+ return ret;
+ }
+
+ if (ar->cal_mode == ATH10K_PRE_CAL_MODE_DT ||
+ ar->cal_mode == ATH10K_PRE_CAL_MODE_FILE)
+ bmi_board_id_param = BMI_PARAM_GET_FLASH_BOARD_ID;
+ else
+ bmi_board_id_param = BMI_PARAM_GET_EEPROM_BOARD_ID;
+
+ ret = ath10k_bmi_execute(ar, address, bmi_board_id_param, &result);
+ if (ret) {
+ ath10k_err(ar, "could not execute otp for board id check: %d\n",
+ ret);
+ return ret;
+ }
+
+ board_id = MS(result, ATH10K_BMI_BOARD_ID_FROM_OTP);
+ chip_id = MS(result, ATH10K_BMI_CHIP_ID_FROM_OTP);
+
+ ath10k_dbg(ar, ATH10K_DBG_BOOT,
+ "boot get otp board id result 0x%08x board_id %d chip_id %d\n",
+ result, board_id, chip_id);
+
+ if ((result & ATH10K_BMI_BOARD_ID_STATUS_MASK) != 0 ||
+ (board_id == 0)) {
+ ath10k_dbg(ar, ATH10K_DBG_BOOT,
+ "board id does not exist in otp, ignore it\n");
+ return -EOPNOTSUPP;
+ }
+
+ ar->id.bmi_ids_valid = true;
+ ar->id.bmi_board_id = board_id;
+ ar->id.bmi_chip_id = chip_id;
+
+ return 0;
+}
+
+static void ath10k_core_check_bdfext(const struct dmi_header *hdr, void *data)
+{
+ struct ath10k *ar = data;
+ const char *bdf_ext;
+ const char *magic = ATH10K_SMBIOS_BDF_EXT_MAGIC;
+ u8 bdf_enabled;
+ int i;
+
+ if (hdr->type != ATH10K_SMBIOS_BDF_EXT_TYPE)
+ return;
+
+ if (hdr->length != ATH10K_SMBIOS_BDF_EXT_LENGTH) {
+ ath10k_dbg(ar, ATH10K_DBG_BOOT,
+ "wrong smbios bdf ext type length (%d).\n",
+ hdr->length);
+ return;
+ }
+
+ bdf_enabled = *((u8 *)hdr + ATH10K_SMBIOS_BDF_EXT_OFFSET);
+ if (!bdf_enabled) {
+ ath10k_dbg(ar, ATH10K_DBG_BOOT, "bdf variant name not found.\n");
+ return;
+ }
+
+ /* Only one string exists (per spec) */
+ bdf_ext = (char *)hdr + hdr->length;
+
+ if (memcmp(bdf_ext, magic, strlen(magic)) != 0) {
+ ath10k_dbg(ar, ATH10K_DBG_BOOT,
+ "bdf variant magic does not match.\n");
+ return;
+ }
+
+ for (i = 0; i < strlen(bdf_ext); i++) {
+ if (!isascii(bdf_ext[i]) || !isprint(bdf_ext[i])) {
+ ath10k_dbg(ar, ATH10K_DBG_BOOT,
+ "bdf variant name contains non ascii chars.\n");
+ return;
+ }
+ }
+
+ /* Copy extension name without magic suffix */
+ if (strscpy(ar->id.bdf_ext, bdf_ext + strlen(magic),
+ sizeof(ar->id.bdf_ext)) < 0) {
+ ath10k_dbg(ar, ATH10K_DBG_BOOT,
+ "bdf variant string is longer than the buffer can accommodate (variant: %s)\n",
+ bdf_ext);
+ return;
+ }
+
+ ath10k_dbg(ar, ATH10K_DBG_BOOT,
+ "found and validated bdf variant smbios_type 0x%x bdf %s\n",
+ ATH10K_SMBIOS_BDF_EXT_TYPE, bdf_ext);
+}
+
+static int ath10k_core_check_smbios(struct ath10k *ar)
+{
+ ar->id.bdf_ext[0] = '\0';
+ dmi_walk(ath10k_core_check_bdfext, ar);
+
+ if (ar->id.bdf_ext[0] == '\0')
+ return -ENODATA;
+
+ return 0;
+}
+
+static int ath10k_core_check_dt(struct ath10k *ar)
+{
+ struct device_node *node;
+ const char *variant = NULL;
+
+ node = ar->dev->of_node;
+ if (!node)
+ return -ENOENT;
+
+ of_property_read_string(node, "qcom,ath10k-calibration-variant",
+ &variant);
+ if (!variant)
+ return -ENODATA;
+
+ if (strscpy(ar->id.bdf_ext, variant, sizeof(ar->id.bdf_ext)) < 0)
+ ath10k_dbg(ar, ATH10K_DBG_BOOT,
+ "bdf variant string is longer than the buffer can accommodate (variant: %s)\n",
+ variant);
+
+ return 0;
+}
+
+static int ath10k_download_and_run_otp(struct ath10k *ar)
+{
+ u32 result, address = ar->hw_params.patch_load_addr;
+ u32 bmi_otp_exe_param = ar->hw_params.otp_exe_param;
+ int ret;
+
+ ret = ath10k_download_board_data(ar,
+ ar->running_fw->board_data,
+ ar->running_fw->board_len);
+ if (ret) {
+ ath10k_err(ar, "failed to download board data: %d\n", ret);
+ return ret;
+ }
+
+ /* OTP is optional */
+
+ if (!ar->running_fw->fw_file.otp_data ||
+ !ar->running_fw->fw_file.otp_len) {
+		ath10k_warn(ar, "not running otp, calibration will be incorrect (otp_data %pK otp_len %zd)!\n",
+ ar->running_fw->fw_file.otp_data,
+ ar->running_fw->fw_file.otp_len);
+ return 0;
+ }
+
+ ath10k_dbg(ar, ATH10K_DBG_BOOT, "boot upload otp to 0x%x len %zd\n",
+ address, ar->running_fw->fw_file.otp_len);
+
+ ret = ath10k_bmi_fast_download(ar, address,
+ ar->running_fw->fw_file.otp_data,
+ ar->running_fw->fw_file.otp_len);
+ if (ret) {
+ ath10k_err(ar, "could not write otp (%d)\n", ret);
+ return ret;
+ }
+
+ /* As of now pre-cal is valid for 10_4 variants */
+ if (ar->cal_mode == ATH10K_PRE_CAL_MODE_DT ||
+ ar->cal_mode == ATH10K_PRE_CAL_MODE_FILE)
+ bmi_otp_exe_param = BMI_PARAM_FLASH_SECTION_ALL;
+
+ ret = ath10k_bmi_execute(ar, address, bmi_otp_exe_param, &result);
+ if (ret) {
+ ath10k_err(ar, "could not execute otp (%d)\n", ret);
+ return ret;
+ }
+
+ ath10k_dbg(ar, ATH10K_DBG_BOOT, "boot otp execute result %d\n", result);
+
+ if (!(skip_otp || test_bit(ATH10K_FW_FEATURE_IGNORE_OTP_RESULT,
+ ar->running_fw->fw_file.fw_features)) &&
+ result != 0) {
+ ath10k_err(ar, "otp calibration failed: %d", result);
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+static int ath10k_download_fw(struct ath10k *ar)
+{
+ u32 address, data_len;
+ const void *data;
+ int ret;
+
+ address = ar->hw_params.patch_load_addr;
+
+ data = ar->running_fw->fw_file.firmware_data;
+ data_len = ar->running_fw->fw_file.firmware_len;
+
+ ret = ath10k_swap_code_seg_configure(ar, &ar->running_fw->fw_file);
+ if (ret) {
+ ath10k_err(ar, "failed to configure fw code swap: %d\n",
+ ret);
+ return ret;
+ }
+
+ ath10k_dbg(ar, ATH10K_DBG_BOOT,
+ "boot uploading firmware image %pK len %d\n",
+ data, data_len);
+
+ ret = ath10k_bmi_fast_download(ar, address, data, data_len);
+ if (ret) {
+ ath10k_err(ar, "failed to download firmware: %d\n",
+ ret);
+ return ret;
+ }
+
+ return ret;
+}
+
+static void ath10k_core_free_board_files(struct ath10k *ar)
+{
+ if (!IS_ERR(ar->normal_mode_fw.board))
+ release_firmware(ar->normal_mode_fw.board);
+
+ ar->normal_mode_fw.board = NULL;
+ ar->normal_mode_fw.board_data = NULL;
+ ar->normal_mode_fw.board_len = 0;
+}
+
+static void ath10k_core_free_firmware_files(struct ath10k *ar)
+{
+ if (!IS_ERR(ar->normal_mode_fw.fw_file.firmware))
+ release_firmware(ar->normal_mode_fw.fw_file.firmware);
+
+ if (!IS_ERR(ar->cal_file))
+ release_firmware(ar->cal_file);
+
+ if (!IS_ERR(ar->pre_cal_file))
+ release_firmware(ar->pre_cal_file);
+
+ ath10k_swap_code_seg_release(ar, &ar->normal_mode_fw.fw_file);
+
+ ar->normal_mode_fw.fw_file.otp_data = NULL;
+ ar->normal_mode_fw.fw_file.otp_len = 0;
+
+ ar->normal_mode_fw.fw_file.firmware = NULL;
+ ar->normal_mode_fw.fw_file.firmware_data = NULL;
+ ar->normal_mode_fw.fw_file.firmware_len = 0;
+
+ ar->cal_file = NULL;
+ ar->pre_cal_file = NULL;
+}
+
+static int ath10k_fetch_cal_file(struct ath10k *ar)
+{
+ char filename[100];
+
+ /* pre-cal-<bus>-<id>.bin */
+ scnprintf(filename, sizeof(filename), "pre-cal-%s-%s.bin",
+ ath10k_bus_str(ar->hif.bus), dev_name(ar->dev));
+
+ ar->pre_cal_file = ath10k_fetch_fw_file(ar, ATH10K_FW_DIR, filename);
+ if (!IS_ERR(ar->pre_cal_file))
+ goto success;
+
+ /* cal-<bus>-<id>.bin */
+ scnprintf(filename, sizeof(filename), "cal-%s-%s.bin",
+ ath10k_bus_str(ar->hif.bus), dev_name(ar->dev));
+
+ ar->cal_file = ath10k_fetch_fw_file(ar, ATH10K_FW_DIR, filename);
+ if (IS_ERR(ar->cal_file))
+ /* calibration file is optional, don't print any warnings */
+ return PTR_ERR(ar->cal_file);
+success:
+ ath10k_dbg(ar, ATH10K_DBG_BOOT, "found calibration file %s/%s\n",
+ ATH10K_FW_DIR, filename);
+
+ return 0;
+}
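+
+/* On PCI, for instance, this looks for files such as
+ * "ath10k/pre-cal-pci-0000:01:00.0.bin" and then
+ * "ath10k/cal-pci-0000:01:00.0.bin" (the exact id comes from dev_name()).
+ */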
+
+static int ath10k_core_fetch_board_data_api_1(struct ath10k *ar)
+{
+ if (!ar->hw_params.fw.board) {
+ ath10k_err(ar, "failed to find board file fw entry\n");
+ return -EINVAL;
+ }
+
+ ar->normal_mode_fw.board = ath10k_fetch_fw_file(ar,
+ ar->hw_params.fw.dir,
+ ar->hw_params.fw.board);
+ if (IS_ERR(ar->normal_mode_fw.board))
+ return PTR_ERR(ar->normal_mode_fw.board);
+
+ ar->normal_mode_fw.board_data = ar->normal_mode_fw.board->data;
+ ar->normal_mode_fw.board_len = ar->normal_mode_fw.board->size;
+
+ return 0;
+}
+
+static int ath10k_core_parse_bd_ie_board(struct ath10k *ar,
+ const void *buf, size_t buf_len,
+ const char *boardname)
+{
+ const struct ath10k_fw_ie *hdr;
+ bool name_match_found;
+ int ret, board_ie_id;
+ size_t board_ie_len;
+ const void *board_ie_data;
+
+ name_match_found = false;
+
+ /* go through ATH10K_BD_IE_BOARD_ elements */
+ while (buf_len > sizeof(struct ath10k_fw_ie)) {
+ hdr = buf;
+ board_ie_id = le32_to_cpu(hdr->id);
+ board_ie_len = le32_to_cpu(hdr->len);
+ board_ie_data = hdr->data;
+
+ buf_len -= sizeof(*hdr);
+ buf += sizeof(*hdr);
+
+ if (buf_len < ALIGN(board_ie_len, 4)) {
+ ath10k_err(ar, "invalid ATH10K_BD_IE_BOARD length: %zu < %zu\n",
+ buf_len, ALIGN(board_ie_len, 4));
+ ret = -EINVAL;
+ goto out;
+ }
+
+ switch (board_ie_id) {
+ case ATH10K_BD_IE_BOARD_NAME:
+ ath10k_dbg_dump(ar, ATH10K_DBG_BOOT, "board name", "",
+ board_ie_data, board_ie_len);
+
+ if (board_ie_len != strlen(boardname))
+ break;
+
+ ret = memcmp(board_ie_data, boardname, strlen(boardname));
+ if (ret)
+ break;
+
+ name_match_found = true;
+ ath10k_dbg(ar, ATH10K_DBG_BOOT,
+ "boot found match for name '%s'",
+ boardname);
+ break;
+ case ATH10K_BD_IE_BOARD_DATA:
+ if (!name_match_found)
+ /* no match found */
+ break;
+
+ ath10k_dbg(ar, ATH10K_DBG_BOOT,
+ "boot found board data for '%s'",
+ boardname);
+
+ ar->normal_mode_fw.board_data = board_ie_data;
+ ar->normal_mode_fw.board_len = board_ie_len;
+
+ ret = 0;
+ goto out;
+ default:
+ ath10k_warn(ar, "unknown ATH10K_BD_IE_BOARD found: %d\n",
+ board_ie_id);
+ break;
+ }
+
+ /* jump over the padding */
+ board_ie_len = ALIGN(board_ie_len, 4);
+
+ buf_len -= board_ie_len;
+ buf += board_ie_len;
+ }
+
+ /* no match found */
+ ret = -ENOENT;
+
+out:
+ return ret;
+}
+
+static int ath10k_core_search_bd(struct ath10k *ar,
+ const char *boardname,
+ const u8 *data,
+ size_t len)
+{
+ size_t ie_len;
+ struct ath10k_fw_ie *hdr;
+ int ret = -ENOENT, ie_id;
+
+ while (len > sizeof(struct ath10k_fw_ie)) {
+ hdr = (struct ath10k_fw_ie *)data;
+ ie_id = le32_to_cpu(hdr->id);
+ ie_len = le32_to_cpu(hdr->len);
+
+ len -= sizeof(*hdr);
+ data = hdr->data;
+
+ if (len < ALIGN(ie_len, 4)) {
+ ath10k_err(ar, "invalid length for board ie_id %d ie_len %zu len %zu\n",
+ ie_id, ie_len, len);
+ return -EINVAL;
+ }
+
+ switch (ie_id) {
+ case ATH10K_BD_IE_BOARD:
+ ret = ath10k_core_parse_bd_ie_board(ar, data, ie_len,
+ boardname);
+ if (ret == -ENOENT)
+ /* no match found, continue */
+ break;
+
+ /* either found or error, so stop searching */
+ goto out;
+ }
+
+ /* jump over the padding */
+ ie_len = ALIGN(ie_len, 4);
+
+ len -= ie_len;
+ data += ie_len;
+ }
+
+out:
+ /* return result of parse_bd_ie_board() or -ENOENT */
+ return ret;
+}
+
+static int ath10k_core_fetch_board_data_api_n(struct ath10k *ar,
+ const char *boardname,
+ const char *fallback_boardname,
+ const char *filename)
+{
+ size_t len, magic_len;
+ const u8 *data;
+ int ret;
+
+ ar->normal_mode_fw.board = ath10k_fetch_fw_file(ar,
+ ar->hw_params.fw.dir,
+ filename);
+ if (IS_ERR(ar->normal_mode_fw.board))
+ return PTR_ERR(ar->normal_mode_fw.board);
+
+ data = ar->normal_mode_fw.board->data;
+ len = ar->normal_mode_fw.board->size;
+
+	/* the magic is padded with an extra null byte */
+ magic_len = strlen(ATH10K_BOARD_MAGIC) + 1;
+ if (len < magic_len) {
+ ath10k_err(ar, "failed to find magic value in %s/%s, file too short: %zu\n",
+ ar->hw_params.fw.dir, filename, len);
+ ret = -EINVAL;
+ goto err;
+ }
+
+ if (memcmp(data, ATH10K_BOARD_MAGIC, magic_len)) {
+ ath10k_err(ar, "found invalid board magic\n");
+ ret = -EINVAL;
+ goto err;
+ }
+
+ /* magic is padded to 4 bytes */
+ magic_len = ALIGN(magic_len, 4);
+ if (len < magic_len) {
+ ath10k_err(ar, "failed: %s/%s too small to contain board data, len: %zu\n",
+ ar->hw_params.fw.dir, filename, len);
+ ret = -EINVAL;
+ goto err;
+ }
+
+ data += magic_len;
+ len -= magic_len;
+
+ /* attempt to find boardname in the IE list */
+ ret = ath10k_core_search_bd(ar, boardname, data, len);
+
+ /* if we didn't find it and have a fallback name, try that */
+ if (ret == -ENOENT && fallback_boardname)
+ ret = ath10k_core_search_bd(ar, fallback_boardname, data, len);
+
+ if (ret == -ENOENT) {
+ ath10k_err(ar,
+ "failed to fetch board data for %s from %s/%s\n",
+ boardname, ar->hw_params.fw.dir, filename);
+ ret = -ENODATA;
+ }
+
+ if (ret)
+ goto err;
+
+ return 0;
+
+err:
+ ath10k_core_free_board_files(ar);
+ return ret;
+}
+
+static int ath10k_core_create_board_name(struct ath10k *ar, char *name,
+ size_t name_len, bool with_variant)
+{
+ /* strlen(',variant=') + strlen(ar->id.bdf_ext) */
+ char variant[9 + ATH10K_SMBIOS_BDF_EXT_STR_LENGTH] = { 0 };
+
+ if (with_variant && ar->id.bdf_ext[0] != '\0')
+ scnprintf(variant, sizeof(variant), ",variant=%s",
+ ar->id.bdf_ext);
+
+ if (ar->id.bmi_ids_valid) {
+ scnprintf(name, name_len,
+ "bus=%s,bmi-chip-id=%d,bmi-board-id=%d%s",
+ ath10k_bus_str(ar->hif.bus),
+ ar->id.bmi_chip_id,
+ ar->id.bmi_board_id, variant);
+ goto out;
+ }
+
+ scnprintf(name, name_len,
+ "bus=%s,vendor=%04x,device=%04x,subsystem-vendor=%04x,subsystem-device=%04x%s",
+ ath10k_bus_str(ar->hif.bus),
+ ar->id.vendor, ar->id.device,
+ ar->id.subsystem_vendor, ar->id.subsystem_device, variant);
+out:
+ ath10k_dbg(ar, ATH10K_DBG_BOOT, "boot using board name '%s'\n", name);
+
+ return 0;
+}
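+
+/* A generated name looks like, for instance (hypothetical IDs),
+ * "bus=pci,vendor=168c,device=003c,subsystem-vendor=1028,subsystem-device=020e,variant=RV_XYZ",
+ * or "bus=pci,bmi-chip-id=0,bmi-board-id=16" when the BMI ids are valid.
+ */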
+
+static int ath10k_core_fetch_board_file(struct ath10k *ar)
+{
+ char boardname[100], fallback_boardname[100];
+ int ret;
+
+ ret = ath10k_core_create_board_name(ar, boardname,
+ sizeof(boardname), true);
+ if (ret) {
+ ath10k_err(ar, "failed to create board name: %d", ret);
+ return ret;
+ }
+
+ ret = ath10k_core_create_board_name(ar, fallback_boardname,
+ sizeof(boardname), false);
+ if (ret) {
+ ath10k_err(ar, "failed to create fallback board name: %d", ret);
+ return ret;
+ }
+
+ ar->bd_api = 2;
+ ret = ath10k_core_fetch_board_data_api_n(ar, boardname,
+ fallback_boardname,
+ ATH10K_BOARD_API2_FILE);
+ if (!ret)
+ goto success;
+
+ ar->bd_api = 1;
+ ret = ath10k_core_fetch_board_data_api_1(ar);
+ if (ret) {
+ ath10k_err(ar, "failed to fetch board-2.bin or board.bin from %s\n",
+ ar->hw_params.fw.dir);
+ return ret;
+ }
+
+success:
+ ath10k_dbg(ar, ATH10K_DBG_BOOT, "using board api %d\n", ar->bd_api);
+ return 0;
+}
+
+int ath10k_core_fetch_firmware_api_n(struct ath10k *ar, const char *name,
+ struct ath10k_fw_file *fw_file)
+{
+ size_t magic_len, len, ie_len;
+ int ie_id, i, index, bit, ret;
+ struct ath10k_fw_ie *hdr;
+ const u8 *data;
+ __le32 *timestamp, *version;
+
+ /* first fetch the firmware file (firmware-*.bin) */
+ fw_file->firmware = ath10k_fetch_fw_file(ar, ar->hw_params.fw.dir,
+ name);
+ if (IS_ERR(fw_file->firmware))
+ return PTR_ERR(fw_file->firmware);
+
+ data = fw_file->firmware->data;
+ len = fw_file->firmware->size;
+
+ /* magic also includes the null byte, check that as well */
+ magic_len = strlen(ATH10K_FIRMWARE_MAGIC) + 1;
+
+ if (len < magic_len) {
+ ath10k_err(ar, "firmware file '%s/%s' too small to contain magic: %zu\n",
+ ar->hw_params.fw.dir, name, len);
+ ret = -EINVAL;
+ goto err;
+ }
+
+ if (memcmp(data, ATH10K_FIRMWARE_MAGIC, magic_len) != 0) {
+ ath10k_err(ar, "invalid firmware magic\n");
+ ret = -EINVAL;
+ goto err;
+ }
+
+ /* jump over the padding */
+ magic_len = ALIGN(magic_len, 4);
+
+ len -= magic_len;
+ data += magic_len;
+
+ /* loop elements */
+ while (len > sizeof(struct ath10k_fw_ie)) {
+ hdr = (struct ath10k_fw_ie *)data;
+
+ ie_id = le32_to_cpu(hdr->id);
+ ie_len = le32_to_cpu(hdr->len);
+
+ len -= sizeof(*hdr);
+ data += sizeof(*hdr);
+
+ if (len < ie_len) {
+ ath10k_err(ar, "invalid length for FW IE %d (%zu < %zu)\n",
+ ie_id, len, ie_len);
+ ret = -EINVAL;
+ goto err;
+ }
+
+ switch (ie_id) {
+ case ATH10K_FW_IE_FW_VERSION:
+ if (ie_len > sizeof(fw_file->fw_version) - 1)
+ break;
+
+ memcpy(fw_file->fw_version, data, ie_len);
+ fw_file->fw_version[ie_len] = '\0';
+
+ ath10k_dbg(ar, ATH10K_DBG_BOOT,
+ "found fw version %s\n",
+ fw_file->fw_version);
+ break;
+ case ATH10K_FW_IE_TIMESTAMP:
+ if (ie_len != sizeof(u32))
+ break;
+
+ timestamp = (__le32 *)data;
+
+ ath10k_dbg(ar, ATH10K_DBG_BOOT, "found fw timestamp %d\n",
+ le32_to_cpup(timestamp));
+ break;
+ case ATH10K_FW_IE_FEATURES:
+ ath10k_dbg(ar, ATH10K_DBG_BOOT,
+ "found firmware features ie (%zd B)\n",
+ ie_len);
+
+ for (i = 0; i < ATH10K_FW_FEATURE_COUNT; i++) {
+ index = i / 8;
+ bit = i % 8;
+
+ if (index == ie_len)
+ break;
+
+ if (data[index] & (1 << bit)) {
+ ath10k_dbg(ar, ATH10K_DBG_BOOT,
+ "Enabling feature bit: %i\n",
+ i);
+ __set_bit(i, fw_file->fw_features);
+ }
+ }
+
+ ath10k_dbg_dump(ar, ATH10K_DBG_BOOT, "features", "",
+ fw_file->fw_features,
+ sizeof(fw_file->fw_features));
+ break;
+ case ATH10K_FW_IE_FW_IMAGE:
+ ath10k_dbg(ar, ATH10K_DBG_BOOT,
+ "found fw image ie (%zd B)\n",
+ ie_len);
+
+ fw_file->firmware_data = data;
+ fw_file->firmware_len = ie_len;
+
+ break;
+ case ATH10K_FW_IE_OTP_IMAGE:
+ ath10k_dbg(ar, ATH10K_DBG_BOOT,
+ "found otp image ie (%zd B)\n",
+ ie_len);
+
+ fw_file->otp_data = data;
+ fw_file->otp_len = ie_len;
+
+ break;
+ case ATH10K_FW_IE_WMI_OP_VERSION:
+ if (ie_len != sizeof(u32))
+ break;
+
+ version = (__le32 *)data;
+
+ fw_file->wmi_op_version = le32_to_cpup(version);
+
+ ath10k_dbg(ar, ATH10K_DBG_BOOT, "found fw ie wmi op version %d\n",
+ fw_file->wmi_op_version);
+ break;
+ case ATH10K_FW_IE_HTT_OP_VERSION:
+ if (ie_len != sizeof(u32))
+ break;
+
+ version = (__le32 *)data;
+
+ fw_file->htt_op_version = le32_to_cpup(version);
+
+ ath10k_dbg(ar, ATH10K_DBG_BOOT, "found fw ie htt op version %d\n",
+ fw_file->htt_op_version);
+ break;
+ case ATH10K_FW_IE_FW_CODE_SWAP_IMAGE:
+ ath10k_dbg(ar, ATH10K_DBG_BOOT,
+ "found fw code swap image ie (%zd B)\n",
+ ie_len);
+ fw_file->codeswap_data = data;
+ fw_file->codeswap_len = ie_len;
+ break;
+ default:
+ ath10k_warn(ar, "Unknown FW IE: %u\n",
+ le32_to_cpu(hdr->id));
+ break;
+ }
+
+ /* jump over the padding */
+ ie_len = ALIGN(ie_len, 4);
+
+ len -= ie_len;
+ data += ie_len;
+ }
+
+ if (!test_bit(ATH10K_FW_FEATURE_NON_BMI, fw_file->fw_features) &&
+ (!fw_file->firmware_data || !fw_file->firmware_len)) {
+ ath10k_warn(ar, "No ATH10K_FW_IE_FW_IMAGE found from '%s/%s', skipping\n",
+ ar->hw_params.fw.dir, name);
+ ret = -ENOMEDIUM;
+ goto err;
+ }
+
+ return 0;
+
+err:
+ ath10k_core_free_firmware_files(ar);
+ return ret;
+}
+
+static void ath10k_core_get_fw_name(struct ath10k *ar, char *fw_name,
+ size_t fw_name_len, int fw_api)
+{
+ switch (ar->hif.bus) {
+ case ATH10K_BUS_SDIO:
+ case ATH10K_BUS_USB:
+ scnprintf(fw_name, fw_name_len, "%s-%s-%d.bin",
+ ATH10K_FW_FILE_BASE, ath10k_bus_str(ar->hif.bus),
+ fw_api);
+ break;
+ case ATH10K_BUS_PCI:
+ case ATH10K_BUS_AHB:
+ case ATH10K_BUS_SNOC:
+ scnprintf(fw_name, fw_name_len, "%s-%d.bin",
+ ATH10K_FW_FILE_BASE, fw_api);
+ break;
+ }
+}
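+
+/* For fw_api 5 this yields "firmware-sdio-5.bin" or "firmware-usb-5.bin"
+ * on SDIO/USB and plain "firmware-5.bin" on PCI, AHB and SNOC.
+ */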
+
+static int ath10k_core_fetch_firmware_files(struct ath10k *ar)
+{
+ int ret, i;
+ char fw_name[100];
+
+ /* calibration file is optional, don't check for any errors */
+ ath10k_fetch_cal_file(ar);
+
+ for (i = ATH10K_FW_API_MAX; i >= ATH10K_FW_API_MIN; i--) {
+ ar->fw_api = i;
+ ath10k_dbg(ar, ATH10K_DBG_BOOT, "trying fw api %d\n",
+ ar->fw_api);
+
+ ath10k_core_get_fw_name(ar, fw_name, sizeof(fw_name), ar->fw_api);
+ ret = ath10k_core_fetch_firmware_api_n(ar, fw_name,
+ &ar->normal_mode_fw.fw_file);
+ if (!ret)
+ goto success;
+ }
+
+ /* we end up here if we couldn't fetch any firmware */
+
+ ath10k_err(ar, "Failed to find firmware-N.bin (N between %d and %d) from %s: %d",
+ ATH10K_FW_API_MIN, ATH10K_FW_API_MAX, ar->hw_params.fw.dir,
+ ret);
+
+ return ret;
+
+success:
+ ath10k_dbg(ar, ATH10K_DBG_BOOT, "using fw api %d\n", ar->fw_api);
+
+ return 0;
+}
+
+static int ath10k_core_pre_cal_download(struct ath10k *ar)
+{
+ int ret;
+
+ ret = ath10k_download_cal_file(ar, ar->pre_cal_file);
+ if (ret == 0) {
+ ar->cal_mode = ATH10K_PRE_CAL_MODE_FILE;
+ goto success;
+ }
+
+ ath10k_dbg(ar, ATH10K_DBG_BOOT,
+ "boot did not find a pre calibration file, try DT next: %d\n",
+ ret);
+
+ ret = ath10k_download_cal_dt(ar, "qcom,ath10k-pre-calibration-data");
+ if (ret) {
+ ath10k_dbg(ar, ATH10K_DBG_BOOT,
+ "unable to load pre cal data from DT: %d\n", ret);
+ return ret;
+ }
+ ar->cal_mode = ATH10K_PRE_CAL_MODE_DT;
+
+success:
+ ath10k_dbg(ar, ATH10K_DBG_BOOT, "boot using calibration mode %s\n",
+ ath10k_cal_mode_str(ar->cal_mode));
+
+ return 0;
+}
+
+static int ath10k_core_pre_cal_config(struct ath10k *ar)
+{
+ int ret;
+
+ ret = ath10k_core_pre_cal_download(ar);
+ if (ret) {
+ ath10k_dbg(ar, ATH10K_DBG_BOOT,
+ "failed to load pre cal data: %d\n", ret);
+ return ret;
+ }
+
+ ret = ath10k_core_get_board_id_from_otp(ar);
+ if (ret) {
+ ath10k_err(ar, "failed to get board id: %d\n", ret);
+ return ret;
+ }
+
+ ret = ath10k_download_and_run_otp(ar);
+ if (ret) {
+ ath10k_err(ar, "failed to run otp: %d\n", ret);
+ return ret;
+ }
+
+ ath10k_dbg(ar, ATH10K_DBG_BOOT,
+ "pre cal configuration done successfully\n");
+
+ return 0;
+}
+
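+/* Calibration data is tried in decreasing order of preference: pre-cal
+ * file, pre-cal from DT, cal file, cal from DT, target EEPROM and
+ * finally OTP.
+ */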
+static int ath10k_download_cal_data(struct ath10k *ar)
+{
+ int ret;
+
+ ret = ath10k_core_pre_cal_config(ar);
+ if (ret == 0)
+ return 0;
+
+ ath10k_dbg(ar, ATH10K_DBG_BOOT,
+ "pre cal download procedure failed, try cal file: %d\n",
+ ret);
+
+ ret = ath10k_download_cal_file(ar, ar->cal_file);
+ if (ret == 0) {
+ ar->cal_mode = ATH10K_CAL_MODE_FILE;
+ goto done;
+ }
+
+ ath10k_dbg(ar, ATH10K_DBG_BOOT,
+ "boot did not find a calibration file, try DT next: %d\n",
+ ret);
+
+ ret = ath10k_download_cal_dt(ar, "qcom,ath10k-calibration-data");
+ if (ret == 0) {
+ ar->cal_mode = ATH10K_CAL_MODE_DT;
+ goto done;
+ }
+
+ ath10k_dbg(ar, ATH10K_DBG_BOOT,
+ "boot did not find DT entry, try target EEPROM next: %d\n",
+ ret);
+
+ ret = ath10k_download_cal_eeprom(ar);
+ if (ret == 0) {
+ ar->cal_mode = ATH10K_CAL_MODE_EEPROM;
+ goto done;
+ }
+
+ ath10k_dbg(ar, ATH10K_DBG_BOOT,
+ "boot did not find target EEPROM entry, try OTP next: %d\n",
+ ret);
+
+ ret = ath10k_download_and_run_otp(ar);
+ if (ret) {
+ ath10k_err(ar, "failed to run otp: %d\n", ret);
+ return ret;
+ }
+
+ ar->cal_mode = ATH10K_CAL_MODE_OTP;
+
+done:
+ ath10k_dbg(ar, ATH10K_DBG_BOOT, "boot using calibration mode %s\n",
+ ath10k_cal_mode_str(ar->cal_mode));
+ return 0;
+}
+
+static int ath10k_init_uart(struct ath10k *ar)
+{
+ int ret;
+
+	/*
+	 * Explicitly set UART prints to zero; the target turns them on
+	 * based on scratch registers.
+	 */
+ ret = ath10k_bmi_write32(ar, hi_serial_enable, 0);
+ if (ret) {
+ ath10k_warn(ar, "could not disable UART prints (%d)\n", ret);
+ return ret;
+ }
+
+ if (!uart_print)
+ return 0;
+
+ ret = ath10k_bmi_write32(ar, hi_dbg_uart_txpin, ar->hw_params.uart_pin);
+ if (ret) {
+ ath10k_warn(ar, "could not enable UART prints (%d)\n", ret);
+ return ret;
+ }
+
+ ret = ath10k_bmi_write32(ar, hi_serial_enable, 1);
+ if (ret) {
+ ath10k_warn(ar, "could not enable UART prints (%d)\n", ret);
+ return ret;
+ }
+
+ /* Set the UART baud rate to 19200. */
+ ret = ath10k_bmi_write32(ar, hi_desired_baud_rate, 19200);
+ if (ret) {
+ ath10k_warn(ar, "could not set the baud rate (%d)\n", ret);
+ return ret;
+ }
+
+ ath10k_info(ar, "UART prints enabled\n");
+ return 0;
+}
+
+static int ath10k_init_hw_params(struct ath10k *ar)
+{
+ const struct ath10k_hw_params *uninitialized_var(hw_params);
+ int i;
+
+ for (i = 0; i < ARRAY_SIZE(ath10k_hw_params_list); i++) {
+ hw_params = &ath10k_hw_params_list[i];
+
+ if (hw_params->id == ar->target_version &&
+ hw_params->dev_id == ar->dev_id)
+ break;
+ }
+
+ if (i == ARRAY_SIZE(ath10k_hw_params_list)) {
+ ath10k_err(ar, "Unsupported hardware version: 0x%x\n",
+ ar->target_version);
+ return -EINVAL;
+ }
+
+ ar->hw_params = *hw_params;
+
+ ath10k_dbg(ar, ATH10K_DBG_BOOT, "Hardware name %s version 0x%x\n",
+ ar->hw_params.name, ar->target_version);
+
+ return 0;
+}
+
+static void ath10k_core_restart(struct work_struct *work)
+{
+ struct ath10k *ar = container_of(work, struct ath10k, restart_work);
+ int ret;
+
+ set_bit(ATH10K_FLAG_CRASH_FLUSH, &ar->dev_flags);
+
+ /* Place a barrier to make sure the compiler doesn't reorder
+ * CRASH_FLUSH and calling other functions.
+ */
+ barrier();
+
+ ieee80211_stop_queues(ar->hw);
+ ath10k_drain_tx(ar);
+ complete(&ar->scan.started);
+ complete(&ar->scan.completed);
+ complete(&ar->scan.on_channel);
+ complete(&ar->offchan_tx_completed);
+ complete(&ar->install_key_done);
+ complete(&ar->vdev_setup_done);
+ complete(&ar->thermal.wmi_sync);
+ complete(&ar->bss_survey_done);
+ wake_up(&ar->htt.empty_tx_wq);
+ wake_up(&ar->wmi.tx_credits_wq);
+ wake_up(&ar->peer_mapping_wq);
+
+	/* TODO: cancelling coverage_class_work could be done once in
+	 * ath10k_halt() so that both stop() and restart() share it, but
+	 * ath10k_halt() takes conf_mutex and calling cancel_work_sync()
+	 * under conf_mutex would deadlock.
+	 */
+ cancel_work_sync(&ar->set_coverage_class_work);
+
+ mutex_lock(&ar->conf_mutex);
+
+ switch (ar->state) {
+ case ATH10K_STATE_ON:
+ ar->state = ATH10K_STATE_RESTARTING;
+ ath10k_halt(ar);
+ ath10k_scan_finish(ar);
+ ieee80211_restart_hw(ar->hw);
+ break;
+ case ATH10K_STATE_OFF:
+ /* this can happen if driver is being unloaded
+ * or if the crash happens during FW probing
+ */
+ ath10k_warn(ar, "cannot restart a device that hasn't been started\n");
+ break;
+ case ATH10K_STATE_RESTARTING:
+ /* hw restart might be requested from multiple places */
+ break;
+ case ATH10K_STATE_RESTARTED:
+ ar->state = ATH10K_STATE_WEDGED;
+ /* fall through */
+ case ATH10K_STATE_WEDGED:
+ ath10k_warn(ar, "device is wedged, will not restart\n");
+ break;
+ case ATH10K_STATE_UTF:
+ ath10k_warn(ar, "firmware restart in UTF mode not supported\n");
+ break;
+ }
+
+ mutex_unlock(&ar->conf_mutex);
+
+ ret = ath10k_coredump_submit(ar);
+ if (ret)
+ ath10k_warn(ar, "failed to send firmware crash dump via devcoredump: %d",
+ ret);
+}
+
+static void ath10k_core_set_coverage_class_work(struct work_struct *work)
+{
+ struct ath10k *ar = container_of(work, struct ath10k,
+ set_coverage_class_work);
+
+ if (ar->hw_params.hw_ops->set_coverage_class)
+ ar->hw_params.hw_ops->set_coverage_class(ar, -1);
+}
+
+static int ath10k_core_init_firmware_features(struct ath10k *ar)
+{
+ struct ath10k_fw_file *fw_file = &ar->normal_mode_fw.fw_file;
+
+ if (test_bit(ATH10K_FW_FEATURE_WMI_10_2, fw_file->fw_features) &&
+ !test_bit(ATH10K_FW_FEATURE_WMI_10X, fw_file->fw_features)) {
+ ath10k_err(ar, "feature bits corrupted: 10.2 feature requires 10.x feature to be set as well");
+ return -EINVAL;
+ }
+
+ if (fw_file->wmi_op_version >= ATH10K_FW_WMI_OP_VERSION_MAX) {
+ ath10k_err(ar, "unsupported WMI OP version (max %d): %d\n",
+ ATH10K_FW_WMI_OP_VERSION_MAX, fw_file->wmi_op_version);
+ return -EINVAL;
+ }
+
+ ar->wmi.rx_decap_mode = ATH10K_HW_TXRX_NATIVE_WIFI;
+ switch (ath10k_cryptmode_param) {
+ case ATH10K_CRYPT_MODE_HW:
+ clear_bit(ATH10K_FLAG_RAW_MODE, &ar->dev_flags);
+ clear_bit(ATH10K_FLAG_HW_CRYPTO_DISABLED, &ar->dev_flags);
+ break;
+ case ATH10K_CRYPT_MODE_SW:
+ if (!test_bit(ATH10K_FW_FEATURE_RAW_MODE_SUPPORT,
+ fw_file->fw_features)) {
+ ath10k_err(ar, "cryptmode > 0 requires raw mode support from firmware");
+ return -EINVAL;
+ }
+
+ set_bit(ATH10K_FLAG_RAW_MODE, &ar->dev_flags);
+ set_bit(ATH10K_FLAG_HW_CRYPTO_DISABLED, &ar->dev_flags);
+ break;
+ default:
+ ath10k_info(ar, "invalid cryptmode: %d\n",
+ ath10k_cryptmode_param);
+ return -EINVAL;
+ }
+
+ ar->htt.max_num_amsdu = ATH10K_HTT_MAX_NUM_AMSDU_DEFAULT;
+ ar->htt.max_num_ampdu = ATH10K_HTT_MAX_NUM_AMPDU_DEFAULT;
+
+ if (rawmode) {
+ if (!test_bit(ATH10K_FW_FEATURE_RAW_MODE_SUPPORT,
+ fw_file->fw_features)) {
+ ath10k_err(ar, "rawmode = 1 requires support from firmware");
+ return -EINVAL;
+ }
+ set_bit(ATH10K_FLAG_RAW_MODE, &ar->dev_flags);
+ }
+
+ if (test_bit(ATH10K_FLAG_RAW_MODE, &ar->dev_flags)) {
+ ar->wmi.rx_decap_mode = ATH10K_HW_TXRX_RAW;
+
+ /* Workaround:
+ *
+ * Firmware A-MSDU aggregation breaks with RAW Tx encap mode
+ * and causes enormous performance issues (malformed frames,
+ * etc).
+ *
+ * Disabling A-MSDU makes RAW mode stable with heavy traffic
+ * albeit a bit slower compared to regular operation.
+ */
+ ar->htt.max_num_amsdu = 1;
+ }
+
+ /* Backwards compatibility for firmwares without
+ * ATH10K_FW_IE_WMI_OP_VERSION.
+ */
+ if (fw_file->wmi_op_version == ATH10K_FW_WMI_OP_VERSION_UNSET) {
+ if (test_bit(ATH10K_FW_FEATURE_WMI_10X, fw_file->fw_features)) {
+ if (test_bit(ATH10K_FW_FEATURE_WMI_10_2,
+ fw_file->fw_features))
+ fw_file->wmi_op_version = ATH10K_FW_WMI_OP_VERSION_10_2;
+ else
+ fw_file->wmi_op_version = ATH10K_FW_WMI_OP_VERSION_10_1;
+ } else {
+ fw_file->wmi_op_version = ATH10K_FW_WMI_OP_VERSION_MAIN;
+ }
+ }
+
+ switch (fw_file->wmi_op_version) {
+ case ATH10K_FW_WMI_OP_VERSION_MAIN:
+ ar->max_num_peers = TARGET_NUM_PEERS;
+ ar->max_num_stations = TARGET_NUM_STATIONS;
+ ar->max_num_vdevs = TARGET_NUM_VDEVS;
+ ar->htt.max_num_pending_tx = TARGET_NUM_MSDU_DESC;
+ ar->fw_stats_req_mask = WMI_STAT_PDEV | WMI_STAT_VDEV |
+ WMI_STAT_PEER;
+ ar->max_spatial_stream = WMI_MAX_SPATIAL_STREAM;
+ break;
+ case ATH10K_FW_WMI_OP_VERSION_10_1:
+ case ATH10K_FW_WMI_OP_VERSION_10_2:
+ case ATH10K_FW_WMI_OP_VERSION_10_2_4:
+ if (ath10k_peer_stats_enabled(ar)) {
+ ar->max_num_peers = TARGET_10X_TX_STATS_NUM_PEERS;
+ ar->max_num_stations = TARGET_10X_TX_STATS_NUM_STATIONS;
+ } else {
+ ar->max_num_peers = TARGET_10X_NUM_PEERS;
+ ar->max_num_stations = TARGET_10X_NUM_STATIONS;
+ }
+ ar->max_num_vdevs = TARGET_10X_NUM_VDEVS;
+ ar->htt.max_num_pending_tx = TARGET_10X_NUM_MSDU_DESC;
+ ar->fw_stats_req_mask = WMI_STAT_PEER;
+ ar->max_spatial_stream = WMI_MAX_SPATIAL_STREAM;
+ break;
+ case ATH10K_FW_WMI_OP_VERSION_TLV:
+ ar->max_num_peers = TARGET_TLV_NUM_PEERS;
+ ar->max_num_stations = TARGET_TLV_NUM_STATIONS;
+ ar->max_num_vdevs = TARGET_TLV_NUM_VDEVS;
+ ar->max_num_tdls_vdevs = TARGET_TLV_NUM_TDLS_VDEVS;
+ ar->htt.max_num_pending_tx = TARGET_TLV_NUM_MSDU_DESC;
+ ar->wow.max_num_patterns = TARGET_TLV_NUM_WOW_PATTERNS;
+ ar->fw_stats_req_mask = WMI_STAT_PDEV | WMI_STAT_VDEV |
+ WMI_STAT_PEER;
+ ar->max_spatial_stream = WMI_MAX_SPATIAL_STREAM;
+ ar->wmi.mgmt_max_num_pending_tx = TARGET_TLV_MGMT_NUM_MSDU_DESC;
+ break;
+ case ATH10K_FW_WMI_OP_VERSION_10_4:
+ ar->max_num_peers = TARGET_10_4_NUM_PEERS;
+ ar->max_num_stations = TARGET_10_4_NUM_STATIONS;
+ ar->num_active_peers = TARGET_10_4_ACTIVE_PEERS;
+ ar->max_num_vdevs = TARGET_10_4_NUM_VDEVS;
+ ar->num_tids = TARGET_10_4_TGT_NUM_TIDS;
+ ar->fw_stats_req_mask = WMI_10_4_STAT_PEER |
+ WMI_10_4_STAT_PEER_EXTD |
+ WMI_10_4_STAT_VDEV_EXTD;
+ ar->max_spatial_stream = ar->hw_params.max_spatial_stream;
+ ar->max_num_tdls_vdevs = TARGET_10_4_NUM_TDLS_VDEVS;
+
+ if (test_bit(ATH10K_FW_FEATURE_PEER_FLOW_CONTROL,
+ fw_file->fw_features))
+ ar->htt.max_num_pending_tx = TARGET_10_4_NUM_MSDU_DESC_PFC;
+ else
+ ar->htt.max_num_pending_tx = TARGET_10_4_NUM_MSDU_DESC;
+ break;
+ case ATH10K_FW_WMI_OP_VERSION_UNSET:
+ case ATH10K_FW_WMI_OP_VERSION_MAX:
+ WARN_ON(1);
+ return -EINVAL;
+ }
+
+ /* Backwards compatibility for firmwares without
+ * ATH10K_FW_IE_HTT_OP_VERSION.
+ */
+ if (fw_file->htt_op_version == ATH10K_FW_HTT_OP_VERSION_UNSET) {
+ switch (fw_file->wmi_op_version) {
+ case ATH10K_FW_WMI_OP_VERSION_MAIN:
+ fw_file->htt_op_version = ATH10K_FW_HTT_OP_VERSION_MAIN;
+ break;
+ case ATH10K_FW_WMI_OP_VERSION_10_1:
+ case ATH10K_FW_WMI_OP_VERSION_10_2:
+ case ATH10K_FW_WMI_OP_VERSION_10_2_4:
+ fw_file->htt_op_version = ATH10K_FW_HTT_OP_VERSION_10_1;
+ break;
+ case ATH10K_FW_WMI_OP_VERSION_TLV:
+ fw_file->htt_op_version = ATH10K_FW_HTT_OP_VERSION_TLV;
+ break;
+ case ATH10K_FW_WMI_OP_VERSION_10_4:
+ case ATH10K_FW_WMI_OP_VERSION_UNSET:
+ case ATH10K_FW_WMI_OP_VERSION_MAX:
+ ath10k_err(ar, "htt op version not found from fw meta data");
+ return -EINVAL;
+ }
+ }
+
+ return 0;
+}
+
+static int ath10k_core_reset_rx_filter(struct ath10k *ar)
+{
+ int ret;
+ int vdev_id;
+ int vdev_type;
+ int vdev_subtype;
+ const u8 *vdev_addr;
+
+ vdev_id = 0;
+ vdev_type = WMI_VDEV_TYPE_STA;
+ vdev_subtype = ath10k_wmi_get_vdev_subtype(ar, WMI_VDEV_SUBTYPE_NONE);
+ vdev_addr = ar->mac_addr;
+
+ ret = ath10k_wmi_vdev_create(ar, vdev_id, vdev_type, vdev_subtype,
+ vdev_addr);
+ if (ret) {
+ ath10k_err(ar, "failed to create dummy vdev: %d\n", ret);
+ return ret;
+ }
+
+ ret = ath10k_wmi_vdev_delete(ar, vdev_id);
+ if (ret) {
+ ath10k_err(ar, "failed to delete dummy vdev: %d\n", ret);
+ return ret;
+ }
+
+ /* WMI and HTT may use separate HIF pipes and are not guaranteed to be
+ * serialized properly implicitly.
+ *
+ * Moreover, (most) WMI commands have no explicit acknowledgements. It
+ * is possible to infer one implicitly by poking firmware with an echo
+ * command - getting a reply means all preceding commands have been
+ * (mostly) processed.
+ *
+ * In case of vdev create/delete this is sufficient.
+ *
+ * Without this it's possible to end up with a race when HTT Rx ring is
+ * started before vdev create/delete hack is complete allowing a short
+ * window of opportunity to receive (and Tx ACK) a bunch of frames.
+ */
+ ret = ath10k_wmi_barrier(ar);
+ if (ret) {
+ ath10k_err(ar, "failed to ping firmware: %d\n", ret);
+ return ret;
+ }
+
+ return 0;
+}
+
+int ath10k_core_start(struct ath10k *ar, enum ath10k_firmware_mode mode,
+ const struct ath10k_fw_components *fw)
+{
+ int status;
+ u32 val;
+
+ lockdep_assert_held(&ar->conf_mutex);
+
+ clear_bit(ATH10K_FLAG_CRASH_FLUSH, &ar->dev_flags);
+
+ ar->running_fw = fw;
+
+ if (!test_bit(ATH10K_FW_FEATURE_NON_BMI,
+ ar->running_fw->fw_file.fw_features)) {
+ ath10k_bmi_start(ar);
+
+ if (ath10k_init_configure_target(ar)) {
+ status = -EINVAL;
+ goto err;
+ }
+
+ status = ath10k_download_cal_data(ar);
+ if (status)
+ goto err;
+
+ /* Some of the qca988x solutions have a global reset issue
+ * during target initialization. Bypassing the PLL setting before
+ * downloading firmware and letting the SoC run on REF_CLK
+ * fixes the problem. A corresponding firmware change is also
+ * needed to set the clock source once the target is
+ * initialized.
+ */
+ if (test_bit(ATH10K_FW_FEATURE_SUPPORTS_SKIP_CLOCK_INIT,
+ ar->running_fw->fw_file.fw_features)) {
+ status = ath10k_bmi_write32(ar, hi_skip_clock_init, 1);
+ if (status) {
+ ath10k_err(ar, "could not write to skip_clock_init: %d\n",
+ status);
+ goto err;
+ }
+ }
+
+ status = ath10k_download_fw(ar);
+ if (status)
+ goto err;
+
+ status = ath10k_init_uart(ar);
+ if (status)
+ goto err;
+
+ if (ar->hif.bus == ATH10K_BUS_SDIO)
+ ath10k_init_sdio(ar);
+ }
+
+ ar->htc.htc_ops.target_send_suspend_complete =
+ ath10k_send_suspend_complete;
+
+ status = ath10k_htc_init(ar);
+ if (status) {
+ ath10k_err(ar, "could not init HTC (%d)\n", status);
+ goto err;
+ }
+
+ if (!test_bit(ATH10K_FW_FEATURE_NON_BMI,
+ ar->running_fw->fw_file.fw_features)) {
+ status = ath10k_bmi_done(ar);
+ if (status)
+ goto err;
+ }
+
+ status = ath10k_wmi_attach(ar);
+ if (status) {
+ ath10k_err(ar, "WMI attach failed: %d\n", status);
+ goto err;
+ }
+
+ status = ath10k_htt_init(ar);
+ if (status) {
+ ath10k_err(ar, "failed to init htt: %d\n", status);
+ goto err_wmi_detach;
+ }
+
+ status = ath10k_htt_tx_start(&ar->htt);
+ if (status) {
+ ath10k_err(ar, "failed to alloc htt tx: %d\n", status);
+ goto err_wmi_detach;
+ }
+
+ /* If firmware indicates Full Rx Reorder support it must be used in a
+ * slightly different manner. Let HTT code know.
+ */
+ ar->htt.rx_ring.in_ord_rx = !!(test_bit(WMI_SERVICE_RX_FULL_REORDER,
+ ar->wmi.svc_map));
+
+ status = ath10k_htt_rx_alloc(&ar->htt);
+ if (status) {
+ ath10k_err(ar, "failed to alloc htt rx: %d\n", status);
+ goto err_htt_tx_detach;
+ }
+
+ status = ath10k_hif_start(ar);
+ if (status) {
+ ath10k_err(ar, "could not start HIF: %d\n", status);
+ goto err_htt_rx_detach;
+ }
+
+ status = ath10k_htc_wait_target(&ar->htc);
+ if (status) {
+ ath10k_err(ar, "failed to connect to HTC: %d\n", status);
+ goto err_hif_stop;
+ }
+
+ if (mode == ATH10K_FIRMWARE_MODE_NORMAL) {
+ status = ath10k_htt_connect(&ar->htt);
+ if (status) {
+ ath10k_err(ar, "failed to connect htt (%d)\n", status);
+ goto err_hif_stop;
+ }
+ }
+
+ status = ath10k_wmi_connect(ar);
+ if (status) {
+ ath10k_err(ar, "could not connect wmi: %d\n", status);
+ goto err_hif_stop;
+ }
+
+ status = ath10k_htc_start(&ar->htc);
+ if (status) {
+ ath10k_err(ar, "failed to start htc: %d\n", status);
+ goto err_hif_stop;
+ }
+
+ if (mode == ATH10K_FIRMWARE_MODE_NORMAL) {
+ status = ath10k_wmi_wait_for_service_ready(ar);
+ if (status) {
+ ath10k_warn(ar, "wmi service ready event not received");
+ goto err_hif_stop;
+ }
+ }
+
+ ath10k_dbg(ar, ATH10K_DBG_BOOT, "firmware %s booted\n",
+ ar->hw->wiphy->fw_version);
+
+ if (test_bit(WMI_SERVICE_EXT_RES_CFG_SUPPORT, ar->wmi.svc_map) &&
+ mode == ATH10K_FIRMWARE_MODE_NORMAL) {
+ val = 0;
+ if (ath10k_peer_stats_enabled(ar))
+ val = WMI_10_4_PEER_STATS;
+
+ /* Enable vdev stats by default */
+ val |= WMI_10_4_VDEV_STATS;
+
+ if (test_bit(WMI_SERVICE_BSS_CHANNEL_INFO_64, ar->wmi.svc_map))
+ val |= WMI_10_4_BSS_CHANNEL_INFO_64;
+
+ /* 10.4 firmware supports BT-Coex without reloading firmware
+ * via pdev param. To support Bluetooth coexistence pdev param,
+ * WMI_COEX_GPIO_SUPPORT of extended resource config should
+ * always be enabled.
+ */
+ if (test_bit(WMI_SERVICE_COEX_GPIO, ar->wmi.svc_map) &&
+ test_bit(ATH10K_FW_FEATURE_BTCOEX_PARAM,
+ ar->running_fw->fw_file.fw_features))
+ val |= WMI_10_4_COEX_GPIO_SUPPORT;
+
+ if (test_bit(WMI_SERVICE_TDLS_EXPLICIT_MODE_ONLY,
+ ar->wmi.svc_map))
+ val |= WMI_10_4_TDLS_EXPLICIT_MODE_ONLY;
+
+ if (test_bit(WMI_SERVICE_TDLS_UAPSD_BUFFER_STA,
+ ar->wmi.svc_map))
+ val |= WMI_10_4_TDLS_UAPSD_BUFFER_STA;
+
+ status = ath10k_mac_ext_resource_config(ar, val);
+ if (status) {
+ ath10k_err(ar,
+ "failed to send ext resource cfg command : %d\n",
+ status);
+ goto err_hif_stop;
+ }
+ }
+
+ status = ath10k_wmi_cmd_init(ar);
+ if (status) {
+ ath10k_err(ar, "could not send WMI init command (%d)\n",
+ status);
+ goto err_hif_stop;
+ }
+
+ status = ath10k_wmi_wait_for_unified_ready(ar);
+ if (status) {
+ ath10k_err(ar, "wmi unified ready event not received\n");
+ goto err_hif_stop;
+ }
+
+ /* Some firmware revisions do not properly set up hardware rx filter
+ * registers.
+ *
+ * A known example from QCA9880 with firmware 10.2.4 is that
+ * MAC_PCU_ADDR1_MASK is filled with 0s instead of 1s, allowing HW to
+ * respond with ACKs to any frame that matches MAC_PCU_RX_FILTER,
+ * which is also misconfigured to accept anything.
+ *
+ * The ADDR1 is programmed using an internal firmware structure field
+ * and can't be (easily/sanely) reached from the driver explicitly. It is
+ * possible to implicitly make it correct by creating a dummy vdev and
+ * then deleting it.
+ */
+ if (ar->hw_params.hw_filter_reset_required &&
+ mode == ATH10K_FIRMWARE_MODE_NORMAL) {
+ status = ath10k_core_reset_rx_filter(ar);
+ if (status) {
+ ath10k_err(ar,
+ "failed to reset rx filter: %d\n", status);
+ goto err_hif_stop;
+ }
+ }
+
+ status = ath10k_htt_rx_ring_refill(ar);
+ if (status) {
+ ath10k_err(ar, "failed to refill htt rx ring: %d\n", status);
+ goto err_hif_stop;
+ }
+
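+ /* e.g. with max_num_vdevs = 16 the map becomes 0xFFFF; for 64 or
+ * more vdevs the shift below would overflow, hence the explicit
+ * all-ones constant.
+ */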
+ if (ar->max_num_vdevs >= 64)
+ ar->free_vdev_map = 0xFFFFFFFFFFFFFFFFLL;
+ else
+ ar->free_vdev_map = (1LL << ar->max_num_vdevs) - 1;
+
+ INIT_LIST_HEAD(&ar->arvifs);
+
+ /* we don't care about HTT in UTF mode */
+ if (mode == ATH10K_FIRMWARE_MODE_NORMAL) {
+ status = ath10k_htt_setup(&ar->htt);
+ if (status) {
+ ath10k_err(ar, "failed to setup htt: %d\n", status);
+ goto err_hif_stop;
+ }
+ }
+
+ status = ath10k_debug_start(ar);
+ if (status)
+ goto err_hif_stop;
+
+ return 0;
+
+err_hif_stop:
+ ath10k_hif_stop(ar);
+err_htt_rx_detach:
+ ath10k_htt_rx_free(&ar->htt);
+err_htt_tx_detach:
+ ath10k_htt_tx_free(&ar->htt);
+err_wmi_detach:
+ ath10k_wmi_detach(ar);
+err:
+ return status;
+}
+EXPORT_SYMBOL(ath10k_core_start);
+
+int ath10k_wait_for_suspend(struct ath10k *ar, u32 suspend_opt)
+{
+ int ret;
+ unsigned long time_left;
+
+ reinit_completion(&ar->target_suspend);
+
+ ret = ath10k_wmi_pdev_suspend_target(ar, suspend_opt);
+ if (ret) {
+ ath10k_warn(ar, "could not suspend target (%d)\n", ret);
+ return ret;
+ }
+
+ time_left = wait_for_completion_timeout(&ar->target_suspend, 1 * HZ);
+
+ if (!time_left) {
+ ath10k_warn(ar, "suspend timed out - target pause event never came\n");
+ return -ETIMEDOUT;
+ }
+
+ return 0;
+}
+
+void ath10k_core_stop(struct ath10k *ar)
+{
+ lockdep_assert_held(&ar->conf_mutex);
+ ath10k_debug_stop(ar);
+
+ /* try to suspend target */
+ if (ar->state != ATH10K_STATE_RESTARTING &&
+ ar->state != ATH10K_STATE_UTF)
+ ath10k_wait_for_suspend(ar, WMI_PDEV_SUSPEND_AND_DISABLE_INTR);
+
+ ath10k_hif_stop(ar);
+ ath10k_htt_tx_stop(&ar->htt);
+ ath10k_htt_rx_free(&ar->htt);
+ ath10k_wmi_detach(ar);
+}
+EXPORT_SYMBOL(ath10k_core_stop);
+
+/* mac80211 manages fw/hw initialization through start/stop hooks. However in
+ * order to know what hw capabilities should be advertised to mac80211 it is
+ * necessary to load the firmware (and tear it down immediately since the
+ * start hook will try to init it again) before registering.
+ */
+static int ath10k_core_probe_fw(struct ath10k *ar)
+{
+ struct bmi_target_info target_info;
+ int ret = 0;
+
+ ret = ath10k_hif_power_up(ar);
+ if (ret) {
+ ath10k_err(ar, "could not power on hif bus (%d)\n", ret);
+ return ret;
+ }
+
+ switch (ar->hif.bus) {
+ case ATH10K_BUS_SDIO:
+ memset(&target_info, 0, sizeof(target_info));
+ ret = ath10k_bmi_get_target_info_sdio(ar, &target_info);
+ if (ret) {
+ ath10k_err(ar, "could not get target info (%d)\n", ret);
+ goto err_power_down;
+ }
+ ar->target_version = target_info.version;
+ ar->hw->wiphy->hw_version = target_info.version;
+ break;
+ case ATH10K_BUS_PCI:
+ case ATH10K_BUS_AHB:
+ case ATH10K_BUS_USB:
+ memset(&target_info, 0, sizeof(target_info));
+ ret = ath10k_bmi_get_target_info(ar, &target_info);
+ if (ret) {
+ ath10k_err(ar, "could not get target info (%d)\n", ret);
+ goto err_power_down;
+ }
+ ar->target_version = target_info.version;
+ ar->hw->wiphy->hw_version = target_info.version;
+ break;
+ case ATH10K_BUS_SNOC:
+ memset(&target_info, 0, sizeof(target_info));
+ ret = ath10k_hif_get_target_info(ar, &target_info);
+ if (ret) {
+ ath10k_err(ar, "could not get target info (%d)\n", ret);
+ goto err_power_down;
+ }
+ ar->target_version = target_info.version;
+ ar->hw->wiphy->hw_version = target_info.version;
+ break;
+ default:
+ ath10k_err(ar, "incorrect hif bus type: %d\n", ar->hif.bus);
+ }
+
+ ret = ath10k_init_hw_params(ar);
+ if (ret) {
+ ath10k_err(ar, "could not get hw params (%d)\n", ret);
+ goto err_power_down;
+ }
+
+ ret = ath10k_core_fetch_firmware_files(ar);
+ if (ret) {
+ ath10k_err(ar, "could not fetch firmware files (%d)\n", ret);
+ goto err_power_down;
+ }
+
+ BUILD_BUG_ON(sizeof(ar->hw->wiphy->fw_version) !=
+ sizeof(ar->normal_mode_fw.fw_file.fw_version));
+ memcpy(ar->hw->wiphy->fw_version, ar->normal_mode_fw.fw_file.fw_version,
+ sizeof(ar->hw->wiphy->fw_version));
+
+ ath10k_debug_print_hwfw_info(ar);
+
+ if (!test_bit(ATH10K_FW_FEATURE_NON_BMI,
+ ar->normal_mode_fw.fw_file.fw_features)) {
+ ret = ath10k_core_pre_cal_download(ar);
+ if (ret) {
+ /* pre calibration data download is not necessary
+ * for all the chipsets. Ignore failures and continue.
+ */
+ ath10k_dbg(ar, ATH10K_DBG_BOOT,
+ "could not load pre cal data: %d\n", ret);
+ }
+
+ ret = ath10k_core_get_board_id_from_otp(ar);
+ if (ret && ret != -EOPNOTSUPP) {
+ ath10k_err(ar, "failed to get board id from otp: %d\n",
+ ret);
+ goto err_free_firmware_files;
+ }
+
+ ret = ath10k_core_check_smbios(ar);
+ if (ret)
+ ath10k_dbg(ar, ATH10K_DBG_BOOT, "SMBIOS bdf variant name not set.\n");
+
+ ret = ath10k_core_check_dt(ar);
+ if (ret)
+ ath10k_dbg(ar, ATH10K_DBG_BOOT, "DT bdf variant name not set.\n");
+
+ ret = ath10k_core_fetch_board_file(ar);
+ if (ret) {
+ ath10k_err(ar, "failed to fetch board file: %d\n", ret);
+ goto err_free_firmware_files;
+ }
+
+ ath10k_debug_print_board_info(ar);
+ }
+
+ ret = ath10k_core_init_firmware_features(ar);
+ if (ret) {
+ ath10k_err(ar, "fatal problem with firmware features: %d\n",
+ ret);
+ goto err_free_firmware_files;
+ }
+
+ if (!test_bit(ATH10K_FW_FEATURE_NON_BMI,
+ ar->normal_mode_fw.fw_file.fw_features)) {
+ ret = ath10k_swap_code_seg_init(ar,
+ &ar->normal_mode_fw.fw_file);
+ if (ret) {
+ ath10k_err(ar, "failed to initialize code swap segment: %d\n",
+ ret);
+ goto err_free_firmware_files;
+ }
+ }
+
+ mutex_lock(&ar->conf_mutex);
+
+ ret = ath10k_core_start(ar, ATH10K_FIRMWARE_MODE_NORMAL,
+ &ar->normal_mode_fw);
+ if (ret) {
+ ath10k_err(ar, "could not init core (%d)\n", ret);
+ goto err_unlock;
+ }
+
+ ath10k_debug_print_boot_info(ar);
+ ath10k_core_stop(ar);
+
+ mutex_unlock(&ar->conf_mutex);
+
+ ath10k_hif_power_down(ar);
+ return 0;
+
+err_unlock:
+ mutex_unlock(&ar->conf_mutex);
+
+err_free_firmware_files:
+ ath10k_core_free_firmware_files(ar);
+
+err_power_down:
+ ath10k_hif_power_down(ar);
+
+ return ret;
+}
+
+static void ath10k_core_register_work(struct work_struct *work)
+{
+ struct ath10k *ar = container_of(work, struct ath10k, register_work);
+ int status;
+
+ /* peer stats are enabled by default */
+ set_bit(ATH10K_FLAG_PEER_STATS, &ar->dev_flags);
+
+ status = ath10k_core_probe_fw(ar);
+ if (status) {
+ ath10k_err(ar, "could not probe fw (%d)\n", status);
+ goto err;
+ }
+
+ status = ath10k_mac_register(ar);
+ if (status) {
+ ath10k_err(ar, "could not register to mac80211 (%d)\n", status);
+ goto err_release_fw;
+ }
+
+ status = ath10k_coredump_register(ar);
+ if (status) {
+ ath10k_err(ar, "unable to register coredump\n");
+ goto err_unregister_mac;
+ }
+
+ status = ath10k_debug_register(ar);
+ if (status) {
+ ath10k_err(ar, "unable to initialize debugfs\n");
+ goto err_unregister_coredump;
+ }
+
+ status = ath10k_spectral_create(ar);
+ if (status) {
+ ath10k_err(ar, "failed to initialize spectral\n");
+ goto err_debug_destroy;
+ }
+
+ status = ath10k_thermal_register(ar);
+ if (status) {
+ ath10k_err(ar, "could not register thermal device: %d\n",
+ status);
+ goto err_spectral_destroy;
+ }
+
+ set_bit(ATH10K_FLAG_CORE_REGISTERED, &ar->dev_flags);
+ return;
+
+err_spectral_destroy:
+ ath10k_spectral_destroy(ar);
+err_debug_destroy:
+ ath10k_debug_destroy(ar);
+err_unregister_coredump:
+ ath10k_coredump_unregister(ar);
+err_unregister_mac:
+ ath10k_mac_unregister(ar);
+err_release_fw:
+ ath10k_core_free_firmware_files(ar);
+err:
+ /* TODO: It's probably a good idea to release device from the driver
+ * but calling device_release_driver() here will cause a deadlock.
+ */
+ return;
+}
+
+int ath10k_core_register(struct ath10k *ar, u32 chip_id)
+{
+ ar->chip_id = chip_id;
+ queue_work(ar->workqueue, &ar->register_work);
+
+ return 0;
+}
+EXPORT_SYMBOL(ath10k_core_register);
+
+void ath10k_core_unregister(struct ath10k *ar)
+{
+ cancel_work_sync(&ar->register_work);
+
+ if (!test_bit(ATH10K_FLAG_CORE_REGISTERED, &ar->dev_flags))
+ return;
+
+ ath10k_thermal_unregister(ar);
+ /* Stop spectral before unregistering from mac80211 to remove the
+ * relayfs debugfs file cleanly. Otherwise the parent debugfs tree
+ * would already have been freed recursively, leading to a double free.
+ */
+ ath10k_spectral_destroy(ar);
+
+ /* We must unregister from mac80211 before we stop HTC and HIF.
+ * Otherwise we will fail to submit commands to FW and mac80211 will be
+ * unhappy about callback failures.
+ */
+ ath10k_mac_unregister(ar);
+
+ ath10k_testmode_destroy(ar);
+
+ ath10k_core_free_firmware_files(ar);
+ ath10k_core_free_board_files(ar);
+
+ ath10k_debug_unregister(ar);
+}
+EXPORT_SYMBOL(ath10k_core_unregister);
+
+struct ath10k *ath10k_core_create(size_t priv_size, struct device *dev,
+ enum ath10k_bus bus,
+ enum ath10k_hw_rev hw_rev,
+ const struct ath10k_hif_ops *hif_ops)
+{
+ struct ath10k *ar;
+ int ret;
+
+ ar = ath10k_mac_create(priv_size);
+ if (!ar)
+ return NULL;
+
+ ar->ath_common.priv = ar;
+ ar->ath_common.hw = ar->hw;
+ ar->dev = dev;
+ ar->hw_rev = hw_rev;
+ ar->hif.ops = hif_ops;
+ ar->hif.bus = bus;
+
+ switch (hw_rev) {
+ case ATH10K_HW_QCA988X:
+ case ATH10K_HW_QCA9887:
+ ar->regs = &qca988x_regs;
+ ar->hw_ce_regs = &qcax_ce_regs;
+ ar->hw_values = &qca988x_values;
+ break;
+ case ATH10K_HW_QCA6174:
+ case ATH10K_HW_QCA9377:
+ ar->regs = &qca6174_regs;
+ ar->hw_ce_regs = &qcax_ce_regs;
+ ar->hw_values = &qca6174_values;
+ break;
+ case ATH10K_HW_QCA99X0:
+ case ATH10K_HW_QCA9984:
+ ar->regs = &qca99x0_regs;
+ ar->hw_ce_regs = &qcax_ce_regs;
+ ar->hw_values = &qca99x0_values;
+ break;
+ case ATH10K_HW_QCA9888:
+ ar->regs = &qca99x0_regs;
+ ar->hw_ce_regs = &qcax_ce_regs;
+ ar->hw_values = &qca9888_values;
+ break;
+ case ATH10K_HW_QCA4019:
+ ar->regs = &qca4019_regs;
+ ar->hw_ce_regs = &qcax_ce_regs;
+ ar->hw_values = &qca4019_values;
+ break;
+ case ATH10K_HW_WCN3990:
+ ar->regs = &wcn3990_regs;
+ ar->hw_ce_regs = &wcn3990_ce_regs;
+ ar->hw_values = &wcn3990_values;
+ break;
+ default:
+ ath10k_err(ar, "unsupported core hardware revision %d\n",
+ hw_rev);
+ ret = -ENOTSUPP;
+ goto err_free_mac;
+ }
+
+ init_completion(&ar->scan.started);
+ init_completion(&ar->scan.completed);
+ init_completion(&ar->scan.on_channel);
+ init_completion(&ar->target_suspend);
+ init_completion(&ar->wow.wakeup_completed);
+
+ init_completion(&ar->install_key_done);
+ init_completion(&ar->vdev_setup_done);
+ init_completion(&ar->thermal.wmi_sync);
+ init_completion(&ar->bss_survey_done);
+
+ INIT_DELAYED_WORK(&ar->scan.timeout, ath10k_scan_timeout_work);
+
+ ar->workqueue = create_singlethread_workqueue("ath10k_wq");
+ if (!ar->workqueue)
+ goto err_free_mac;
+
+ ar->workqueue_aux = create_singlethread_workqueue("ath10k_aux_wq");
+ if (!ar->workqueue_aux)
+ goto err_free_wq;
+
+ mutex_init(&ar->conf_mutex);
+ spin_lock_init(&ar->data_lock);
+ spin_lock_init(&ar->txqs_lock);
+
+ INIT_LIST_HEAD(&ar->txqs);
+ INIT_LIST_HEAD(&ar->peers);
+ init_waitqueue_head(&ar->peer_mapping_wq);
+ init_waitqueue_head(&ar->htt.empty_tx_wq);
+ init_waitqueue_head(&ar->wmi.tx_credits_wq);
+
+ init_completion(&ar->offchan_tx_completed);
+ INIT_WORK(&ar->offchan_tx_work, ath10k_offchan_tx_work);
+ skb_queue_head_init(&ar->offchan_tx_queue);
+
+ INIT_WORK(&ar->wmi_mgmt_tx_work, ath10k_mgmt_over_wmi_tx_work);
+ skb_queue_head_init(&ar->wmi_mgmt_tx_queue);
+
+ INIT_WORK(&ar->register_work, ath10k_core_register_work);
+ INIT_WORK(&ar->restart_work, ath10k_core_restart);
+ INIT_WORK(&ar->set_coverage_class_work,
+ ath10k_core_set_coverage_class_work);
+
+ init_dummy_netdev(&ar->napi_dev);
+
+ ret = ath10k_coredump_create(ar);
+ if (ret)
+ goto err_free_aux_wq;
+
+ ret = ath10k_debug_create(ar);
+ if (ret)
+ goto err_free_coredump;
+
+ return ar;
+
+err_free_coredump:
+ ath10k_coredump_destroy(ar);
+
+err_free_aux_wq:
+ destroy_workqueue(ar->workqueue_aux);
+err_free_wq:
+ destroy_workqueue(ar->workqueue);
+
+err_free_mac:
+ ath10k_mac_destroy(ar);
+
+ return NULL;
+}
+EXPORT_SYMBOL(ath10k_core_create);
+
+void ath10k_core_destroy(struct ath10k *ar)
+{
+ flush_workqueue(ar->workqueue);
+ destroy_workqueue(ar->workqueue);
+
+ flush_workqueue(ar->workqueue_aux);
+ destroy_workqueue(ar->workqueue_aux);
+
+ ath10k_debug_destroy(ar);
+ ath10k_coredump_destroy(ar);
+ ath10k_htt_tx_destroy(&ar->htt);
+ ath10k_wmi_free_host_mem(ar);
+ ath10k_mac_destroy(ar);
+}
+EXPORT_SYMBOL(ath10k_core_destroy);
+
+MODULE_AUTHOR("Qualcomm Atheros");
+MODULE_DESCRIPTION("Core module for Qualcomm Atheros 802.11ac wireless LAN cards.");
+MODULE_LICENSE("Dual BSD/GPL");
diff --git a/drivers/net/wireless/ath/ath10k/core.h b/drivers/net/wireless/ath/ath10k/core.h
new file mode 100644
index 000000000..5c9fc4070
--- /dev/null
+++ b/drivers/net/wireless/ath/ath10k/core.h
@@ -0,0 +1,1174 @@
+/*
+ * Copyright (c) 2005-2011 Atheros Communications Inc.
+ * Copyright (c) 2011-2017 Qualcomm Atheros, Inc.
+ * Copyright (c) 2018, The Linux Foundation. All rights reserved.
+ *
+ * Permission to use, copy, modify, and/or distribute this software for any
+ * purpose with or without fee is hereby granted, provided that the above
+ * copyright notice and this permission notice appear in all copies.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
+ * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
+ * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+ * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+ * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
+ * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+ */
+
+#ifndef _CORE_H_
+#define _CORE_H_
+
+#include <linux/completion.h>
+#include <linux/if_ether.h>
+#include <linux/types.h>
+#include <linux/pci.h>
+#include <linux/uuid.h>
+#include <linux/time.h>
+
+#include "htt.h"
+#include "htc.h"
+#include "hw.h"
+#include "targaddrs.h"
+#include "wmi.h"
+#include "../ath.h"
+#include "../regd.h"
+#include "../dfs_pattern_detector.h"
+#include "spectral.h"
+#include "thermal.h"
+#include "wow.h"
+#include "swap.h"
+
+#define MS(_v, _f) (((_v) & _f##_MASK) >> _f##_LSB)
+#define SM(_v, _f) (((_v) << _f##_LSB) & _f##_MASK)
+#define WO(_f) ((_f##_OFFSET) >> 2)
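+
+/* Usage sketch (illustrative only; EXAMPLE_FIELD is hypothetical): given
+ *
+ *   #define EXAMPLE_FIELD_LSB  8
+ *   #define EXAMPLE_FIELD_MASK 0x0000ff00
+ *
+ * MS(reg, EXAMPLE_FIELD) extracts bits 15:8 of reg as a plain value and
+ * SM(0x5a, EXAMPLE_FIELD) yields 0x5a00, ready to be OR'ed into a register
+ * word. WO(EXAMPLE_FIELD) would turn an EXAMPLE_FIELD_OFFSET byte offset
+ * into a 32-bit word index.
+ */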
+
+#define ATH10K_SCAN_ID 0
+#define ATH10K_SCAN_CHANNEL_SWITCH_WMI_EVT_OVERHEAD 10 /* msec */
+#define WMI_READY_TIMEOUT (5 * HZ)
+#define ATH10K_FLUSH_TIMEOUT_HZ (5 * HZ)
+#define ATH10K_CONNECTION_LOSS_HZ (3 * HZ)
+#define ATH10K_NUM_CHANS 41
+#define ATH10K_MAX_5G_CHAN 173
+
+/* Antenna noise floor */
+#define ATH10K_DEFAULT_NOISE_FLOOR -95
+
+#define ATH10K_INVALID_RSSI 128
+
+#define ATH10K_MAX_NUM_MGMT_PENDING 128
+
+/* number of failed packets (20 packets with 16 sw retries each) */
+#define ATH10K_KICKOUT_THRESHOLD (20 * 16)
+
+/*
+ * Use insanely high numbers to make sure that the firmware implementation
+ * won't start; we already have the same functionality in hostapd. Unit
+ * is seconds.
+ */
+#define ATH10K_KEEPALIVE_MIN_IDLE 3747
+#define ATH10K_KEEPALIVE_MAX_IDLE 3895
+#define ATH10K_KEEPALIVE_MAX_UNRESPONSIVE 3900
+
+/* NAPI poll budget */
+#define ATH10K_NAPI_BUDGET 64
+
+/* SMBIOS type containing Board Data File Name Extension */
+#define ATH10K_SMBIOS_BDF_EXT_TYPE 0xF8
+
+/* SMBIOS type structure length (excluding strings-set) */
+#define ATH10K_SMBIOS_BDF_EXT_LENGTH 0x9
+
+/* Offset pointing to Board Data File Name Extension */
+#define ATH10K_SMBIOS_BDF_EXT_OFFSET 0x8
+
+/* Board Data File Name Extension string length.
+ * String format: BDF_<Customer ID>_<Extension>\0
+ */
+#define ATH10K_SMBIOS_BDF_EXT_STR_LENGTH 0x20
+
+/* The magic used by QCA spec */
+#define ATH10K_SMBIOS_BDF_EXT_MAGIC "BDF_"
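+
+/* e.g. a complete extension string following the format above might look
+ * like "BDF_ACME_0001" (illustrative values only)
+ */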
+
+struct ath10k;
+
+enum ath10k_bus {
+ ATH10K_BUS_PCI,
+ ATH10K_BUS_AHB,
+ ATH10K_BUS_SDIO,
+ ATH10K_BUS_USB,
+ ATH10K_BUS_SNOC,
+};
+
+static inline const char *ath10k_bus_str(enum ath10k_bus bus)
+{
+ switch (bus) {
+ case ATH10K_BUS_PCI:
+ return "pci";
+ case ATH10K_BUS_AHB:
+ return "ahb";
+ case ATH10K_BUS_SDIO:
+ return "sdio";
+ case ATH10K_BUS_USB:
+ return "usb";
+ case ATH10K_BUS_SNOC:
+ return "snoc";
+ }
+
+ return "unknown";
+}
+
+enum ath10k_skb_flags {
+ ATH10K_SKB_F_NO_HWCRYPT = BIT(0),
+ ATH10K_SKB_F_DTIM_ZERO = BIT(1),
+ ATH10K_SKB_F_DELIVER_CAB = BIT(2),
+ ATH10K_SKB_F_MGMT = BIT(3),
+ ATH10K_SKB_F_QOS = BIT(4),
+};
+
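+/* Stored in the driver_data area of IEEE80211_SKB_CB(skb); the
+ * BUILD_BUG_ON() in ATH10K_SKB_CB() below guarantees it fits within
+ * IEEE80211_TX_INFO_DRIVER_DATA_SIZE, which is why the struct is kept
+ * __packed.
+ */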
+struct ath10k_skb_cb {
+ dma_addr_t paddr;
+ u8 flags;
+ u8 eid;
+ u16 msdu_id;
+ struct ieee80211_vif *vif;
+ struct ieee80211_txq *txq;
+} __packed;
+
+struct ath10k_skb_rxcb {
+ dma_addr_t paddr;
+ struct hlist_node hlist;
+};
+
+static inline struct ath10k_skb_cb *ATH10K_SKB_CB(struct sk_buff *skb)
+{
+ BUILD_BUG_ON(sizeof(struct ath10k_skb_cb) >
+ IEEE80211_TX_INFO_DRIVER_DATA_SIZE);
+ return (struct ath10k_skb_cb *)&IEEE80211_SKB_CB(skb)->driver_data;
+}
+
+static inline struct ath10k_skb_rxcb *ATH10K_SKB_RXCB(struct sk_buff *skb)
+{
+ BUILD_BUG_ON(sizeof(struct ath10k_skb_rxcb) > sizeof(skb->cb));
+ return (struct ath10k_skb_rxcb *)skb->cb;
+}
+
+#define ATH10K_RXCB_SKB(rxcb) \
+ container_of((void *)rxcb, struct sk_buff, cb)
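+
+/* Illustrative round-trip: since the rxcb is embedded in skb->cb,
+ * container_of() recovers the owning skb, i.e. for any rx skb
+ *
+ *   ATH10K_RXCB_SKB(ATH10K_SKB_RXCB(skb)) == skb
+ */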
+
+static inline u32 host_interest_item_address(u32 item_offset)
+{
+ return QCA988X_HOST_INTEREST_ADDRESS + item_offset;
+}
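+
+/* A minimal usage sketch, assuming the HI_ITEM() helper from targaddrs.h
+ * (an offsetof() into struct host_interest): BMI accessors resolve a host
+ * interest item to its target address roughly as
+ *
+ *   u32 addr = host_interest_item_address(HI_ITEM(hi_serial_enable));
+ *
+ * and then read or write it via the BMI memory ops.
+ */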
+
+struct ath10k_bmi {
+ bool done_sent;
+};
+
+struct ath10k_mem_chunk {
+ void *vaddr;
+ dma_addr_t paddr;
+ u32 len;
+ u32 req_id;
+};
+
+struct ath10k_wmi {
+ enum ath10k_htc_ep_id eid;
+ struct completion service_ready;
+ struct completion unified_ready;
+ struct completion barrier;
+ struct completion radar_confirm;
+ wait_queue_head_t tx_credits_wq;
+ DECLARE_BITMAP(svc_map, WMI_SERVICE_MAX);
+ struct wmi_cmd_map *cmd;
+ struct wmi_vdev_param_map *vdev_param;
+ struct wmi_pdev_param_map *pdev_param;
+ const struct wmi_ops *ops;
+ const struct wmi_peer_flags_map *peer_flags;
+
+ u32 mgmt_max_num_pending_tx;
+
+ /* Protected by data_lock */
+ struct idr mgmt_pending_tx;
+
+ u32 num_mem_chunks;
+ u32 rx_decap_mode;
+ struct ath10k_mem_chunk mem_chunks[WMI_MAX_MEM_REQS];
+};
+
+struct ath10k_fw_stats_peer {
+ struct list_head list;
+
+ u8 peer_macaddr[ETH_ALEN];
+ u32 peer_rssi;
+ u32 peer_tx_rate;
+ u32 peer_rx_rate; /* 10x only */
+ u32 rx_duration;
+};
+
+struct ath10k_fw_extd_stats_peer {
+ struct list_head list;
+
+ u8 peer_macaddr[ETH_ALEN];
+ u32 rx_duration;
+};
+
+struct ath10k_fw_stats_vdev {
+ struct list_head list;
+
+ u32 vdev_id;
+ u32 beacon_snr;
+ u32 data_snr;
+ u32 num_tx_frames[4];
+ u32 num_rx_frames;
+ u32 num_tx_frames_retries[4];
+ u32 num_tx_frames_failures[4];
+ u32 num_rts_fail;
+ u32 num_rts_success;
+ u32 num_rx_err;
+ u32 num_rx_discard;
+ u32 num_tx_not_acked;
+ u32 tx_rate_history[10];
+ u32 beacon_rssi_history[10];
+};
+
+struct ath10k_fw_stats_vdev_extd {
+ struct list_head list;
+
+ u32 vdev_id;
+ u32 ppdu_aggr_cnt;
+ u32 ppdu_noack;
+ u32 mpdu_queued;
+ u32 ppdu_nonaggr_cnt;
+ u32 mpdu_sw_requeued;
+ u32 mpdu_suc_retry;
+ u32 mpdu_suc_multitry;
+ u32 mpdu_fail_retry;
+ u32 tx_ftm_suc;
+ u32 tx_ftm_suc_retry;
+ u32 tx_ftm_fail;
+ u32 rx_ftmr_cnt;
+ u32 rx_ftmr_dup_cnt;
+ u32 rx_iftmr_cnt;
+ u32 rx_iftmr_dup_cnt;
+};
+
+struct ath10k_fw_stats_pdev {
+ struct list_head list;
+
+ /* PDEV stats */
+ s32 ch_noise_floor;
+ u32 tx_frame_count; /* Cycles spent transmitting frames */
+ u32 rx_frame_count; /* Cycles spent receiving frames */
+ u32 rx_clear_count; /* Total channel busy time, evidently */
+ u32 cycle_count; /* Total on-channel time */
+ u32 phy_err_count;
+ u32 chan_tx_power;
+ u32 ack_rx_bad;
+ u32 rts_bad;
+ u32 rts_good;
+ u32 fcs_bad;
+ u32 no_beacons;
+ u32 mib_int_count;
+
+ /* PDEV TX stats */
+ s32 comp_queued;
+ s32 comp_delivered;
+ s32 msdu_enqued;
+ s32 mpdu_enqued;
+ s32 wmm_drop;
+ s32 local_enqued;
+ s32 local_freed;
+ s32 hw_queued;
+ s32 hw_reaped;
+ s32 underrun;
+ u32 hw_paused;
+ s32 tx_abort;
+ s32 mpdus_requed;
+ u32 tx_ko;
+ u32 data_rc;
+ u32 self_triggers;
+ u32 sw_retry_failure;
+ u32 illgl_rate_phy_err;
+ u32 pdev_cont_xretry;
+ u32 pdev_tx_timeout;
+ u32 pdev_resets;
+ u32 phy_underrun;
+ u32 txop_ovf;
+ u32 seq_posted;
+ u32 seq_failed_queueing;
+ u32 seq_completed;
+ u32 seq_restarted;
+ u32 mu_seq_posted;
+ u32 mpdus_sw_flush;
+ u32 mpdus_hw_filter;
+ u32 mpdus_truncated;
+ u32 mpdus_ack_failed;
+ u32 mpdus_expired;
+
+ /* PDEV RX stats */
+ s32 mid_ppdu_route_change;
+ s32 status_rcvd;
+ s32 r0_frags;
+ s32 r1_frags;
+ s32 r2_frags;
+ s32 r3_frags;
+ s32 htt_msdus;
+ s32 htt_mpdus;
+ s32 loc_msdus;
+ s32 loc_mpdus;
+ s32 oversize_amsdu;
+ s32 phy_errs;
+ s32 phy_err_drop;
+ s32 mpdu_errs;
+ s32 rx_ovfl_errs;
+};
+
+struct ath10k_fw_stats {
+ bool extended;
+ struct list_head pdevs;
+ struct list_head vdevs;
+ struct list_head peers;
+ struct list_head peers_extd;
+};
+
+#define ATH10K_TPC_TABLE_TYPE_FLAG 1
+#define ATH10K_TPC_PREAM_TABLE_END 0xFFFF
+
+struct ath10k_tpc_table {
+ u32 pream_idx[WMI_TPC_RATE_MAX];
+ u8 rate_code[WMI_TPC_RATE_MAX];
+ char tpc_value[WMI_TPC_RATE_MAX][WMI_TPC_TX_N_CHAIN * WMI_TPC_BUF_SIZE];
+};
+
+struct ath10k_tpc_stats {
+ u32 reg_domain;
+ u32 chan_freq;
+ u32 phy_mode;
+ u32 twice_antenna_reduction;
+ u32 twice_max_rd_power;
+ s32 twice_antenna_gain;
+ u32 power_limit;
+ u32 num_tx_chain;
+ u32 ctl;
+ u32 rate_max;
+ u8 flag[WMI_TPC_FLAG];
+ struct ath10k_tpc_table tpc_table[WMI_TPC_FLAG];
+};
+
+struct ath10k_tpc_table_final {
+ u32 pream_idx[WMI_TPC_FINAL_RATE_MAX];
+ u8 rate_code[WMI_TPC_FINAL_RATE_MAX];
+ char tpc_value[WMI_TPC_FINAL_RATE_MAX][WMI_TPC_TX_N_CHAIN * WMI_TPC_BUF_SIZE];
+};
+
+struct ath10k_tpc_stats_final {
+ u32 reg_domain;
+ u32 chan_freq;
+ u32 phy_mode;
+ u32 twice_antenna_reduction;
+ u32 twice_max_rd_power;
+ s32 twice_antenna_gain;
+ u32 power_limit;
+ u32 num_tx_chain;
+ u32 ctl;
+ u32 rate_max;
+ u8 flag[WMI_TPC_FLAG];
+ struct ath10k_tpc_table_final tpc_table_final[WMI_TPC_FLAG];
+};
+
+struct ath10k_dfs_stats {
+ u32 phy_errors;
+ u32 pulses_total;
+ u32 pulses_detected;
+ u32 pulses_discarded;
+ u32 radar_detected;
+};
+
+enum ath10k_radar_confirmation_state {
+ ATH10K_RADAR_CONFIRMATION_IDLE = 0,
+ ATH10K_RADAR_CONFIRMATION_INPROGRESS,
+ ATH10K_RADAR_CONFIRMATION_STOPPED,
+};
+
+struct ath10k_radar_found_info {
+ u32 pri_min;
+ u32 pri_max;
+ u32 width_min;
+ u32 width_max;
+ u32 sidx_min;
+ u32 sidx_max;
+};
+
+#define ATH10K_MAX_NUM_PEER_IDS (1 << 11) /* htt rx_desc limit */
+
+struct ath10k_peer {
+ struct list_head list;
+ struct ieee80211_vif *vif;
+ struct ieee80211_sta *sta;
+
+ bool removed;
+ int vdev_id;
+ u8 addr[ETH_ALEN];
+ DECLARE_BITMAP(peer_ids, ATH10K_MAX_NUM_PEER_IDS);
+
+ /* protected by ar->data_lock */
+ struct ieee80211_key_conf *keys[WMI_MAX_KEY_INDEX + 1];
+};
+
+struct ath10k_txq {
+ struct list_head list;
+ unsigned long num_fw_queued;
+ unsigned long num_push_allowed;
+};
+
+enum ath10k_pkt_rx_err {
+ ATH10K_PKT_RX_ERR_FCS,
+ ATH10K_PKT_RX_ERR_TKIP,
+ ATH10K_PKT_RX_ERR_CRYPT,
+ ATH10K_PKT_RX_ERR_PEER_IDX_INVAL,
+ ATH10K_PKT_RX_ERR_MAX,
+};
+
+enum ath10k_ampdu_subfrm_num {
+ ATH10K_AMPDU_SUBFRM_NUM_10,
+ ATH10K_AMPDU_SUBFRM_NUM_20,
+ ATH10K_AMPDU_SUBFRM_NUM_30,
+ ATH10K_AMPDU_SUBFRM_NUM_40,
+ ATH10K_AMPDU_SUBFRM_NUM_50,
+ ATH10K_AMPDU_SUBFRM_NUM_60,
+ ATH10K_AMPDU_SUBFRM_NUM_MORE,
+ ATH10K_AMPDU_SUBFRM_NUM_MAX,
+};
+
+enum ath10k_amsdu_subfrm_num {
+ ATH10K_AMSDU_SUBFRM_NUM_1,
+ ATH10K_AMSDU_SUBFRM_NUM_2,
+ ATH10K_AMSDU_SUBFRM_NUM_3,
+ ATH10K_AMSDU_SUBFRM_NUM_4,
+ ATH10K_AMSDU_SUBFRM_NUM_MORE,
+ ATH10K_AMSDU_SUBFRM_NUM_MAX,
+};
+
+struct ath10k_sta_tid_stats {
+ unsigned long rx_pkt_from_fw;
+ unsigned long rx_pkt_unchained;
+ unsigned long rx_pkt_drop_chained;
+ unsigned long rx_pkt_drop_filter;
+ unsigned long rx_pkt_err[ATH10K_PKT_RX_ERR_MAX];
+ unsigned long rx_pkt_queued_for_mac;
+ unsigned long rx_pkt_ampdu[ATH10K_AMPDU_SUBFRM_NUM_MAX];
+ unsigned long rx_pkt_amsdu[ATH10K_AMSDU_SUBFRM_NUM_MAX];
+};
+
+struct ath10k_sta {
+ struct ath10k_vif *arvif;
+
+ /* the following are protected by ar->data_lock */
+ u32 changed; /* IEEE80211_RC_* */
+ u32 bw;
+ u32 nss;
+ u32 smps;
+ u16 peer_id;
+ struct rate_info txrate;
+
+ struct work_struct update_wk;
+ u64 rx_duration;
+
+#ifdef CONFIG_MAC80211_DEBUGFS
+ /* protected by conf_mutex */
+ bool aggr_mode;
+
+ /* Protected with ar->data_lock */
+ struct ath10k_sta_tid_stats tid_stats[IEEE80211_NUM_TIDS + 1];
+#endif
+};
+
+#define ATH10K_VDEV_SETUP_TIMEOUT_HZ (5 * HZ)
+
+enum ath10k_beacon_state {
+ ATH10K_BEACON_SCHEDULED = 0,
+ ATH10K_BEACON_SENDING,
+ ATH10K_BEACON_SENT,
+};
+
+struct ath10k_vif {
+ struct list_head list;
+
+ u32 vdev_id;
+ u16 peer_id;
+ enum wmi_vdev_type vdev_type;
+ enum wmi_vdev_subtype vdev_subtype;
+ u32 beacon_interval;
+ u32 dtim_period;
+ struct sk_buff *beacon;
+ /* protected by data_lock */
+ enum ath10k_beacon_state beacon_state;
+ void *beacon_buf;
+ dma_addr_t beacon_paddr;
+ unsigned long tx_paused; /* arbitrary values defined by target */
+
+ struct ath10k *ar;
+ struct ieee80211_vif *vif;
+
+ bool is_started;
+ bool is_up;
+ bool spectral_enabled;
+ bool ps;
+ u32 aid;
+ u8 bssid[ETH_ALEN];
+
+ struct ieee80211_key_conf *wep_keys[WMI_MAX_KEY_INDEX + 1];
+ s8 def_wep_key_idx;
+
+ u16 tx_seq_no;
+
+ union {
+ struct {
+ u32 uapsd;
+ } sta;
+ struct {
+ /* 512 stations */
+ u8 tim_bitmap[64];
+ u8 tim_len;
+ u32 ssid_len;
+ u8 ssid[IEEE80211_MAX_SSID_LEN];
+ bool hidden_ssid;
+ /* P2P_IE with NoA attribute for P2P_GO case */
+ u32 noa_len;
+ u8 *noa_data;
+ } ap;
+ } u;
+
+ bool use_cts_prot;
+ bool nohwcrypt;
+ int num_legacy_stations;
+ int txpower;
+ struct wmi_wmm_params_all_arg wmm_params;
+ struct work_struct ap_csa_work;
+ struct delayed_work connection_loss_work;
+ struct cfg80211_bitrate_mask bitrate_mask;
+};
+
+struct ath10k_vif_iter {
+ u32 vdev_id;
+ struct ath10k_vif *arvif;
+};
+
+/* Copy Engine register dump, protected by ce-lock */
+struct ath10k_ce_crash_data {
+ __le32 base_addr;
+ __le32 src_wr_idx;
+ __le32 src_r_idx;
+ __le32 dst_wr_idx;
+ __le32 dst_r_idx;
+};
+
+struct ath10k_ce_crash_hdr {
+ __le32 ce_count;
+ __le32 reserved[3]; /* for future use */
+ struct ath10k_ce_crash_data entries[];
+};
+
+#define MAX_MEM_DUMP_TYPE 5
+
+/* used for crash-dump storage, protected by data-lock */
+struct ath10k_fw_crash_data {
+ guid_t guid;
+ struct timespec64 timestamp;
+ __le32 registers[REG_DUMP_COUNT_QCA988X];
+ struct ath10k_ce_crash_data ce_crash_data[CE_COUNT_MAX];
+
+ u8 *ramdump_buf;
+ size_t ramdump_buf_len;
+};
+
+struct ath10k_debug {
+ struct dentry *debugfs_phy;
+
+ struct ath10k_fw_stats fw_stats;
+ struct completion fw_stats_complete;
+ bool fw_stats_done;
+
+ unsigned long htt_stats_mask;
+ struct delayed_work htt_stats_dwork;
+ struct ath10k_dfs_stats dfs_stats;
+ struct ath_dfs_pool_stats dfs_pool_stats;
+
+ /* used for tpc-dump storage, protected by data-lock */
+ struct ath10k_tpc_stats *tpc_stats;
+ struct ath10k_tpc_stats_final *tpc_stats_final;
+
+ struct completion tpc_complete;
+
+ /* protected by conf_mutex */
+ u64 fw_dbglog_mask;
+ u32 fw_dbglog_level;
+ u32 reg_addr;
+ u32 nf_cal_period;
+ void *cal_data;
+};
+
+enum ath10k_state {
+ ATH10K_STATE_OFF = 0,
+ ATH10K_STATE_ON,
+
+ /* When doing firmware recovery the device is first powered down.
+ * mac80211 is supposed to call into the start() hook later on. It is
+ * however possible that driver unloading and firmware crash overlap.
+ * mac80211 can wait on conf_mutex in stop() while the device is
+ * stopped in ath10k_core_restart() work holding conf_mutex. The state
+ * RESTARTED means that the device is up and mac80211 has started hw
+ * reconfiguration. Once mac80211 is done with the reconfiguration we
+ * set the state to STATE_ON in reconfig_complete().
+ */
+ ATH10K_STATE_RESTARTING,
+ ATH10K_STATE_RESTARTED,
+
+ /* The device has crashed while restarting hw. This state is like ON
+ * but commands are blocked in HTC and -ECOMM response is given. This
+ * prevents completion timeouts and makes the driver more responsive to
+ * userspace commands. This also prevents recursive recovery.
+ */
+ ATH10K_STATE_WEDGED,
+
+ /* factory tests */
+ ATH10K_STATE_UTF,
+};
+
+enum ath10k_firmware_mode {
+ /* the default mode, standard 802.11 functionality */
+ ATH10K_FIRMWARE_MODE_NORMAL,
+
+ /* factory tests etc */
+ ATH10K_FIRMWARE_MODE_UTF,
+};
+
+enum ath10k_fw_features {
+ /* wmi_mgmt_rx_hdr contains extra RSSI information */
+ ATH10K_FW_FEATURE_EXT_WMI_MGMT_RX = 0,
+
+ /* Firmware from 10X branch. Deprecated, don't use in new code. */
+ ATH10K_FW_FEATURE_WMI_10X = 1,
+
+ /* firmware supports management frame tx over WMI; otherwise it's HTT */
+ ATH10K_FW_FEATURE_HAS_WMI_MGMT_TX = 2,
+
+ /* Firmware does not support P2P */
+ ATH10K_FW_FEATURE_NO_P2P = 3,
+
+ /* Firmware 10.2 feature bit. The ATH10K_FW_FEATURE_WMI_10X feature
+ * bit is required to be set as well. Deprecated, don't use in new
+ * code.
+ */
+ ATH10K_FW_FEATURE_WMI_10_2 = 4,
+
+ /* Some firmware revisions lack proper multi-interface client powersave
+ * implementation. Enabling PS could result in connection drops,
+ * traffic stalls, etc.
+ */
+ ATH10K_FW_FEATURE_MULTI_VIF_PS_SUPPORT = 5,
+
+ /* Some firmware revisions have an incomplete WoWLAN implementation
+ * despite WMI service bit being advertised. This feature flag is used
+ * to distinguish whether WoWLAN is really supported or not.
+ */
+ ATH10K_FW_FEATURE_WOWLAN_SUPPORT = 6,
+
+ /* Don't trust error code from otp.bin */
+ ATH10K_FW_FEATURE_IGNORE_OTP_RESULT = 7,
+
+ /* Some firmware revisions pad the 4th hw address to a 4 byte boundary,
+ * making it 8 bytes long in Native Wifi Rx decap.
+ */
+ ATH10K_FW_FEATURE_NO_NWIFI_DECAP_4ADDR_PADDING = 8,
+
+ /* Firmware supports bypassing PLL setting on init. */
+ ATH10K_FW_FEATURE_SUPPORTS_SKIP_CLOCK_INIT = 9,
+
+ /* Raw mode support. If supported, FW supports receiving and transmitting
+ * frames in raw mode.
+ */
+ ATH10K_FW_FEATURE_RAW_MODE_SUPPORT = 10,
+
+ /* Firmware supports Adaptive CCA */
+ ATH10K_FW_FEATURE_SUPPORTS_ADAPTIVE_CCA = 11,
+
+ /* Firmware supports management frame protection */
+ ATH10K_FW_FEATURE_MFP_SUPPORT = 12,
+
+ /* Firmware supports pull-push model where host shares its software
+ * queue state with firmware and firmware generates fetch requests
+ * telling host which queues to dequeue tx from.
+ *
+ * Primary function of this is improved MU-MIMO performance with
+ * multiple clients.
+ */
+ ATH10K_FW_FEATURE_PEER_FLOW_CONTROL = 13,
+
+ /* Firmware supports BT-Coex without reloading firmware via pdev param.
+ * To support Bluetooth coexistence pdev param, WMI_COEX_GPIO_SUPPORT of
+ * extended resource config should always be enabled. This firmware IE
+ * is used to configure WMI_COEX_GPIO_SUPPORT.
+ */
+ ATH10K_FW_FEATURE_BTCOEX_PARAM = 14,
+
+ /* Unused flag, proven not to work; enable it if you want to
+ * experiment with sending NULL func data frames in HTT TX
+ */
+ ATH10K_FW_FEATURE_SKIP_NULL_FUNC_WAR = 15,
+
+ /* Firmware allows other BSS mesh broadcast/multicast frames without
+ * creating a monitor interface. Appropriate rxfilters are programmed for
+ * the mesh vdev by firmware itself. This feature flag is used to avoid
+ * creating a monitor vdev while configuring a mesh node.
+ */
+ ATH10K_FW_FEATURE_ALLOWS_MESH_BCAST = 16,
+
+ /* Firmware does not support power save in station mode. */
+ ATH10K_FW_FEATURE_NO_PS = 17,
+
+ /* Firmware allows management tx by reference instead of by value. */
+ ATH10K_FW_FEATURE_MGMT_TX_BY_REF = 18,
+
+ /* Firmware load is done externally, not by bmi */
+ ATH10K_FW_FEATURE_NON_BMI = 19,
+
+ /* keep last */
+ ATH10K_FW_FEATURE_COUNT,
+};
+
+enum ath10k_dev_flags {
+ /* Indicates that the ath10k device is in the CAC phase of DFS */
+ ATH10K_CAC_RUNNING,
+ ATH10K_FLAG_CORE_REGISTERED,
+
+ /* Device has crashed and needs to restart. This indicates any pending
+ * waiters should immediately cancel instead of waiting for a time out.
+ */
+ ATH10K_FLAG_CRASH_FLUSH,
+
+ /* Use Raw mode instead of native WiFi Tx/Rx encap mode.
+ * Raw mode supports both hardware and software crypto. Native WiFi only
+ * supports hardware crypto.
+ */
+ ATH10K_FLAG_RAW_MODE,
+
+ /* Disable HW crypto engine */
+ ATH10K_FLAG_HW_CRYPTO_DISABLED,
+
+ /* Bluetooth coexistence enabled */
+ ATH10K_FLAG_BTCOEX,
+
+ /* Per Station statistics service */
+ ATH10K_FLAG_PEER_STATS,
+};
+
+enum ath10k_cal_mode {
+ ATH10K_CAL_MODE_FILE,
+ ATH10K_CAL_MODE_OTP,
+ ATH10K_CAL_MODE_DT,
+ ATH10K_PRE_CAL_MODE_FILE,
+ ATH10K_PRE_CAL_MODE_DT,
+ ATH10K_CAL_MODE_EEPROM,
+};
+
+enum ath10k_crypt_mode {
+ /* Only use hardware crypto engine */
+ ATH10K_CRYPT_MODE_HW,
+ /* Only use software crypto engine */
+ ATH10K_CRYPT_MODE_SW,
+};
+
+static inline const char *ath10k_cal_mode_str(enum ath10k_cal_mode mode)
+{
+ switch (mode) {
+ case ATH10K_CAL_MODE_FILE:
+ return "file";
+ case ATH10K_CAL_MODE_OTP:
+ return "otp";
+ case ATH10K_CAL_MODE_DT:
+ return "dt";
+ case ATH10K_PRE_CAL_MODE_FILE:
+ return "pre-cal-file";
+ case ATH10K_PRE_CAL_MODE_DT:
+ return "pre-cal-dt";
+ case ATH10K_CAL_MODE_EEPROM:
+ return "eeprom";
+ }
+
+ return "unknown";
+}
+
+enum ath10k_scan_state {
+ ATH10K_SCAN_IDLE,
+ ATH10K_SCAN_STARTING,
+ ATH10K_SCAN_RUNNING,
+ ATH10K_SCAN_ABORTING,
+};
+
+static inline const char *ath10k_scan_state_str(enum ath10k_scan_state state)
+{
+ switch (state) {
+ case ATH10K_SCAN_IDLE:
+ return "idle";
+ case ATH10K_SCAN_STARTING:
+ return "starting";
+ case ATH10K_SCAN_RUNNING:
+ return "running";
+ case ATH10K_SCAN_ABORTING:
+ return "aborting";
+ }
+
+ return "unknown";
+}
+
+enum ath10k_tx_pause_reason {
+ ATH10K_TX_PAUSE_Q_FULL,
+ ATH10K_TX_PAUSE_MAX,
+};
+
+struct ath10k_fw_file {
+ const struct firmware *firmware;
+
+ char fw_version[ETHTOOL_FWVERS_LEN];
+
+ DECLARE_BITMAP(fw_features, ATH10K_FW_FEATURE_COUNT);
+
+ enum ath10k_fw_wmi_op_version wmi_op_version;
+ enum ath10k_fw_htt_op_version htt_op_version;
+
+ const void *firmware_data;
+ size_t firmware_len;
+
+ const void *otp_data;
+ size_t otp_len;
+
+ const void *codeswap_data;
+ size_t codeswap_len;
+
+ /* The original idea of struct ath10k_fw_file was that it only
+ * contains struct firmware and pointers to various parts (actual
+ * firmware binary, otp, metadata etc) of the file. This seg_info
+ * is actually created separately, but as it is used similarly to
+ * the other firmware components it's more convenient to keep it
+ * here.
+ */
+ struct ath10k_swap_code_seg_info *firmware_swap_code_seg_info;
+};
+
+struct ath10k_fw_components {
+ const struct firmware *board;
+ const void *board_data;
+ size_t board_len;
+
+ struct ath10k_fw_file fw_file;
+};
+
+struct ath10k_per_peer_tx_stats {
+ u32 succ_bytes;
+ u32 retry_bytes;
+ u32 failed_bytes;
+ u8 ratecode;
+ u8 flags;
+ u16 peer_id;
+ u16 succ_pkts;
+ u16 retry_pkts;
+ u16 failed_pkts;
+ u16 duration;
+ u32 reserved1;
+ u32 reserved2;
+};
+
+struct ath10k {
+ struct ath_common ath_common;
+ struct ieee80211_hw *hw;
+ struct ieee80211_ops *ops;
+ struct device *dev;
+ u8 mac_addr[ETH_ALEN];
+
+ enum ath10k_hw_rev hw_rev;
+ u16 dev_id;
+ u32 chip_id;
+ u32 target_version;
+ u8 fw_version_major;
+ u32 fw_version_minor;
+ u16 fw_version_release;
+ u16 fw_version_build;
+ u32 fw_stats_req_mask;
+ u32 phy_capability;
+ u32 hw_min_tx_power;
+ u32 hw_max_tx_power;
+ u32 hw_eeprom_rd;
+ u32 ht_cap_info;
+ u32 vht_cap_info;
+ u32 num_rf_chains;
+ u32 max_spatial_stream;
+ /* protected by conf_mutex */
+ u32 low_5ghz_chan;
+ u32 high_5ghz_chan;
+ bool ani_enabled;
+
+ bool p2p;
+
+ struct {
+ enum ath10k_bus bus;
+ const struct ath10k_hif_ops *ops;
+ } hif;
+
+ struct completion target_suspend;
+
+ const struct ath10k_hw_regs *regs;
+ const struct ath10k_hw_ce_regs *hw_ce_regs;
+ const struct ath10k_hw_values *hw_values;
+ struct ath10k_bmi bmi;
+ struct ath10k_wmi wmi;
+ struct ath10k_htc htc;
+ struct ath10k_htt htt;
+
+ struct ath10k_hw_params hw_params;
+
+ /* contains the firmware images used with ATH10K_FIRMWARE_MODE_NORMAL */
+ struct ath10k_fw_components normal_mode_fw;
+
+ /* READ-ONLY images of the running firmware, which can be either
+ * normal or UTF. Do not modify, release etc!
+ */
+ const struct ath10k_fw_components *running_fw;
+
+ const struct firmware *pre_cal_file;
+ const struct firmware *cal_file;
+
+ struct {
+ u32 vendor;
+ u32 device;
+ u32 subsystem_vendor;
+ u32 subsystem_device;
+
+ bool bmi_ids_valid;
+ u8 bmi_board_id;
+ u8 bmi_chip_id;
+
+ char bdf_ext[ATH10K_SMBIOS_BDF_EXT_STR_LENGTH];
+ } id;
+
+ int fw_api;
+ int bd_api;
+ enum ath10k_cal_mode cal_mode;
+
+ struct {
+ struct completion started;
+ struct completion completed;
+ struct completion on_channel;
+ struct delayed_work timeout;
+ enum ath10k_scan_state state;
+ bool is_roc;
+ int vdev_id;
+ int roc_freq;
+ bool roc_notify;
+ } scan;
+
+ struct {
+ struct ieee80211_supported_band sbands[NUM_NL80211_BANDS];
+ } mac;
+
+ /* should never be NULL; needed for regular htt rx */
+ struct ieee80211_channel *rx_channel;
+
+ /* valid during scan; needed for mgmt rx during scan */
+ struct ieee80211_channel *scan_channel;
+
+ /* current operating channel definition */
+ struct cfg80211_chan_def chandef;
+
+ /* currently configured operating channel in firmware */
+ struct ieee80211_channel *tgt_oper_chan;
+
+ unsigned long long free_vdev_map;
+ struct ath10k_vif *monitor_arvif;
+ bool monitor;
+ int monitor_vdev_id;
+ bool monitor_started;
+ unsigned int filter_flags;
+ unsigned long dev_flags;
+ bool dfs_block_radar_events;
+
+ /* protected by conf_mutex */
+ bool radar_enabled;
+ int num_started_vdevs;
+
+ /* Protected by conf-mutex */
+ u8 cfg_tx_chainmask;
+ u8 cfg_rx_chainmask;
+
+ struct completion install_key_done;
+
+ int last_wmi_vdev_start_status;
+ struct completion vdev_setup_done;
+
+ struct workqueue_struct *workqueue;
+ /* Auxiliary workqueue */
+ struct workqueue_struct *workqueue_aux;
+
+ /* prevents concurrent FW reconfiguration */
+ struct mutex conf_mutex;
+
+ /* protects shared structure data */
+ spinlock_t data_lock;
+ /* protects: ar->txqs, artxq->list */
+ spinlock_t txqs_lock;
+
+ struct list_head txqs;
+ struct list_head arvifs;
+ struct list_head peers;
+ struct ath10k_peer *peer_map[ATH10K_MAX_NUM_PEER_IDS];
+ wait_queue_head_t peer_mapping_wq;
+
+ /* protected by conf_mutex */
+ int num_peers;
+ int num_stations;
+
+ int max_num_peers;
+ int max_num_stations;
+ int max_num_vdevs;
+ int max_num_tdls_vdevs;
+ int num_active_peers;
+ int num_tids;
+
+ struct work_struct svc_rdy_work;
+ struct sk_buff *svc_rdy_skb;
+
+ struct work_struct offchan_tx_work;
+ struct sk_buff_head offchan_tx_queue;
+ struct completion offchan_tx_completed;
+ struct sk_buff *offchan_tx_skb;
+
+ struct work_struct wmi_mgmt_tx_work;
+ struct sk_buff_head wmi_mgmt_tx_queue;
+
+ enum ath10k_state state;
+
+ struct work_struct register_work;
+ struct work_struct restart_work;
+
+ /* cycle count is reported twice for each visited channel during scan.
+ * access protected by data_lock
+ */
+ u32 survey_last_rx_clear_count;
+ u32 survey_last_cycle_count;
+ struct survey_info survey[ATH10K_NUM_CHANS];
+
+ /* Channel info events are expected to come in pairs without and with
+ * COMPLETE flag set respectively for each channel visit during scan.
+ *
+ * However there are deviations from this rule. This flag is used to
+ * avoid reporting garbage data.
+ */
+ bool ch_info_can_report_survey;
+ struct completion bss_survey_done;
+
+ struct dfs_pattern_detector *dfs_detector;
+
+ unsigned long tx_paused; /* see ATH10K_TX_PAUSE_ */
+
+#ifdef CONFIG_ATH10K_DEBUGFS
+ struct ath10k_debug debug;
+ struct {
+ /* relay(fs) channel for spectral scan */
+ struct rchan *rfs_chan_spec_scan;
+
+ /* spectral_mode and spec_config are protected by conf_mutex */
+ enum ath10k_spectral_mode mode;
+ struct ath10k_spec_scan config;
+ } spectral;
+#endif
+
+ u32 pktlog_filter;
+
+#ifdef CONFIG_DEV_COREDUMP
+ struct {
+ struct ath10k_fw_crash_data *fw_crash_data;
+ } coredump;
+#endif
+
+ struct {
+ /* protected by conf_mutex */
+ struct ath10k_fw_components utf_mode_fw;
+
+ /* protected by data_lock */
+ bool utf_monitor;
+ } testmode;
+
+ struct {
+ /* protected by data_lock */
+ u32 fw_crash_counter;
+ u32 fw_warm_reset_counter;
+ u32 fw_cold_reset_counter;
+ } stats;
+
+ struct ath10k_thermal thermal;
+ struct ath10k_wow wow;
+ struct ath10k_per_peer_tx_stats peer_tx_stats;
+
+ /* NAPI */
+ struct net_device napi_dev;
+ struct napi_struct napi;
+
+ struct work_struct set_coverage_class_work;
+ /* protected by conf_mutex */
+ struct {
+ /* writing also protected by data_lock */
+ s16 coverage_class;
+
+ u32 reg_phyclk;
+ u32 reg_slottime_conf;
+ u32 reg_slottime_orig;
+ u32 reg_ack_cts_timeout_conf;
+ u32 reg_ack_cts_timeout_orig;
+ } fw_coverage;
+
+ u32 ampdu_reference;
+
+ void *ce_priv;
+
+ u32 sta_tid_stats_mask;
+
+ /* protected by data_lock */
+ enum ath10k_radar_confirmation_state radar_conf_state;
+ struct ath10k_radar_found_info last_radar_info;
+ struct work_struct radar_confirmation_work;
+
+ /* must be last */
+ u8 drv_priv[0] __aligned(sizeof(void *));
+};
+
+static inline bool ath10k_peer_stats_enabled(struct ath10k *ar)
+{
+ if (test_bit(ATH10K_FLAG_PEER_STATS, &ar->dev_flags) &&
+ test_bit(WMI_SERVICE_PEER_STATS, ar->wmi.svc_map))
+ return true;
+
+ return false;
+}
+
+extern unsigned long ath10k_coredump_mask;
+
+struct ath10k *ath10k_core_create(size_t priv_size, struct device *dev,
+ enum ath10k_bus bus,
+ enum ath10k_hw_rev hw_rev,
+ const struct ath10k_hif_ops *hif_ops);
+void ath10k_core_destroy(struct ath10k *ar);
+void ath10k_core_get_fw_features_str(struct ath10k *ar,
+ char *buf,
+ size_t max_len);
+int ath10k_core_fetch_firmware_api_n(struct ath10k *ar, const char *name,
+ struct ath10k_fw_file *fw_file);
+
+int ath10k_core_start(struct ath10k *ar, enum ath10k_firmware_mode mode,
+ const struct ath10k_fw_components *fw_components);
+int ath10k_wait_for_suspend(struct ath10k *ar, u32 suspend_opt);
+void ath10k_core_stop(struct ath10k *ar);
+int ath10k_core_register(struct ath10k *ar, u32 chip_id);
+void ath10k_core_unregister(struct ath10k *ar);
+
+#endif /* _CORE_H_ */
diff --git a/drivers/net/wireless/ath/ath10k/coredump.c b/drivers/net/wireless/ath/ath10k/coredump.c
new file mode 100644
index 000000000..385b84f24
--- /dev/null
+++ b/drivers/net/wireless/ath/ath10k/coredump.c
@@ -0,0 +1,1187 @@
+/*
+ * Copyright (c) 2011-2017 Qualcomm Atheros, Inc.
+ * Copyright (c) 2018, The Linux Foundation. All rights reserved.
+ *
+ * Permission to use, copy, modify, and/or distribute this software for any
+ * purpose with or without fee is hereby granted, provided that the above
+ * copyright notice and this permission notice appear in all copies.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
+ * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
+ * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+ * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+ * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
+ * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+ */
+
+#include "coredump.h"
+
+#include <linux/devcoredump.h>
+#include <linux/kernel.h>
+#include <linux/types.h>
+#include <linux/utsname.h>
+
+#include "debug.h"
+#include "hw.h"
+
+static const struct ath10k_mem_section qca6174_hw21_register_sections[] = {
+ {0x800, 0x810},
+ {0x820, 0x82C},
+ {0x830, 0x8F4},
+ {0x90C, 0x91C},
+ {0xA14, 0xA18},
+ {0xA84, 0xA94},
+ {0xAA8, 0xAD4},
+ {0xADC, 0xB40},
+ {0x1000, 0x10A4},
+ {0x10BC, 0x111C},
+ {0x1134, 0x1138},
+ {0x1144, 0x114C},
+ {0x1150, 0x115C},
+ {0x1160, 0x1178},
+ {0x1240, 0x1260},
+ {0x2000, 0x207C},
+ {0x3000, 0x3014},
+ {0x4000, 0x4014},
+ {0x5000, 0x5124},
+ {0x6000, 0x6040},
+ {0x6080, 0x60CC},
+ {0x6100, 0x611C},
+ {0x6140, 0x61D8},
+ {0x6200, 0x6238},
+ {0x6240, 0x628C},
+ {0x62C0, 0x62EC},
+ {0x6380, 0x63E8},
+ {0x6400, 0x6440},
+ {0x6480, 0x64CC},
+ {0x6500, 0x651C},
+ {0x6540, 0x6580},
+ {0x6600, 0x6638},
+ {0x6640, 0x668C},
+ {0x66C0, 0x66EC},
+ {0x6780, 0x67E8},
+ {0x7080, 0x708C},
+ {0x70C0, 0x70C8},
+ {0x7400, 0x741C},
+ {0x7440, 0x7454},
+ {0x7800, 0x7818},
+ {0x8000, 0x8004},
+ {0x8010, 0x8064},
+ {0x8080, 0x8084},
+ {0x80A0, 0x80A4},
+ {0x80C0, 0x80C4},
+ {0x80E0, 0x80F4},
+ {0x8100, 0x8104},
+ {0x8110, 0x812C},
+ {0x9000, 0x9004},
+ {0x9800, 0x982C},
+ {0x9830, 0x9838},
+ {0x9840, 0x986C},
+ {0x9870, 0x9898},
+ {0x9A00, 0x9C00},
+ {0xD580, 0xD59C},
+ {0xF000, 0xF0E0},
+ {0xF140, 0xF190},
+ {0xF250, 0xF25C},
+ {0xF260, 0xF268},
+ {0xF26C, 0xF2A8},
+ {0x10008, 0x1000C},
+ {0x10014, 0x10018},
+ {0x1001C, 0x10020},
+ {0x10024, 0x10028},
+ {0x10030, 0x10034},
+ {0x10040, 0x10054},
+ {0x10058, 0x1007C},
+ {0x10080, 0x100C4},
+ {0x100C8, 0x10114},
+ {0x1012C, 0x10130},
+ {0x10138, 0x10144},
+ {0x10200, 0x10220},
+ {0x10230, 0x10250},
+ {0x10260, 0x10280},
+ {0x10290, 0x102B0},
+ {0x102C0, 0x102DC},
+ {0x102E0, 0x102F4},
+ {0x102FC, 0x1037C},
+ {0x10380, 0x10390},
+ {0x10800, 0x10828},
+ {0x10840, 0x10844},
+ {0x10880, 0x10884},
+ {0x108C0, 0x108E8},
+ {0x10900, 0x10928},
+ {0x10940, 0x10944},
+ {0x10980, 0x10984},
+ {0x109C0, 0x109E8},
+ {0x10A00, 0x10A28},
+ {0x10A40, 0x10A50},
+ {0x11000, 0x11028},
+ {0x11030, 0x11034},
+ {0x11038, 0x11068},
+ {0x11070, 0x11074},
+ {0x11078, 0x110A8},
+ {0x110B0, 0x110B4},
+ {0x110B8, 0x110E8},
+ {0x110F0, 0x110F4},
+ {0x110F8, 0x11128},
+ {0x11138, 0x11144},
+ {0x11178, 0x11180},
+ {0x111B8, 0x111C0},
+ {0x111F8, 0x11200},
+ {0x11238, 0x1123C},
+ {0x11270, 0x11274},
+ {0x11278, 0x1127C},
+ {0x112B0, 0x112B4},
+ {0x112B8, 0x112BC},
+ {0x112F0, 0x112F4},
+ {0x112F8, 0x112FC},
+ {0x11338, 0x1133C},
+ {0x11378, 0x1137C},
+ {0x113B8, 0x113BC},
+ {0x113F8, 0x113FC},
+ {0x11438, 0x11440},
+ {0x11478, 0x11480},
+ {0x114B8, 0x114BC},
+ {0x114F8, 0x114FC},
+ {0x11538, 0x1153C},
+ {0x11578, 0x1157C},
+ {0x115B8, 0x115BC},
+ {0x115F8, 0x115FC},
+ {0x11638, 0x1163C},
+ {0x11678, 0x1167C},
+ {0x116B8, 0x116BC},
+ {0x116F8, 0x116FC},
+ {0x11738, 0x1173C},
+ {0x11778, 0x1177C},
+ {0x117B8, 0x117BC},
+ {0x117F8, 0x117FC},
+ {0x17000, 0x1701C},
+ {0x17020, 0x170AC},
+ {0x18000, 0x18050},
+ {0x18054, 0x18074},
+ {0x18080, 0x180D4},
+ {0x180DC, 0x18104},
+ {0x18108, 0x1813C},
+ {0x18144, 0x18148},
+ {0x18168, 0x18174},
+ {0x18178, 0x18180},
+ {0x181C8, 0x181E0},
+ {0x181E4, 0x181E8},
+ {0x181EC, 0x1820C},
+ {0x1825C, 0x18280},
+ {0x18284, 0x18290},
+ {0x18294, 0x182A0},
+ {0x18300, 0x18304},
+ {0x18314, 0x18320},
+ {0x18328, 0x18350},
+ {0x1835C, 0x1836C},
+ {0x18370, 0x18390},
+ {0x18398, 0x183AC},
+ {0x183BC, 0x183D8},
+ {0x183DC, 0x183F4},
+ {0x18400, 0x186F4},
+ {0x186F8, 0x1871C},
+ {0x18720, 0x18790},
+ {0x19800, 0x19830},
+ {0x19834, 0x19840},
+ {0x19880, 0x1989C},
+ {0x198A4, 0x198B0},
+ {0x198BC, 0x19900},
+ {0x19C00, 0x19C88},
+ {0x19D00, 0x19D20},
+ {0x19E00, 0x19E7C},
+ {0x19E80, 0x19E94},
+ {0x19E98, 0x19EAC},
+ {0x19EB0, 0x19EBC},
+ {0x19F70, 0x19F74},
+ {0x19F80, 0x19F8C},
+ {0x19FA0, 0x19FB4},
+ {0x19FC0, 0x19FD8},
+ {0x1A000, 0x1A200},
+ {0x1A204, 0x1A210},
+ {0x1A228, 0x1A22C},
+ {0x1A230, 0x1A248},
+ {0x1A250, 0x1A270},
+ {0x1A280, 0x1A290},
+ {0x1A2A0, 0x1A2A4},
+ {0x1A2C0, 0x1A2EC},
+ {0x1A300, 0x1A3BC},
+ {0x1A3F0, 0x1A3F4},
+ {0x1A3F8, 0x1A434},
+ {0x1A438, 0x1A444},
+ {0x1A448, 0x1A468},
+ {0x1A580, 0x1A58C},
+ {0x1A644, 0x1A654},
+ {0x1A670, 0x1A698},
+ {0x1A6AC, 0x1A6B0},
+ {0x1A6D0, 0x1A6D4},
+ {0x1A6EC, 0x1A70C},
+ {0x1A710, 0x1A738},
+ {0x1A7C0, 0x1A7D0},
+ {0x1A7D4, 0x1A7D8},
+ {0x1A7DC, 0x1A7E4},
+ {0x1A7F0, 0x1A7F8},
+ {0x1A888, 0x1A89C},
+ {0x1A8A8, 0x1A8AC},
+ {0x1A8C0, 0x1A8DC},
+ {0x1A8F0, 0x1A8FC},
+ {0x1AE04, 0x1AE08},
+ {0x1AE18, 0x1AE24},
+ {0x1AF80, 0x1AF8C},
+ {0x1AFA0, 0x1AFB4},
+ {0x1B000, 0x1B200},
+ {0x1B284, 0x1B288},
+ {0x1B2D0, 0x1B2D8},
+ {0x1B2DC, 0x1B2EC},
+ {0x1B300, 0x1B340},
+ {0x1B374, 0x1B378},
+ {0x1B380, 0x1B384},
+ {0x1B388, 0x1B38C},
+ {0x1B404, 0x1B408},
+ {0x1B420, 0x1B428},
+ {0x1B440, 0x1B444},
+ {0x1B448, 0x1B44C},
+ {0x1B450, 0x1B458},
+ {0x1B45C, 0x1B468},
+ {0x1B584, 0x1B58C},
+ {0x1B68C, 0x1B690},
+ {0x1B6AC, 0x1B6B0},
+ {0x1B7F0, 0x1B7F8},
+ {0x1C800, 0x1CC00},
+ {0x1CE00, 0x1CE04},
+ {0x1CF80, 0x1CF84},
+ {0x1D200, 0x1D800},
+ {0x1E000, 0x20014},
+ {0x20100, 0x20124},
+ {0x21400, 0x217A8},
+ {0x21800, 0x21BA8},
+ {0x21C00, 0x21FA8},
+ {0x22000, 0x223A8},
+ {0x22400, 0x227A8},
+ {0x22800, 0x22BA8},
+ {0x22C00, 0x22FA8},
+ {0x23000, 0x233A8},
+ {0x24000, 0x24034},
+ {0x26000, 0x26064},
+ {0x27000, 0x27024},
+ {0x34000, 0x3400C},
+ {0x34400, 0x3445C},
+ {0x34800, 0x3485C},
+ {0x34C00, 0x34C5C},
+ {0x35000, 0x3505C},
+ {0x35400, 0x3545C},
+ {0x35800, 0x3585C},
+ {0x35C00, 0x35C5C},
+ {0x36000, 0x3605C},
+ {0x38000, 0x38064},
+ {0x38070, 0x380E0},
+ {0x3A000, 0x3A064},
+ {0x40000, 0x400A4},
+ {0x80000, 0x8000C},
+ {0x80010, 0x80020},
+};
+
+static const struct ath10k_mem_section qca6174_hw30_register_sections[] = {
+ {0x800, 0x810},
+ {0x820, 0x82C},
+ {0x830, 0x8F4},
+ {0x90C, 0x91C},
+ {0xA14, 0xA18},
+ {0xA84, 0xA94},
+ {0xAA8, 0xAD4},
+ {0xADC, 0xB40},
+ {0x1000, 0x10A4},
+ {0x10BC, 0x111C},
+ {0x1134, 0x1138},
+ {0x1144, 0x114C},
+ {0x1150, 0x115C},
+ {0x1160, 0x1178},
+ {0x1240, 0x1260},
+ {0x2000, 0x207C},
+ {0x3000, 0x3014},
+ {0x4000, 0x4014},
+ {0x5000, 0x5124},
+ {0x6000, 0x6040},
+ {0x6080, 0x60CC},
+ {0x6100, 0x611C},
+ {0x6140, 0x61D8},
+ {0x6200, 0x6238},
+ {0x6240, 0x628C},
+ {0x62C0, 0x62EC},
+ {0x6380, 0x63E8},
+ {0x6400, 0x6440},
+ {0x6480, 0x64CC},
+ {0x6500, 0x651C},
+ {0x6540, 0x6580},
+ {0x6600, 0x6638},
+ {0x6640, 0x668C},
+ {0x66C0, 0x66EC},
+ {0x6780, 0x67E8},
+ {0x7080, 0x708C},
+ {0x70C0, 0x70C8},
+ {0x7400, 0x741C},
+ {0x7440, 0x7454},
+ {0x7800, 0x7818},
+ {0x8000, 0x8004},
+ {0x8010, 0x8064},
+ {0x8080, 0x8084},
+ {0x80A0, 0x80A4},
+ {0x80C0, 0x80C4},
+ {0x80E0, 0x80F4},
+ {0x8100, 0x8104},
+ {0x8110, 0x812C},
+ {0x9000, 0x9004},
+ {0x9800, 0x982C},
+ {0x9830, 0x9838},
+ {0x9840, 0x986C},
+ {0x9870, 0x9898},
+ {0x9A00, 0x9C00},
+ {0xD580, 0xD59C},
+ {0xF000, 0xF0E0},
+ {0xF140, 0xF190},
+ {0xF250, 0xF25C},
+ {0xF260, 0xF268},
+ {0xF26C, 0xF2A8},
+ {0x10008, 0x1000C},
+ {0x10014, 0x10018},
+ {0x1001C, 0x10020},
+ {0x10024, 0x10028},
+ {0x10030, 0x10034},
+ {0x10040, 0x10054},
+ {0x10058, 0x1007C},
+ {0x10080, 0x100C4},
+ {0x100C8, 0x10114},
+ {0x1012C, 0x10130},
+ {0x10138, 0x10144},
+ {0x10200, 0x10220},
+ {0x10230, 0x10250},
+ {0x10260, 0x10280},
+ {0x10290, 0x102B0},
+ {0x102C0, 0x102DC},
+ {0x102E0, 0x102F4},
+ {0x102FC, 0x1037C},
+ {0x10380, 0x10390},
+ {0x10800, 0x10828},
+ {0x10840, 0x10844},
+ {0x10880, 0x10884},
+ {0x108C0, 0x108E8},
+ {0x10900, 0x10928},
+ {0x10940, 0x10944},
+ {0x10980, 0x10984},
+ {0x109C0, 0x109E8},
+ {0x10A00, 0x10A28},
+ {0x10A40, 0x10A50},
+ {0x11000, 0x11028},
+ {0x11030, 0x11034},
+ {0x11038, 0x11068},
+ {0x11070, 0x11074},
+ {0x11078, 0x110A8},
+ {0x110B0, 0x110B4},
+ {0x110B8, 0x110E8},
+ {0x110F0, 0x110F4},
+ {0x110F8, 0x11128},
+ {0x11138, 0x11144},
+ {0x11178, 0x11180},
+ {0x111B8, 0x111C0},
+ {0x111F8, 0x11200},
+ {0x11238, 0x1123C},
+ {0x11270, 0x11274},
+ {0x11278, 0x1127C},
+ {0x112B0, 0x112B4},
+ {0x112B8, 0x112BC},
+ {0x112F0, 0x112F4},
+ {0x112F8, 0x112FC},
+ {0x11338, 0x1133C},
+ {0x11378, 0x1137C},
+ {0x113B8, 0x113BC},
+ {0x113F8, 0x113FC},
+ {0x11438, 0x11440},
+ {0x11478, 0x11480},
+ {0x114B8, 0x114BC},
+ {0x114F8, 0x114FC},
+ {0x11538, 0x1153C},
+ {0x11578, 0x1157C},
+ {0x115B8, 0x115BC},
+ {0x115F8, 0x115FC},
+ {0x11638, 0x1163C},
+ {0x11678, 0x1167C},
+ {0x116B8, 0x116BC},
+ {0x116F8, 0x116FC},
+ {0x11738, 0x1173C},
+ {0x11778, 0x1177C},
+ {0x117B8, 0x117BC},
+ {0x117F8, 0x117FC},
+ {0x17000, 0x1701C},
+ {0x17020, 0x170AC},
+ {0x18000, 0x18050},
+ {0x18054, 0x18074},
+ {0x18080, 0x180D4},
+ {0x180DC, 0x18104},
+ {0x18108, 0x1813C},
+ {0x18144, 0x18148},
+ {0x18168, 0x18174},
+ {0x18178, 0x18180},
+ {0x181C8, 0x181E0},
+ {0x181E4, 0x181E8},
+ {0x181EC, 0x1820C},
+ {0x1825C, 0x18280},
+ {0x18284, 0x18290},
+ {0x18294, 0x182A0},
+ {0x18300, 0x18304},
+ {0x18314, 0x18320},
+ {0x18328, 0x18350},
+ {0x1835C, 0x1836C},
+ {0x18370, 0x18390},
+ {0x18398, 0x183AC},
+ {0x183BC, 0x183D8},
+ {0x183DC, 0x183F4},
+ {0x18400, 0x186F4},
+ {0x186F8, 0x1871C},
+ {0x18720, 0x18790},
+ {0x19800, 0x19830},
+ {0x19834, 0x19840},
+ {0x19880, 0x1989C},
+ {0x198A4, 0x198B0},
+ {0x198BC, 0x19900},
+ {0x19C00, 0x19C88},
+ {0x19D00, 0x19D20},
+ {0x19E00, 0x19E7C},
+ {0x19E80, 0x19E94},
+ {0x19E98, 0x19EAC},
+ {0x19EB0, 0x19EBC},
+ {0x19F70, 0x19F74},
+ {0x19F80, 0x19F8C},
+ {0x19FA0, 0x19FB4},
+ {0x19FC0, 0x19FD8},
+ {0x1A000, 0x1A200},
+ {0x1A204, 0x1A210},
+ {0x1A228, 0x1A22C},
+ {0x1A230, 0x1A248},
+ {0x1A250, 0x1A270},
+ {0x1A280, 0x1A290},
+ {0x1A2A0, 0x1A2A4},
+ {0x1A2C0, 0x1A2EC},
+ {0x1A300, 0x1A3BC},
+ {0x1A3F0, 0x1A3F4},
+ {0x1A3F8, 0x1A434},
+ {0x1A438, 0x1A444},
+ {0x1A448, 0x1A468},
+ {0x1A580, 0x1A58C},
+ {0x1A644, 0x1A654},
+ {0x1A670, 0x1A698},
+ {0x1A6AC, 0x1A6B0},
+ {0x1A6D0, 0x1A6D4},
+ {0x1A6EC, 0x1A70C},
+ {0x1A710, 0x1A738},
+ {0x1A7C0, 0x1A7D0},
+ {0x1A7D4, 0x1A7D8},
+ {0x1A7DC, 0x1A7E4},
+ {0x1A7F0, 0x1A7F8},
+ {0x1A888, 0x1A89C},
+ {0x1A8A8, 0x1A8AC},
+ {0x1A8C0, 0x1A8DC},
+ {0x1A8F0, 0x1A8FC},
+ {0x1AE04, 0x1AE08},
+ {0x1AE18, 0x1AE24},
+ {0x1AF80, 0x1AF8C},
+ {0x1AFA0, 0x1AFB4},
+ {0x1B000, 0x1B200},
+ {0x1B284, 0x1B288},
+ {0x1B2D0, 0x1B2D8},
+ {0x1B2DC, 0x1B2EC},
+ {0x1B300, 0x1B340},
+ {0x1B374, 0x1B378},
+ {0x1B380, 0x1B384},
+ {0x1B388, 0x1B38C},
+ {0x1B404, 0x1B408},
+ {0x1B420, 0x1B428},
+ {0x1B440, 0x1B444},
+ {0x1B448, 0x1B44C},
+ {0x1B450, 0x1B458},
+ {0x1B45C, 0x1B468},
+ {0x1B584, 0x1B58C},
+ {0x1B68C, 0x1B690},
+ {0x1B6AC, 0x1B6B0},
+ {0x1B7F0, 0x1B7F8},
+ {0x1C800, 0x1CC00},
+ {0x1CE00, 0x1CE04},
+ {0x1CF80, 0x1CF84},
+ {0x1D200, 0x1D800},
+ {0x1E000, 0x20014},
+ {0x20100, 0x20124},
+ {0x21400, 0x217A8},
+ {0x21800, 0x21BA8},
+ {0x21C00, 0x21FA8},
+ {0x22000, 0x223A8},
+ {0x22400, 0x227A8},
+ {0x22800, 0x22BA8},
+ {0x22C00, 0x22FA8},
+ {0x23000, 0x233A8},
+ {0x24000, 0x24034},
+ {0x26000, 0x26064},
+ {0x27000, 0x27024},
+ {0x34000, 0x3400C},
+ {0x34400, 0x3445C},
+ {0x34800, 0x3485C},
+ {0x34C00, 0x34C5C},
+ {0x35000, 0x3505C},
+ {0x35400, 0x3545C},
+ {0x35800, 0x3585C},
+ {0x35C00, 0x35C5C},
+ {0x36000, 0x3605C},
+ {0x38000, 0x38064},
+ {0x38070, 0x380E0},
+ {0x3A000, 0x3A074},
+ {0x40000, 0x400A4},
+ {0x80000, 0x8000C},
+ {0x80010, 0x80020},
+};
+
+static const struct ath10k_mem_region qca6174_hw10_mem_regions[] = {
+ {
+ .type = ATH10K_MEM_REGION_TYPE_DRAM,
+ .start = 0x400000,
+ .len = 0x70000,
+ .name = "DRAM",
+ .section_table = {
+ .sections = NULL,
+ .size = 0,
+ },
+ },
+ {
+ .type = ATH10K_MEM_REGION_TYPE_REG,
+
+ /* RTC_SOC_BASE_ADDRESS */
+ .start = 0x0,
+
+ /* WLAN_MBOX_BASE_ADDRESS - RTC_SOC_BASE_ADDRESS */
+ .len = 0x800 - 0x0,
+
+ .name = "REG_PART1",
+ .section_table = {
+ .sections = NULL,
+ .size = 0,
+ },
+ },
+ {
+ .type = ATH10K_MEM_REGION_TYPE_REG,
+
+ /* STEREO_BASE_ADDRESS */
+ .start = 0x27000,
+
+ /* USB_BASE_ADDRESS - STEREO_BASE_ADDRESS */
+ .len = 0x60000 - 0x27000,
+
+ .name = "REG_PART2",
+ .section_table = {
+ .sections = NULL,
+ .size = 0,
+ },
+ },
+};
+
+static const struct ath10k_mem_region qca6174_hw21_mem_regions[] = {
+ {
+ .type = ATH10K_MEM_REGION_TYPE_DRAM,
+ .start = 0x400000,
+ .len = 0x70000,
+ .name = "DRAM",
+ .section_table = {
+ .sections = NULL,
+ .size = 0,
+ },
+ },
+ {
+ .type = ATH10K_MEM_REGION_TYPE_AXI,
+ .start = 0xa0000,
+ .len = 0x18000,
+ .name = "AXI",
+ .section_table = {
+ .sections = NULL,
+ .size = 0,
+ },
+ },
+ {
+ .type = ATH10K_MEM_REGION_TYPE_REG,
+ .start = 0x800,
+ .len = 0x80020 - 0x800,
+ .name = "REG_TOTAL",
+ .section_table = {
+ .sections = qca6174_hw21_register_sections,
+ .size = ARRAY_SIZE(qca6174_hw21_register_sections),
+ },
+ },
+};
+
+static const struct ath10k_mem_region qca6174_hw30_mem_regions[] = {
+ {
+ .type = ATH10K_MEM_REGION_TYPE_DRAM,
+ .start = 0x400000,
+ .len = 0xa8000,
+ .name = "DRAM",
+ .section_table = {
+ .sections = NULL,
+ .size = 0,
+ },
+ },
+ {
+ .type = ATH10K_MEM_REGION_TYPE_AXI,
+ .start = 0xa0000,
+ .len = 0x18000,
+ .name = "AXI",
+ .section_table = {
+ .sections = NULL,
+ .size = 0,
+ },
+ },
+ {
+ .type = ATH10K_MEM_REGION_TYPE_REG,
+ .start = 0x800,
+ .len = 0x80020 - 0x800,
+ .name = "REG_TOTAL",
+ .section_table = {
+ .sections = qca6174_hw30_register_sections,
+ .size = ARRAY_SIZE(qca6174_hw30_register_sections),
+ },
+ },
+
+ /* IRAM dump must be put last */
+ {
+ .type = ATH10K_MEM_REGION_TYPE_IRAM1,
+ .start = 0x00980000,
+ .len = 0x00080000,
+ .name = "IRAM1",
+ .section_table = {
+ .sections = NULL,
+ .size = 0,
+ },
+ },
+ {
+ .type = ATH10K_MEM_REGION_TYPE_IRAM2,
+ .start = 0x00a00000,
+ .len = 0x00040000,
+ .name = "IRAM2",
+ .section_table = {
+ .sections = NULL,
+ .size = 0,
+ },
+ },
+};
+
+static const struct ath10k_mem_region qca988x_hw20_mem_regions[] = {
+ {
+ .type = ATH10K_MEM_REGION_TYPE_DRAM,
+ .start = 0x400000,
+ .len = 0x50000,
+ .name = "DRAM",
+ .section_table = {
+ .sections = NULL,
+ .size = 0,
+ },
+ },
+ {
+ .type = ATH10K_MEM_REGION_TYPE_REG,
+ .start = 0x4000,
+ .len = 0x2000,
+ .name = "REG_PART1",
+ .section_table = {
+ .sections = NULL,
+ .size = 0,
+ },
+ },
+ {
+ .type = ATH10K_MEM_REGION_TYPE_REG,
+ .start = 0x8000,
+ .len = 0x58000,
+ .name = "REG_PART2",
+ .section_table = {
+ .sections = NULL,
+ .size = 0,
+ },
+ },
+};
+
+static const struct ath10k_mem_region qca99x0_hw20_mem_regions[] = {
+ {
+ .type = ATH10K_MEM_REGION_TYPE_DRAM,
+ .start = 0x400000,
+ .len = 0x60000,
+ .name = "DRAM",
+ .section_table = {
+ .sections = NULL,
+ .size = 0,
+ },
+ },
+ {
+ .type = ATH10K_MEM_REGION_TYPE_REG,
+ .start = 0x98000,
+ .len = 0x50000,
+ .name = "IRAM",
+ .section_table = {
+ .sections = NULL,
+ .size = 0,
+ },
+ },
+ {
+ .type = ATH10K_MEM_REGION_TYPE_IOSRAM,
+ .start = 0xC0000,
+ .len = 0x40000,
+ .name = "SRAM",
+ .section_table = {
+ .sections = NULL,
+ .size = 0,
+ },
+ },
+ {
+ .type = ATH10K_MEM_REGION_TYPE_IOREG,
+ .start = 0x30000,
+ .len = 0x7000,
+ .name = "APB REG 1",
+ .section_table = {
+ .sections = NULL,
+ .size = 0,
+ },
+ },
+ {
+ .type = ATH10K_MEM_REGION_TYPE_IOREG,
+ .start = 0x3f000,
+ .len = 0x3000,
+ .name = "APB REG 2",
+ .section_table = {
+ .sections = NULL,
+ .size = 0,
+ },
+ },
+ {
+ .type = ATH10K_MEM_REGION_TYPE_IOREG,
+ .start = 0x43000,
+ .len = 0x3000,
+ .name = "WIFI REG",
+ .section_table = {
+ .sections = NULL,
+ .size = 0,
+ },
+ },
+ {
+ .type = ATH10K_MEM_REGION_TYPE_IOREG,
+ .start = 0x4A000,
+ .len = 0x5000,
+ .name = "CE REG",
+ .section_table = {
+ .sections = NULL,
+ .size = 0,
+ },
+ },
+ {
+ .type = ATH10K_MEM_REGION_TYPE_IOREG,
+ .start = 0x80000,
+ .len = 0x6000,
+ .name = "SOC REG",
+ .section_table = {
+ .sections = NULL,
+ .size = 0,
+ },
+ },
+};
+
+static const struct ath10k_mem_region qca9984_hw10_mem_regions[] = {
+ {
+ .type = ATH10K_MEM_REGION_TYPE_DRAM,
+ .start = 0x400000,
+ .len = 0x80000,
+ .name = "DRAM",
+ .section_table = {
+ .sections = NULL,
+ .size = 0,
+ },
+ },
+ {
+ .type = ATH10K_MEM_REGION_TYPE_REG,
+ .start = 0x98000,
+ .len = 0x50000,
+ .name = "IRAM",
+ .section_table = {
+ .sections = NULL,
+ .size = 0,
+ },
+ },
+ {
+ .type = ATH10K_MEM_REGION_TYPE_IOSRAM,
+ .start = 0xC0000,
+ .len = 0x40000,
+ .name = "SRAM",
+ .section_table = {
+ .sections = NULL,
+ .size = 0,
+ },
+ },
+ {
+ .type = ATH10K_MEM_REGION_TYPE_IOREG,
+ .start = 0x30000,
+ .len = 0x7000,
+ .name = "APB REG 1",
+ .section_table = {
+ .sections = NULL,
+ .size = 0,
+ },
+ },
+ {
+ .type = ATH10K_MEM_REGION_TYPE_IOREG,
+ .start = 0x3f000,
+ .len = 0x3000,
+ .name = "APB REG 2",
+ .section_table = {
+ .sections = NULL,
+ .size = 0,
+ },
+ },
+ {
+ .type = ATH10K_MEM_REGION_TYPE_IOREG,
+ .start = 0x43000,
+ .len = 0x3000,
+ .name = "WIFI REG",
+ .section_table = {
+ .sections = NULL,
+ .size = 0,
+ },
+ },
+ {
+ .type = ATH10K_MEM_REGION_TYPE_IOREG,
+ .start = 0x4A000,
+ .len = 0x5000,
+ .name = "CE REG",
+ .section_table = {
+ .sections = NULL,
+ .size = 0,
+ },
+ },
+ {
+ .type = ATH10K_MEM_REGION_TYPE_IOREG,
+ .start = 0x80000,
+ .len = 0x6000,
+ .name = "SOC REG",
+ .section_table = {
+ .sections = NULL,
+ .size = 0,
+ },
+ },
+};
+
+static const struct ath10k_hw_mem_layout hw_mem_layouts[] = {
+ {
+ .hw_id = QCA6174_HW_1_0_VERSION,
+ .region_table = {
+ .regions = qca6174_hw10_mem_regions,
+ .size = ARRAY_SIZE(qca6174_hw10_mem_regions),
+ },
+ },
+ {
+ .hw_id = QCA6174_HW_1_1_VERSION,
+ .region_table = {
+ .regions = qca6174_hw10_mem_regions,
+ .size = ARRAY_SIZE(qca6174_hw10_mem_regions),
+ },
+ },
+ {
+ .hw_id = QCA6174_HW_1_3_VERSION,
+ .region_table = {
+ .regions = qca6174_hw10_mem_regions,
+ .size = ARRAY_SIZE(qca6174_hw10_mem_regions),
+ },
+ },
+ {
+ .hw_id = QCA6174_HW_2_1_VERSION,
+ .region_table = {
+ .regions = qca6174_hw21_mem_regions,
+ .size = ARRAY_SIZE(qca6174_hw21_mem_regions),
+ },
+ },
+ {
+ .hw_id = QCA6174_HW_3_0_VERSION,
+ .region_table = {
+ .regions = qca6174_hw30_mem_regions,
+ .size = ARRAY_SIZE(qca6174_hw30_mem_regions),
+ },
+ },
+ {
+ .hw_id = QCA6174_HW_3_2_VERSION,
+ .region_table = {
+ .regions = qca6174_hw30_mem_regions,
+ .size = ARRAY_SIZE(qca6174_hw30_mem_regions),
+ },
+ },
+ {
+ .hw_id = QCA9377_HW_1_1_DEV_VERSION,
+ .region_table = {
+ .regions = qca6174_hw30_mem_regions,
+ .size = ARRAY_SIZE(qca6174_hw30_mem_regions),
+ },
+ },
+ {
+ .hw_id = QCA988X_HW_2_0_VERSION,
+ .region_table = {
+ .regions = qca988x_hw20_mem_regions,
+ .size = ARRAY_SIZE(qca988x_hw20_mem_regions),
+ },
+ },
+ {
+ .hw_id = QCA9984_HW_1_0_DEV_VERSION,
+ .region_table = {
+ .regions = qca9984_hw10_mem_regions,
+ .size = ARRAY_SIZE(qca9984_hw10_mem_regions),
+ },
+ },
+ {
+ .hw_id = QCA9888_HW_2_0_DEV_VERSION,
+ .region_table = {
+ .regions = qca9984_hw10_mem_regions,
+ .size = ARRAY_SIZE(qca9984_hw10_mem_regions),
+ },
+ },
+ {
+ .hw_id = QCA99X0_HW_2_0_DEV_VERSION,
+ .region_table = {
+ .regions = qca99x0_hw20_mem_regions,
+ .size = ARRAY_SIZE(qca99x0_hw20_mem_regions),
+ },
+ },
+};
+
+static u32 ath10k_coredump_get_ramdump_size(struct ath10k *ar)
+{
+ const struct ath10k_hw_mem_layout *hw;
+ const struct ath10k_mem_region *mem_region;
+ size_t size = 0;
+ int i;
+
+ hw = ath10k_coredump_get_mem_layout(ar);
+
+ if (!hw)
+ return 0;
+
+ mem_region = &hw->region_table.regions[0];
+
+ for (i = 0; i < hw->region_table.size; i++) {
+ size += mem_region->len;
+ mem_region++;
+ }
+
+ /* reserve space for the headers */
+ size += hw->region_table.size * sizeof(struct ath10k_dump_ram_data_hdr);
+
+	/* make sure it is aligned to 16 bytes for debug message print out */
+ size = ALIGN(size, 16);
+
+ return size;
+}
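+
+/* Note: the buffer sized here is later filled by the bus code as a
+ * sequence of (struct ath10k_dump_ram_data_hdr + payload) records, one
+ * per memory region in the table above, which is why one header per
+ * region is reserved.
+ */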
+
+const struct ath10k_hw_mem_layout *ath10k_coredump_get_mem_layout(struct ath10k *ar)
+{
+ int i;
+
+ if (!test_bit(ATH10K_FW_CRASH_DUMP_RAM_DATA, &ath10k_coredump_mask))
+ return NULL;
+
+ if (WARN_ON(ar->target_version == 0))
+ return NULL;
+
+ for (i = 0; i < ARRAY_SIZE(hw_mem_layouts); i++) {
+ if (ar->target_version == hw_mem_layouts[i].hw_id)
+ return &hw_mem_layouts[i];
+ }
+
+ return NULL;
+}
+EXPORT_SYMBOL(ath10k_coredump_get_mem_layout);
+
+struct ath10k_fw_crash_data *ath10k_coredump_new(struct ath10k *ar)
+{
+ struct ath10k_fw_crash_data *crash_data = ar->coredump.fw_crash_data;
+
+ lockdep_assert_held(&ar->data_lock);
+
+ if (ath10k_coredump_mask == 0)
+ /* coredump disabled */
+ return NULL;
+
+ guid_gen(&crash_data->guid);
+ ktime_get_real_ts64(&crash_data->timestamp);
+
+ return crash_data;
+}
+EXPORT_SYMBOL(ath10k_coredump_new);
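+
+/* ath10k_coredump_new() only stamps a fresh guid and timestamp; the
+ * register, CE and RAM contents are expected to be filled in by the bus
+ * specific crash handler (still under data_lock) before
+ * ath10k_coredump_submit() packages everything up.
+ */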
+
+static struct ath10k_dump_file_data *ath10k_coredump_build(struct ath10k *ar)
+{
+ struct ath10k_fw_crash_data *crash_data = ar->coredump.fw_crash_data;
+ struct ath10k_ce_crash_hdr *ce_hdr;
+ struct ath10k_dump_file_data *dump_data;
+ struct ath10k_tlv_dump_data *dump_tlv;
+ size_t hdr_len = sizeof(*dump_data);
+ size_t len, sofar = 0;
+ unsigned char *buf;
+
+ len = hdr_len;
+
+ if (test_bit(ATH10K_FW_CRASH_DUMP_REGISTERS, &ath10k_coredump_mask))
+ len += sizeof(*dump_tlv) + sizeof(crash_data->registers);
+
+ if (test_bit(ATH10K_FW_CRASH_DUMP_CE_DATA, &ath10k_coredump_mask))
+ len += sizeof(*dump_tlv) + sizeof(*ce_hdr) +
+ CE_COUNT * sizeof(ce_hdr->entries[0]);
+
+ if (test_bit(ATH10K_FW_CRASH_DUMP_RAM_DATA, &ath10k_coredump_mask))
+ len += sizeof(*dump_tlv) + crash_data->ramdump_buf_len;
+
+ sofar += hdr_len;
+
+ /* This is going to get big when we start dumping FW RAM and such,
+ * so go ahead and use vmalloc.
+ */
+ buf = vzalloc(len);
+ if (!buf)
+ return NULL;
+
+ spin_lock_bh(&ar->data_lock);
+
+ dump_data = (struct ath10k_dump_file_data *)(buf);
+ strlcpy(dump_data->df_magic, "ATH10K-FW-DUMP",
+ sizeof(dump_data->df_magic));
+ dump_data->len = cpu_to_le32(len);
+
+ dump_data->version = cpu_to_le32(ATH10K_FW_CRASH_DUMP_VERSION);
+
+ guid_copy(&dump_data->guid, &crash_data->guid);
+ dump_data->chip_id = cpu_to_le32(ar->chip_id);
+ dump_data->bus_type = cpu_to_le32(0);
+ dump_data->target_version = cpu_to_le32(ar->target_version);
+ dump_data->fw_version_major = cpu_to_le32(ar->fw_version_major);
+ dump_data->fw_version_minor = cpu_to_le32(ar->fw_version_minor);
+ dump_data->fw_version_release = cpu_to_le32(ar->fw_version_release);
+ dump_data->fw_version_build = cpu_to_le32(ar->fw_version_build);
+ dump_data->phy_capability = cpu_to_le32(ar->phy_capability);
+ dump_data->hw_min_tx_power = cpu_to_le32(ar->hw_min_tx_power);
+ dump_data->hw_max_tx_power = cpu_to_le32(ar->hw_max_tx_power);
+ dump_data->ht_cap_info = cpu_to_le32(ar->ht_cap_info);
+ dump_data->vht_cap_info = cpu_to_le32(ar->vht_cap_info);
+ dump_data->num_rf_chains = cpu_to_le32(ar->num_rf_chains);
+
+ strlcpy(dump_data->fw_ver, ar->hw->wiphy->fw_version,
+ sizeof(dump_data->fw_ver));
+
+ dump_data->kernel_ver_code = 0;
+ strlcpy(dump_data->kernel_ver, init_utsname()->release,
+ sizeof(dump_data->kernel_ver));
+
+ dump_data->tv_sec = cpu_to_le64(crash_data->timestamp.tv_sec);
+ dump_data->tv_nsec = cpu_to_le64(crash_data->timestamp.tv_nsec);
+
+ if (test_bit(ATH10K_FW_CRASH_DUMP_REGISTERS, &ath10k_coredump_mask)) {
+ dump_tlv = (struct ath10k_tlv_dump_data *)(buf + sofar);
+ dump_tlv->type = cpu_to_le32(ATH10K_FW_CRASH_DUMP_REGISTERS);
+ dump_tlv->tlv_len = cpu_to_le32(sizeof(crash_data->registers));
+ memcpy(dump_tlv->tlv_data, &crash_data->registers,
+ sizeof(crash_data->registers));
+ sofar += sizeof(*dump_tlv) + sizeof(crash_data->registers);
+ }
+
+ if (test_bit(ATH10K_FW_CRASH_DUMP_CE_DATA, &ath10k_coredump_mask)) {
+ dump_tlv = (struct ath10k_tlv_dump_data *)(buf + sofar);
+ dump_tlv->type = cpu_to_le32(ATH10K_FW_CRASH_DUMP_CE_DATA);
+ dump_tlv->tlv_len = cpu_to_le32(sizeof(*ce_hdr) +
+ CE_COUNT * sizeof(ce_hdr->entries[0]));
+ ce_hdr = (struct ath10k_ce_crash_hdr *)(dump_tlv->tlv_data);
+ ce_hdr->ce_count = cpu_to_le32(CE_COUNT);
+ memset(ce_hdr->reserved, 0, sizeof(ce_hdr->reserved));
+ memcpy(ce_hdr->entries, crash_data->ce_crash_data,
+ CE_COUNT * sizeof(ce_hdr->entries[0]));
+ sofar += sizeof(*dump_tlv) + sizeof(*ce_hdr) +
+ CE_COUNT * sizeof(ce_hdr->entries[0]);
+ }
+
+ /* Gather ram dump */
+ if (test_bit(ATH10K_FW_CRASH_DUMP_RAM_DATA, &ath10k_coredump_mask)) {
+ dump_tlv = (struct ath10k_tlv_dump_data *)(buf + sofar);
+ dump_tlv->type = cpu_to_le32(ATH10K_FW_CRASH_DUMP_RAM_DATA);
+ dump_tlv->tlv_len = cpu_to_le32(crash_data->ramdump_buf_len);
+ if (crash_data->ramdump_buf_len) {
+ memcpy(dump_tlv->tlv_data, crash_data->ramdump_buf,
+ crash_data->ramdump_buf_len);
+ sofar += sizeof(*dump_tlv) + crash_data->ramdump_buf_len;
+ }
+ }
+
+ spin_unlock_bh(&ar->data_lock);
+
+ return dump_data;
+}
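+
+/* A minimal sketch of how a consumer could walk the TLVs in the blob
+ * built above (illustrative only, not part of the driver; handle_tlv()
+ * is a hypothetical callback keyed on enum ath10k_fw_crash_dump_type):
+ *
+ *	struct ath10k_dump_file_data *hdr = (void *)buf;
+ *	size_t off = sizeof(*hdr);
+ *
+ *	while (off < le32_to_cpu(hdr->len)) {
+ *		struct ath10k_tlv_dump_data *tlv = (void *)(buf + off);
+ *
+ *		handle_tlv(le32_to_cpu(tlv->type), tlv->tlv_data,
+ *			   le32_to_cpu(tlv->tlv_len));
+ *		off += sizeof(*tlv) + le32_to_cpu(tlv->tlv_len);
+ *	}
+ */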
+
+int ath10k_coredump_submit(struct ath10k *ar)
+{
+ struct ath10k_dump_file_data *dump;
+
+ if (ath10k_coredump_mask == 0)
+ /* coredump disabled */
+ return 0;
+
+ dump = ath10k_coredump_build(ar);
+ if (!dump) {
+ ath10k_warn(ar, "no crash dump data found for devcoredump");
+ return -ENODATA;
+ }
+
+ dev_coredumpv(ar->dev, dump, le32_to_cpu(dump->len), GFP_KERNEL);
+
+ return 0;
+}
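+
+/* dev_coredumpv() takes ownership of the vmalloc'd blob: it exposes the
+ * dump to userspace under /sys/class/devcoredump/ and frees the buffer
+ * itself once the dump is read or times out.
+ */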
+
+int ath10k_coredump_create(struct ath10k *ar)
+{
+ if (ath10k_coredump_mask == 0)
+ /* coredump disabled */
+ return 0;
+
+ ar->coredump.fw_crash_data = vzalloc(sizeof(*ar->coredump.fw_crash_data));
+ if (!ar->coredump.fw_crash_data)
+ return -ENOMEM;
+
+ return 0;
+}
+
+int ath10k_coredump_register(struct ath10k *ar)
+{
+ struct ath10k_fw_crash_data *crash_data = ar->coredump.fw_crash_data;
+
+ if (test_bit(ATH10K_FW_CRASH_DUMP_RAM_DATA, &ath10k_coredump_mask)) {
+ crash_data->ramdump_buf_len = ath10k_coredump_get_ramdump_size(ar);
+
+ if (!crash_data->ramdump_buf_len)
+ return 0;
+
+ crash_data->ramdump_buf = vzalloc(crash_data->ramdump_buf_len);
+ if (!crash_data->ramdump_buf)
+ return -ENOMEM;
+ }
+
+ return 0;
+}
+
+void ath10k_coredump_unregister(struct ath10k *ar)
+{
+ struct ath10k_fw_crash_data *crash_data = ar->coredump.fw_crash_data;
+
+ vfree(crash_data->ramdump_buf);
+}
+
+void ath10k_coredump_destroy(struct ath10k *ar)
+{
+ if (ar->coredump.fw_crash_data->ramdump_buf) {
+ vfree(ar->coredump.fw_crash_data->ramdump_buf);
+ ar->coredump.fw_crash_data->ramdump_buf = NULL;
+ ar->coredump.fw_crash_data->ramdump_buf_len = 0;
+ }
+
+ vfree(ar->coredump.fw_crash_data);
+ ar->coredump.fw_crash_data = NULL;
+}
diff --git a/drivers/net/wireless/ath/ath10k/coredump.h b/drivers/net/wireless/ath/ath10k/coredump.h
new file mode 100644
index 000000000..3baaf9d2c
--- /dev/null
+++ b/drivers/net/wireless/ath/ath10k/coredump.h
@@ -0,0 +1,227 @@
+/*
+ * Copyright (c) 2011-2017 Qualcomm Atheros, Inc.
+ *
+ * Permission to use, copy, modify, and/or distribute this software for any
+ * purpose with or without fee is hereby granted, provided that the above
+ * copyright notice and this permission notice appear in all copies.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
+ * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
+ * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+ * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+ * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
+ * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+ */
+
+#ifndef _COREDUMP_H_
+#define _COREDUMP_H_
+
+#include "core.h"
+
+#define ATH10K_FW_CRASH_DUMP_VERSION 1
+
+/**
+ * enum ath10k_fw_crash_dump_type - types of data in the dump file
+ * @ATH10K_FW_CRASH_DUMP_REGISTERS: Register crash dump in binary format
+ * @ATH10K_FW_CRASH_DUMP_CE_DATA: Copy Engine crash data
+ * @ATH10K_FW_CRASH_DUMP_RAM_DATA: RAM dump, stored as multiple
+ *	struct ath10k_dump_ram_data_hdr records
+ * @ATH10K_FW_CRASH_DUMP_MAX: number of types, keep last
+ */
+enum ath10k_fw_crash_dump_type {
+ ATH10K_FW_CRASH_DUMP_REGISTERS = 0,
+ ATH10K_FW_CRASH_DUMP_CE_DATA = 1,
+
+ /* contains multiple struct ath10k_dump_ram_data_hdr */
+ ATH10K_FW_CRASH_DUMP_RAM_DATA = 2,
+
+ ATH10K_FW_CRASH_DUMP_MAX,
+};
+
+struct ath10k_tlv_dump_data {
+ /* see ath10k_fw_crash_dump_type above */
+ __le32 type;
+
+ /* in bytes */
+ __le32 tlv_len;
+
+ /* pad to 32-bit boundaries as needed */
+ u8 tlv_data[];
+} __packed;
+
+struct ath10k_dump_file_data {
+ /* dump file information */
+
+ /* "ATH10K-FW-DUMP" */
+ char df_magic[16];
+
+ __le32 len;
+
+ /* file dump version */
+ __le32 version;
+
+ /* some info we can get from ath10k struct that might help */
+
+ guid_t guid;
+
+ __le32 chip_id;
+
+ /* 0 for now, in place for later hardware */
+ __le32 bus_type;
+
+ __le32 target_version;
+ __le32 fw_version_major;
+ __le32 fw_version_minor;
+ __le32 fw_version_release;
+ __le32 fw_version_build;
+ __le32 phy_capability;
+ __le32 hw_min_tx_power;
+ __le32 hw_max_tx_power;
+ __le32 ht_cap_info;
+ __le32 vht_cap_info;
+ __le32 num_rf_chains;
+
+ /* firmware version string */
+ char fw_ver[ETHTOOL_FWVERS_LEN];
+
+ /* Kernel related information */
+
+ /* time-of-day stamp */
+ __le64 tv_sec;
+
+ /* time-of-day stamp, nano-seconds */
+ __le64 tv_nsec;
+
+ /* LINUX_VERSION_CODE */
+ __le32 kernel_ver_code;
+
+ /* VERMAGIC_STRING */
+ char kernel_ver[64];
+
+ /* room for growth w/out changing binary format */
+ u8 unused[128];
+
+ /* struct ath10k_tlv_dump_data + more */
+ u8 data[0];
+} __packed;
+
+struct ath10k_dump_ram_data_hdr {
+ /* enum ath10k_mem_region_type */
+ __le32 region_type;
+
+ __le32 start;
+
+ /* length of payload data, not including this header */
+ __le32 length;
+
+ u8 data[0];
+};
+
+/* magic number to fill the holes not copied due to sections in regions */
+#define ATH10K_MAGIC_NOT_COPIED 0xAA
+
+/* part of user space ABI */
+enum ath10k_mem_region_type {
+ ATH10K_MEM_REGION_TYPE_REG = 1,
+ ATH10K_MEM_REGION_TYPE_DRAM = 2,
+ ATH10K_MEM_REGION_TYPE_AXI = 3,
+ ATH10K_MEM_REGION_TYPE_IRAM1 = 4,
+ ATH10K_MEM_REGION_TYPE_IRAM2 = 5,
+ ATH10K_MEM_REGION_TYPE_IOSRAM = 6,
+ ATH10K_MEM_REGION_TYPE_IOREG = 7,
+};
+
+/* Define a section of a region which should be copied. Not all parts of
+ * the memory are safe to copy (some registers, for example), so sections
+ * can be used to define exactly what is safe to copy.
+ *
+ * To minimize the size of the array, the list must obey the format:
+ * '{start0,stop0},{start1,stop1},{start2,stop2}....' The values below must
+ * also obey 'start0 < stop0 < start1 < stop1 < start2 < ...', otherwise
+ * we may encounter errors in the dump processing.
+ */
+struct ath10k_mem_section {
+ u32 start;
+ u32 end;
+};
+
+/* One region of a memory layout. If the sections field is NULL the entire
+ * region is copied. If sections is non-NULL only the areas specified in
+ * sections are copied and the rest of the areas are filled with
+ * ATH10K_MAGIC_NOT_COPIED.
+ */
+struct ath10k_mem_region {
+ enum ath10k_mem_region_type type;
+ u32 start;
+ u32 len;
+
+ const char *name;
+
+ struct {
+ const struct ath10k_mem_section *sections;
+ u32 size;
+ } section_table;
+};
+
+/* Contains the memory layout of a hardware version identified with the
+ * hardware id, split into regions.
+ */
+struct ath10k_hw_mem_layout {
+ u32 hw_id;
+
+ struct {
+ const struct ath10k_mem_region *regions;
+ int size;
+ } region_table;
+};
+
+/* FIXME: where to put this? */
+extern unsigned long ath10k_coredump_mask;
+
+#ifdef CONFIG_DEV_COREDUMP
+
+int ath10k_coredump_submit(struct ath10k *ar);
+struct ath10k_fw_crash_data *ath10k_coredump_new(struct ath10k *ar);
+int ath10k_coredump_create(struct ath10k *ar);
+int ath10k_coredump_register(struct ath10k *ar);
+void ath10k_coredump_unregister(struct ath10k *ar);
+void ath10k_coredump_destroy(struct ath10k *ar);
+
+const struct ath10k_hw_mem_layout *ath10k_coredump_get_mem_layout(struct ath10k *ar);
+
+#else /* CONFIG_DEV_COREDUMP */
+
+static inline int ath10k_coredump_submit(struct ath10k *ar)
+{
+ return 0;
+}
+
+static inline struct ath10k_fw_crash_data *ath10k_coredump_new(struct ath10k *ar)
+{
+ return NULL;
+}
+
+static inline int ath10k_coredump_create(struct ath10k *ar)
+{
+ return 0;
+}
+
+static inline int ath10k_coredump_register(struct ath10k *ar)
+{
+ return 0;
+}
+
+static inline void ath10k_coredump_unregister(struct ath10k *ar)
+{
+}
+
+static inline void ath10k_coredump_destroy(struct ath10k *ar)
+{
+}
+
+static inline const struct ath10k_hw_mem_layout *
+ath10k_coredump_get_mem_layout(struct ath10k *ar)
+{
+ return NULL;
+}
+
+#endif /* CONFIG_DEV_COREDUMP */
+
+#endif /* _COREDUMP_H_ */
diff --git a/drivers/net/wireless/ath/ath10k/debug.c b/drivers/net/wireless/ath/ath10k/debug.c
new file mode 100644
index 000000000..4e980e78b
--- /dev/null
+++ b/drivers/net/wireless/ath/ath10k/debug.c
@@ -0,0 +1,2542 @@
+/*
+ * Copyright (c) 2005-2011 Atheros Communications Inc.
+ * Copyright (c) 2011-2017 Qualcomm Atheros, Inc.
+ * Copyright (c) 2018, The Linux Foundation. All rights reserved.
+ *
+ * Permission to use, copy, modify, and/or distribute this software for any
+ * purpose with or without fee is hereby granted, provided that the above
+ * copyright notice and this permission notice appear in all copies.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
+ * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
+ * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+ * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+ * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
+ * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+ */
+
+#include <linux/module.h>
+#include <linux/debugfs.h>
+#include <linux/vmalloc.h>
+#include <linux/crc32.h>
+#include <linux/firmware.h>
+
+#include "core.h"
+#include "debug.h"
+#include "hif.h"
+#include "wmi-ops.h"
+
+/* ms */
+#define ATH10K_DEBUG_HTT_STATS_INTERVAL 1000
+
+#define ATH10K_DEBUG_CAL_DATA_LEN 12064
+
+void ath10k_info(struct ath10k *ar, const char *fmt, ...)
+{
+ struct va_format vaf = {
+ .fmt = fmt,
+ };
+ va_list args;
+
+ va_start(args, fmt);
+ vaf.va = &args;
+ dev_info(ar->dev, "%pV", &vaf);
+ trace_ath10k_log_info(ar, &vaf);
+ va_end(args);
+}
+EXPORT_SYMBOL(ath10k_info);
+
+void ath10k_debug_print_hwfw_info(struct ath10k *ar)
+{
+ const struct firmware *firmware;
+ char fw_features[128] = {};
+ u32 crc = 0;
+
+ ath10k_core_get_fw_features_str(ar, fw_features, sizeof(fw_features));
+
+ ath10k_info(ar, "%s target 0x%08x chip_id 0x%08x sub %04x:%04x",
+ ar->hw_params.name,
+ ar->target_version,
+ ar->chip_id,
+ ar->id.subsystem_vendor, ar->id.subsystem_device);
+
+ ath10k_info(ar, "kconfig debug %d debugfs %d tracing %d dfs %d testmode %d\n",
+ IS_ENABLED(CONFIG_ATH10K_DEBUG),
+ IS_ENABLED(CONFIG_ATH10K_DEBUGFS),
+ IS_ENABLED(CONFIG_ATH10K_TRACING),
+ IS_ENABLED(CONFIG_ATH10K_DFS_CERTIFIED),
+ IS_ENABLED(CONFIG_NL80211_TESTMODE));
+
+ firmware = ar->normal_mode_fw.fw_file.firmware;
+ if (firmware)
+ crc = crc32_le(0, firmware->data, firmware->size);
+
+ ath10k_info(ar, "firmware ver %s api %d features %s crc32 %08x\n",
+ ar->hw->wiphy->fw_version,
+ ar->fw_api,
+ fw_features,
+ crc);
+}
+
+void ath10k_debug_print_board_info(struct ath10k *ar)
+{
+ char boardinfo[100];
+ const struct firmware *board;
+ u32 crc;
+
+ if (ar->id.bmi_ids_valid)
+ scnprintf(boardinfo, sizeof(boardinfo), "%d:%d",
+ ar->id.bmi_chip_id, ar->id.bmi_board_id);
+ else
+ scnprintf(boardinfo, sizeof(boardinfo), "N/A");
+
+ board = ar->normal_mode_fw.board;
+ if (!IS_ERR_OR_NULL(board))
+ crc = crc32_le(0, board->data, board->size);
+ else
+ crc = 0;
+
+ ath10k_info(ar, "board_file api %d bmi_id %s crc32 %08x",
+ ar->bd_api,
+ boardinfo,
+ crc);
+}
+
+void ath10k_debug_print_boot_info(struct ath10k *ar)
+{
+ ath10k_info(ar, "htt-ver %d.%d wmi-op %d htt-op %d cal %s max-sta %d raw %d hwcrypto %d\n",
+ ar->htt.target_version_major,
+ ar->htt.target_version_minor,
+ ar->normal_mode_fw.fw_file.wmi_op_version,
+ ar->normal_mode_fw.fw_file.htt_op_version,
+ ath10k_cal_mode_str(ar->cal_mode),
+ ar->max_num_stations,
+ test_bit(ATH10K_FLAG_RAW_MODE, &ar->dev_flags),
+ !test_bit(ATH10K_FLAG_HW_CRYPTO_DISABLED, &ar->dev_flags));
+}
+
+void ath10k_print_driver_info(struct ath10k *ar)
+{
+ ath10k_debug_print_hwfw_info(ar);
+ ath10k_debug_print_board_info(ar);
+ ath10k_debug_print_boot_info(ar);
+}
+EXPORT_SYMBOL(ath10k_print_driver_info);
+
+void ath10k_err(struct ath10k *ar, const char *fmt, ...)
+{
+ struct va_format vaf = {
+ .fmt = fmt,
+ };
+ va_list args;
+
+ va_start(args, fmt);
+ vaf.va = &args;
+ dev_err(ar->dev, "%pV", &vaf);
+ trace_ath10k_log_err(ar, &vaf);
+ va_end(args);
+}
+EXPORT_SYMBOL(ath10k_err);
+
+void ath10k_warn(struct ath10k *ar, const char *fmt, ...)
+{
+ struct va_format vaf = {
+ .fmt = fmt,
+ };
+ va_list args;
+
+ va_start(args, fmt);
+ vaf.va = &args;
+ dev_warn_ratelimited(ar->dev, "%pV", &vaf);
+ trace_ath10k_log_warn(ar, &vaf);
+
+ va_end(args);
+}
+EXPORT_SYMBOL(ath10k_warn);
+
+#ifdef CONFIG_ATH10K_DEBUGFS
+
+static ssize_t ath10k_read_wmi_services(struct file *file,
+ char __user *user_buf,
+ size_t count, loff_t *ppos)
+{
+ struct ath10k *ar = file->private_data;
+ char *buf;
+ size_t len = 0, buf_len = 8192;
+ const char *name;
+ ssize_t ret_cnt;
+ bool enabled;
+ int i;
+
+ buf = kzalloc(buf_len, GFP_KERNEL);
+ if (!buf)
+ return -ENOMEM;
+
+ mutex_lock(&ar->conf_mutex);
+
+ spin_lock_bh(&ar->data_lock);
+ for (i = 0; i < WMI_SERVICE_MAX; i++) {
+ enabled = test_bit(i, ar->wmi.svc_map);
+ name = wmi_service_name(i);
+
+ if (!name) {
+ if (enabled)
+ len += scnprintf(buf + len, buf_len - len,
+ "%-40s %s (bit %d)\n",
+ "unknown", "enabled", i);
+
+ continue;
+ }
+
+ len += scnprintf(buf + len, buf_len - len,
+ "%-40s %s\n",
+ name, enabled ? "enabled" : "-");
+ }
+ spin_unlock_bh(&ar->data_lock);
+
+ ret_cnt = simple_read_from_buffer(user_buf, count, ppos, buf, len);
+
+ mutex_unlock(&ar->conf_mutex);
+
+ kfree(buf);
+ return ret_cnt;
+}
+
+static const struct file_operations fops_wmi_services = {
+ .read = ath10k_read_wmi_services,
+ .open = simple_open,
+ .owner = THIS_MODULE,
+ .llseek = default_llseek,
+};
+
+static void ath10k_fw_stats_pdevs_free(struct list_head *head)
+{
+ struct ath10k_fw_stats_pdev *i, *tmp;
+
+ list_for_each_entry_safe(i, tmp, head, list) {
+ list_del(&i->list);
+ kfree(i);
+ }
+}
+
+static void ath10k_fw_stats_vdevs_free(struct list_head *head)
+{
+ struct ath10k_fw_stats_vdev *i, *tmp;
+
+ list_for_each_entry_safe(i, tmp, head, list) {
+ list_del(&i->list);
+ kfree(i);
+ }
+}
+
+static void ath10k_fw_stats_peers_free(struct list_head *head)
+{
+ struct ath10k_fw_stats_peer *i, *tmp;
+
+ list_for_each_entry_safe(i, tmp, head, list) {
+ list_del(&i->list);
+ kfree(i);
+ }
+}
+
+static void ath10k_fw_extd_stats_peers_free(struct list_head *head)
+{
+ struct ath10k_fw_extd_stats_peer *i, *tmp;
+
+ list_for_each_entry_safe(i, tmp, head, list) {
+ list_del(&i->list);
+ kfree(i);
+ }
+}
+
+static void ath10k_debug_fw_stats_reset(struct ath10k *ar)
+{
+ spin_lock_bh(&ar->data_lock);
+ ar->debug.fw_stats_done = false;
+ ar->debug.fw_stats.extended = false;
+ ath10k_fw_stats_pdevs_free(&ar->debug.fw_stats.pdevs);
+ ath10k_fw_stats_vdevs_free(&ar->debug.fw_stats.vdevs);
+ ath10k_fw_stats_peers_free(&ar->debug.fw_stats.peers);
+ ath10k_fw_extd_stats_peers_free(&ar->debug.fw_stats.peers_extd);
+ spin_unlock_bh(&ar->data_lock);
+}
+
+void ath10k_debug_fw_stats_process(struct ath10k *ar, struct sk_buff *skb)
+{
+ struct ath10k_fw_stats stats = {};
+ bool is_start, is_started, is_end;
+ size_t num_peers;
+ size_t num_vdevs;
+ int ret;
+
+ INIT_LIST_HEAD(&stats.pdevs);
+ INIT_LIST_HEAD(&stats.vdevs);
+ INIT_LIST_HEAD(&stats.peers);
+ INIT_LIST_HEAD(&stats.peers_extd);
+
+ spin_lock_bh(&ar->data_lock);
+ ret = ath10k_wmi_pull_fw_stats(ar, skb, &stats);
+ if (ret) {
+ ath10k_warn(ar, "failed to pull fw stats: %d\n", ret);
+ goto free;
+ }
+
+	/* Stat data may exceed the htc-wmi buffer limit. In such a case the
+	 * firmware splits the stats data and delivers it in a ping-pong
+	 * fashion of request cmd / update event.
+	 *
+	 * However there is no explicit end-of-data. Instead start-of-data is
+	 * used as an implicit one. This works as follows:
+	 *  a) discard stat update events until one with pdev stats is
+	 *     delivered - this skips the session started at the end of (b)
+	 *  b) consume stat update events until another one with pdev stats is
+	 *     delivered, which is treated as end-of-data and is itself
+	 *     discarded
+	 */
+ if (ath10k_peer_stats_enabled(ar))
+ ath10k_sta_update_rx_duration(ar, &stats);
+
+ if (ar->debug.fw_stats_done) {
+ if (!ath10k_peer_stats_enabled(ar))
+ ath10k_warn(ar, "received unsolicited stats update event\n");
+
+ goto free;
+ }
+
+ num_peers = ath10k_wmi_fw_stats_num_peers(&ar->debug.fw_stats.peers);
+ num_vdevs = ath10k_wmi_fw_stats_num_vdevs(&ar->debug.fw_stats.vdevs);
+ is_start = (list_empty(&ar->debug.fw_stats.pdevs) &&
+ !list_empty(&stats.pdevs));
+ is_end = (!list_empty(&ar->debug.fw_stats.pdevs) &&
+ !list_empty(&stats.pdevs));
+
+ if (is_start)
+ list_splice_tail_init(&stats.pdevs, &ar->debug.fw_stats.pdevs);
+
+ if (is_end)
+ ar->debug.fw_stats_done = true;
+
+ is_started = !list_empty(&ar->debug.fw_stats.pdevs);
+
+ if (is_started && !is_end) {
+ if (num_peers >= ATH10K_MAX_NUM_PEER_IDS) {
+			/* Although this is unlikely, impose a sane limit to
+			 * prevent the firmware from DoS-ing the host.
+			 */
+ ath10k_fw_stats_peers_free(&ar->debug.fw_stats.peers);
+ ath10k_fw_extd_stats_peers_free(&ar->debug.fw_stats.peers_extd);
+ ath10k_warn(ar, "dropping fw peer stats\n");
+ goto free;
+ }
+
+ if (num_vdevs >= BITS_PER_LONG) {
+ ath10k_fw_stats_vdevs_free(&ar->debug.fw_stats.vdevs);
+ ath10k_warn(ar, "dropping fw vdev stats\n");
+ goto free;
+ }
+
+ if (!list_empty(&stats.peers))
+ list_splice_tail_init(&stats.peers_extd,
+ &ar->debug.fw_stats.peers_extd);
+
+ list_splice_tail_init(&stats.peers, &ar->debug.fw_stats.peers);
+ list_splice_tail_init(&stats.vdevs, &ar->debug.fw_stats.vdevs);
+ }
+
+ complete(&ar->debug.fw_stats_complete);
+
+free:
+ /* In some cases lists have been spliced and cleared. Free up
+ * resources if that is not the case.
+ */
+ ath10k_fw_stats_pdevs_free(&stats.pdevs);
+ ath10k_fw_stats_vdevs_free(&stats.vdevs);
+ ath10k_fw_stats_peers_free(&stats.peers);
+ ath10k_fw_extd_stats_peers_free(&stats.peers_extd);
+
+ spin_unlock_bh(&ar->data_lock);
+}
+
+static int ath10k_debug_fw_stats_request(struct ath10k *ar)
+{
+ unsigned long timeout, time_left;
+ int ret;
+
+ lockdep_assert_held(&ar->conf_mutex);
+
+	timeout = jiffies + msecs_to_jiffies(1000);
+
+ ath10k_debug_fw_stats_reset(ar);
+
+ for (;;) {
+ if (time_after(jiffies, timeout))
+ return -ETIMEDOUT;
+
+ reinit_completion(&ar->debug.fw_stats_complete);
+
+ ret = ath10k_wmi_request_stats(ar, ar->fw_stats_req_mask);
+ if (ret) {
+ ath10k_warn(ar, "could not request stats (%d)\n", ret);
+ return ret;
+ }
+
+ time_left =
+ wait_for_completion_timeout(&ar->debug.fw_stats_complete,
+ 1 * HZ);
+ if (!time_left)
+ return -ETIMEDOUT;
+
+ spin_lock_bh(&ar->data_lock);
+ if (ar->debug.fw_stats_done) {
+ spin_unlock_bh(&ar->data_lock);
+ break;
+ }
+ spin_unlock_bh(&ar->data_lock);
+ }
+
+ return 0;
+}
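+
+/* There are two levels of waiting above: each WMI request waits up to
+ * one second for a completion, and the outer loop keeps re-requesting
+ * until a pdev-stats event marks end-of-data (fw_stats_done) or the
+ * overall timeout hits.
+ */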
+
+static int ath10k_fw_stats_open(struct inode *inode, struct file *file)
+{
+ struct ath10k *ar = inode->i_private;
+ void *buf = NULL;
+ int ret;
+
+ mutex_lock(&ar->conf_mutex);
+
+ if (ar->state != ATH10K_STATE_ON) {
+ ret = -ENETDOWN;
+ goto err_unlock;
+ }
+
+ buf = vmalloc(ATH10K_FW_STATS_BUF_SIZE);
+ if (!buf) {
+ ret = -ENOMEM;
+ goto err_unlock;
+ }
+
+ ret = ath10k_debug_fw_stats_request(ar);
+ if (ret) {
+ ath10k_warn(ar, "failed to request fw stats: %d\n", ret);
+ goto err_free;
+ }
+
+ ret = ath10k_wmi_fw_stats_fill(ar, &ar->debug.fw_stats, buf);
+ if (ret) {
+ ath10k_warn(ar, "failed to fill fw stats: %d\n", ret);
+ goto err_free;
+ }
+
+ file->private_data = buf;
+
+ mutex_unlock(&ar->conf_mutex);
+ return 0;
+
+err_free:
+ vfree(buf);
+
+err_unlock:
+ mutex_unlock(&ar->conf_mutex);
+ return ret;
+}
+
+static int ath10k_fw_stats_release(struct inode *inode, struct file *file)
+{
+ vfree(file->private_data);
+
+ return 0;
+}
+
+static ssize_t ath10k_fw_stats_read(struct file *file, char __user *user_buf,
+ size_t count, loff_t *ppos)
+{
+ const char *buf = file->private_data;
+ size_t len = strlen(buf);
+
+ return simple_read_from_buffer(user_buf, count, ppos, buf, len);
+}
+
+static const struct file_operations fops_fw_stats = {
+ .open = ath10k_fw_stats_open,
+ .release = ath10k_fw_stats_release,
+ .read = ath10k_fw_stats_read,
+ .owner = THIS_MODULE,
+ .llseek = default_llseek,
+};
+
+static ssize_t ath10k_debug_fw_reset_stats_read(struct file *file,
+ char __user *user_buf,
+ size_t count, loff_t *ppos)
+{
+ struct ath10k *ar = file->private_data;
+ int ret;
+ size_t len = 0, buf_len = 500;
+ char *buf;
+
+ buf = kmalloc(buf_len, GFP_KERNEL);
+ if (!buf)
+ return -ENOMEM;
+
+ spin_lock_bh(&ar->data_lock);
+
+ len += scnprintf(buf + len, buf_len - len,
+ "fw_crash_counter\t\t%d\n", ar->stats.fw_crash_counter);
+ len += scnprintf(buf + len, buf_len - len,
+ "fw_warm_reset_counter\t\t%d\n",
+ ar->stats.fw_warm_reset_counter);
+ len += scnprintf(buf + len, buf_len - len,
+ "fw_cold_reset_counter\t\t%d\n",
+ ar->stats.fw_cold_reset_counter);
+
+ spin_unlock_bh(&ar->data_lock);
+
+ ret = simple_read_from_buffer(user_buf, count, ppos, buf, len);
+
+ kfree(buf);
+
+ return ret;
+}
+
+static const struct file_operations fops_fw_reset_stats = {
+ .open = simple_open,
+ .read = ath10k_debug_fw_reset_stats_read,
+ .owner = THIS_MODULE,
+ .llseek = default_llseek,
+};
+
+/* This is a clean assert crash in firmware. */
+static int ath10k_debug_fw_assert(struct ath10k *ar)
+{
+ struct wmi_vdev_install_key_cmd *cmd;
+ struct sk_buff *skb;
+
+ skb = ath10k_wmi_alloc_skb(ar, sizeof(*cmd) + 16);
+ if (!skb)
+ return -ENOMEM;
+
+ cmd = (struct wmi_vdev_install_key_cmd *)skb->data;
+ memset(cmd, 0, sizeof(*cmd));
+
+ /* big enough number so that firmware asserts */
+ cmd->vdev_id = __cpu_to_le32(0x7ffe);
+
+ return ath10k_wmi_cmd_send(ar, skb,
+ ar->wmi.cmd->vdev_install_key_cmdid);
+}
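+
+/* The vdev id 0x7ffe used above is far beyond anything the firmware is
+ * configured with, so the key-install command trips an assert inside the
+ * firmware instead of being rejected gracefully.
+ */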
+
+static ssize_t ath10k_read_simulate_fw_crash(struct file *file,
+ char __user *user_buf,
+ size_t count, loff_t *ppos)
+{
+ const char buf[] =
+ "To simulate firmware crash write one of the keywords to this file:\n"
+ "`soft` - this will send WMI_FORCE_FW_HANG_ASSERT to firmware if FW supports that command.\n"
+ "`hard` - this will send to firmware command with illegal parameters causing firmware crash.\n"
+ "`assert` - this will send special illegal parameter to firmware to cause assert failure and crash.\n"
+ "`hw-restart` - this will simply queue hw restart without fw/hw actually crashing.\n";
+
+ return simple_read_from_buffer(user_buf, count, ppos, buf, strlen(buf));
+}
+
+/* Simulate a firmware crash:
+ * 'soft': Call a WMI command causing a firmware hang. This hang is
+ * recoverable by a warm firmware reset.
+ * 'hard': Force a firmware crash by setting a vdev parameter on a disallowed
+ * vdev id. This is a hard firmware crash because it is recoverable only by a
+ * cold firmware reset.
+ */
+static ssize_t ath10k_write_simulate_fw_crash(struct file *file,
+ const char __user *user_buf,
+ size_t count, loff_t *ppos)
+{
+ struct ath10k *ar = file->private_data;
+ char buf[32] = {0};
+ ssize_t rc;
+ int ret;
+
+ /* filter partial writes and invalid commands */
+ if (*ppos != 0 || count >= sizeof(buf) || count == 0)
+ return -EINVAL;
+
+ rc = simple_write_to_buffer(buf, sizeof(buf) - 1, ppos, user_buf, count);
+ if (rc < 0)
+ return rc;
+
+ /* drop the possible '\n' from the end */
+ if (buf[*ppos - 1] == '\n')
+ buf[*ppos - 1] = '\0';
+
+ mutex_lock(&ar->conf_mutex);
+
+ if (ar->state != ATH10K_STATE_ON &&
+ ar->state != ATH10K_STATE_RESTARTED) {
+ ret = -ENETDOWN;
+ goto exit;
+ }
+
+ if (!strcmp(buf, "soft")) {
+ ath10k_info(ar, "simulating soft firmware crash\n");
+ ret = ath10k_wmi_force_fw_hang(ar, WMI_FORCE_FW_HANG_ASSERT, 0);
+ } else if (!strcmp(buf, "hard")) {
+ ath10k_info(ar, "simulating hard firmware crash\n");
+ /* 0x7fff is vdev id, and it is always out of range for all
+ * firmware variants in order to force a firmware crash.
+ */
+ ret = ath10k_wmi_vdev_set_param(ar, 0x7fff,
+ ar->wmi.vdev_param->rts_threshold,
+ 0);
+ } else if (!strcmp(buf, "assert")) {
+ ath10k_info(ar, "simulating firmware assert crash\n");
+ ret = ath10k_debug_fw_assert(ar);
+ } else if (!strcmp(buf, "hw-restart")) {
+ ath10k_info(ar, "user requested hw restart\n");
+ queue_work(ar->workqueue, &ar->restart_work);
+ ret = 0;
+ } else {
+ ret = -EINVAL;
+ goto exit;
+ }
+
+ if (ret) {
+ ath10k_warn(ar, "failed to simulate firmware crash: %d\n", ret);
+ goto exit;
+ }
+
+ ret = count;
+
+exit:
+ mutex_unlock(&ar->conf_mutex);
+ return ret;
+}
+
+static const struct file_operations fops_simulate_fw_crash = {
+ .read = ath10k_read_simulate_fw_crash,
+ .write = ath10k_write_simulate_fw_crash,
+ .open = simple_open,
+ .owner = THIS_MODULE,
+ .llseek = default_llseek,
+};
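+
+/* Example usage from a shell (the exact debugfs path depends on the
+ * kernel configuration and the phy index):
+ *   echo soft > /sys/kernel/debug/ieee80211/phy0/ath10k/simulate_fw_crash
+ */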
+
+static ssize_t ath10k_read_chip_id(struct file *file, char __user *user_buf,
+ size_t count, loff_t *ppos)
+{
+ struct ath10k *ar = file->private_data;
+ size_t len;
+ char buf[50];
+
+ len = scnprintf(buf, sizeof(buf), "0x%08x\n", ar->chip_id);
+
+ return simple_read_from_buffer(user_buf, count, ppos, buf, len);
+}
+
+static const struct file_operations fops_chip_id = {
+ .read = ath10k_read_chip_id,
+ .open = simple_open,
+ .owner = THIS_MODULE,
+ .llseek = default_llseek,
+};
+
+static ssize_t ath10k_reg_addr_read(struct file *file,
+ char __user *user_buf,
+ size_t count, loff_t *ppos)
+{
+ struct ath10k *ar = file->private_data;
+ u8 buf[32];
+ size_t len = 0;
+ u32 reg_addr;
+
+ mutex_lock(&ar->conf_mutex);
+ reg_addr = ar->debug.reg_addr;
+ mutex_unlock(&ar->conf_mutex);
+
+ len += scnprintf(buf + len, sizeof(buf) - len, "0x%x\n", reg_addr);
+
+ return simple_read_from_buffer(user_buf, count, ppos, buf, len);
+}
+
+static ssize_t ath10k_reg_addr_write(struct file *file,
+ const char __user *user_buf,
+ size_t count, loff_t *ppos)
+{
+ struct ath10k *ar = file->private_data;
+ u32 reg_addr;
+ int ret;
+
+ ret = kstrtou32_from_user(user_buf, count, 0, &reg_addr);
+ if (ret)
+ return ret;
+
+ if (!IS_ALIGNED(reg_addr, 4))
+ return -EFAULT;
+
+ mutex_lock(&ar->conf_mutex);
+ ar->debug.reg_addr = reg_addr;
+ mutex_unlock(&ar->conf_mutex);
+
+ return count;
+}
+
+static const struct file_operations fops_reg_addr = {
+ .read = ath10k_reg_addr_read,
+ .write = ath10k_reg_addr_write,
+ .open = simple_open,
+ .owner = THIS_MODULE,
+ .llseek = default_llseek,
+};
+
+static ssize_t ath10k_reg_value_read(struct file *file,
+ char __user *user_buf,
+ size_t count, loff_t *ppos)
+{
+ struct ath10k *ar = file->private_data;
+ u8 buf[48];
+ size_t len;
+ u32 reg_addr, reg_val;
+ int ret;
+
+ mutex_lock(&ar->conf_mutex);
+
+ if (ar->state != ATH10K_STATE_ON &&
+ ar->state != ATH10K_STATE_UTF) {
+ ret = -ENETDOWN;
+ goto exit;
+ }
+
+ reg_addr = ar->debug.reg_addr;
+
+ reg_val = ath10k_hif_read32(ar, reg_addr);
+ len = scnprintf(buf, sizeof(buf), "0x%08x:0x%08x\n", reg_addr, reg_val);
+
+ ret = simple_read_from_buffer(user_buf, count, ppos, buf, len);
+
+exit:
+ mutex_unlock(&ar->conf_mutex);
+
+ return ret;
+}
+
+static ssize_t ath10k_reg_value_write(struct file *file,
+ const char __user *user_buf,
+ size_t count, loff_t *ppos)
+{
+ struct ath10k *ar = file->private_data;
+ u32 reg_addr, reg_val;
+ int ret;
+
+ mutex_lock(&ar->conf_mutex);
+
+ if (ar->state != ATH10K_STATE_ON &&
+ ar->state != ATH10K_STATE_UTF) {
+ ret = -ENETDOWN;
+ goto exit;
+ }
+
+ reg_addr = ar->debug.reg_addr;
+
+ ret = kstrtou32_from_user(user_buf, count, 0, &reg_val);
+ if (ret)
+ goto exit;
+
+ ath10k_hif_write32(ar, reg_addr, reg_val);
+
+ ret = count;
+
+exit:
+ mutex_unlock(&ar->conf_mutex);
+
+ return ret;
+}
+
+static const struct file_operations fops_reg_value = {
+ .read = ath10k_reg_value_read,
+ .write = ath10k_reg_value_write,
+ .open = simple_open,
+ .owner = THIS_MODULE,
+ .llseek = default_llseek,
+};
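+
+/* reg_addr and reg_value work as a pair: write a 4 byte aligned register
+ * address to reg_addr first, then read reg_value to get "0x<addr>:0x<val>"
+ * or write a value to reg_value to poke that register. Illustrative shell
+ * usage (the address is just an example):
+ *   echo 0x0003a028 > reg_addr
+ *   cat reg_value
+ */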
+
+static ssize_t ath10k_mem_value_read(struct file *file,
+ char __user *user_buf,
+ size_t count, loff_t *ppos)
+{
+ struct ath10k *ar = file->private_data;
+ u8 *buf;
+ int ret;
+
+ if (*ppos < 0)
+ return -EINVAL;
+
+ if (!count)
+ return 0;
+
+ mutex_lock(&ar->conf_mutex);
+
+ buf = vmalloc(count);
+ if (!buf) {
+ ret = -ENOMEM;
+ goto exit;
+ }
+
+ if (ar->state != ATH10K_STATE_ON &&
+ ar->state != ATH10K_STATE_UTF) {
+ ret = -ENETDOWN;
+ goto exit;
+ }
+
+ ret = ath10k_hif_diag_read(ar, *ppos, buf, count);
+ if (ret) {
+ ath10k_warn(ar, "failed to read address 0x%08x via diagnose window fnrom debugfs: %d\n",
+ (u32)(*ppos), ret);
+ goto exit;
+ }
+
+ ret = copy_to_user(user_buf, buf, count);
+ if (ret) {
+ ret = -EFAULT;
+ goto exit;
+ }
+
+ count -= ret;
+ *ppos += count;
+ ret = count;
+
+exit:
+ vfree(buf);
+ mutex_unlock(&ar->conf_mutex);
+
+ return ret;
+}
+
+static ssize_t ath10k_mem_value_write(struct file *file,
+ const char __user *user_buf,
+ size_t count, loff_t *ppos)
+{
+ struct ath10k *ar = file->private_data;
+ u8 *buf;
+ int ret;
+
+ if (*ppos < 0)
+ return -EINVAL;
+
+ if (!count)
+ return 0;
+
+ mutex_lock(&ar->conf_mutex);
+
+ buf = vmalloc(count);
+ if (!buf) {
+ ret = -ENOMEM;
+ goto exit;
+ }
+
+ if (ar->state != ATH10K_STATE_ON &&
+ ar->state != ATH10K_STATE_UTF) {
+ ret = -ENETDOWN;
+ goto exit;
+ }
+
+ ret = copy_from_user(buf, user_buf, count);
+ if (ret) {
+ ret = -EFAULT;
+ goto exit;
+ }
+
+ ret = ath10k_hif_diag_write(ar, *ppos, buf, count);
+ if (ret) {
+ ath10k_warn(ar, "failed to write address 0x%08x via diagnose window from debugfs: %d\n",
+ (u32)(*ppos), ret);
+ goto exit;
+ }
+
+ *ppos += count;
+ ret = count;
+
+exit:
+ vfree(buf);
+ mutex_unlock(&ar->conf_mutex);
+
+ return ret;
+}
+
+static const struct file_operations fops_mem_value = {
+ .read = ath10k_mem_value_read,
+ .write = ath10k_mem_value_write,
+ .open = simple_open,
+ .owner = THIS_MODULE,
+ .llseek = default_llseek,
+};
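+
+/* For mem_value the file offset doubles as the target address in the
+ * diagnostic window, so the file can be driven with plain tools, e.g.
+ * (illustrative):
+ *   dd if=mem_value bs=4 count=1 skip=$((0x400000 / 4)) | xxd
+ */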
+
+static int ath10k_debug_htt_stats_req(struct ath10k *ar)
+{
+ u64 cookie;
+ int ret;
+
+ lockdep_assert_held(&ar->conf_mutex);
+
+ if (ar->debug.htt_stats_mask == 0)
+ /* htt stats are disabled */
+ return 0;
+
+ if (ar->state != ATH10K_STATE_ON)
+ return 0;
+
+ cookie = get_jiffies_64();
+
+ ret = ath10k_htt_h2t_stats_req(&ar->htt, ar->debug.htt_stats_mask,
+ cookie);
+ if (ret) {
+ ath10k_warn(ar, "failed to send htt stats request: %d\n", ret);
+ return ret;
+ }
+
+ queue_delayed_work(ar->workqueue, &ar->debug.htt_stats_dwork,
+ msecs_to_jiffies(ATH10K_DEBUG_HTT_STATS_INTERVAL));
+
+ return 0;
+}
+
+static void ath10k_debug_htt_stats_dwork(struct work_struct *work)
+{
+ struct ath10k *ar = container_of(work, struct ath10k,
+ debug.htt_stats_dwork.work);
+
+ mutex_lock(&ar->conf_mutex);
+
+ ath10k_debug_htt_stats_req(ar);
+
+ mutex_unlock(&ar->conf_mutex);
+}
+
+static ssize_t ath10k_read_htt_stats_mask(struct file *file,
+ char __user *user_buf,
+ size_t count, loff_t *ppos)
+{
+ struct ath10k *ar = file->private_data;
+ char buf[32];
+ size_t len;
+
+ len = scnprintf(buf, sizeof(buf), "%lu\n", ar->debug.htt_stats_mask);
+
+ return simple_read_from_buffer(user_buf, count, ppos, buf, len);
+}
+
+static ssize_t ath10k_write_htt_stats_mask(struct file *file,
+ const char __user *user_buf,
+ size_t count, loff_t *ppos)
+{
+ struct ath10k *ar = file->private_data;
+ unsigned long mask;
+ int ret;
+
+ ret = kstrtoul_from_user(user_buf, count, 0, &mask);
+ if (ret)
+ return ret;
+
+ /* max 8 bit masks (for now) */
+ if (mask > 0xff)
+ return -E2BIG;
+
+ mutex_lock(&ar->conf_mutex);
+
+ ar->debug.htt_stats_mask = mask;
+
+ ret = ath10k_debug_htt_stats_req(ar);
+ if (ret)
+ goto out;
+
+ ret = count;
+
+out:
+ mutex_unlock(&ar->conf_mutex);
+
+ return ret;
+}
+
+static const struct file_operations fops_htt_stats_mask = {
+ .read = ath10k_read_htt_stats_mask,
+ .write = ath10k_write_htt_stats_mask,
+ .open = simple_open,
+ .owner = THIS_MODULE,
+ .llseek = default_llseek,
+};
+
+static ssize_t ath10k_read_htt_max_amsdu_ampdu(struct file *file,
+ char __user *user_buf,
+ size_t count, loff_t *ppos)
+{
+ struct ath10k *ar = file->private_data;
+ char buf[64];
+ u8 amsdu, ampdu;
+ size_t len;
+
+ mutex_lock(&ar->conf_mutex);
+
+ amsdu = ar->htt.max_num_amsdu;
+ ampdu = ar->htt.max_num_ampdu;
+ mutex_unlock(&ar->conf_mutex);
+
+ len = scnprintf(buf, sizeof(buf), "%u %u\n", amsdu, ampdu);
+
+ return simple_read_from_buffer(user_buf, count, ppos, buf, len);
+}
+
+static ssize_t ath10k_write_htt_max_amsdu_ampdu(struct file *file,
+ const char __user *user_buf,
+ size_t count, loff_t *ppos)
+{
+ struct ath10k *ar = file->private_data;
+ int res;
+ char buf[64] = {0};
+ unsigned int amsdu, ampdu;
+
+ res = simple_write_to_buffer(buf, sizeof(buf) - 1, ppos,
+ user_buf, count);
+ if (res <= 0)
+ return res;
+
+ res = sscanf(buf, "%u %u", &amsdu, &ampdu);
+
+ if (res != 2)
+ return -EINVAL;
+
+ mutex_lock(&ar->conf_mutex);
+
+ res = ath10k_htt_h2t_aggr_cfg_msg(&ar->htt, ampdu, amsdu);
+ if (res)
+ goto out;
+
+ res = count;
+ ar->htt.max_num_amsdu = amsdu;
+ ar->htt.max_num_ampdu = ampdu;
+
+out:
+ mutex_unlock(&ar->conf_mutex);
+ return res;
+}
+
+static const struct file_operations fops_htt_max_amsdu_ampdu = {
+ .read = ath10k_read_htt_max_amsdu_ampdu,
+ .write = ath10k_write_htt_max_amsdu_ampdu,
+ .open = simple_open,
+ .owner = THIS_MODULE,
+ .llseek = default_llseek,
+};
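+
+/* The file takes two decimal numbers, "<max_amsdu> <max_ampdu>", e.g.
+ * (illustrative values):
+ *   echo "3 64" > htt_max_amsdu_ampdu
+ */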
+
+static ssize_t ath10k_read_fw_dbglog(struct file *file,
+ char __user *user_buf,
+ size_t count, loff_t *ppos)
+{
+ struct ath10k *ar = file->private_data;
+ size_t len;
+ char buf[96];
+
+ len = scnprintf(buf, sizeof(buf), "0x%16llx %u\n",
+ ar->debug.fw_dbglog_mask, ar->debug.fw_dbglog_level);
+
+ return simple_read_from_buffer(user_buf, count, ppos, buf, len);
+}
+
+static ssize_t ath10k_write_fw_dbglog(struct file *file,
+ const char __user *user_buf,
+ size_t count, loff_t *ppos)
+{
+ struct ath10k *ar = file->private_data;
+ int ret;
+ char buf[96] = {0};
+ unsigned int log_level;
+ u64 mask;
+
+ ret = simple_write_to_buffer(buf, sizeof(buf) - 1, ppos,
+ user_buf, count);
+ if (ret <= 0)
+ return ret;
+
+ ret = sscanf(buf, "%llx %u", &mask, &log_level);
+
+ if (!ret)
+ return -EINVAL;
+
+ if (ret == 1)
+ /* default if user did not specify */
+ log_level = ATH10K_DBGLOG_LEVEL_WARN;
+
+ mutex_lock(&ar->conf_mutex);
+
+ ar->debug.fw_dbglog_mask = mask;
+ ar->debug.fw_dbglog_level = log_level;
+
+ if (ar->state == ATH10K_STATE_ON) {
+ ret = ath10k_wmi_dbglog_cfg(ar, ar->debug.fw_dbglog_mask,
+ ar->debug.fw_dbglog_level);
+ if (ret) {
+ ath10k_warn(ar, "dbglog cfg failed from debugfs: %d\n",
+ ret);
+ goto exit;
+ }
+ }
+
+ ret = count;
+
+exit:
+ mutex_unlock(&ar->conf_mutex);
+
+ return ret;
+}
+
+/* TODO: Would be nice to always support ethtool stats; that would need
+ * the stats storage moved out of ath10k_debug, or the ath10k_debug
+ * struct to always be available.
+ */
+
+/* This generally corresponds to the debugfs fw_stats file */
+static const char ath10k_gstrings_stats[][ETH_GSTRING_LEN] = {
+ "tx_pkts_nic",
+ "tx_bytes_nic",
+ "rx_pkts_nic",
+ "rx_bytes_nic",
+ "d_noise_floor",
+ "d_cycle_count",
+ "d_phy_error",
+ "d_rts_bad",
+ "d_rts_good",
+	"d_tx_power", /* in 0.5 dBm steps, I think */
+ "d_rx_crc_err", /* fcs_bad */
+ "d_no_beacon",
+ "d_tx_mpdus_queued",
+ "d_tx_msdu_queued",
+ "d_tx_msdu_dropped",
+ "d_local_enqued",
+ "d_local_freed",
+ "d_tx_ppdu_hw_queued",
+ "d_tx_ppdu_reaped",
+ "d_tx_fifo_underrun",
+ "d_tx_ppdu_abort",
+ "d_tx_mpdu_requed",
+ "d_tx_excessive_retries",
+ "d_tx_hw_rate",
+ "d_tx_dropped_sw_retries",
+ "d_tx_illegal_rate",
+ "d_tx_continuous_xretries",
+ "d_tx_timeout",
+ "d_tx_mpdu_txop_limit",
+ "d_pdev_resets",
+ "d_rx_mid_ppdu_route_change",
+ "d_rx_status",
+ "d_rx_extra_frags_ring0",
+ "d_rx_extra_frags_ring1",
+ "d_rx_extra_frags_ring2",
+ "d_rx_extra_frags_ring3",
+ "d_rx_msdu_htt",
+ "d_rx_mpdu_htt",
+ "d_rx_msdu_stack",
+ "d_rx_mpdu_stack",
+ "d_rx_phy_err",
+ "d_rx_phy_err_drops",
+ "d_rx_mpdu_errors", /* FCS, MIC, ENC */
+ "d_fw_crash_count",
+ "d_fw_warm_reset_count",
+ "d_fw_cold_reset_count",
+};
+
+#define ATH10K_SSTATS_LEN ARRAY_SIZE(ath10k_gstrings_stats)
+
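+/* The order of ath10k_gstrings_stats[] above must match the order in
+ * which ath10k_debug_get_et_stats() fills data[]; the WARN_ON() at the
+ * end of that function catches a mismatch in the count.
+ */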
+void ath10k_debug_get_et_strings(struct ieee80211_hw *hw,
+ struct ieee80211_vif *vif,
+ u32 sset, u8 *data)
+{
+ if (sset == ETH_SS_STATS)
+ memcpy(data, *ath10k_gstrings_stats,
+ sizeof(ath10k_gstrings_stats));
+}
+
+int ath10k_debug_get_et_sset_count(struct ieee80211_hw *hw,
+ struct ieee80211_vif *vif, int sset)
+{
+ if (sset == ETH_SS_STATS)
+ return ATH10K_SSTATS_LEN;
+
+ return 0;
+}
+
+void ath10k_debug_get_et_stats(struct ieee80211_hw *hw,
+ struct ieee80211_vif *vif,
+ struct ethtool_stats *stats, u64 *data)
+{
+ struct ath10k *ar = hw->priv;
+ static const struct ath10k_fw_stats_pdev zero_stats = {};
+ const struct ath10k_fw_stats_pdev *pdev_stats;
+ int i = 0, ret;
+
+ mutex_lock(&ar->conf_mutex);
+
+ if (ar->state == ATH10K_STATE_ON) {
+ ret = ath10k_debug_fw_stats_request(ar);
+ if (ret) {
+ /* just print a warning and try to use older results */
+ ath10k_warn(ar,
+ "failed to get fw stats for ethtool: %d\n",
+ ret);
+ }
+ }
+
+ pdev_stats = list_first_entry_or_null(&ar->debug.fw_stats.pdevs,
+ struct ath10k_fw_stats_pdev,
+ list);
+ if (!pdev_stats) {
+ /* no results available so just return zeroes */
+ pdev_stats = &zero_stats;
+ }
+
+ spin_lock_bh(&ar->data_lock);
+
+ data[i++] = pdev_stats->hw_reaped; /* ppdu reaped */
+ data[i++] = 0; /* tx bytes */
+ data[i++] = pdev_stats->htt_mpdus;
+ data[i++] = 0; /* rx bytes */
+ data[i++] = pdev_stats->ch_noise_floor;
+ data[i++] = pdev_stats->cycle_count;
+ data[i++] = pdev_stats->phy_err_count;
+ data[i++] = pdev_stats->rts_bad;
+ data[i++] = pdev_stats->rts_good;
+ data[i++] = pdev_stats->chan_tx_power;
+ data[i++] = pdev_stats->fcs_bad;
+ data[i++] = pdev_stats->no_beacons;
+ data[i++] = pdev_stats->mpdu_enqued;
+ data[i++] = pdev_stats->msdu_enqued;
+ data[i++] = pdev_stats->wmm_drop;
+ data[i++] = pdev_stats->local_enqued;
+ data[i++] = pdev_stats->local_freed;
+ data[i++] = pdev_stats->hw_queued;
+ data[i++] = pdev_stats->hw_reaped;
+ data[i++] = pdev_stats->underrun;
+ data[i++] = pdev_stats->tx_abort;
+ data[i++] = pdev_stats->mpdus_requed;
+ data[i++] = pdev_stats->tx_ko;
+ data[i++] = pdev_stats->data_rc;
+ data[i++] = pdev_stats->sw_retry_failure;
+ data[i++] = pdev_stats->illgl_rate_phy_err;
+ data[i++] = pdev_stats->pdev_cont_xretry;
+ data[i++] = pdev_stats->pdev_tx_timeout;
+ data[i++] = pdev_stats->txop_ovf;
+ data[i++] = pdev_stats->pdev_resets;
+ data[i++] = pdev_stats->mid_ppdu_route_change;
+ data[i++] = pdev_stats->status_rcvd;
+ data[i++] = pdev_stats->r0_frags;
+ data[i++] = pdev_stats->r1_frags;
+ data[i++] = pdev_stats->r2_frags;
+ data[i++] = pdev_stats->r3_frags;
+ data[i++] = pdev_stats->htt_msdus;
+ data[i++] = pdev_stats->htt_mpdus;
+ data[i++] = pdev_stats->loc_msdus;
+ data[i++] = pdev_stats->loc_mpdus;
+ data[i++] = pdev_stats->phy_errs;
+ data[i++] = pdev_stats->phy_err_drop;
+ data[i++] = pdev_stats->mpdu_errs;
+ data[i++] = ar->stats.fw_crash_counter;
+ data[i++] = ar->stats.fw_warm_reset_counter;
+ data[i++] = ar->stats.fw_cold_reset_counter;
+
+ spin_unlock_bh(&ar->data_lock);
+
+ mutex_unlock(&ar->conf_mutex);
+
+ WARN_ON(i != ATH10K_SSTATS_LEN);
+}
+
+static const struct file_operations fops_fw_dbglog = {
+ .read = ath10k_read_fw_dbglog,
+ .write = ath10k_write_fw_dbglog,
+ .open = simple_open,
+ .owner = THIS_MODULE,
+ .llseek = default_llseek,
+};
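+
+/* fw_dbglog takes a hex module mask and an optional decimal level; if
+ * the level is omitted, ATH10K_DBGLOG_LEVEL_WARN is used.  The level
+ * value below is illustrative only:
+ *
+ *   echo "0xffffffff 2" > fw_dbglog
+ */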
+
+static int ath10k_debug_cal_data_fetch(struct ath10k *ar)
+{
+ u32 hi_addr;
+ __le32 addr;
+ int ret;
+
+ lockdep_assert_held(&ar->conf_mutex);
+
+ if (WARN_ON(ar->hw_params.cal_data_len > ATH10K_DEBUG_CAL_DATA_LEN))
+ return -EINVAL;
+
+ hi_addr = host_interest_item_address(HI_ITEM(hi_board_data));
+
+ ret = ath10k_hif_diag_read(ar, hi_addr, &addr, sizeof(addr));
+ if (ret) {
+ ath10k_warn(ar, "failed to read hi_board_data address: %d\n",
+ ret);
+ return ret;
+ }
+
+ ret = ath10k_hif_diag_read(ar, le32_to_cpu(addr), ar->debug.cal_data,
+ ar->hw_params.cal_data_len);
+ if (ret) {
+ ath10k_warn(ar, "failed to read calibration data: %d\n", ret);
+ return ret;
+ }
+
+ return 0;
+}
+
+static int ath10k_debug_cal_data_open(struct inode *inode, struct file *file)
+{
+ struct ath10k *ar = inode->i_private;
+
+ mutex_lock(&ar->conf_mutex);
+
+ if (ar->state == ATH10K_STATE_ON ||
+ ar->state == ATH10K_STATE_UTF) {
+ ath10k_debug_cal_data_fetch(ar);
+ }
+
+ file->private_data = ar;
+ mutex_unlock(&ar->conf_mutex);
+
+ return 0;
+}
+
+static ssize_t ath10k_debug_cal_data_read(struct file *file,
+ char __user *user_buf,
+ size_t count, loff_t *ppos)
+{
+ struct ath10k *ar = file->private_data;
+
+ mutex_lock(&ar->conf_mutex);
+
+ count = simple_read_from_buffer(user_buf, count, ppos,
+ ar->debug.cal_data,
+ ar->hw_params.cal_data_len);
+
+ mutex_unlock(&ar->conf_mutex);
+
+ return count;
+}
+
+static ssize_t ath10k_write_ani_enable(struct file *file,
+ const char __user *user_buf,
+ size_t count, loff_t *ppos)
+{
+ struct ath10k *ar = file->private_data;
+ int ret;
+ u8 enable;
+
+ if (kstrtou8_from_user(user_buf, count, 0, &enable))
+ return -EINVAL;
+
+ mutex_lock(&ar->conf_mutex);
+
+ if (ar->ani_enabled == enable) {
+ ret = count;
+ goto exit;
+ }
+
+ ret = ath10k_wmi_pdev_set_param(ar, ar->wmi.pdev_param->ani_enable,
+ enable);
+ if (ret) {
+ ath10k_warn(ar, "ani_enable failed from debugfs: %d\n", ret);
+ goto exit;
+ }
+ ar->ani_enabled = enable;
+
+ ret = count;
+
+exit:
+ mutex_unlock(&ar->conf_mutex);
+
+ return ret;
+}
+
+static ssize_t ath10k_read_ani_enable(struct file *file, char __user *user_buf,
+ size_t count, loff_t *ppos)
+{
+ struct ath10k *ar = file->private_data;
+ size_t len;
+ char buf[32];
+
+ len = scnprintf(buf, sizeof(buf), "%d\n", ar->ani_enabled);
+
+ return simple_read_from_buffer(user_buf, count, ppos, buf, len);
+}
+
+static const struct file_operations fops_ani_enable = {
+ .read = ath10k_read_ani_enable,
+ .write = ath10k_write_ani_enable,
+ .open = simple_open,
+ .owner = THIS_MODULE,
+ .llseek = default_llseek,
+};
+
+static const struct file_operations fops_cal_data = {
+ .open = ath10k_debug_cal_data_open,
+ .read = ath10k_debug_cal_data_read,
+ .owner = THIS_MODULE,
+ .llseek = default_llseek,
+};
+
+static ssize_t ath10k_read_nf_cal_period(struct file *file,
+ char __user *user_buf,
+ size_t count, loff_t *ppos)
+{
+ struct ath10k *ar = file->private_data;
+ size_t len;
+ char buf[32];
+
+ len = scnprintf(buf, sizeof(buf), "%d\n", ar->debug.nf_cal_period);
+
+ return simple_read_from_buffer(user_buf, count, ppos, buf, len);
+}
+
+static ssize_t ath10k_write_nf_cal_period(struct file *file,
+ const char __user *user_buf,
+ size_t count, loff_t *ppos)
+{
+ struct ath10k *ar = file->private_data;
+ unsigned long period;
+ int ret;
+
+ ret = kstrtoul_from_user(user_buf, count, 0, &period);
+ if (ret)
+ return ret;
+
+ if (period > WMI_PDEV_PARAM_CAL_PERIOD_MAX)
+ return -EINVAL;
+
+ /* there's no way to switch back to the firmware default */
+ if (period == 0)
+ return -EINVAL;
+
+ mutex_lock(&ar->conf_mutex);
+
+ ar->debug.nf_cal_period = period;
+
+ if (ar->state != ATH10K_STATE_ON) {
+ /* firmware is not running, nothing else to do */
+ ret = count;
+ goto exit;
+ }
+
+ ret = ath10k_wmi_pdev_set_param(ar, ar->wmi.pdev_param->cal_period,
+ ar->debug.nf_cal_period);
+ if (ret) {
+ ath10k_warn(ar, "cal period cfg failed from debugfs: %d\n",
+ ret);
+ goto exit;
+ }
+
+ ret = count;
+
+exit:
+ mutex_unlock(&ar->conf_mutex);
+
+ return ret;
+}
+
+static const struct file_operations fops_nf_cal_period = {
+ .read = ath10k_read_nf_cal_period,
+ .write = ath10k_write_nf_cal_period,
+ .open = simple_open,
+ .owner = THIS_MODULE,
+ .llseek = default_llseek,
+};
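+
+/* The value is passed straight through to the firmware's cal_period
+ * pdev parameter; 0 is rejected because there is no way back to the
+ * firmware default.  Example (illustrative value):
+ *
+ *   echo 300 > nf_cal_period
+ */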
+
+#define ATH10K_TPC_CONFIG_BUF_SIZE (1024 * 1024)
+
+static int ath10k_debug_tpc_stats_request(struct ath10k *ar)
+{
+ int ret;
+ unsigned long time_left;
+
+ lockdep_assert_held(&ar->conf_mutex);
+
+ reinit_completion(&ar->debug.tpc_complete);
+
+ ret = ath10k_wmi_pdev_get_tpc_config(ar, WMI_TPC_CONFIG_PARAM);
+ if (ret) {
+ ath10k_warn(ar, "failed to request tpc config: %d\n", ret);
+ return ret;
+ }
+
+ time_left = wait_for_completion_timeout(&ar->debug.tpc_complete,
+ 1 * HZ);
+ if (time_left == 0)
+ return -ETIMEDOUT;
+
+ return 0;
+}
+
+void ath10k_debug_tpc_stats_process(struct ath10k *ar,
+ struct ath10k_tpc_stats *tpc_stats)
+{
+ spin_lock_bh(&ar->data_lock);
+
+ kfree(ar->debug.tpc_stats);
+ ar->debug.tpc_stats = tpc_stats;
+ complete(&ar->debug.tpc_complete);
+
+ spin_unlock_bh(&ar->data_lock);
+}
+
+void
+ath10k_debug_tpc_stats_final_process(struct ath10k *ar,
+ struct ath10k_tpc_stats_final *tpc_stats)
+{
+ spin_lock_bh(&ar->data_lock);
+
+ kfree(ar->debug.tpc_stats_final);
+ ar->debug.tpc_stats_final = tpc_stats;
+ complete(&ar->debug.tpc_complete);
+
+ spin_unlock_bh(&ar->data_lock);
+}
+
+static void ath10k_tpc_stats_print(struct ath10k_tpc_stats *tpc_stats,
+ unsigned int j, char *buf, size_t *len)
+{
+ int i;
+ size_t buf_len;
+ static const char table_str[][5] = { "CDD",
+ "STBC",
+ "TXBF" };
+ static const char pream_str[][6] = { "CCK",
+ "OFDM",
+ "HT20",
+ "HT40",
+ "VHT20",
+ "VHT40",
+ "VHT80",
+ "HTCUP" };
+
+ buf_len = ATH10K_TPC_CONFIG_BUF_SIZE;
+ *len += scnprintf(buf + *len, buf_len - *len,
+ "********************************\n");
+ *len += scnprintf(buf + *len, buf_len - *len,
+ "******************* %s POWER TABLE ****************\n",
+ table_str[j]);
+ *len += scnprintf(buf + *len, buf_len - *len,
+ "********************************\n");
+ *len += scnprintf(buf + *len, buf_len - *len,
+ "No. Preamble Rate_code ");
+
+ for (i = 0; i < tpc_stats->num_tx_chain; i++)
+ *len += scnprintf(buf + *len, buf_len - *len,
+ "tpc_value%d ", i);
+
+ *len += scnprintf(buf + *len, buf_len - *len, "\n");
+
+ for (i = 0; i < tpc_stats->rate_max; i++) {
+ *len += scnprintf(buf + *len, buf_len - *len,
+ "%8d %s 0x%2x %s\n", i,
+ pream_str[tpc_stats->tpc_table[j].pream_idx[i]],
+ tpc_stats->tpc_table[j].rate_code[i],
+ tpc_stats->tpc_table[j].tpc_value[i]);
+ }
+
+ *len += scnprintf(buf + *len, buf_len - *len,
+ "***********************************\n");
+}
+
+static void ath10k_tpc_stats_fill(struct ath10k *ar,
+ struct ath10k_tpc_stats *tpc_stats,
+ char *buf)
+{
+ int j;
+ size_t len, buf_len;
+
+ len = 0;
+ buf_len = ATH10K_TPC_CONFIG_BUF_SIZE;
+
+ spin_lock_bh(&ar->data_lock);
+
+ if (!tpc_stats) {
+ ath10k_warn(ar, "failed to get tpc stats\n");
+ goto unlock;
+ }
+
+ len += scnprintf(buf + len, buf_len - len, "\n");
+ len += scnprintf(buf + len, buf_len - len,
+ "*************************************\n");
+ len += scnprintf(buf + len, buf_len - len,
+ "TPC config for channel %4d mode %d\n",
+ tpc_stats->chan_freq,
+ tpc_stats->phy_mode);
+ len += scnprintf(buf + len, buf_len - len,
+ "*************************************\n");
+ len += scnprintf(buf + len, buf_len - len,
+ "CTL = 0x%2x Reg. Domain = %2d\n",
+ tpc_stats->ctl,
+ tpc_stats->reg_domain);
+ len += scnprintf(buf + len, buf_len - len,
+ "Antenna Gain = %2d Reg. Max Antenna Gain = %2d\n",
+ tpc_stats->twice_antenna_gain,
+ tpc_stats->twice_antenna_reduction);
+ len += scnprintf(buf + len, buf_len - len,
+ "Power Limit = %2d Reg. Max Power = %2d\n",
+ tpc_stats->power_limit,
+ tpc_stats->twice_max_rd_power / 2);
+ len += scnprintf(buf + len, buf_len - len,
+ "Num tx chains = %2d Num supported rates = %2d\n",
+ tpc_stats->num_tx_chain,
+ tpc_stats->rate_max);
+
+ for (j = 0; j < WMI_TPC_FLAG; j++) {
+ switch (j) {
+ case WMI_TPC_TABLE_TYPE_CDD:
+ if (tpc_stats->flag[j] == ATH10K_TPC_TABLE_TYPE_FLAG) {
+ len += scnprintf(buf + len, buf_len - len,
+ "CDD not supported\n");
+ break;
+ }
+
+ ath10k_tpc_stats_print(tpc_stats, j, buf, &len);
+ break;
+ case WMI_TPC_TABLE_TYPE_STBC:
+ if (tpc_stats->flag[j] == ATH10K_TPC_TABLE_TYPE_FLAG) {
+ len += scnprintf(buf + len, buf_len - len,
+ "STBC not supported\n");
+ break;
+ }
+
+ ath10k_tpc_stats_print(tpc_stats, j, buf, &len);
+ break;
+ case WMI_TPC_TABLE_TYPE_TXBF:
+ if (tpc_stats->flag[j] == ATH10K_TPC_TABLE_TYPE_FLAG) {
+ len += scnprintf(buf + len, buf_len - len,
+ "TXBF not supported\n***************************\n");
+ break;
+ }
+
+ ath10k_tpc_stats_print(tpc_stats, j, buf, &len);
+ break;
+ default:
+ len += scnprintf(buf + len, buf_len - len,
+ "Invalid Type\n");
+ break;
+ }
+ }
+
+unlock:
+ spin_unlock_bh(&ar->data_lock);
+
+ if (len >= buf_len)
+ buf[len - 1] = 0;
+ else
+ buf[len] = 0;
+}
+
+static int ath10k_tpc_stats_open(struct inode *inode, struct file *file)
+{
+ struct ath10k *ar = inode->i_private;
+ void *buf = NULL;
+ int ret;
+
+ mutex_lock(&ar->conf_mutex);
+
+ if (ar->state != ATH10K_STATE_ON) {
+ ret = -ENETDOWN;
+ goto err_unlock;
+ }
+
+ buf = vmalloc(ATH10K_TPC_CONFIG_BUF_SIZE);
+ if (!buf) {
+ ret = -ENOMEM;
+ goto err_unlock;
+ }
+
+ ret = ath10k_debug_tpc_stats_request(ar);
+ if (ret) {
+ ath10k_warn(ar, "failed to request tpc config stats: %d\n",
+ ret);
+ goto err_free;
+ }
+
+ ath10k_tpc_stats_fill(ar, ar->debug.tpc_stats, buf);
+ file->private_data = buf;
+
+ mutex_unlock(&ar->conf_mutex);
+ return 0;
+
+err_free:
+ vfree(buf);
+
+err_unlock:
+ mutex_unlock(&ar->conf_mutex);
+ return ret;
+}
+
+static int ath10k_tpc_stats_release(struct inode *inode, struct file *file)
+{
+ vfree(file->private_data);
+
+ return 0;
+}
+
+static ssize_t ath10k_tpc_stats_read(struct file *file, char __user *user_buf,
+ size_t count, loff_t *ppos)
+{
+ const char *buf = file->private_data;
+ size_t len = strlen(buf);
+
+ return simple_read_from_buffer(user_buf, count, ppos, buf, len);
+}
+
+static const struct file_operations fops_tpc_stats = {
+ .open = ath10k_tpc_stats_open,
+ .release = ath10k_tpc_stats_release,
+ .read = ath10k_tpc_stats_read,
+ .owner = THIS_MODULE,
+ .llseek = default_llseek,
+};
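+
+/* Opening tpc_stats sends a WMI TPC config request and waits up to one
+ * second for the reply, so a plain read is enough:
+ *
+ *   cat tpc_stats
+ */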
+
+int ath10k_debug_start(struct ath10k *ar)
+{
+ int ret;
+
+ lockdep_assert_held(&ar->conf_mutex);
+
+ ret = ath10k_debug_htt_stats_req(ar);
+ if (ret)
+ /* continue normally anyway, this isn't serious */
+ ath10k_warn(ar, "failed to start htt stats workqueue: %d\n",
+ ret);
+
+ if (ar->debug.fw_dbglog_mask) {
+ ret = ath10k_wmi_dbglog_cfg(ar, ar->debug.fw_dbglog_mask,
+ ATH10K_DBGLOG_LEVEL_WARN);
+ if (ret)
+ /* not serious */
+			ath10k_warn(ar, "failed to enable dbglog during start: %d\n",
+ ret);
+ }
+
+ if (ar->pktlog_filter) {
+ ret = ath10k_wmi_pdev_pktlog_enable(ar,
+ ar->pktlog_filter);
+ if (ret)
+ /* not serious */
+ ath10k_warn(ar,
+ "failed to enable pktlog filter %x: %d\n",
+ ar->pktlog_filter, ret);
+ } else {
+ ret = ath10k_wmi_pdev_pktlog_disable(ar);
+ if (ret)
+ /* not serious */
+ ath10k_warn(ar, "failed to disable pktlog: %d\n", ret);
+ }
+
+ if (ar->debug.nf_cal_period &&
+ !test_bit(ATH10K_FW_FEATURE_NON_BMI,
+ ar->normal_mode_fw.fw_file.fw_features)) {
+ ret = ath10k_wmi_pdev_set_param(ar,
+ ar->wmi.pdev_param->cal_period,
+ ar->debug.nf_cal_period);
+ if (ret)
+ /* not serious */
+ ath10k_warn(ar, "cal period cfg failed from debug start: %d\n",
+ ret);
+ }
+
+ return ret;
+}
+
+void ath10k_debug_stop(struct ath10k *ar)
+{
+ lockdep_assert_held(&ar->conf_mutex);
+
+ if (!test_bit(ATH10K_FW_FEATURE_NON_BMI,
+ ar->normal_mode_fw.fw_file.fw_features))
+ ath10k_debug_cal_data_fetch(ar);
+
+ /* Must not use _sync to avoid deadlock, we do that in
+ * ath10k_debug_destroy(). The check for htt_stats_mask is to avoid
+ * warning from del_timer().
+ */
+ if (ar->debug.htt_stats_mask != 0)
+ cancel_delayed_work(&ar->debug.htt_stats_dwork);
+
+ ath10k_wmi_pdev_pktlog_disable(ar);
+}
+
+static ssize_t ath10k_write_simulate_radar(struct file *file,
+ const char __user *user_buf,
+ size_t count, loff_t *ppos)
+{
+ struct ath10k *ar = file->private_data;
+ struct ath10k_vif *arvif;
+
+	/* Just check the first vif alone, as all the vifs share the same
+	 * channel; if the channel is disabled, they all share the same
+	 * 'is_started' state.
+	 */
+ arvif = list_first_entry(&ar->arvifs, typeof(*arvif), list);
+ if (!arvif->is_started)
+ return -EINVAL;
+
+ ieee80211_radar_detected(ar->hw);
+
+ return count;
+}
+
+static const struct file_operations fops_simulate_radar = {
+ .write = ath10k_write_simulate_radar,
+ .open = simple_open,
+ .owner = THIS_MODULE,
+ .llseek = default_llseek,
+};
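+
+/* Any write triggers a simulated radar detection; the written value is
+ * ignored:
+ *
+ *   echo 1 > dfs_simulate_radar
+ */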
+
+#define ATH10K_DFS_STAT(s, p) (\
+ len += scnprintf(buf + len, size - len, "%-28s : %10u\n", s, \
+ ar->debug.dfs_stats.p))
+
+#define ATH10K_DFS_POOL_STAT(s, p) (\
+ len += scnprintf(buf + len, size - len, "%-28s : %10u\n", s, \
+ ar->debug.dfs_pool_stats.p))
+
+static ssize_t ath10k_read_dfs_stats(struct file *file, char __user *user_buf,
+ size_t count, loff_t *ppos)
+{
+ int retval = 0, len = 0;
+ const int size = 8000;
+ struct ath10k *ar = file->private_data;
+ char *buf;
+
+ buf = kzalloc(size, GFP_KERNEL);
+ if (buf == NULL)
+ return -ENOMEM;
+
+ if (!ar->dfs_detector) {
+ len += scnprintf(buf + len, size - len, "DFS not enabled\n");
+ goto exit;
+ }
+
+ ar->debug.dfs_pool_stats =
+ ar->dfs_detector->get_stats(ar->dfs_detector);
+
+ len += scnprintf(buf + len, size - len, "Pulse detector statistics:\n");
+
+ ATH10K_DFS_STAT("reported phy errors", phy_errors);
+ ATH10K_DFS_STAT("pulse events reported", pulses_total);
+ ATH10K_DFS_STAT("DFS pulses detected", pulses_detected);
+ ATH10K_DFS_STAT("DFS pulses discarded", pulses_discarded);
+ ATH10K_DFS_STAT("Radars detected", radar_detected);
+
+ len += scnprintf(buf + len, size - len, "Global Pool statistics:\n");
+ ATH10K_DFS_POOL_STAT("Pool references", pool_reference);
+ ATH10K_DFS_POOL_STAT("Pulses allocated", pulse_allocated);
+ ATH10K_DFS_POOL_STAT("Pulses alloc error", pulse_alloc_error);
+ ATH10K_DFS_POOL_STAT("Pulses in use", pulse_used);
+ ATH10K_DFS_POOL_STAT("Seqs. allocated", pseq_allocated);
+ ATH10K_DFS_POOL_STAT("Seqs. alloc error", pseq_alloc_error);
+ ATH10K_DFS_POOL_STAT("Seqs. in use", pseq_used);
+
+exit:
+ if (len > size)
+ len = size;
+
+ retval = simple_read_from_buffer(user_buf, count, ppos, buf, len);
+ kfree(buf);
+
+ return retval;
+}
+
+static const struct file_operations fops_dfs_stats = {
+ .read = ath10k_read_dfs_stats,
+ .open = simple_open,
+ .owner = THIS_MODULE,
+ .llseek = default_llseek,
+};
+
+static ssize_t ath10k_write_pktlog_filter(struct file *file,
+ const char __user *ubuf,
+ size_t count, loff_t *ppos)
+{
+ struct ath10k *ar = file->private_data;
+ u32 filter;
+ int ret;
+
+ if (kstrtouint_from_user(ubuf, count, 0, &filter))
+ return -EINVAL;
+
+ mutex_lock(&ar->conf_mutex);
+
+ if (ar->state != ATH10K_STATE_ON) {
+ ar->pktlog_filter = filter;
+ ret = count;
+ goto out;
+ }
+
+ if (filter == ar->pktlog_filter) {
+ ret = count;
+ goto out;
+ }
+
+ if (filter) {
+ ret = ath10k_wmi_pdev_pktlog_enable(ar, filter);
+ if (ret) {
+ ath10k_warn(ar, "failed to enable pktlog filter %x: %d\n",
+ ar->pktlog_filter, ret);
+ goto out;
+ }
+ } else {
+ ret = ath10k_wmi_pdev_pktlog_disable(ar);
+ if (ret) {
+ ath10k_warn(ar, "failed to disable pktlog: %d\n", ret);
+ goto out;
+ }
+ }
+
+ ar->pktlog_filter = filter;
+ ret = count;
+
+out:
+ mutex_unlock(&ar->conf_mutex);
+ return ret;
+}
+
+static ssize_t ath10k_read_pktlog_filter(struct file *file, char __user *ubuf,
+ size_t count, loff_t *ppos)
+{
+ char buf[32];
+ struct ath10k *ar = file->private_data;
+ int len = 0;
+
+ mutex_lock(&ar->conf_mutex);
+ len = scnprintf(buf, sizeof(buf) - len, "%08x\n",
+ ar->pktlog_filter);
+ mutex_unlock(&ar->conf_mutex);
+
+ return simple_read_from_buffer(ubuf, count, ppos, buf, len);
+}
+
+static const struct file_operations fops_pktlog_filter = {
+ .read = ath10k_read_pktlog_filter,
+ .write = ath10k_write_pktlog_filter,
+ .open = simple_open
+};
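+
+/* The filter is a bitmask of ath10k_pktlog_filter values (see debug.h),
+ * e.g. to log rx and tx packets:
+ *
+ *   echo 0x3 > pktlog_filter
+ */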
+
+static ssize_t ath10k_write_quiet_period(struct file *file,
+ const char __user *ubuf,
+ size_t count, loff_t *ppos)
+{
+ struct ath10k *ar = file->private_data;
+ u32 period;
+
+ if (kstrtouint_from_user(ubuf, count, 0, &period))
+ return -EINVAL;
+
+ if (period < ATH10K_QUIET_PERIOD_MIN) {
+		ath10k_warn(ar, "Quiet period %u cannot be less than 25ms\n",
+ period);
+ return -EINVAL;
+ }
+ mutex_lock(&ar->conf_mutex);
+ ar->thermal.quiet_period = period;
+ ath10k_thermal_set_throttling(ar);
+ mutex_unlock(&ar->conf_mutex);
+
+ return count;
+}
+
+static ssize_t ath10k_read_quiet_period(struct file *file, char __user *ubuf,
+ size_t count, loff_t *ppos)
+{
+ char buf[32];
+ struct ath10k *ar = file->private_data;
+ int len = 0;
+
+ mutex_lock(&ar->conf_mutex);
+ len = scnprintf(buf, sizeof(buf) - len, "%d\n",
+ ar->thermal.quiet_period);
+ mutex_unlock(&ar->conf_mutex);
+
+ return simple_read_from_buffer(ubuf, count, ppos, buf, len);
+}
+
+static const struct file_operations fops_quiet_period = {
+ .read = ath10k_read_quiet_period,
+ .write = ath10k_write_quiet_period,
+ .open = simple_open
+};
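+
+/* quiet_period is in milliseconds, bounded below by
+ * ATH10K_QUIET_PERIOD_MIN (25 ms).  Example (illustrative value):
+ *
+ *   echo 100 > quiet_period
+ */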
+
+static ssize_t ath10k_write_btcoex(struct file *file,
+ const char __user *ubuf,
+ size_t count, loff_t *ppos)
+{
+ struct ath10k *ar = file->private_data;
+ char buf[32];
+ size_t buf_size;
+ int ret;
+ bool val;
+ u32 pdev_param;
+
+ buf_size = min(count, (sizeof(buf) - 1));
+ if (copy_from_user(buf, ubuf, buf_size))
+ return -EFAULT;
+
+ buf[buf_size] = '\0';
+
+ if (strtobool(buf, &val) != 0)
+ return -EINVAL;
+
+ mutex_lock(&ar->conf_mutex);
+
+ if (ar->state != ATH10K_STATE_ON &&
+ ar->state != ATH10K_STATE_RESTARTED) {
+ ret = -ENETDOWN;
+ goto exit;
+ }
+
+ if (!(test_bit(ATH10K_FLAG_BTCOEX, &ar->dev_flags) ^ val)) {
+ ret = count;
+ goto exit;
+ }
+
+ pdev_param = ar->wmi.pdev_param->enable_btcoex;
+ if (test_bit(ATH10K_FW_FEATURE_BTCOEX_PARAM,
+ ar->running_fw->fw_file.fw_features)) {
+ ret = ath10k_wmi_pdev_set_param(ar, pdev_param, val);
+ if (ret) {
+ ath10k_warn(ar, "failed to enable btcoex: %d\n", ret);
+ ret = count;
+ goto exit;
+ }
+ } else {
+		ath10k_info(ar, "restarting firmware due to btcoex change\n");
+ queue_work(ar->workqueue, &ar->restart_work);
+ }
+
+ if (val)
+ set_bit(ATH10K_FLAG_BTCOEX, &ar->dev_flags);
+ else
+ clear_bit(ATH10K_FLAG_BTCOEX, &ar->dev_flags);
+
+ ret = count;
+
+exit:
+ mutex_unlock(&ar->conf_mutex);
+
+ return ret;
+}
+
+static ssize_t ath10k_read_btcoex(struct file *file, char __user *ubuf,
+ size_t count, loff_t *ppos)
+{
+ char buf[32];
+ struct ath10k *ar = file->private_data;
+ int len = 0;
+
+ mutex_lock(&ar->conf_mutex);
+ len = scnprintf(buf, sizeof(buf) - len, "%d\n",
+ test_bit(ATH10K_FLAG_BTCOEX, &ar->dev_flags));
+ mutex_unlock(&ar->conf_mutex);
+
+ return simple_read_from_buffer(ubuf, count, ppos, buf, len);
+}
+
+static const struct file_operations fops_btcoex = {
+ .read = ath10k_read_btcoex,
+ .write = ath10k_write_btcoex,
+ .open = simple_open
+};
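+
+/* Toggling btcoex either sets the enable_btcoex pdev parameter (when
+ * the firmware advertises ATH10K_FW_FEATURE_BTCOEX_PARAM) or restarts
+ * the firmware to apply the change:
+ *
+ *   echo 1 > btcoex
+ */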
+
+static ssize_t ath10k_write_peer_stats(struct file *file,
+ const char __user *ubuf,
+ size_t count, loff_t *ppos)
+{
+ struct ath10k *ar = file->private_data;
+ char buf[32];
+ size_t buf_size;
+ int ret;
+ bool val;
+
+ buf_size = min(count, (sizeof(buf) - 1));
+ if (copy_from_user(buf, ubuf, buf_size))
+ return -EFAULT;
+
+ buf[buf_size] = '\0';
+
+ if (strtobool(buf, &val) != 0)
+ return -EINVAL;
+
+ mutex_lock(&ar->conf_mutex);
+
+ if (ar->state != ATH10K_STATE_ON &&
+ ar->state != ATH10K_STATE_RESTARTED) {
+ ret = -ENETDOWN;
+ goto exit;
+ }
+
+ if (!(test_bit(ATH10K_FLAG_PEER_STATS, &ar->dev_flags) ^ val)) {
+ ret = count;
+ goto exit;
+ }
+
+ if (val)
+ set_bit(ATH10K_FLAG_PEER_STATS, &ar->dev_flags);
+ else
+ clear_bit(ATH10K_FLAG_PEER_STATS, &ar->dev_flags);
+
+	ath10k_info(ar, "restarting firmware due to peer stats change\n");
+
+ queue_work(ar->workqueue, &ar->restart_work);
+ ret = count;
+
+exit:
+ mutex_unlock(&ar->conf_mutex);
+ return ret;
+}
+
+static ssize_t ath10k_read_peer_stats(struct file *file, char __user *ubuf,
+ size_t count, loff_t *ppos)
+{
+ char buf[32];
+ struct ath10k *ar = file->private_data;
+ int len = 0;
+
+ mutex_lock(&ar->conf_mutex);
+ len = scnprintf(buf, sizeof(buf) - len, "%d\n",
+ test_bit(ATH10K_FLAG_PEER_STATS, &ar->dev_flags));
+ mutex_unlock(&ar->conf_mutex);
+
+ return simple_read_from_buffer(ubuf, count, ppos, buf, len);
+}
+
+static const struct file_operations fops_peer_stats = {
+ .read = ath10k_read_peer_stats,
+ .write = ath10k_write_peer_stats,
+ .open = simple_open
+};
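+
+/* Changing peer_stats always restarts the firmware; the write handler
+ * queues the restart itself:
+ *
+ *   echo 1 > peer_stats
+ */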
+
+static ssize_t ath10k_debug_fw_checksums_read(struct file *file,
+ char __user *user_buf,
+ size_t count, loff_t *ppos)
+{
+ struct ath10k *ar = file->private_data;
+ size_t len = 0, buf_len = 4096;
+ ssize_t ret_cnt;
+ char *buf;
+
+ buf = kzalloc(buf_len, GFP_KERNEL);
+ if (!buf)
+ return -ENOMEM;
+
+ mutex_lock(&ar->conf_mutex);
+
+ len += scnprintf(buf + len, buf_len - len,
+ "firmware-N.bin\t\t%08x\n",
+ crc32_le(0, ar->normal_mode_fw.fw_file.firmware->data,
+ ar->normal_mode_fw.fw_file.firmware->size));
+ len += scnprintf(buf + len, buf_len - len,
+ "athwlan\t\t\t%08x\n",
+ crc32_le(0, ar->normal_mode_fw.fw_file.firmware_data,
+ ar->normal_mode_fw.fw_file.firmware_len));
+ len += scnprintf(buf + len, buf_len - len,
+ "otp\t\t\t%08x\n",
+ crc32_le(0, ar->normal_mode_fw.fw_file.otp_data,
+ ar->normal_mode_fw.fw_file.otp_len));
+ len += scnprintf(buf + len, buf_len - len,
+ "codeswap\t\t%08x\n",
+ crc32_le(0, ar->normal_mode_fw.fw_file.codeswap_data,
+ ar->normal_mode_fw.fw_file.codeswap_len));
+ len += scnprintf(buf + len, buf_len - len,
+ "board-N.bin\t\t%08x\n",
+ crc32_le(0, ar->normal_mode_fw.board->data,
+ ar->normal_mode_fw.board->size));
+ len += scnprintf(buf + len, buf_len - len,
+ "board\t\t\t%08x\n",
+ crc32_le(0, ar->normal_mode_fw.board_data,
+ ar->normal_mode_fw.board_len));
+
+ ret_cnt = simple_read_from_buffer(user_buf, count, ppos, buf, len);
+
+ mutex_unlock(&ar->conf_mutex);
+
+ kfree(buf);
+ return ret_cnt;
+}
+
+static const struct file_operations fops_fw_checksums = {
+ .read = ath10k_debug_fw_checksums_read,
+ .open = simple_open,
+ .owner = THIS_MODULE,
+ .llseek = default_llseek,
+};
+
+static ssize_t ath10k_sta_tid_stats_mask_read(struct file *file,
+ char __user *user_buf,
+ size_t count, loff_t *ppos)
+{
+ struct ath10k *ar = file->private_data;
+ char buf[32];
+ size_t len;
+
+ len = scnprintf(buf, sizeof(buf), "0x%08x\n", ar->sta_tid_stats_mask);
+ return simple_read_from_buffer(user_buf, count, ppos, buf, len);
+}
+
+static ssize_t ath10k_sta_tid_stats_mask_write(struct file *file,
+ const char __user *user_buf,
+ size_t count, loff_t *ppos)
+{
+ struct ath10k *ar = file->private_data;
+ char buf[32];
+ ssize_t len;
+ u32 mask;
+
+ len = min(count, sizeof(buf) - 1);
+ if (copy_from_user(buf, user_buf, len))
+ return -EFAULT;
+
+ buf[len] = '\0';
+	if (kstrtou32(buf, 0, &mask))
+ return -EINVAL;
+
+ ar->sta_tid_stats_mask = mask;
+
+ return len;
+}
+
+static const struct file_operations fops_sta_tid_stats_mask = {
+ .read = ath10k_sta_tid_stats_mask_read,
+ .write = ath10k_sta_tid_stats_mask_write,
+ .open = simple_open,
+ .owner = THIS_MODULE,
+ .llseek = default_llseek,
+};
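+
+/* Bit n of sta_tid_stats_mask enables the per-station rx stats for
+ * TID n, e.g. for all eight data TIDs:
+ *
+ *   echo 0xff > sta_tid_stats_mask
+ */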
+
+static int ath10k_debug_tpc_stats_final_request(struct ath10k *ar)
+{
+ int ret;
+ unsigned long time_left;
+
+ lockdep_assert_held(&ar->conf_mutex);
+
+ reinit_completion(&ar->debug.tpc_complete);
+
+ ret = ath10k_wmi_pdev_get_tpc_table_cmdid(ar, WMI_TPC_CONFIG_PARAM);
+ if (ret) {
+ ath10k_warn(ar, "failed to request tpc table cmdid: %d\n", ret);
+ return ret;
+ }
+
+ time_left = wait_for_completion_timeout(&ar->debug.tpc_complete,
+ 1 * HZ);
+ if (time_left == 0)
+ return -ETIMEDOUT;
+
+ return 0;
+}
+
+static int ath10k_tpc_stats_final_open(struct inode *inode, struct file *file)
+{
+ struct ath10k *ar = inode->i_private;
+ void *buf;
+ int ret;
+
+ mutex_lock(&ar->conf_mutex);
+
+ if (ar->state != ATH10K_STATE_ON) {
+ ret = -ENETDOWN;
+ goto err_unlock;
+ }
+
+ buf = vmalloc(ATH10K_TPC_CONFIG_BUF_SIZE);
+ if (!buf) {
+ ret = -ENOMEM;
+ goto err_unlock;
+ }
+
+ ret = ath10k_debug_tpc_stats_final_request(ar);
+ if (ret) {
+ ath10k_warn(ar, "failed to request tpc stats final: %d\n",
+ ret);
+ goto err_free;
+ }
+
+ ath10k_tpc_stats_fill(ar, ar->debug.tpc_stats, buf);
+ file->private_data = buf;
+
+ mutex_unlock(&ar->conf_mutex);
+ return 0;
+
+err_free:
+ vfree(buf);
+
+err_unlock:
+ mutex_unlock(&ar->conf_mutex);
+ return ret;
+}
+
+static int ath10k_tpc_stats_final_release(struct inode *inode,
+ struct file *file)
+{
+ vfree(file->private_data);
+
+ return 0;
+}
+
+static ssize_t ath10k_tpc_stats_final_read(struct file *file,
+ char __user *user_buf,
+ size_t count, loff_t *ppos)
+{
+ const char *buf = file->private_data;
+ unsigned int len = strlen(buf);
+
+ return simple_read_from_buffer(user_buf, count, ppos, buf, len);
+}
+
+static const struct file_operations fops_tpc_stats_final = {
+ .open = ath10k_tpc_stats_final_open,
+ .release = ath10k_tpc_stats_final_release,
+ .read = ath10k_tpc_stats_final_read,
+ .owner = THIS_MODULE,
+ .llseek = default_llseek,
+};
+
+static ssize_t ath10k_write_warm_hw_reset(struct file *file,
+ const char __user *user_buf,
+ size_t count, loff_t *ppos)
+{
+ struct ath10k *ar = file->private_data;
+ int ret;
+ bool val;
+
+ if (kstrtobool_from_user(user_buf, count, &val))
+ return -EFAULT;
+
+ if (!val)
+ return -EINVAL;
+
+ mutex_lock(&ar->conf_mutex);
+
+ if (ar->state != ATH10K_STATE_ON) {
+ ret = -ENETDOWN;
+ goto exit;
+ }
+
+ if (!(test_bit(WMI_SERVICE_RESET_CHIP, ar->wmi.svc_map)))
+ ath10k_warn(ar, "wmi service for reset chip is not available\n");
+
+ ret = ath10k_wmi_pdev_set_param(ar, ar->wmi.pdev_param->pdev_reset,
+ WMI_RST_MODE_WARM_RESET);
+
+ if (ret) {
+ ath10k_warn(ar, "failed to enable warm hw reset: %d\n", ret);
+ goto exit;
+ }
+
+ ret = count;
+
+exit:
+ mutex_unlock(&ar->conf_mutex);
+ return ret;
+}
+
+static const struct file_operations fops_warm_hw_reset = {
+ .write = ath10k_write_warm_hw_reset,
+ .open = simple_open,
+ .owner = THIS_MODULE,
+ .llseek = default_llseek,
+};
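+
+/* Writing any true value requests a warm reset through the pdev_reset
+ * parameter; the write is rejected while the interface is down:
+ *
+ *   echo 1 > warm_hw_reset
+ */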
+
+int ath10k_debug_create(struct ath10k *ar)
+{
+ ar->debug.cal_data = vzalloc(ATH10K_DEBUG_CAL_DATA_LEN);
+ if (!ar->debug.cal_data)
+ return -ENOMEM;
+
+ INIT_LIST_HEAD(&ar->debug.fw_stats.pdevs);
+ INIT_LIST_HEAD(&ar->debug.fw_stats.vdevs);
+ INIT_LIST_HEAD(&ar->debug.fw_stats.peers);
+ INIT_LIST_HEAD(&ar->debug.fw_stats.peers_extd);
+
+ return 0;
+}
+
+void ath10k_debug_destroy(struct ath10k *ar)
+{
+ vfree(ar->debug.cal_data);
+ ar->debug.cal_data = NULL;
+
+ ath10k_debug_fw_stats_reset(ar);
+
+ kfree(ar->debug.tpc_stats);
+ kfree(ar->debug.tpc_stats_final);
+}
+
+int ath10k_debug_register(struct ath10k *ar)
+{
+ ar->debug.debugfs_phy = debugfs_create_dir("ath10k",
+ ar->hw->wiphy->debugfsdir);
+ if (IS_ERR_OR_NULL(ar->debug.debugfs_phy)) {
+ if (IS_ERR(ar->debug.debugfs_phy))
+ return PTR_ERR(ar->debug.debugfs_phy);
+
+ return -ENOMEM;
+ }
+
+ INIT_DELAYED_WORK(&ar->debug.htt_stats_dwork,
+ ath10k_debug_htt_stats_dwork);
+
+ init_completion(&ar->debug.tpc_complete);
+ init_completion(&ar->debug.fw_stats_complete);
+
+ debugfs_create_file("fw_stats", 0400, ar->debug.debugfs_phy, ar,
+ &fops_fw_stats);
+
+ debugfs_create_file("fw_reset_stats", 0400, ar->debug.debugfs_phy, ar,
+ &fops_fw_reset_stats);
+
+ debugfs_create_file("wmi_services", 0400, ar->debug.debugfs_phy, ar,
+ &fops_wmi_services);
+
+ debugfs_create_file("simulate_fw_crash", 0600, ar->debug.debugfs_phy, ar,
+ &fops_simulate_fw_crash);
+
+ debugfs_create_file("reg_addr", 0600, ar->debug.debugfs_phy, ar,
+ &fops_reg_addr);
+
+ debugfs_create_file("reg_value", 0600, ar->debug.debugfs_phy, ar,
+ &fops_reg_value);
+
+ debugfs_create_file("mem_value", 0600, ar->debug.debugfs_phy, ar,
+ &fops_mem_value);
+
+ debugfs_create_file("chip_id", 0400, ar->debug.debugfs_phy, ar,
+ &fops_chip_id);
+
+ debugfs_create_file("htt_stats_mask", 0600, ar->debug.debugfs_phy, ar,
+ &fops_htt_stats_mask);
+
+ debugfs_create_file("htt_max_amsdu_ampdu", 0600, ar->debug.debugfs_phy, ar,
+ &fops_htt_max_amsdu_ampdu);
+
+ debugfs_create_file("fw_dbglog", 0600, ar->debug.debugfs_phy, ar,
+ &fops_fw_dbglog);
+
+ if (!test_bit(ATH10K_FW_FEATURE_NON_BMI,
+ ar->normal_mode_fw.fw_file.fw_features)) {
+ debugfs_create_file("cal_data", 0400, ar->debug.debugfs_phy, ar,
+ &fops_cal_data);
+
+ debugfs_create_file("nf_cal_period", 0600, ar->debug.debugfs_phy, ar,
+ &fops_nf_cal_period);
+ }
+
+ debugfs_create_file("ani_enable", 0600, ar->debug.debugfs_phy, ar,
+ &fops_ani_enable);
+
+ if (IS_ENABLED(CONFIG_ATH10K_DFS_CERTIFIED)) {
+ debugfs_create_file("dfs_simulate_radar", 0200, ar->debug.debugfs_phy,
+ ar, &fops_simulate_radar);
+
+ debugfs_create_bool("dfs_block_radar_events", 0200,
+ ar->debug.debugfs_phy,
+ &ar->dfs_block_radar_events);
+
+ debugfs_create_file("dfs_stats", 0400, ar->debug.debugfs_phy, ar,
+ &fops_dfs_stats);
+ }
+
+ debugfs_create_file("pktlog_filter", 0644, ar->debug.debugfs_phy, ar,
+ &fops_pktlog_filter);
+
+ debugfs_create_file("quiet_period", 0644, ar->debug.debugfs_phy, ar,
+ &fops_quiet_period);
+
+ debugfs_create_file("tpc_stats", 0400, ar->debug.debugfs_phy, ar,
+ &fops_tpc_stats);
+
+ if (test_bit(WMI_SERVICE_COEX_GPIO, ar->wmi.svc_map))
+ debugfs_create_file("btcoex", 0644, ar->debug.debugfs_phy, ar,
+ &fops_btcoex);
+
+ if (test_bit(WMI_SERVICE_PEER_STATS, ar->wmi.svc_map))
+ debugfs_create_file("peer_stats", 0644, ar->debug.debugfs_phy, ar,
+ &fops_peer_stats);
+
+ debugfs_create_file("fw_checksums", 0400, ar->debug.debugfs_phy, ar,
+ &fops_fw_checksums);
+
+ if (IS_ENABLED(CONFIG_MAC80211_DEBUGFS))
+ debugfs_create_file("sta_tid_stats_mask", 0600,
+ ar->debug.debugfs_phy,
+ ar, &fops_sta_tid_stats_mask);
+
+ if (test_bit(WMI_SERVICE_TPC_STATS_FINAL, ar->wmi.svc_map))
+ debugfs_create_file("tpc_stats_final", 0400,
+ ar->debug.debugfs_phy, ar,
+ &fops_tpc_stats_final);
+
+ debugfs_create_file("warm_hw_reset", 0600, ar->debug.debugfs_phy, ar,
+ &fops_warm_hw_reset);
+
+ return 0;
+}
+
+void ath10k_debug_unregister(struct ath10k *ar)
+{
+ cancel_delayed_work_sync(&ar->debug.htt_stats_dwork);
+}
+
+#endif /* CONFIG_ATH10K_DEBUGFS */
+
+#ifdef CONFIG_ATH10K_DEBUG
+void ath10k_dbg(struct ath10k *ar, enum ath10k_debug_mask mask,
+ const char *fmt, ...)
+{
+ struct va_format vaf;
+ va_list args;
+
+ va_start(args, fmt);
+
+ vaf.fmt = fmt;
+ vaf.va = &args;
+
+ if (ath10k_debug_mask & mask)
+ dev_printk(KERN_DEBUG, ar->dev, "%pV", &vaf);
+
+ trace_ath10k_log_dbg(ar, mask, &vaf);
+
+ va_end(args);
+}
+EXPORT_SYMBOL(ath10k_dbg);
+
+void ath10k_dbg_dump(struct ath10k *ar,
+ enum ath10k_debug_mask mask,
+ const char *msg, const char *prefix,
+ const void *buf, size_t len)
+{
+ char linebuf[256];
+ size_t linebuflen;
+ const void *ptr;
+
+ if (ath10k_debug_mask & mask) {
+ if (msg)
+ ath10k_dbg(ar, mask, "%s\n", msg);
+
+ for (ptr = buf; (ptr - buf) < len; ptr += 16) {
+ linebuflen = 0;
+ linebuflen += scnprintf(linebuf + linebuflen,
+ sizeof(linebuf) - linebuflen,
+ "%s%08x: ",
+ (prefix ? prefix : ""),
+ (unsigned int)(ptr - buf));
+ hex_dump_to_buffer(ptr, len - (ptr - buf), 16, 1,
+ linebuf + linebuflen,
+ sizeof(linebuf) - linebuflen, true);
+ dev_printk(KERN_DEBUG, ar->dev, "%s\n", linebuf);
+ }
+ }
+
+ /* tracing code doesn't like null strings :/ */
+ trace_ath10k_log_dbg_dump(ar, msg ? msg : "", prefix ? prefix : "",
+ buf, len);
+}
+EXPORT_SYMBOL(ath10k_dbg_dump);
+
+#endif /* CONFIG_ATH10K_DEBUG */
diff --git a/drivers/net/wireless/ath/ath10k/debug.h b/drivers/net/wireless/ath/ath10k/debug.h
new file mode 100644
index 000000000..0afca5c10
--- /dev/null
+++ b/drivers/net/wireless/ath/ath10k/debug.h
@@ -0,0 +1,267 @@
+/*
+ * Copyright (c) 2005-2011 Atheros Communications Inc.
+ * Copyright (c) 2011-2017 Qualcomm Atheros, Inc.
+ * Copyright (c) 2018, The Linux Foundation. All rights reserved.
+ *
+ * Permission to use, copy, modify, and/or distribute this software for any
+ * purpose with or without fee is hereby granted, provided that the above
+ * copyright notice and this permission notice appear in all copies.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
+ * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
+ * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+ * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+ * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
+ * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+ */
+
+#ifndef _DEBUG_H_
+#define _DEBUG_H_
+
+#include <linux/types.h>
+#include "trace.h"
+
+enum ath10k_debug_mask {
+ ATH10K_DBG_PCI = 0x00000001,
+ ATH10K_DBG_WMI = 0x00000002,
+ ATH10K_DBG_HTC = 0x00000004,
+ ATH10K_DBG_HTT = 0x00000008,
+ ATH10K_DBG_MAC = 0x00000010,
+ ATH10K_DBG_BOOT = 0x00000020,
+ ATH10K_DBG_PCI_DUMP = 0x00000040,
+ ATH10K_DBG_HTT_DUMP = 0x00000080,
+ ATH10K_DBG_MGMT = 0x00000100,
+ ATH10K_DBG_DATA = 0x00000200,
+ ATH10K_DBG_BMI = 0x00000400,
+ ATH10K_DBG_REGULATORY = 0x00000800,
+ ATH10K_DBG_TESTMODE = 0x00001000,
+ ATH10K_DBG_WMI_PRINT = 0x00002000,
+ ATH10K_DBG_PCI_PS = 0x00004000,
+ ATH10K_DBG_AHB = 0x00008000,
+ ATH10K_DBG_SDIO = 0x00010000,
+ ATH10K_DBG_SDIO_DUMP = 0x00020000,
+ ATH10K_DBG_USB = 0x00040000,
+ ATH10K_DBG_USB_BULK = 0x00080000,
+ ATH10K_DBG_SNOC = 0x00100000,
+ ATH10K_DBG_ANY = 0xffffffff,
+};
+
+enum ath10k_pktlog_filter {
+ ATH10K_PKTLOG_RX = 0x000000001,
+ ATH10K_PKTLOG_TX = 0x000000002,
+ ATH10K_PKTLOG_RCFIND = 0x000000004,
+ ATH10K_PKTLOG_RCUPDATE = 0x000000008,
+ ATH10K_PKTLOG_DBG_PRINT = 0x000000010,
+ ATH10K_PKTLOG_PEER_STATS = 0x000000040,
+ ATH10K_PKTLOG_ANY = 0x00000005f,
+};
+
+enum ath10k_dbg_aggr_mode {
+ ATH10K_DBG_AGGR_MODE_AUTO,
+ ATH10K_DBG_AGGR_MODE_MANUAL,
+ ATH10K_DBG_AGGR_MODE_MAX,
+};
+
+/* Types of packet log events */
+enum ath_pktlog_type {
+ ATH_PKTLOG_TYPE_TX_CTRL = 1,
+ ATH_PKTLOG_TYPE_TX_STAT,
+};
+
+struct ath10k_pktlog_hdr {
+ __le16 flags;
+ __le16 missed_cnt;
+	__le16 log_type; /* Type of log information following this header */
+ __le16 size; /* Size of variable length log information in bytes */
+ __le32 timestamp;
+ u8 payload[0];
+} __packed;
+
+/* FIXME: How to calculate the buffer size sanely? */
+#define ATH10K_FW_STATS_BUF_SIZE (1024 * 1024)
+
+extern unsigned int ath10k_debug_mask;
+
+__printf(2, 3) void ath10k_info(struct ath10k *ar, const char *fmt, ...);
+__printf(2, 3) void ath10k_err(struct ath10k *ar, const char *fmt, ...);
+__printf(2, 3) void ath10k_warn(struct ath10k *ar, const char *fmt, ...);
+
+void ath10k_debug_print_hwfw_info(struct ath10k *ar);
+void ath10k_debug_print_board_info(struct ath10k *ar);
+void ath10k_debug_print_boot_info(struct ath10k *ar);
+void ath10k_print_driver_info(struct ath10k *ar);
+
+#ifdef CONFIG_ATH10K_DEBUGFS
+int ath10k_debug_start(struct ath10k *ar);
+void ath10k_debug_stop(struct ath10k *ar);
+int ath10k_debug_create(struct ath10k *ar);
+void ath10k_debug_destroy(struct ath10k *ar);
+int ath10k_debug_register(struct ath10k *ar);
+void ath10k_debug_unregister(struct ath10k *ar);
+void ath10k_debug_fw_stats_process(struct ath10k *ar, struct sk_buff *skb);
+void ath10k_debug_tpc_stats_process(struct ath10k *ar,
+ struct ath10k_tpc_stats *tpc_stats);
+void
+ath10k_debug_tpc_stats_final_process(struct ath10k *ar,
+ struct ath10k_tpc_stats_final *tpc_stats);
+void ath10k_debug_dbglog_add(struct ath10k *ar, u8 *buffer, int len);
+
+#define ATH10K_DFS_STAT_INC(ar, c) (ar->debug.dfs_stats.c++)
+
+void ath10k_debug_get_et_strings(struct ieee80211_hw *hw,
+ struct ieee80211_vif *vif,
+ u32 sset, u8 *data);
+int ath10k_debug_get_et_sset_count(struct ieee80211_hw *hw,
+ struct ieee80211_vif *vif, int sset);
+void ath10k_debug_get_et_stats(struct ieee80211_hw *hw,
+ struct ieee80211_vif *vif,
+ struct ethtool_stats *stats, u64 *data);
+
+static inline u64 ath10k_debug_get_fw_dbglog_mask(struct ath10k *ar)
+{
+ return ar->debug.fw_dbglog_mask;
+}
+
+static inline u32 ath10k_debug_get_fw_dbglog_level(struct ath10k *ar)
+{
+ return ar->debug.fw_dbglog_level;
+}
+
+#else
+
+static inline int ath10k_debug_start(struct ath10k *ar)
+{
+ return 0;
+}
+
+static inline void ath10k_debug_stop(struct ath10k *ar)
+{
+}
+
+static inline int ath10k_debug_create(struct ath10k *ar)
+{
+ return 0;
+}
+
+static inline void ath10k_debug_destroy(struct ath10k *ar)
+{
+}
+
+static inline int ath10k_debug_register(struct ath10k *ar)
+{
+ return 0;
+}
+
+static inline void ath10k_debug_unregister(struct ath10k *ar)
+{
+}
+
+static inline void ath10k_debug_fw_stats_process(struct ath10k *ar,
+ struct sk_buff *skb)
+{
+}
+
+static inline void ath10k_debug_tpc_stats_process(struct ath10k *ar,
+ struct ath10k_tpc_stats *tpc_stats)
+{
+ kfree(tpc_stats);
+}
+
+static inline void
+ath10k_debug_tpc_stats_final_process(struct ath10k *ar,
+ struct ath10k_tpc_stats_final *tpc_stats)
+{
+ kfree(tpc_stats);
+}
+
+static inline void ath10k_debug_dbglog_add(struct ath10k *ar, u8 *buffer,
+ int len)
+{
+}
+
+static inline u64 ath10k_debug_get_fw_dbglog_mask(struct ath10k *ar)
+{
+ return 0;
+}
+
+static inline u32 ath10k_debug_get_fw_dbglog_level(struct ath10k *ar)
+{
+ return 0;
+}
+
+#define ATH10K_DFS_STAT_INC(ar, c) do { } while (0)
+
+#define ath10k_debug_get_et_strings NULL
+#define ath10k_debug_get_et_sset_count NULL
+#define ath10k_debug_get_et_stats NULL
+
+#endif /* CONFIG_ATH10K_DEBUGFS */
+#ifdef CONFIG_MAC80211_DEBUGFS
+void ath10k_sta_add_debugfs(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
+ struct ieee80211_sta *sta, struct dentry *dir);
+void ath10k_sta_update_rx_duration(struct ath10k *ar,
+ struct ath10k_fw_stats *stats);
+void ath10k_sta_update_rx_tid_stats(struct ath10k *ar, u8 *first_hdr,
+ unsigned long int num_msdus,
+ enum ath10k_pkt_rx_err err,
+ unsigned long int unchain_cnt,
+ unsigned long int drop_cnt,
+ unsigned long int drop_cnt_filter,
+ unsigned long int queued_msdus);
+void ath10k_sta_update_rx_tid_stats_ampdu(struct ath10k *ar,
+ u16 peer_id, u8 tid,
+ struct htt_rx_indication_mpdu_range *ranges,
+ int num_ranges);
+#else
+static inline
+void ath10k_sta_update_rx_duration(struct ath10k *ar,
+ struct ath10k_fw_stats *stats)
+{
+}
+
+static inline
+void ath10k_sta_update_rx_tid_stats(struct ath10k *ar, u8 *first_hdr,
+ unsigned long int num_msdus,
+ enum ath10k_pkt_rx_err err,
+ unsigned long int unchain_cnt,
+ unsigned long int drop_cnt,
+ unsigned long int drop_cnt_filter,
+ unsigned long int queued_msdus)
+{
+}
+
+static inline
+void ath10k_sta_update_rx_tid_stats_ampdu(struct ath10k *ar,
+ u16 peer_id, u8 tid,
+ struct htt_rx_indication_mpdu_range *ranges,
+ int num_ranges)
+{
+}
+#endif /* CONFIG_MAC80211_DEBUGFS */
+
+#ifdef CONFIG_ATH10K_DEBUG
+__printf(3, 4) void ath10k_dbg(struct ath10k *ar,
+ enum ath10k_debug_mask mask,
+ const char *fmt, ...);
+void ath10k_dbg_dump(struct ath10k *ar,
+ enum ath10k_debug_mask mask,
+ const char *msg, const char *prefix,
+ const void *buf, size_t len);
+#else /* CONFIG_ATH10K_DEBUG */
+
+static inline int ath10k_dbg(struct ath10k *ar,
+ enum ath10k_debug_mask dbg_mask,
+ const char *fmt, ...)
+{
+ return 0;
+}
+
+static inline void ath10k_dbg_dump(struct ath10k *ar,
+ enum ath10k_debug_mask mask,
+ const char *msg, const char *prefix,
+ const void *buf, size_t len)
+{
+}
+#endif /* CONFIG_ATH10K_DEBUG */
+#endif /* _DEBUG_H_ */
diff --git a/drivers/net/wireless/ath/ath10k/debugfs_sta.c b/drivers/net/wireless/ath/ath10k/debugfs_sta.c
new file mode 100644
index 000000000..6f10331e9
--- /dev/null
+++ b/drivers/net/wireless/ath/ath10k/debugfs_sta.c
@@ -0,0 +1,640 @@
+/*
+ * Copyright (c) 2014-2017 Qualcomm Atheros, Inc.
+ * Copyright (c) 2018, The Linux Foundation. All rights reserved.
+ *
+ * Permission to use, copy, modify, and/or distribute this software for any
+ * purpose with or without fee is hereby granted, provided that the above
+ * copyright notice and this permission notice appear in all copies.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
+ * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
+ * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+ * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+ * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
+ * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+ */
+
+#include "core.h"
+#include "wmi-ops.h"
+#include "txrx.h"
+#include "debug.h"
+
+static void ath10k_rx_stats_update_amsdu_subfrm(struct ath10k *ar,
+ struct ath10k_sta_tid_stats *stats,
+ u32 msdu_count)
+{
+ if (msdu_count == 1)
+ stats->rx_pkt_amsdu[ATH10K_AMSDU_SUBFRM_NUM_1]++;
+ else if (msdu_count == 2)
+ stats->rx_pkt_amsdu[ATH10K_AMSDU_SUBFRM_NUM_2]++;
+ else if (msdu_count == 3)
+ stats->rx_pkt_amsdu[ATH10K_AMSDU_SUBFRM_NUM_3]++;
+ else if (msdu_count == 4)
+ stats->rx_pkt_amsdu[ATH10K_AMSDU_SUBFRM_NUM_4]++;
+ else if (msdu_count > 4)
+ stats->rx_pkt_amsdu[ATH10K_AMSDU_SUBFRM_NUM_MORE]++;
+}
+
+static void ath10k_rx_stats_update_ampdu_subfrm(struct ath10k *ar,
+ struct ath10k_sta_tid_stats *stats,
+ u32 mpdu_count)
+{
+ if (mpdu_count <= 10)
+ stats->rx_pkt_ampdu[ATH10K_AMPDU_SUBFRM_NUM_10]++;
+ else if (mpdu_count <= 20)
+ stats->rx_pkt_ampdu[ATH10K_AMPDU_SUBFRM_NUM_20]++;
+ else if (mpdu_count <= 30)
+ stats->rx_pkt_ampdu[ATH10K_AMPDU_SUBFRM_NUM_30]++;
+ else if (mpdu_count <= 40)
+ stats->rx_pkt_ampdu[ATH10K_AMPDU_SUBFRM_NUM_40]++;
+ else if (mpdu_count <= 50)
+ stats->rx_pkt_ampdu[ATH10K_AMPDU_SUBFRM_NUM_50]++;
+ else if (mpdu_count <= 60)
+ stats->rx_pkt_ampdu[ATH10K_AMPDU_SUBFRM_NUM_60]++;
+ else if (mpdu_count > 60)
+ stats->rx_pkt_ampdu[ATH10K_AMPDU_SUBFRM_NUM_MORE]++;
+}
+
+void ath10k_sta_update_rx_tid_stats_ampdu(struct ath10k *ar, u16 peer_id, u8 tid,
+ struct htt_rx_indication_mpdu_range *ranges,
+ int num_ranges)
+{
+ struct ath10k_sta *arsta;
+ struct ath10k_peer *peer;
+ int i;
+
+ if (tid > IEEE80211_NUM_TIDS || !(ar->sta_tid_stats_mask & BIT(tid)))
+ return;
+
+ rcu_read_lock();
+ spin_lock_bh(&ar->data_lock);
+
+ peer = ath10k_peer_find_by_id(ar, peer_id);
+ if (!peer || !peer->sta)
+ goto out;
+
+ arsta = (struct ath10k_sta *)peer->sta->drv_priv;
+
+ for (i = 0; i < num_ranges; i++)
+ ath10k_rx_stats_update_ampdu_subfrm(ar,
+ &arsta->tid_stats[tid],
+ ranges[i].mpdu_count);
+
+out:
+ spin_unlock_bh(&ar->data_lock);
+ rcu_read_unlock();
+}
+
+void ath10k_sta_update_rx_tid_stats(struct ath10k *ar, u8 *first_hdr,
+ unsigned long int num_msdus,
+ enum ath10k_pkt_rx_err err,
+ unsigned long int unchain_cnt,
+ unsigned long int drop_cnt,
+ unsigned long int drop_cnt_filter,
+ unsigned long int queued_msdus)
+{
+ struct ieee80211_sta *sta;
+ struct ath10k_sta *arsta;
+ struct ieee80211_hdr *hdr;
+ struct ath10k_sta_tid_stats *stats;
+ u8 tid = IEEE80211_NUM_TIDS;
+ bool non_data_frm = false;
+
+ hdr = (struct ieee80211_hdr *)first_hdr;
+ if (!ieee80211_is_data(hdr->frame_control))
+ non_data_frm = true;
+
+ if (ieee80211_is_data_qos(hdr->frame_control))
+ tid = *ieee80211_get_qos_ctl(hdr) & IEEE80211_QOS_CTL_TID_MASK;
+
+ if (!(ar->sta_tid_stats_mask & BIT(tid)) || non_data_frm)
+ return;
+
+ rcu_read_lock();
+
+ sta = ieee80211_find_sta_by_ifaddr(ar->hw, hdr->addr2, NULL);
+ if (!sta)
+ goto exit;
+
+ arsta = (struct ath10k_sta *)sta->drv_priv;
+
+ spin_lock_bh(&ar->data_lock);
+ stats = &arsta->tid_stats[tid];
+ stats->rx_pkt_from_fw += num_msdus;
+ stats->rx_pkt_unchained += unchain_cnt;
+ stats->rx_pkt_drop_chained += drop_cnt;
+ stats->rx_pkt_drop_filter += drop_cnt_filter;
+ if (err != ATH10K_PKT_RX_ERR_MAX)
+ stats->rx_pkt_err[err] += queued_msdus;
+ stats->rx_pkt_queued_for_mac += queued_msdus;
+ ath10k_rx_stats_update_amsdu_subfrm(ar, &arsta->tid_stats[tid],
+ num_msdus);
+ spin_unlock_bh(&ar->data_lock);
+
+exit:
+ rcu_read_unlock();
+}
+
+static void ath10k_sta_update_extd_stats_rx_duration(struct ath10k *ar,
+ struct ath10k_fw_stats *stats)
+{
+ struct ath10k_fw_extd_stats_peer *peer;
+ struct ieee80211_sta *sta;
+ struct ath10k_sta *arsta;
+
+ rcu_read_lock();
+ list_for_each_entry(peer, &stats->peers_extd, list) {
+ sta = ieee80211_find_sta_by_ifaddr(ar->hw, peer->peer_macaddr,
+ NULL);
+ if (!sta)
+ continue;
+ arsta = (struct ath10k_sta *)sta->drv_priv;
+ arsta->rx_duration += (u64)peer->rx_duration;
+ }
+ rcu_read_unlock();
+}
+
+static void ath10k_sta_update_stats_rx_duration(struct ath10k *ar,
+ struct ath10k_fw_stats *stats)
+{
+ struct ath10k_fw_stats_peer *peer;
+ struct ieee80211_sta *sta;
+ struct ath10k_sta *arsta;
+
+ rcu_read_lock();
+ list_for_each_entry(peer, &stats->peers, list) {
+ sta = ieee80211_find_sta_by_ifaddr(ar->hw, peer->peer_macaddr,
+ NULL);
+ if (!sta)
+ continue;
+ arsta = (struct ath10k_sta *)sta->drv_priv;
+ arsta->rx_duration += (u64)peer->rx_duration;
+ }
+ rcu_read_unlock();
+}
+
+void ath10k_sta_update_rx_duration(struct ath10k *ar,
+ struct ath10k_fw_stats *stats)
+{
+ if (stats->extended)
+ ath10k_sta_update_extd_stats_rx_duration(ar, stats);
+ else
+ ath10k_sta_update_stats_rx_duration(ar, stats);
+}
+
+static ssize_t ath10k_dbg_sta_read_aggr_mode(struct file *file,
+ char __user *user_buf,
+ size_t count, loff_t *ppos)
+{
+ struct ieee80211_sta *sta = file->private_data;
+ struct ath10k_sta *arsta = (struct ath10k_sta *)sta->drv_priv;
+ struct ath10k *ar = arsta->arvif->ar;
+ char buf[32];
+ int len = 0;
+
+ mutex_lock(&ar->conf_mutex);
+ len = scnprintf(buf, sizeof(buf) - len, "aggregation mode: %s\n",
+ (arsta->aggr_mode == ATH10K_DBG_AGGR_MODE_AUTO) ?
+ "auto" : "manual");
+ mutex_unlock(&ar->conf_mutex);
+
+ return simple_read_from_buffer(user_buf, count, ppos, buf, len);
+}
+
+static ssize_t ath10k_dbg_sta_write_aggr_mode(struct file *file,
+ const char __user *user_buf,
+ size_t count, loff_t *ppos)
+{
+ struct ieee80211_sta *sta = file->private_data;
+ struct ath10k_sta *arsta = (struct ath10k_sta *)sta->drv_priv;
+ struct ath10k *ar = arsta->arvif->ar;
+ u32 aggr_mode;
+ int ret;
+
+ if (kstrtouint_from_user(user_buf, count, 0, &aggr_mode))
+ return -EINVAL;
+
+ if (aggr_mode >= ATH10K_DBG_AGGR_MODE_MAX)
+ return -EINVAL;
+
+ mutex_lock(&ar->conf_mutex);
+ if ((ar->state != ATH10K_STATE_ON) ||
+ (aggr_mode == arsta->aggr_mode)) {
+ ret = count;
+ goto out;
+ }
+
+ ret = ath10k_wmi_addba_clear_resp(ar, arsta->arvif->vdev_id, sta->addr);
+ if (ret) {
+ ath10k_warn(ar, "failed to clear addba session ret: %d\n", ret);
+ goto out;
+ }
+
+ arsta->aggr_mode = aggr_mode;
+out:
+ mutex_unlock(&ar->conf_mutex);
+ return ret;
+}
+
+static const struct file_operations fops_aggr_mode = {
+ .read = ath10k_dbg_sta_read_aggr_mode,
+ .write = ath10k_dbg_sta_write_aggr_mode,
+ .open = simple_open,
+ .owner = THIS_MODULE,
+ .llseek = default_llseek,
+};
+
+static ssize_t ath10k_dbg_sta_write_addba(struct file *file,
+ const char __user *user_buf,
+ size_t count, loff_t *ppos)
+{
+ struct ieee80211_sta *sta = file->private_data;
+ struct ath10k_sta *arsta = (struct ath10k_sta *)sta->drv_priv;
+ struct ath10k *ar = arsta->arvif->ar;
+ u32 tid, buf_size;
+ int ret;
+ char buf[64] = {0};
+
+ ret = simple_write_to_buffer(buf, sizeof(buf) - 1, ppos,
+ user_buf, count);
+ if (ret <= 0)
+ return ret;
+
+ ret = sscanf(buf, "%u %u", &tid, &buf_size);
+ if (ret != 2)
+ return -EINVAL;
+
+ /* Valid TID values are 0 through 15 */
+ if (tid > HTT_DATA_TX_EXT_TID_MGMT - 2)
+ return -EINVAL;
+
+ mutex_lock(&ar->conf_mutex);
+ if ((ar->state != ATH10K_STATE_ON) ||
+ (arsta->aggr_mode != ATH10K_DBG_AGGR_MODE_MANUAL)) {
+ ret = count;
+ goto out;
+ }
+
+ ret = ath10k_wmi_addba_send(ar, arsta->arvif->vdev_id, sta->addr,
+ tid, buf_size);
+ if (ret) {
+ ath10k_warn(ar, "failed to send addba request: vdev_id %u peer %pM tid %u buf_size %u\n",
+ arsta->arvif->vdev_id, sta->addr, tid, buf_size);
+ }
+
+ ret = count;
+out:
+ mutex_unlock(&ar->conf_mutex);
+ return ret;
+}
+
+static const struct file_operations fops_addba = {
+ .write = ath10k_dbg_sta_write_addba,
+ .open = simple_open,
+ .owner = THIS_MODULE,
+ .llseek = default_llseek,
+};
+
+static ssize_t ath10k_dbg_sta_write_addba_resp(struct file *file,
+ const char __user *user_buf,
+ size_t count, loff_t *ppos)
+{
+ struct ieee80211_sta *sta = file->private_data;
+ struct ath10k_sta *arsta = (struct ath10k_sta *)sta->drv_priv;
+ struct ath10k *ar = arsta->arvif->ar;
+ u32 tid, status;
+ int ret;
+ char buf[64] = {0};
+
+ ret = simple_write_to_buffer(buf, sizeof(buf) - 1, ppos,
+ user_buf, count);
+ if (ret <= 0)
+ return ret;
+
+ ret = sscanf(buf, "%u %u", &tid, &status);
+ if (ret != 2)
+ return -EINVAL;
+
+ /* Valid TID values are 0 through 15 */
+ if (tid > HTT_DATA_TX_EXT_TID_MGMT - 2)
+ return -EINVAL;
+
+ mutex_lock(&ar->conf_mutex);
+ if ((ar->state != ATH10K_STATE_ON) ||
+ (arsta->aggr_mode != ATH10K_DBG_AGGR_MODE_MANUAL)) {
+ ret = count;
+ goto out;
+ }
+
+ ret = ath10k_wmi_addba_set_resp(ar, arsta->arvif->vdev_id, sta->addr,
+ tid, status);
+ if (ret) {
+		ath10k_warn(ar, "failed to send addba response: vdev_id %u peer %pM tid %u status %u\n",
+ arsta->arvif->vdev_id, sta->addr, tid, status);
+ }
+ ret = count;
+out:
+ mutex_unlock(&ar->conf_mutex);
+ return ret;
+}
+
+static const struct file_operations fops_addba_resp = {
+ .write = ath10k_dbg_sta_write_addba_resp,
+ .open = simple_open,
+ .owner = THIS_MODULE,
+ .llseek = default_llseek,
+};
+
+static ssize_t ath10k_dbg_sta_write_delba(struct file *file,
+ const char __user *user_buf,
+ size_t count, loff_t *ppos)
+{
+ struct ieee80211_sta *sta = file->private_data;
+ struct ath10k_sta *arsta = (struct ath10k_sta *)sta->drv_priv;
+ struct ath10k *ar = arsta->arvif->ar;
+ u32 tid, initiator, reason;
+ int ret;
+ char buf[64] = {0};
+
+ ret = simple_write_to_buffer(buf, sizeof(buf) - 1, ppos,
+ user_buf, count);
+ if (ret <= 0)
+ return ret;
+
+ ret = sscanf(buf, "%u %u %u", &tid, &initiator, &reason);
+ if (ret != 3)
+ return -EINVAL;
+
+ /* Valid TID values are 0 through 15 */
+ if (tid > HTT_DATA_TX_EXT_TID_MGMT - 2)
+ return -EINVAL;
+
+ mutex_lock(&ar->conf_mutex);
+ if ((ar->state != ATH10K_STATE_ON) ||
+ (arsta->aggr_mode != ATH10K_DBG_AGGR_MODE_MANUAL)) {
+ ret = count;
+ goto out;
+ }
+
+ ret = ath10k_wmi_delba_send(ar, arsta->arvif->vdev_id, sta->addr,
+ tid, initiator, reason);
+ if (ret) {
+ ath10k_warn(ar, "failed to send delba: vdev_id %u peer %pM tid %u initiator %u reason %u\n",
+ arsta->arvif->vdev_id, sta->addr, tid, initiator,
+ reason);
+ }
+ ret = count;
+out:
+ mutex_unlock(&ar->conf_mutex);
+ return ret;
+}
+
+static const struct file_operations fops_delba = {
+ .write = ath10k_dbg_sta_write_delba,
+ .open = simple_open,
+ .owner = THIS_MODULE,
+ .llseek = default_llseek,
+};
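+
+/* The addba, addba_resp and delba files above only take effect once
+ * aggr_mode has been switched to manual; their write formats are:
+ *
+ *   echo 1 > aggr_mode			# ATH10K_DBG_AGGR_MODE_MANUAL
+ *   echo "<tid> <buf_size>" > addba
+ *   echo "<tid> <status>" > addba_resp
+ *   echo "<tid> <initiator> <reason>" > delba
+ */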
+
+static ssize_t ath10k_dbg_sta_read_peer_debug_trigger(struct file *file,
+ char __user *user_buf,
+ size_t count,
+ loff_t *ppos)
+{
+ struct ieee80211_sta *sta = file->private_data;
+ struct ath10k_sta *arsta = (struct ath10k_sta *)sta->drv_priv;
+ struct ath10k *ar = arsta->arvif->ar;
+	char buf[64];
+	int len = 0;
+
+	mutex_lock(&ar->conf_mutex);
+	len = scnprintf(buf, sizeof(buf) - len,
+			"Write 1 to trigger the peer debug logs once\n");
+ mutex_unlock(&ar->conf_mutex);
+
+ return simple_read_from_buffer(user_buf, count, ppos, buf, len);
+}
+
+static ssize_t
+ath10k_dbg_sta_write_peer_debug_trigger(struct file *file,
+ const char __user *user_buf,
+ size_t count, loff_t *ppos)
+{
+ struct ieee80211_sta *sta = file->private_data;
+ struct ath10k_sta *arsta = (struct ath10k_sta *)sta->drv_priv;
+ struct ath10k *ar = arsta->arvif->ar;
+ u8 peer_debug_trigger;
+ int ret;
+
+ if (kstrtou8_from_user(user_buf, count, 0, &peer_debug_trigger))
+ return -EINVAL;
+
+ if (peer_debug_trigger != 1)
+ return -EINVAL;
+
+ mutex_lock(&ar->conf_mutex);
+
+ if (ar->state != ATH10K_STATE_ON) {
+ ret = -ENETDOWN;
+ goto out;
+ }
+
+ ret = ath10k_wmi_peer_set_param(ar, arsta->arvif->vdev_id, sta->addr,
+ WMI_PEER_DEBUG, peer_debug_trigger);
+ if (ret) {
+ ath10k_warn(ar, "failed to set param to trigger peer tid logs for station ret: %d\n",
+ ret);
+ goto out;
+ }
+out:
+ mutex_unlock(&ar->conf_mutex);
+ return count;
+}
+
+static const struct file_operations fops_peer_debug_trigger = {
+ .open = simple_open,
+ .read = ath10k_dbg_sta_read_peer_debug_trigger,
+ .write = ath10k_dbg_sta_write_peer_debug_trigger,
+ .owner = THIS_MODULE,
+ .llseek = default_llseek,
+};
+
+static char *get_err_str(enum ath10k_pkt_rx_err i)
+{
+ switch (i) {
+ case ATH10K_PKT_RX_ERR_FCS:
+ return "fcs_err";
+ case ATH10K_PKT_RX_ERR_TKIP:
+ return "tkip_err";
+ case ATH10K_PKT_RX_ERR_CRYPT:
+ return "crypt_err";
+ case ATH10K_PKT_RX_ERR_PEER_IDX_INVAL:
+ return "peer_idx_inval";
+ case ATH10K_PKT_RX_ERR_MAX:
+ return "unknown";
+ }
+
+ return "unknown";
+}
+
+static char *get_num_ampdu_subfrm_str(enum ath10k_ampdu_subfrm_num i)
+{
+ switch (i) {
+ case ATH10K_AMPDU_SUBFRM_NUM_10:
+ return "upto 10";
+ case ATH10K_AMPDU_SUBFRM_NUM_20:
+ return "11-20";
+ case ATH10K_AMPDU_SUBFRM_NUM_30:
+ return "21-30";
+ case ATH10K_AMPDU_SUBFRM_NUM_40:
+ return "31-40";
+ case ATH10K_AMPDU_SUBFRM_NUM_50:
+ return "41-50";
+ case ATH10K_AMPDU_SUBFRM_NUM_60:
+ return "51-60";
+ case ATH10K_AMPDU_SUBFRM_NUM_MORE:
+ return ">60";
+ case ATH10K_AMPDU_SUBFRM_NUM_MAX:
+ return "0";
+ }
+
+ return "0";
+}
+
+static char *get_num_amsdu_subfrm_str(enum ath10k_amsdu_subfrm_num i)
+{
+ switch (i) {
+ case ATH10K_AMSDU_SUBFRM_NUM_1:
+ return "1";
+ case ATH10K_AMSDU_SUBFRM_NUM_2:
+ return "2";
+ case ATH10K_AMSDU_SUBFRM_NUM_3:
+ return "3";
+ case ATH10K_AMSDU_SUBFRM_NUM_4:
+ return "4";
+ case ATH10K_AMSDU_SUBFRM_NUM_MORE:
+ return ">4";
+ case ATH10K_AMSDU_SUBFRM_NUM_MAX:
+ return "0";
+ }
+
+ return "0";
+}
+
+#define PRINT_TID_STATS(_field, _tabs) \
+ do { \
+ int k = 0; \
+ for (j = 0; j <= IEEE80211_NUM_TIDS; j++) { \
+ if (ar->sta_tid_stats_mask & BIT(j)) { \
+ len += scnprintf(buf + len, buf_len - len, \
+ "[%02d] %-10lu ", \
+ j, stats[j]._field); \
+ k++; \
+ if (k % 8 == 0) { \
+ len += scnprintf(buf + len, \
+ buf_len - len, "\n"); \
+ len += scnprintf(buf + len, \
+ buf_len - len, \
+ _tabs); \
+ } \
+ } \
+ } \
+ len += scnprintf(buf + len, buf_len - len, "\n"); \
+ } while (0)
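
PRINT_TID_STATS prints one "[tid] count" cell for every TID enabled in ar->sta_tid_stats_mask and wraps onto a fresh, tab-indented line after every eighth cell. A standalone sketch of that wrapping logic, using made-up mask and counter values:

#include <stdio.h>

/* Standalone sketch of the PRINT_TID_STATS wrapping logic; the mask and
 * the (all-zero) counters are example values, not driver data.
 */
int main(void)
{
	unsigned long counts[17] = { 0 };
	unsigned int mask = 0x01ff;	/* TIDs 0-8 enabled */
	int j, k = 0;

	for (j = 0; j <= 16; j++) {
		if (!(mask & (1u << j)))
			continue;
		printf("[%02d] %-10lu ", j, counts[j]);
		if (++k % 8 == 0)
			printf("\n\t\t\t\t");
	}
	printf("\n");
	return 0;
}
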
+
+static ssize_t ath10k_dbg_sta_read_tid_stats(struct file *file,
+ char __user *user_buf,
+ size_t count, loff_t *ppos)
+{
+ struct ieee80211_sta *sta = file->private_data;
+ struct ath10k_sta *arsta = (struct ath10k_sta *)sta->drv_priv;
+ struct ath10k *ar = arsta->arvif->ar;
+ struct ath10k_sta_tid_stats *stats = arsta->tid_stats;
+ size_t len = 0, buf_len = 1048 * IEEE80211_NUM_TIDS;
+ char *buf;
+ int i, j;
+ ssize_t ret;
+
+ buf = kzalloc(buf_len, GFP_KERNEL);
+ if (!buf)
+ return -ENOMEM;
+
+ mutex_lock(&ar->conf_mutex);
+
+ spin_lock_bh(&ar->data_lock);
+
+ len += scnprintf(buf + len, buf_len - len,
+ "\n\t\tDriver Rx pkt stats per tid, ([tid] count)\n");
+ len += scnprintf(buf + len, buf_len - len,
+ "\t\t------------------------------------------\n");
+ len += scnprintf(buf + len, buf_len - len, "MSDUs from FW\t\t\t");
+ PRINT_TID_STATS(rx_pkt_from_fw, "\t\t\t\t");
+
+ len += scnprintf(buf + len, buf_len - len, "MSDUs unchained\t\t\t");
+ PRINT_TID_STATS(rx_pkt_unchained, "\t\t\t\t");
+
+ len += scnprintf(buf + len, buf_len - len,
+ "MSDUs locally dropped:chained\t");
+ PRINT_TID_STATS(rx_pkt_drop_chained, "\t\t\t\t");
+
+ len += scnprintf(buf + len, buf_len - len,
+ "MSDUs locally dropped:filtered\t");
+ PRINT_TID_STATS(rx_pkt_drop_filter, "\t\t\t\t");
+
+ len += scnprintf(buf + len, buf_len - len,
+ "MSDUs queued for mac80211\t");
+ PRINT_TID_STATS(rx_pkt_queued_for_mac, "\t\t\t\t");
+
+ for (i = 0; i < ATH10K_PKT_RX_ERR_MAX; i++) {
+ len += scnprintf(buf + len, buf_len - len,
+ "MSDUs with error:%s\t", get_err_str(i));
+ PRINT_TID_STATS(rx_pkt_err[i], "\t\t\t\t");
+ }
+
+ len += scnprintf(buf + len, buf_len - len, "\n");
+ for (i = 0; i < ATH10K_AMPDU_SUBFRM_NUM_MAX; i++) {
+ len += scnprintf(buf + len, buf_len - len,
+ "A-MPDU num subframes %s\t",
+ get_num_ampdu_subfrm_str(i));
+ PRINT_TID_STATS(rx_pkt_ampdu[i], "\t\t\t\t");
+ }
+
+ len += scnprintf(buf + len, buf_len - len, "\n");
+ for (i = 0; i < ATH10K_AMSDU_SUBFRM_NUM_MAX; i++) {
+ len += scnprintf(buf + len, buf_len - len,
+ "A-MSDU num subframes %s\t\t",
+ get_num_amsdu_subfrm_str(i));
+ PRINT_TID_STATS(rx_pkt_amsdu[i], "\t\t\t\t");
+ }
+
+ spin_unlock_bh(&ar->data_lock);
+
+ ret = simple_read_from_buffer(user_buf, count, ppos, buf, len);
+
+ kfree(buf);
+
+ mutex_unlock(&ar->conf_mutex);
+
+ return ret;
+}
+
+static const struct file_operations fops_tid_stats_dump = {
+ .open = simple_open,
+ .read = ath10k_dbg_sta_read_tid_stats,
+ .owner = THIS_MODULE,
+ .llseek = default_llseek,
+};
+
+void ath10k_sta_add_debugfs(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
+ struct ieee80211_sta *sta, struct dentry *dir)
+{
+ debugfs_create_file("aggr_mode", 0644, dir, sta, &fops_aggr_mode);
+ debugfs_create_file("addba", 0200, dir, sta, &fops_addba);
+ debugfs_create_file("addba_resp", 0200, dir, sta, &fops_addba_resp);
+ debugfs_create_file("delba", 0200, dir, sta, &fops_delba);
+ debugfs_create_file("peer_debug_trigger", 0600, dir, sta,
+ &fops_peer_debug_trigger);
+ debugfs_create_file("dump_tid_stats", 0400, dir, sta,
+ &fops_tid_stats_dump);
+}
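
Taken together, these files give user space manual control over block-ack aggregation. A hedged user-space sketch of driving them: the debugfs path and MAC are illustrative (the real path depends on phy, netdev and station), and it assumes manual mode is selected by writing 1, per the ATH10K_DBG_AGGR_MODE_MANUAL enum value.

#include <fcntl.h>
#include <stdio.h>
#include <string.h>
#include <unistd.h>

/* Illustrative: put a station into manual aggregation mode, then request
 * an ADDBA for TID 0 with a 64-frame window. Path and MAC are examples.
 */
static int write_str(const char *path, const char *val)
{
	int fd = open(path, O_WRONLY);

	if (fd < 0)
		return -1;
	if (write(fd, val, strlen(val)) < 0) {
		close(fd);
		return -1;
	}
	return close(fd);
}

int main(void)
{
	const char *base = "/sys/kernel/debug/ieee80211/phy0/"
			   "netdev:wlan0/stations/00:11:22:33:44:55";
	char path[256];

	snprintf(path, sizeof(path), "%s/aggr_mode", base);
	if (write_str(path, "1"))	/* assumed: 1 == manual */
		return 1;

	snprintf(path, sizeof(path), "%s/addba", base);
	return write_str(path, "0 64") ? 1 : 0;	/* "tid buf_size" */
}
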
diff --git a/drivers/net/wireless/ath/ath10k/hif.h b/drivers/net/wireless/ath/ath10k/hif.h
new file mode 100644
index 000000000..1a59ea006
--- /dev/null
+++ b/drivers/net/wireless/ath/ath10k/hif.h
@@ -0,0 +1,234 @@
+/*
+ * Copyright (c) 2005-2011 Atheros Communications Inc.
+ * Copyright (c) 2011-2015,2017 Qualcomm Atheros, Inc.
+ *
+ * Permission to use, copy, modify, and/or distribute this software for any
+ * purpose with or without fee is hereby granted, provided that the above
+ * copyright notice and this permission notice appear in all copies.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
+ * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
+ * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+ * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+ * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
+ * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+ */
+
+#ifndef _HIF_H_
+#define _HIF_H_
+
+#include <linux/kernel.h>
+#include "core.h"
+#include "bmi.h"
+#include "debug.h"
+
+struct ath10k_hif_sg_item {
+ u16 transfer_id;
+ void *transfer_context; /* NULL = tx completion callback not called */
+ void *vaddr; /* for debugging mostly */
+ dma_addr_t paddr;
+ u16 len;
+};
+
+struct ath10k_hif_ops {
+ /* send a scatter-gather list to the target */
+ int (*tx_sg)(struct ath10k *ar, u8 pipe_id,
+ struct ath10k_hif_sg_item *items, int n_items);
+
+ /* read firmware memory through the diagnose interface */
+ int (*diag_read)(struct ath10k *ar, u32 address, void *buf,
+ size_t buf_len);
+
+ int (*diag_write)(struct ath10k *ar, u32 address, const void *data,
+ int nbytes);
+ /*
+ * API to handle HIF-specific BMI message exchanges, this API is
+ * synchronous and only allowed to be called from a context that
+ * can block (sleep)
+ */
+ int (*exchange_bmi_msg)(struct ath10k *ar,
+ void *request, u32 request_len,
+ void *response, u32 *response_len);
+
+ /* Post BMI phase, after FW is loaded. Starts regular operation */
+ int (*start)(struct ath10k *ar);
+
+	/* Clean up what start() did. This does not revert to BMI phase. If
+	 * that is desired, call power_down() and power_up()
+	 */
+ void (*stop)(struct ath10k *ar);
+
+ int (*map_service_to_pipe)(struct ath10k *ar, u16 service_id,
+ u8 *ul_pipe, u8 *dl_pipe);
+
+ void (*get_default_pipe)(struct ath10k *ar, u8 *ul_pipe, u8 *dl_pipe);
+
+ /*
+ * Check if prior sends have completed.
+ *
+ * Check whether the pipe in question has any completed
+ * sends that have not yet been processed.
+ * This function is only relevant for HIF pipes that are configured
+ * to be polled rather than interrupt-driven.
+ */
+ void (*send_complete_check)(struct ath10k *ar, u8 pipe_id, int force);
+
+ u16 (*get_free_queue_number)(struct ath10k *ar, u8 pipe_id);
+
+ u32 (*read32)(struct ath10k *ar, u32 address);
+
+ void (*write32)(struct ath10k *ar, u32 address, u32 value);
+
+ /* Power up the device and enter BMI transfer mode for FW download */
+ int (*power_up)(struct ath10k *ar);
+
+ /* Power down the device and free up resources. stop() must be called
+ * before this if start() was called earlier
+ */
+ void (*power_down)(struct ath10k *ar);
+
+ int (*suspend)(struct ath10k *ar);
+ int (*resume)(struct ath10k *ar);
+
+ /* fetch calibration data from target eeprom */
+ int (*fetch_cal_eeprom)(struct ath10k *ar, void **data,
+ size_t *data_len);
+
+ int (*get_target_info)(struct ath10k *ar,
+ struct bmi_target_info *target_info);
+};
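
Every bus backend (PCI, AHB, SDIO, USB, SNOC) fills one of these tables, and the inline wrappers below dispatch through it; hooks that the wrappers guard with a NULL check are optional. A hypothetical skeleton showing only a few hooks:

/* Hypothetical backend skeleton; a real backend must also fill the hooks
 * the wrappers below dereference unconditionally (tx_sg, start, stop,
 * diag_read, map_service_to_pipe, power_up, power_down, ...).
 */
static int dummy_hif_tx_sg(struct ath10k *ar, u8 pipe_id,
			   struct ath10k_hif_sg_item *items, int n_items)
{
	/* a real backend would queue the items on the bus here */
	return -ENOSYS;
}

static int dummy_hif_power_up(struct ath10k *ar)
{
	return 0;	/* bring the device into BMI state */
}

static void dummy_hif_power_down(struct ath10k *ar)
{
}

static const struct ath10k_hif_ops dummy_hif_ops = {
	.tx_sg		= dummy_hif_tx_sg,
	.power_up	= dummy_hif_power_up,
	.power_down	= dummy_hif_power_down,
	/* .suspend/.resume left NULL: wrappers return -EOPNOTSUPP */
};
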
+
+static inline int ath10k_hif_tx_sg(struct ath10k *ar, u8 pipe_id,
+ struct ath10k_hif_sg_item *items,
+ int n_items)
+{
+ return ar->hif.ops->tx_sg(ar, pipe_id, items, n_items);
+}
+
+static inline int ath10k_hif_diag_read(struct ath10k *ar, u32 address, void *buf,
+ size_t buf_len)
+{
+ return ar->hif.ops->diag_read(ar, address, buf, buf_len);
+}
+
+static inline int ath10k_hif_diag_write(struct ath10k *ar, u32 address,
+ const void *data, int nbytes)
+{
+ if (!ar->hif.ops->diag_write)
+ return -EOPNOTSUPP;
+
+ return ar->hif.ops->diag_write(ar, address, data, nbytes);
+}
+
+static inline int ath10k_hif_exchange_bmi_msg(struct ath10k *ar,
+ void *request, u32 request_len,
+ void *response, u32 *response_len)
+{
+ return ar->hif.ops->exchange_bmi_msg(ar, request, request_len,
+ response, response_len);
+}
+
+static inline int ath10k_hif_start(struct ath10k *ar)
+{
+ return ar->hif.ops->start(ar);
+}
+
+static inline void ath10k_hif_stop(struct ath10k *ar)
+{
+ return ar->hif.ops->stop(ar);
+}
+
+static inline int ath10k_hif_map_service_to_pipe(struct ath10k *ar,
+ u16 service_id,
+ u8 *ul_pipe, u8 *dl_pipe)
+{
+ return ar->hif.ops->map_service_to_pipe(ar, service_id,
+ ul_pipe, dl_pipe);
+}
+
+static inline void ath10k_hif_get_default_pipe(struct ath10k *ar,
+ u8 *ul_pipe, u8 *dl_pipe)
+{
+ ar->hif.ops->get_default_pipe(ar, ul_pipe, dl_pipe);
+}
+
+static inline void ath10k_hif_send_complete_check(struct ath10k *ar,
+ u8 pipe_id, int force)
+{
+ ar->hif.ops->send_complete_check(ar, pipe_id, force);
+}
+
+static inline u16 ath10k_hif_get_free_queue_number(struct ath10k *ar,
+ u8 pipe_id)
+{
+ return ar->hif.ops->get_free_queue_number(ar, pipe_id);
+}
+
+static inline int ath10k_hif_power_up(struct ath10k *ar)
+{
+ return ar->hif.ops->power_up(ar);
+}
+
+static inline void ath10k_hif_power_down(struct ath10k *ar)
+{
+ ar->hif.ops->power_down(ar);
+}
+
+static inline int ath10k_hif_suspend(struct ath10k *ar)
+{
+ if (!ar->hif.ops->suspend)
+ return -EOPNOTSUPP;
+
+ return ar->hif.ops->suspend(ar);
+}
+
+static inline int ath10k_hif_resume(struct ath10k *ar)
+{
+ if (!ar->hif.ops->resume)
+ return -EOPNOTSUPP;
+
+ return ar->hif.ops->resume(ar);
+}
+
+static inline u32 ath10k_hif_read32(struct ath10k *ar, u32 address)
+{
+ if (!ar->hif.ops->read32) {
+ ath10k_warn(ar, "hif read32 not supported\n");
+ return 0xdeaddead;
+ }
+
+ return ar->hif.ops->read32(ar, address);
+}
+
+static inline void ath10k_hif_write32(struct ath10k *ar,
+ u32 address, u32 data)
+{
+ if (!ar->hif.ops->write32) {
+ ath10k_warn(ar, "hif write32 not supported\n");
+ return;
+ }
+
+ ar->hif.ops->write32(ar, address, data);
+}
+
+static inline int ath10k_hif_fetch_cal_eeprom(struct ath10k *ar,
+ void **data,
+ size_t *data_len)
+{
+ if (!ar->hif.ops->fetch_cal_eeprom)
+ return -EOPNOTSUPP;
+
+ return ar->hif.ops->fetch_cal_eeprom(ar, data, data_len);
+}
+
+static inline int ath10k_hif_get_target_info(struct ath10k *ar,
+ struct bmi_target_info *tgt_info)
+{
+ if (!ar->hif.ops->get_target_info)
+ return -EOPNOTSUPP;
+
+ return ar->hif.ops->get_target_info(ar, tgt_info);
+}
+
+#endif /* _HIF_H_ */
diff --git a/drivers/net/wireless/ath/ath10k/htc.c b/drivers/net/wireless/ath/ath10k/htc.c
new file mode 100644
index 000000000..331b8d558
--- /dev/null
+++ b/drivers/net/wireless/ath/ath10k/htc.c
@@ -0,0 +1,910 @@
+/*
+ * Copyright (c) 2005-2011 Atheros Communications Inc.
+ * Copyright (c) 2011-2017 Qualcomm Atheros, Inc.
+ *
+ * Permission to use, copy, modify, and/or distribute this software for any
+ * purpose with or without fee is hereby granted, provided that the above
+ * copyright notice and this permission notice appear in all copies.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
+ * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
+ * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+ * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+ * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
+ * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+ */
+
+#include "core.h"
+#include "hif.h"
+#include "debug.h"
+
+/********/
+/* Send */
+/********/
+
+static void ath10k_htc_control_tx_complete(struct ath10k *ar,
+ struct sk_buff *skb)
+{
+ kfree_skb(skb);
+}
+
+static struct sk_buff *ath10k_htc_build_tx_ctrl_skb(void *ar)
+{
+ struct sk_buff *skb;
+ struct ath10k_skb_cb *skb_cb;
+
+ skb = dev_alloc_skb(ATH10K_HTC_CONTROL_BUFFER_SIZE);
+ if (!skb)
+ return NULL;
+
+ skb_reserve(skb, 20); /* FIXME: why 20 bytes? */
+ WARN_ONCE((unsigned long)skb->data & 3, "unaligned skb");
+
+ skb_cb = ATH10K_SKB_CB(skb);
+ memset(skb_cb, 0, sizeof(*skb_cb));
+
+ ath10k_dbg(ar, ATH10K_DBG_HTC, "%s: skb %pK\n", __func__, skb);
+ return skb;
+}
+
+static inline void ath10k_htc_restore_tx_skb(struct ath10k_htc *htc,
+ struct sk_buff *skb)
+{
+ struct ath10k_skb_cb *skb_cb = ATH10K_SKB_CB(skb);
+
+ dma_unmap_single(htc->ar->dev, skb_cb->paddr, skb->len, DMA_TO_DEVICE);
+ skb_pull(skb, sizeof(struct ath10k_htc_hdr));
+}
+
+void ath10k_htc_notify_tx_completion(struct ath10k_htc_ep *ep,
+ struct sk_buff *skb)
+{
+ struct ath10k *ar = ep->htc->ar;
+
+ ath10k_dbg(ar, ATH10K_DBG_HTC, "%s: ep %d skb %pK\n", __func__,
+ ep->eid, skb);
+
+ ath10k_htc_restore_tx_skb(ep->htc, skb);
+
+ if (!ep->ep_ops.ep_tx_complete) {
+ ath10k_warn(ar, "no tx handler for eid %d\n", ep->eid);
+ dev_kfree_skb_any(skb);
+ return;
+ }
+
+ ep->ep_ops.ep_tx_complete(ep->htc->ar, skb);
+}
+EXPORT_SYMBOL(ath10k_htc_notify_tx_completion);
+
+static void ath10k_htc_prepare_tx_skb(struct ath10k_htc_ep *ep,
+ struct sk_buff *skb)
+{
+ struct ath10k_htc_hdr *hdr;
+
+ hdr = (struct ath10k_htc_hdr *)skb->data;
+
+ hdr->eid = ep->eid;
+ hdr->len = __cpu_to_le16(skb->len - sizeof(*hdr));
+ hdr->flags = 0;
+ hdr->flags |= ATH10K_HTC_FLAG_NEED_CREDIT_UPDATE;
+
+ spin_lock_bh(&ep->htc->tx_lock);
+ hdr->seq_no = ep->seq_no++;
+ spin_unlock_bh(&ep->htc->tx_lock);
+}
+
+int ath10k_htc_send(struct ath10k_htc *htc,
+ enum ath10k_htc_ep_id eid,
+ struct sk_buff *skb)
+{
+ struct ath10k *ar = htc->ar;
+ struct ath10k_htc_ep *ep = &htc->endpoint[eid];
+ struct ath10k_skb_cb *skb_cb = ATH10K_SKB_CB(skb);
+ struct ath10k_hif_sg_item sg_item;
+ struct device *dev = htc->ar->dev;
+ int credits = 0;
+ int ret;
+
+ if (htc->ar->state == ATH10K_STATE_WEDGED)
+ return -ECOMM;
+
+ if (eid >= ATH10K_HTC_EP_COUNT) {
+ ath10k_warn(ar, "Invalid endpoint id: %d\n", eid);
+ return -ENOENT;
+ }
+
+ skb_push(skb, sizeof(struct ath10k_htc_hdr));
+
+ if (ep->tx_credit_flow_enabled) {
+ credits = DIV_ROUND_UP(skb->len, htc->target_credit_size);
+ spin_lock_bh(&htc->tx_lock);
+ if (ep->tx_credits < credits) {
+ ath10k_dbg(ar, ATH10K_DBG_HTC,
+ "htc insufficient credits ep %d required %d available %d\n",
+ eid, credits, ep->tx_credits);
+ spin_unlock_bh(&htc->tx_lock);
+ ret = -EAGAIN;
+ goto err_pull;
+ }
+ ep->tx_credits -= credits;
+ ath10k_dbg(ar, ATH10K_DBG_HTC,
+ "htc ep %d consumed %d credits (total %d)\n",
+ eid, credits, ep->tx_credits);
+ spin_unlock_bh(&htc->tx_lock);
+ }
+
+ ath10k_htc_prepare_tx_skb(ep, skb);
+
+ skb_cb->eid = eid;
+ skb_cb->paddr = dma_map_single(dev, skb->data, skb->len, DMA_TO_DEVICE);
+ ret = dma_mapping_error(dev, skb_cb->paddr);
+ if (ret) {
+ ret = -EIO;
+ goto err_credits;
+ }
+
+ sg_item.transfer_id = ep->eid;
+ sg_item.transfer_context = skb;
+ sg_item.vaddr = skb->data;
+ sg_item.paddr = skb_cb->paddr;
+ sg_item.len = skb->len;
+
+ ret = ath10k_hif_tx_sg(htc->ar, ep->ul_pipe_id, &sg_item, 1);
+ if (ret)
+ goto err_unmap;
+
+ return 0;
+
+err_unmap:
+ dma_unmap_single(dev, skb_cb->paddr, skb->len, DMA_TO_DEVICE);
+err_credits:
+ if (ep->tx_credit_flow_enabled) {
+ spin_lock_bh(&htc->tx_lock);
+ ep->tx_credits += credits;
+ ath10k_dbg(ar, ATH10K_DBG_HTC,
+ "htc ep %d reverted %d credits back (total %d)\n",
+ eid, credits, ep->tx_credits);
+ spin_unlock_bh(&htc->tx_lock);
+
+ if (ep->ep_ops.ep_tx_credits)
+ ep->ep_ops.ep_tx_credits(htc->ar);
+ }
+err_pull:
+ skb_pull(skb, sizeof(struct ath10k_htc_hdr));
+ return ret;
+}
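
Credits are consumed in units of target_credit_size, rounded up per frame, and refunded on error. A standalone worked example of the cost calculation (the 1792-byte credit size is just an example value, not taken from any firmware):

#include <stdio.h>

#define DIV_ROUND_UP(n, d) (((n) + (d) - 1) / (d))

/* Worked example of the HTC credit cost per frame length. */
int main(void)
{
	int target_credit_size = 1792;	/* example value */
	int lens[] = { 64, 1792, 1793, 3000 };
	unsigned int i;

	for (i = 0; i < sizeof(lens) / sizeof(lens[0]); i++)
		printf("len %4d -> %d credit(s)\n", lens[i],
		       DIV_ROUND_UP(lens[i], target_credit_size));
	return 0;
}
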
+
+void ath10k_htc_tx_completion_handler(struct ath10k *ar, struct sk_buff *skb)
+{
+ struct ath10k_htc *htc = &ar->htc;
+ struct ath10k_skb_cb *skb_cb;
+ struct ath10k_htc_ep *ep;
+
+ if (WARN_ON_ONCE(!skb))
+ return;
+
+ skb_cb = ATH10K_SKB_CB(skb);
+ ep = &htc->endpoint[skb_cb->eid];
+
+ ath10k_htc_notify_tx_completion(ep, skb);
+ /* the skb now belongs to the completion handler */
+}
+EXPORT_SYMBOL(ath10k_htc_tx_completion_handler);
+
+/***********/
+/* Receive */
+/***********/
+
+static void
+ath10k_htc_process_credit_report(struct ath10k_htc *htc,
+ const struct ath10k_htc_credit_report *report,
+ int len,
+ enum ath10k_htc_ep_id eid)
+{
+ struct ath10k *ar = htc->ar;
+ struct ath10k_htc_ep *ep;
+ int i, n_reports;
+
+ if (len % sizeof(*report))
+ ath10k_warn(ar, "Uneven credit report len %d", len);
+
+ n_reports = len / sizeof(*report);
+
+ spin_lock_bh(&htc->tx_lock);
+ for (i = 0; i < n_reports; i++, report++) {
+ if (report->eid >= ATH10K_HTC_EP_COUNT)
+ break;
+
+ ep = &htc->endpoint[report->eid];
+ ep->tx_credits += report->credits;
+
+ ath10k_dbg(ar, ATH10K_DBG_HTC, "htc ep %d got %d credits (total %d)\n",
+ report->eid, report->credits, ep->tx_credits);
+
+ if (ep->ep_ops.ep_tx_credits) {
+ spin_unlock_bh(&htc->tx_lock);
+ ep->ep_ops.ep_tx_credits(htc->ar);
+ spin_lock_bh(&htc->tx_lock);
+ }
+ }
+ spin_unlock_bh(&htc->tx_lock);
+}
+
+static int
+ath10k_htc_process_lookahead(struct ath10k_htc *htc,
+ const struct ath10k_htc_lookahead_report *report,
+ int len,
+ enum ath10k_htc_ep_id eid,
+ void *next_lookaheads,
+ int *next_lookaheads_len)
+{
+ struct ath10k *ar = htc->ar;
+
+ /* Invalid lookahead flags are actually transmitted by
+ * the target in the HTC control message.
+ * Since this will happen at every boot we silently ignore
+ * the lookahead in this case
+ */
+ if (report->pre_valid != ((~report->post_valid) & 0xFF))
+ return 0;
+
+ if (next_lookaheads && next_lookaheads_len) {
+ ath10k_dbg(ar, ATH10K_DBG_HTC,
+ "htc rx lookahead found pre_valid 0x%x post_valid 0x%x\n",
+ report->pre_valid, report->post_valid);
+
+ /* look ahead bytes are valid, copy them over */
+ memcpy((u8 *)next_lookaheads, report->lookahead, 4);
+
+ *next_lookaheads_len = 1;
+ }
+
+ return 0;
+}
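
The validity handshake is that pre_valid must equal the one's complement of post_valid. A sketch of a report that passes the check, using the structures from htc.h (the bytes are example values):

/* Sketch only: 0x5a and 0xa5 are bitwise complements, so this report
 * satisfies pre_valid == ((~post_valid) & 0xFF) and would be accepted.
 */
struct ath10k_htc_lookahead_report report = {
	.pre_valid  = 0x5a,
	.post_valid = 0xa5,
	.lookahead  = { 0xde, 0xad, 0xbe, 0xef },	/* example bytes */
};
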
+
+static int
+ath10k_htc_process_lookahead_bundle(struct ath10k_htc *htc,
+ const struct ath10k_htc_lookahead_bundle *report,
+ int len,
+ enum ath10k_htc_ep_id eid,
+ void *next_lookaheads,
+ int *next_lookaheads_len)
+{
+ struct ath10k *ar = htc->ar;
+ int bundle_cnt = len / sizeof(*report);
+
+ if (!bundle_cnt || (bundle_cnt > HTC_HOST_MAX_MSG_PER_RX_BUNDLE)) {
+ ath10k_warn(ar, "Invalid lookahead bundle count: %d\n",
+ bundle_cnt);
+ return -EINVAL;
+ }
+
+ if (next_lookaheads && next_lookaheads_len) {
+ int i;
+
+ for (i = 0; i < bundle_cnt; i++) {
+ memcpy(((u8 *)next_lookaheads) + 4 * i,
+ report->lookahead, 4);
+ report++;
+ }
+
+ *next_lookaheads_len = bundle_cnt;
+ }
+
+ return 0;
+}
+
+int ath10k_htc_process_trailer(struct ath10k_htc *htc,
+ u8 *buffer,
+ int length,
+ enum ath10k_htc_ep_id src_eid,
+ void *next_lookaheads,
+ int *next_lookaheads_len)
+{
+ struct ath10k_htc_lookahead_bundle *bundle;
+ struct ath10k *ar = htc->ar;
+ int status = 0;
+ struct ath10k_htc_record *record;
+ u8 *orig_buffer;
+ int orig_length;
+ size_t len;
+
+ orig_buffer = buffer;
+ orig_length = length;
+
+ while (length > 0) {
+ record = (struct ath10k_htc_record *)buffer;
+
+ if (length < sizeof(record->hdr)) {
+ status = -EINVAL;
+ break;
+ }
+
+ if (record->hdr.len > length) {
+ /* no room left in buffer for record */
+ ath10k_warn(ar, "Invalid record length: %d\n",
+ record->hdr.len);
+ status = -EINVAL;
+ break;
+ }
+
+ switch (record->hdr.id) {
+ case ATH10K_HTC_RECORD_CREDITS:
+ len = sizeof(struct ath10k_htc_credit_report);
+ if (record->hdr.len < len) {
+ ath10k_warn(ar, "Credit report too long\n");
+ status = -EINVAL;
+ break;
+ }
+ ath10k_htc_process_credit_report(htc,
+ record->credit_report,
+ record->hdr.len,
+ src_eid);
+ break;
+ case ATH10K_HTC_RECORD_LOOKAHEAD:
+ len = sizeof(struct ath10k_htc_lookahead_report);
+ if (record->hdr.len < len) {
+ ath10k_warn(ar, "Lookahead report too long\n");
+ status = -EINVAL;
+ break;
+ }
+ status = ath10k_htc_process_lookahead(htc,
+ record->lookahead_report,
+ record->hdr.len,
+ src_eid,
+ next_lookaheads,
+ next_lookaheads_len);
+ break;
+ case ATH10K_HTC_RECORD_LOOKAHEAD_BUNDLE:
+ bundle = record->lookahead_bundle;
+ status = ath10k_htc_process_lookahead_bundle(htc,
+ bundle,
+ record->hdr.len,
+ src_eid,
+ next_lookaheads,
+ next_lookaheads_len);
+ break;
+ default:
+ ath10k_warn(ar, "Unhandled record: id:%d length:%d\n",
+ record->hdr.id, record->hdr.len);
+ break;
+ }
+
+ if (status)
+ break;
+
+ /* multiple records may be present in a trailer */
+ buffer += sizeof(record->hdr) + record->hdr.len;
+ length -= sizeof(record->hdr) + record->hdr.len;
+ }
+
+ if (status)
+ ath10k_dbg_dump(ar, ATH10K_DBG_HTC, "htc rx bad trailer", "",
+ orig_buffer, orig_length);
+
+ return status;
+}
+EXPORT_SYMBOL(ath10k_htc_process_trailer);
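
A trailer is a packed sequence of (id, len, value) records. For illustration, a minimal well-formed 8-byte trailer carrying a single credit report, laid out to match the htc.h structures (all values are examples):

/* Illustrative: 4-byte record header + 4-byte credit report. Feeding this
 * to ath10k_htc_process_trailer() would grant EP 1 two extra tx credits.
 */
static const u8 example_trailer[8] = {
	ATH10K_HTC_RECORD_CREDITS,	/* hdr.id */
	4,				/* hdr.len == sizeof(credit_report) */
	0, 0,				/* hdr.pad0, hdr.pad1 */
	ATH10K_HTC_EP_1,		/* report.eid */
	2,				/* report.credits */
	0, 0,				/* report.pad0, report.pad1 */
};
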
+
+void ath10k_htc_rx_completion_handler(struct ath10k *ar, struct sk_buff *skb)
+{
+ int status = 0;
+ struct ath10k_htc *htc = &ar->htc;
+ struct ath10k_htc_hdr *hdr;
+ struct ath10k_htc_ep *ep;
+ u16 payload_len;
+ u32 trailer_len = 0;
+ size_t min_len;
+ u8 eid;
+ bool trailer_present;
+
+ hdr = (struct ath10k_htc_hdr *)skb->data;
+ skb_pull(skb, sizeof(*hdr));
+
+ eid = hdr->eid;
+
+ if (eid >= ATH10K_HTC_EP_COUNT) {
+ ath10k_warn(ar, "HTC Rx: invalid eid %d\n", eid);
+ ath10k_dbg_dump(ar, ATH10K_DBG_HTC, "htc bad header", "",
+ hdr, sizeof(*hdr));
+ goto out;
+ }
+
+ ep = &htc->endpoint[eid];
+
+ payload_len = __le16_to_cpu(hdr->len);
+
+ if (payload_len + sizeof(*hdr) > ATH10K_HTC_MAX_LEN) {
+ ath10k_warn(ar, "HTC rx frame too long, len: %zu\n",
+ payload_len + sizeof(*hdr));
+ ath10k_dbg_dump(ar, ATH10K_DBG_HTC, "htc bad rx pkt len", "",
+ hdr, sizeof(*hdr));
+ goto out;
+ }
+
+ if (skb->len < payload_len) {
+ ath10k_dbg(ar, ATH10K_DBG_HTC,
+ "HTC Rx: insufficient length, got %d, expected %d\n",
+ skb->len, payload_len);
+ ath10k_dbg_dump(ar, ATH10K_DBG_HTC, "htc bad rx pkt len",
+ "", hdr, sizeof(*hdr));
+ goto out;
+ }
+
+ /* get flags to check for trailer */
+ trailer_present = hdr->flags & ATH10K_HTC_FLAG_TRAILER_PRESENT;
+ if (trailer_present) {
+ u8 *trailer;
+
+ trailer_len = hdr->trailer_len;
+ min_len = sizeof(struct ath10k_ath10k_htc_record_hdr);
+
+ if ((trailer_len < min_len) ||
+ (trailer_len > payload_len)) {
+ ath10k_warn(ar, "Invalid trailer length: %d\n",
+ trailer_len);
+ goto out;
+ }
+
+ trailer = (u8 *)hdr;
+ trailer += sizeof(*hdr);
+ trailer += payload_len;
+ trailer -= trailer_len;
+ status = ath10k_htc_process_trailer(htc, trailer,
+ trailer_len, hdr->eid,
+ NULL, NULL);
+ if (status)
+ goto out;
+
+ skb_trim(skb, skb->len - trailer_len);
+ }
+
+ if (((int)payload_len - (int)trailer_len) <= 0)
+ /* zero length packet with trailer data, just drop these */
+ goto out;
+
+ ath10k_dbg(ar, ATH10K_DBG_HTC, "htc rx completion ep %d skb %pK\n",
+ eid, skb);
+ ep->ep_ops.ep_rx_complete(ar, skb);
+
+ /* skb is now owned by the rx completion handler */
+ skb = NULL;
+out:
+ kfree_skb(skb);
+}
+EXPORT_SYMBOL(ath10k_htc_rx_completion_handler);
+
+static void ath10k_htc_control_rx_complete(struct ath10k *ar,
+ struct sk_buff *skb)
+{
+ struct ath10k_htc *htc = &ar->htc;
+ struct ath10k_htc_msg *msg = (struct ath10k_htc_msg *)skb->data;
+
+ switch (__le16_to_cpu(msg->hdr.message_id)) {
+ case ATH10K_HTC_MSG_READY_ID:
+ case ATH10K_HTC_MSG_CONNECT_SERVICE_RESP_ID:
+ /* handle HTC control message */
+ if (completion_done(&htc->ctl_resp)) {
+ /* this is a fatal error, target should not be
+ * sending unsolicited messages on the ep 0
+ */
+ ath10k_warn(ar, "HTC rx ctrl still processing\n");
+ complete(&htc->ctl_resp);
+ goto out;
+ }
+
+ htc->control_resp_len =
+ min_t(int, skb->len,
+ ATH10K_HTC_MAX_CTRL_MSG_LEN);
+
+ memcpy(htc->control_resp_buffer, skb->data,
+ htc->control_resp_len);
+
+ complete(&htc->ctl_resp);
+ break;
+ case ATH10K_HTC_MSG_SEND_SUSPEND_COMPLETE:
+ htc->htc_ops.target_send_suspend_complete(ar);
+ break;
+ default:
+ ath10k_warn(ar, "ignoring unsolicited htc ep0 event\n");
+ break;
+ }
+
+out:
+ kfree_skb(skb);
+}
+
+/***************/
+/* Init/Deinit */
+/***************/
+
+static const char *htc_service_name(enum ath10k_htc_svc_id id)
+{
+ switch (id) {
+ case ATH10K_HTC_SVC_ID_RESERVED:
+ return "Reserved";
+ case ATH10K_HTC_SVC_ID_RSVD_CTRL:
+ return "Control";
+ case ATH10K_HTC_SVC_ID_WMI_CONTROL:
+ return "WMI";
+ case ATH10K_HTC_SVC_ID_WMI_DATA_BE:
+ return "DATA BE";
+ case ATH10K_HTC_SVC_ID_WMI_DATA_BK:
+ return "DATA BK";
+ case ATH10K_HTC_SVC_ID_WMI_DATA_VI:
+ return "DATA VI";
+ case ATH10K_HTC_SVC_ID_WMI_DATA_VO:
+ return "DATA VO";
+ case ATH10K_HTC_SVC_ID_NMI_CONTROL:
+ return "NMI Control";
+ case ATH10K_HTC_SVC_ID_NMI_DATA:
+ return "NMI Data";
+ case ATH10K_HTC_SVC_ID_HTT_DATA_MSG:
+ return "HTT Data";
+ case ATH10K_HTC_SVC_ID_HTT_DATA2_MSG:
+ return "HTT Data";
+ case ATH10K_HTC_SVC_ID_HTT_DATA3_MSG:
+ return "HTT Data";
+ case ATH10K_HTC_SVC_ID_TEST_RAW_STREAMS:
+ return "RAW";
+ case ATH10K_HTC_SVC_ID_HTT_LOG_MSG:
+ return "PKTLOG";
+ }
+
+ return "Unknown";
+}
+
+static void ath10k_htc_reset_endpoint_states(struct ath10k_htc *htc)
+{
+ struct ath10k_htc_ep *ep;
+ int i;
+
+ for (i = ATH10K_HTC_EP_0; i < ATH10K_HTC_EP_COUNT; i++) {
+ ep = &htc->endpoint[i];
+ ep->service_id = ATH10K_HTC_SVC_ID_UNUSED;
+ ep->max_ep_message_len = 0;
+ ep->max_tx_queue_depth = 0;
+ ep->eid = i;
+ ep->htc = htc;
+ ep->tx_credit_flow_enabled = true;
+ }
+}
+
+static u8 ath10k_htc_get_credit_allocation(struct ath10k_htc *htc,
+ u16 service_id)
+{
+ u8 allocation = 0;
+
+ /* The WMI control service is the only service with flow control.
+ * Let it have all transmit credits.
+ */
+ if (service_id == ATH10K_HTC_SVC_ID_WMI_CONTROL)
+ allocation = htc->total_transmit_credits;
+
+ return allocation;
+}
+
+int ath10k_htc_wait_target(struct ath10k_htc *htc)
+{
+ struct ath10k *ar = htc->ar;
+ int i, status = 0;
+ unsigned long time_left;
+ struct ath10k_htc_msg *msg;
+ u16 message_id;
+
+ time_left = wait_for_completion_timeout(&htc->ctl_resp,
+ ATH10K_HTC_WAIT_TIMEOUT_HZ);
+ if (!time_left) {
+ /* Workaround: In some cases the PCI HIF doesn't
+ * receive interrupt for the control response message
+ * even if the buffer was completed. It is suspected
+ * iomap writes unmasking PCI CE irqs aren't propagated
+ * properly in KVM PCI-passthrough sometimes.
+ */
+ ath10k_warn(ar, "failed to receive control response completion, polling..\n");
+
+ for (i = 0; i < CE_COUNT; i++)
+ ath10k_hif_send_complete_check(htc->ar, i, 1);
+
+ time_left =
+ wait_for_completion_timeout(&htc->ctl_resp,
+ ATH10K_HTC_WAIT_TIMEOUT_HZ);
+
+ if (!time_left)
+ status = -ETIMEDOUT;
+ }
+
+ if (status < 0) {
+ ath10k_err(ar, "ctl_resp never came in (%d)\n", status);
+ return status;
+ }
+
+ if (htc->control_resp_len < sizeof(msg->hdr) + sizeof(msg->ready)) {
+ ath10k_err(ar, "Invalid HTC ready msg len:%d\n",
+ htc->control_resp_len);
+ return -ECOMM;
+ }
+
+ msg = (struct ath10k_htc_msg *)htc->control_resp_buffer;
+ message_id = __le16_to_cpu(msg->hdr.message_id);
+
+ if (message_id != ATH10K_HTC_MSG_READY_ID) {
+ ath10k_err(ar, "Invalid HTC ready msg: 0x%x\n", message_id);
+ return -ECOMM;
+ }
+
+ htc->total_transmit_credits = __le16_to_cpu(msg->ready.credit_count);
+ htc->target_credit_size = __le16_to_cpu(msg->ready.credit_size);
+
+ ath10k_dbg(ar, ATH10K_DBG_HTC,
+ "Target ready! transmit resources: %d size:%d\n",
+ htc->total_transmit_credits,
+ htc->target_credit_size);
+
+ if ((htc->total_transmit_credits == 0) ||
+ (htc->target_credit_size == 0)) {
+ ath10k_err(ar, "Invalid credit size received\n");
+ return -ECOMM;
+ }
+
+ /* The only way to determine if the ready message is an extended
+ * message is from the size.
+ */
+ if (htc->control_resp_len >=
+ sizeof(msg->hdr) + sizeof(msg->ready_ext)) {
+ htc->max_msgs_per_htc_bundle =
+ min_t(u8, msg->ready_ext.max_msgs_per_htc_bundle,
+ HTC_HOST_MAX_MSG_PER_RX_BUNDLE);
+ ath10k_dbg(ar, ATH10K_DBG_HTC,
+ "Extended ready message. RX bundle size: %d\n",
+ htc->max_msgs_per_htc_bundle);
+ }
+
+ return 0;
+}
+
+int ath10k_htc_connect_service(struct ath10k_htc *htc,
+ struct ath10k_htc_svc_conn_req *conn_req,
+ struct ath10k_htc_svc_conn_resp *conn_resp)
+{
+ struct ath10k *ar = htc->ar;
+ struct ath10k_htc_msg *msg;
+ struct ath10k_htc_conn_svc *req_msg;
+ struct ath10k_htc_conn_svc_response resp_msg_dummy;
+ struct ath10k_htc_conn_svc_response *resp_msg = &resp_msg_dummy;
+ enum ath10k_htc_ep_id assigned_eid = ATH10K_HTC_EP_COUNT;
+ struct ath10k_htc_ep *ep;
+ struct sk_buff *skb;
+ unsigned int max_msg_size = 0;
+ int length, status;
+ unsigned long time_left;
+ bool disable_credit_flow_ctrl = false;
+ u16 message_id, service_id, flags = 0;
+ u8 tx_alloc = 0;
+
+ /* special case for HTC pseudo control service */
+ if (conn_req->service_id == ATH10K_HTC_SVC_ID_RSVD_CTRL) {
+ disable_credit_flow_ctrl = true;
+ assigned_eid = ATH10K_HTC_EP_0;
+ max_msg_size = ATH10K_HTC_MAX_CTRL_MSG_LEN;
+ memset(&resp_msg_dummy, 0, sizeof(resp_msg_dummy));
+ goto setup;
+ }
+
+ tx_alloc = ath10k_htc_get_credit_allocation(htc,
+ conn_req->service_id);
+ if (!tx_alloc)
+ ath10k_dbg(ar, ATH10K_DBG_BOOT,
+ "boot htc service %s does not allocate target credits\n",
+ htc_service_name(conn_req->service_id));
+
+ skb = ath10k_htc_build_tx_ctrl_skb(htc->ar);
+ if (!skb) {
+ ath10k_err(ar, "Failed to allocate HTC packet\n");
+ return -ENOMEM;
+ }
+
+ length = sizeof(msg->hdr) + sizeof(msg->connect_service);
+ skb_put(skb, length);
+ memset(skb->data, 0, length);
+
+ msg = (struct ath10k_htc_msg *)skb->data;
+ msg->hdr.message_id =
+ __cpu_to_le16(ATH10K_HTC_MSG_CONNECT_SERVICE_ID);
+
+ flags |= SM(tx_alloc, ATH10K_HTC_CONN_FLAGS_RECV_ALLOC);
+
+ /* Only enable credit flow control for WMI ctrl service */
+ if (conn_req->service_id != ATH10K_HTC_SVC_ID_WMI_CONTROL) {
+ flags |= ATH10K_HTC_CONN_FLAGS_DISABLE_CREDIT_FLOW_CTRL;
+ disable_credit_flow_ctrl = true;
+ }
+
+ req_msg = &msg->connect_service;
+ req_msg->flags = __cpu_to_le16(flags);
+ req_msg->service_id = __cpu_to_le16(conn_req->service_id);
+
+ reinit_completion(&htc->ctl_resp);
+
+ status = ath10k_htc_send(htc, ATH10K_HTC_EP_0, skb);
+ if (status) {
+ kfree_skb(skb);
+ return status;
+ }
+
+ /* wait for response */
+ time_left = wait_for_completion_timeout(&htc->ctl_resp,
+ ATH10K_HTC_CONN_SVC_TIMEOUT_HZ);
+ if (!time_left) {
+ ath10k_err(ar, "Service connect timeout\n");
+ return -ETIMEDOUT;
+ }
+
+ /* we controlled the buffer creation, it's aligned */
+ msg = (struct ath10k_htc_msg *)htc->control_resp_buffer;
+ resp_msg = &msg->connect_service_response;
+ message_id = __le16_to_cpu(msg->hdr.message_id);
+ service_id = __le16_to_cpu(resp_msg->service_id);
+
+ if ((message_id != ATH10K_HTC_MSG_CONNECT_SERVICE_RESP_ID) ||
+ (htc->control_resp_len < sizeof(msg->hdr) +
+ sizeof(msg->connect_service_response))) {
+ ath10k_err(ar, "Invalid resp message ID 0x%x", message_id);
+ return -EPROTO;
+ }
+
+ ath10k_dbg(ar, ATH10K_DBG_HTC,
+ "HTC Service %s connect response: status: 0x%x, assigned ep: 0x%x\n",
+ htc_service_name(service_id),
+ resp_msg->status, resp_msg->eid);
+
+ conn_resp->connect_resp_code = resp_msg->status;
+
+ /* check response status */
+ if (resp_msg->status != ATH10K_HTC_CONN_SVC_STATUS_SUCCESS) {
+ ath10k_err(ar, "HTC Service %s connect request failed: 0x%x)\n",
+ htc_service_name(service_id),
+ resp_msg->status);
+ return -EPROTO;
+ }
+
+ assigned_eid = (enum ath10k_htc_ep_id)resp_msg->eid;
+ max_msg_size = __le16_to_cpu(resp_msg->max_msg_size);
+
+setup:
+
+ if (assigned_eid >= ATH10K_HTC_EP_COUNT)
+ return -EPROTO;
+
+ if (max_msg_size == 0)
+ return -EPROTO;
+
+ ep = &htc->endpoint[assigned_eid];
+ ep->eid = assigned_eid;
+
+ if (ep->service_id != ATH10K_HTC_SVC_ID_UNUSED)
+ return -EPROTO;
+
+ /* return assigned endpoint to caller */
+ conn_resp->eid = assigned_eid;
+ conn_resp->max_msg_len = __le16_to_cpu(resp_msg->max_msg_size);
+
+ /* setup the endpoint */
+ ep->service_id = conn_req->service_id;
+ ep->max_tx_queue_depth = conn_req->max_send_queue_depth;
+ ep->max_ep_message_len = __le16_to_cpu(resp_msg->max_msg_size);
+ ep->tx_credits = tx_alloc;
+
+ /* copy all the callbacks */
+ ep->ep_ops = conn_req->ep_ops;
+
+ status = ath10k_hif_map_service_to_pipe(htc->ar,
+ ep->service_id,
+ &ep->ul_pipe_id,
+ &ep->dl_pipe_id);
+ if (status)
+ return status;
+
+ ath10k_dbg(ar, ATH10K_DBG_BOOT,
+ "boot htc service '%s' ul pipe %d dl pipe %d eid %d ready\n",
+ htc_service_name(ep->service_id), ep->ul_pipe_id,
+ ep->dl_pipe_id, ep->eid);
+
+ if (disable_credit_flow_ctrl && ep->tx_credit_flow_enabled) {
+ ep->tx_credit_flow_enabled = false;
+ ath10k_dbg(ar, ATH10K_DBG_BOOT,
+ "boot htc service '%s' eid %d TX flow control disabled\n",
+ htc_service_name(ep->service_id), assigned_eid);
+ }
+
+ return status;
+}
+
+struct sk_buff *ath10k_htc_alloc_skb(struct ath10k *ar, int size)
+{
+ struct sk_buff *skb;
+
+ skb = dev_alloc_skb(size + sizeof(struct ath10k_htc_hdr));
+ if (!skb)
+ return NULL;
+
+ skb_reserve(skb, sizeof(struct ath10k_htc_hdr));
+
+ /* FW/HTC requires 4-byte aligned streams */
+ if (!IS_ALIGNED((unsigned long)skb->data, 4))
+ ath10k_warn(ar, "Unaligned HTC tx skb\n");
+
+ return skb;
+}
+
+int ath10k_htc_start(struct ath10k_htc *htc)
+{
+ struct ath10k *ar = htc->ar;
+ struct sk_buff *skb;
+ int status = 0;
+ struct ath10k_htc_msg *msg;
+
+ skb = ath10k_htc_build_tx_ctrl_skb(htc->ar);
+ if (!skb)
+ return -ENOMEM;
+
+ skb_put(skb, sizeof(msg->hdr) + sizeof(msg->setup_complete_ext));
+ memset(skb->data, 0, skb->len);
+
+ msg = (struct ath10k_htc_msg *)skb->data;
+ msg->hdr.message_id =
+ __cpu_to_le16(ATH10K_HTC_MSG_SETUP_COMPLETE_EX_ID);
+
+ if (ar->hif.bus == ATH10K_BUS_SDIO) {
+ /* Extra setup params used by SDIO */
+ msg->setup_complete_ext.flags =
+ __cpu_to_le32(ATH10K_HTC_SETUP_COMPLETE_FLAGS_RX_BNDL_EN);
+ msg->setup_complete_ext.max_msgs_per_bundled_recv =
+ htc->max_msgs_per_htc_bundle;
+ }
+ ath10k_dbg(ar, ATH10K_DBG_HTC, "HTC is using TX credit flow control\n");
+
+ status = ath10k_htc_send(htc, ATH10K_HTC_EP_0, skb);
+ if (status) {
+ kfree_skb(skb);
+ return status;
+ }
+
+ return 0;
+}
+
+/* registered target arrival callback from the HIF layer */
+int ath10k_htc_init(struct ath10k *ar)
+{
+ int status;
+ struct ath10k_htc *htc = &ar->htc;
+ struct ath10k_htc_svc_conn_req conn_req;
+ struct ath10k_htc_svc_conn_resp conn_resp;
+
+ spin_lock_init(&htc->tx_lock);
+
+ ath10k_htc_reset_endpoint_states(htc);
+
+ htc->ar = ar;
+
+ /* setup our pseudo HTC control endpoint connection */
+ memset(&conn_req, 0, sizeof(conn_req));
+ memset(&conn_resp, 0, sizeof(conn_resp));
+ conn_req.ep_ops.ep_tx_complete = ath10k_htc_control_tx_complete;
+ conn_req.ep_ops.ep_rx_complete = ath10k_htc_control_rx_complete;
+ conn_req.max_send_queue_depth = ATH10K_NUM_CONTROL_TX_BUFFERS;
+ conn_req.service_id = ATH10K_HTC_SVC_ID_RSVD_CTRL;
+
+ /* connect fake service */
+ status = ath10k_htc_connect_service(htc, &conn_req, &conn_resp);
+ if (status) {
+ ath10k_err(ar, "could not connect to htc service (%d)\n",
+ status);
+ return status;
+ }
+
+ init_completion(&htc->ctl_resp);
+
+ return 0;
+}
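
Clients connect their own services through the same path as the pseudo control endpoint above. A hedged sketch of what a WMI-control connect might look like; the two completion callbacks and the queue depth are hypothetical placeholders, not the driver's real WMI handlers:

static void example_wmi_tx_complete(struct ath10k *ar, struct sk_buff *skb)
{
	dev_kfree_skb_any(skb);
}

static void example_wmi_rx_complete(struct ath10k *ar, struct sk_buff *skb)
{
	dev_kfree_skb_any(skb);	/* a real handler would parse the event */
}

/* Sketch: connect the WMI control service and remember the endpoint. */
static int example_connect_wmi(struct ath10k *ar)
{
	struct ath10k_htc_svc_conn_req conn_req = {};
	struct ath10k_htc_svc_conn_resp conn_resp = {};
	int ret;

	conn_req.service_id = ATH10K_HTC_SVC_ID_WMI_CONTROL;
	conn_req.ep_ops.ep_tx_complete = example_wmi_tx_complete;
	conn_req.ep_ops.ep_rx_complete = example_wmi_rx_complete;
	conn_req.max_send_queue_depth = 64;	/* example depth */

	ret = ath10k_htc_connect_service(&ar->htc, &conn_req, &conn_resp);
	if (ret)
		return ret;

	/* frames for this service are now sent via conn_resp.eid */
	return 0;
}
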
diff --git a/drivers/net/wireless/ath/ath10k/htc.h b/drivers/net/wireless/ath/ath10k/htc.h
new file mode 100644
index 000000000..51fda6c23
--- /dev/null
+++ b/drivers/net/wireless/ath/ath10k/htc.h
@@ -0,0 +1,396 @@
+/*
+ * Copyright (c) 2005-2011 Atheros Communications Inc.
+ * Copyright (c) 2011-2016 Qualcomm Atheros, Inc.
+ *
+ * Permission to use, copy, modify, and/or distribute this software for any
+ * purpose with or without fee is hereby granted, provided that the above
+ * copyright notice and this permission notice appear in all copies.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
+ * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
+ * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+ * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+ * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
+ * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+ */
+
+#ifndef _HTC_H_
+#define _HTC_H_
+
+#include <linux/kernel.h>
+#include <linux/list.h>
+#include <linux/bug.h>
+#include <linux/skbuff.h>
+#include <linux/timer.h>
+
+struct ath10k;
+
+/****************/
+/* HTC protocol */
+/****************/
+
+/*
+ * HTC - host-target control protocol
+ *
+ * tx packets are generally <htc_hdr><payload>
+ * rx packets are more complex: <htc_hdr><payload><trailer>
+ *
+ * The payload + trailer length is stored in len.
+ * To get the payload-only length, subtract trailer_len from len.
+ *
+ * Trailer contains (possibly) multiple <htc_record>.
+ * Each record is an id-len-value triple.
+ *
+ * HTC header flags, control_byte0 and control_byte1
+ * have different meanings depending on whether the
+ * frame is tx or rx.
+ *
+ * Alignment: htc_hdr, payload and trailer are
+ * 4-byte aligned.
+ */
+
+#define HTC_HOST_MAX_MSG_PER_RX_BUNDLE 8
+#define HTC_HOST_MAX_MSG_PER_TX_BUNDLE 16
+
+enum ath10k_htc_tx_flags {
+ ATH10K_HTC_FLAG_NEED_CREDIT_UPDATE = 0x01,
+ ATH10K_HTC_FLAG_SEND_BUNDLE = 0x02
+};
+
+enum ath10k_htc_rx_flags {
+ ATH10K_HTC_FLAGS_RECV_1MORE_BLOCK = 0x01,
+ ATH10K_HTC_FLAG_TRAILER_PRESENT = 0x02,
+ ATH10K_HTC_FLAG_BUNDLE_MASK = 0xF0
+};
+
+struct ath10k_htc_hdr {
+ u8 eid; /* @enum ath10k_htc_ep_id */
+ u8 flags; /* @enum ath10k_htc_tx_flags, ath10k_htc_rx_flags */
+ __le16 len;
+ union {
+ u8 trailer_len; /* for rx */
+ u8 control_byte0;
+ } __packed;
+ union {
+ u8 seq_no; /* for tx */
+ u8 control_byte1;
+ } __packed;
+ u8 pad0;
+ u8 pad1;
+} __packed __aligned(4);
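
Given this header, the payload-only length of an rx frame follows from len minus the trailer. A small sketch mirroring the arithmetic done in ath10k_htc_rx_completion_handler():

/* Sketch: payload-only length of an rx frame with the header above. */
static u16 htc_rx_payload_only_len(const struct ath10k_htc_hdr *hdr)
{
	u16 len = __le16_to_cpu(hdr->len);	/* payload + trailer */

	if (hdr->flags & ATH10K_HTC_FLAG_TRAILER_PRESENT)
		len -= hdr->trailer_len;

	return len;
}
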
+
+enum ath10k_ath10k_htc_msg_id {
+ ATH10K_HTC_MSG_READY_ID = 1,
+ ATH10K_HTC_MSG_CONNECT_SERVICE_ID = 2,
+ ATH10K_HTC_MSG_CONNECT_SERVICE_RESP_ID = 3,
+ ATH10K_HTC_MSG_SETUP_COMPLETE_ID = 4,
+ ATH10K_HTC_MSG_SETUP_COMPLETE_EX_ID = 5,
+ ATH10K_HTC_MSG_SEND_SUSPEND_COMPLETE = 6
+};
+
+enum ath10k_htc_version {
+ ATH10K_HTC_VERSION_2P0 = 0x00, /* 2.0 */
+ ATH10K_HTC_VERSION_2P1 = 0x01, /* 2.1 */
+};
+
+enum ath10k_htc_conn_flags {
+ ATH10K_HTC_CONN_FLAGS_THRESHOLD_LEVEL_ONE_FOURTH = 0x0,
+ ATH10K_HTC_CONN_FLAGS_THRESHOLD_LEVEL_ONE_HALF = 0x1,
+ ATH10K_HTC_CONN_FLAGS_THRESHOLD_LEVEL_THREE_FOURTHS = 0x2,
+ ATH10K_HTC_CONN_FLAGS_THRESHOLD_LEVEL_UNITY = 0x3,
+#define ATH10K_HTC_CONN_FLAGS_THRESHOLD_LEVEL_MASK 0x3
+ ATH10K_HTC_CONN_FLAGS_REDUCE_CREDIT_DRIBBLE = 1 << 2,
+ ATH10K_HTC_CONN_FLAGS_DISABLE_CREDIT_FLOW_CTRL = 1 << 3
+#define ATH10K_HTC_CONN_FLAGS_RECV_ALLOC_MASK 0xFF00
+#define ATH10K_HTC_CONN_FLAGS_RECV_ALLOC_LSB 8
+};
+
+enum ath10k_htc_conn_svc_status {
+ ATH10K_HTC_CONN_SVC_STATUS_SUCCESS = 0,
+ ATH10K_HTC_CONN_SVC_STATUS_NOT_FOUND = 1,
+ ATH10K_HTC_CONN_SVC_STATUS_FAILED = 2,
+ ATH10K_HTC_CONN_SVC_STATUS_NO_RESOURCES = 3,
+ ATH10K_HTC_CONN_SVC_STATUS_NO_MORE_EP = 4
+};
+
+enum ath10k_htc_setup_complete_flags {
+ ATH10K_HTC_SETUP_COMPLETE_FLAGS_RX_BNDL_EN = 1
+};
+
+struct ath10k_ath10k_htc_msg_hdr {
+ __le16 message_id; /* @enum htc_message_id */
+} __packed;
+
+struct ath10k_htc_unknown {
+ u8 pad0;
+ u8 pad1;
+} __packed;
+
+struct ath10k_htc_ready {
+ __le16 credit_count;
+ __le16 credit_size;
+ u8 max_endpoints;
+ u8 pad0;
+} __packed;
+
+struct ath10k_htc_ready_extended {
+ struct ath10k_htc_ready base;
+ u8 htc_version; /* @enum ath10k_htc_version */
+ u8 max_msgs_per_htc_bundle;
+ u8 pad0;
+ u8 pad1;
+} __packed;
+
+struct ath10k_htc_conn_svc {
+ __le16 service_id;
+ __le16 flags; /* @enum ath10k_htc_conn_flags */
+ u8 pad0;
+ u8 pad1;
+} __packed;
+
+struct ath10k_htc_conn_svc_response {
+ __le16 service_id;
+ u8 status; /* @enum ath10k_htc_conn_svc_status */
+ u8 eid;
+ __le16 max_msg_size;
+} __packed;
+
+struct ath10k_htc_setup_complete_extended {
+ u8 pad0;
+ u8 pad1;
+ __le32 flags; /* @enum htc_setup_complete_flags */
+ u8 max_msgs_per_bundled_recv;
+ u8 pad2;
+ u8 pad3;
+ u8 pad4;
+} __packed;
+
+struct ath10k_htc_msg {
+ struct ath10k_ath10k_htc_msg_hdr hdr;
+ union {
+ /* host-to-target */
+ struct ath10k_htc_conn_svc connect_service;
+ struct ath10k_htc_ready ready;
+ struct ath10k_htc_ready_extended ready_ext;
+ struct ath10k_htc_unknown unknown;
+ struct ath10k_htc_setup_complete_extended setup_complete_ext;
+
+ /* target-to-host */
+ struct ath10k_htc_conn_svc_response connect_service_response;
+ };
+} __packed __aligned(4);
+
+enum ath10k_ath10k_htc_record_id {
+ ATH10K_HTC_RECORD_NULL = 0,
+ ATH10K_HTC_RECORD_CREDITS = 1,
+ ATH10K_HTC_RECORD_LOOKAHEAD = 2,
+ ATH10K_HTC_RECORD_LOOKAHEAD_BUNDLE = 3,
+};
+
+struct ath10k_ath10k_htc_record_hdr {
+ u8 id; /* @enum ath10k_ath10k_htc_record_id */
+ u8 len;
+ u8 pad0;
+ u8 pad1;
+} __packed;
+
+struct ath10k_htc_credit_report {
+ u8 eid; /* @enum ath10k_htc_ep_id */
+ u8 credits;
+ u8 pad0;
+ u8 pad1;
+} __packed;
+
+struct ath10k_htc_lookahead_report {
+ u8 pre_valid;
+ u8 pad0;
+ u8 pad1;
+ u8 pad2;
+ u8 lookahead[4];
+ u8 post_valid;
+ u8 pad3;
+ u8 pad4;
+ u8 pad5;
+} __packed;
+
+struct ath10k_htc_lookahead_bundle {
+ u8 lookahead[4];
+} __packed;
+
+struct ath10k_htc_record {
+ struct ath10k_ath10k_htc_record_hdr hdr;
+ union {
+ struct ath10k_htc_credit_report credit_report[0];
+ struct ath10k_htc_lookahead_report lookahead_report[0];
+ struct ath10k_htc_lookahead_bundle lookahead_bundle[0];
+		u8 payload[0];
+ };
+} __packed __aligned(4);
+
+/*
+ * note: the trailer offset is dynamic depending
+ * on payload length. this is only a struct layout draft
+ */
+struct ath10k_htc_frame {
+ struct ath10k_htc_hdr hdr;
+ union {
+ struct ath10k_htc_msg msg;
+ u8 payload[0];
+ };
+ struct ath10k_htc_record trailer[0];
+} __packed __aligned(4);
+
+/*******************/
+/* Host-side stuff */
+/*******************/
+
+enum ath10k_htc_svc_gid {
+ ATH10K_HTC_SVC_GRP_RSVD = 0,
+ ATH10K_HTC_SVC_GRP_WMI = 1,
+ ATH10K_HTC_SVC_GRP_NMI = 2,
+ ATH10K_HTC_SVC_GRP_HTT = 3,
+ ATH10K_LOG_SERVICE_GROUP = 6,
+
+ ATH10K_HTC_SVC_GRP_TEST = 254,
+ ATH10K_HTC_SVC_GRP_LAST = 255,
+};
+
+#define SVC(group, idx) \
+ (int)(((int)(group) << 8) | (int)(idx))
+
+enum ath10k_htc_svc_id {
+ /* NOTE: service ID of 0x0000 is reserved and should never be used */
+ ATH10K_HTC_SVC_ID_RESERVED = 0x0000,
+ ATH10K_HTC_SVC_ID_UNUSED = ATH10K_HTC_SVC_ID_RESERVED,
+
+ ATH10K_HTC_SVC_ID_RSVD_CTRL = SVC(ATH10K_HTC_SVC_GRP_RSVD, 1),
+ ATH10K_HTC_SVC_ID_WMI_CONTROL = SVC(ATH10K_HTC_SVC_GRP_WMI, 0),
+ ATH10K_HTC_SVC_ID_WMI_DATA_BE = SVC(ATH10K_HTC_SVC_GRP_WMI, 1),
+ ATH10K_HTC_SVC_ID_WMI_DATA_BK = SVC(ATH10K_HTC_SVC_GRP_WMI, 2),
+ ATH10K_HTC_SVC_ID_WMI_DATA_VI = SVC(ATH10K_HTC_SVC_GRP_WMI, 3),
+ ATH10K_HTC_SVC_ID_WMI_DATA_VO = SVC(ATH10K_HTC_SVC_GRP_WMI, 4),
+
+ ATH10K_HTC_SVC_ID_NMI_CONTROL = SVC(ATH10K_HTC_SVC_GRP_NMI, 0),
+ ATH10K_HTC_SVC_ID_NMI_DATA = SVC(ATH10K_HTC_SVC_GRP_NMI, 1),
+
+ ATH10K_HTC_SVC_ID_HTT_DATA_MSG = SVC(ATH10K_HTC_SVC_GRP_HTT, 0),
+
+ ATH10K_HTC_SVC_ID_HTT_DATA2_MSG = SVC(ATH10K_HTC_SVC_GRP_HTT, 1),
+ ATH10K_HTC_SVC_ID_HTT_DATA3_MSG = SVC(ATH10K_HTC_SVC_GRP_HTT, 2),
+ ATH10K_HTC_SVC_ID_HTT_LOG_MSG = SVC(ATH10K_LOG_SERVICE_GROUP, 0),
+ /* raw stream service (i.e. flash, tcmd, calibration apps) */
+ ATH10K_HTC_SVC_ID_TEST_RAW_STREAMS = SVC(ATH10K_HTC_SVC_GRP_TEST, 0),
+};
+
+#undef SVC
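
A service ID packs the group in the high byte and an index in the low byte, so e.g. ATH10K_HTC_SVC_ID_WMI_DATA_VO is (1 << 8) | 4 = 0x0104. A sketch of the reverse mapping:

/* Sketch: split a service id back into its (group, index) parts. */
static inline u8 htc_svc_group(u16 service_id)
{
	return service_id >> 8;
}

static inline u8 htc_svc_index(u16 service_id)
{
	return service_id & 0xff;
}
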
+
+enum ath10k_htc_ep_id {
+ ATH10K_HTC_EP_UNUSED = -1,
+ ATH10K_HTC_EP_0 = 0,
+ ATH10K_HTC_EP_1 = 1,
+ ATH10K_HTC_EP_2,
+ ATH10K_HTC_EP_3,
+ ATH10K_HTC_EP_4,
+ ATH10K_HTC_EP_5,
+ ATH10K_HTC_EP_6,
+ ATH10K_HTC_EP_7,
+ ATH10K_HTC_EP_8,
+ ATH10K_HTC_EP_COUNT,
+};
+
+struct ath10k_htc_ops {
+ void (*target_send_suspend_complete)(struct ath10k *ar);
+};
+
+struct ath10k_htc_ep_ops {
+ void (*ep_tx_complete)(struct ath10k *, struct sk_buff *);
+ void (*ep_rx_complete)(struct ath10k *, struct sk_buff *);
+ void (*ep_tx_credits)(struct ath10k *);
+};
+
+/* service connection information */
+struct ath10k_htc_svc_conn_req {
+ u16 service_id;
+ struct ath10k_htc_ep_ops ep_ops;
+ int max_send_queue_depth;
+};
+
+/* service connection response information */
+struct ath10k_htc_svc_conn_resp {
+ u8 buffer_len;
+ u8 actual_len;
+ enum ath10k_htc_ep_id eid;
+ unsigned int max_msg_len;
+ u8 connect_resp_code;
+};
+
+#define ATH10K_NUM_CONTROL_TX_BUFFERS 2
+#define ATH10K_HTC_MAX_LEN 4096
+#define ATH10K_HTC_MAX_CTRL_MSG_LEN 256
+#define ATH10K_HTC_WAIT_TIMEOUT_HZ (1 * HZ)
+#define ATH10K_HTC_CONTROL_BUFFER_SIZE (ATH10K_HTC_MAX_CTRL_MSG_LEN + \
+ sizeof(struct ath10k_htc_hdr))
+#define ATH10K_HTC_CONN_SVC_TIMEOUT_HZ (1 * HZ)
+
+struct ath10k_htc_ep {
+ struct ath10k_htc *htc;
+ enum ath10k_htc_ep_id eid;
+ enum ath10k_htc_svc_id service_id;
+ struct ath10k_htc_ep_ops ep_ops;
+
+ int max_tx_queue_depth;
+ int max_ep_message_len;
+ u8 ul_pipe_id;
+ u8 dl_pipe_id;
+
+ u8 seq_no; /* for debugging */
+ int tx_credits;
+ bool tx_credit_flow_enabled;
+};
+
+struct ath10k_htc_svc_tx_credits {
+ u16 service_id;
+ u8 credit_allocation;
+};
+
+struct ath10k_htc {
+ struct ath10k *ar;
+ struct ath10k_htc_ep endpoint[ATH10K_HTC_EP_COUNT];
+
+ /* protects endpoints */
+ spinlock_t tx_lock;
+
+ struct ath10k_htc_ops htc_ops;
+
+ u8 control_resp_buffer[ATH10K_HTC_MAX_CTRL_MSG_LEN];
+ int control_resp_len;
+
+ struct completion ctl_resp;
+
+ int total_transmit_credits;
+ int target_credit_size;
+ u8 max_msgs_per_htc_bundle;
+};
+
+int ath10k_htc_init(struct ath10k *ar);
+int ath10k_htc_wait_target(struct ath10k_htc *htc);
+int ath10k_htc_start(struct ath10k_htc *htc);
+int ath10k_htc_connect_service(struct ath10k_htc *htc,
+ struct ath10k_htc_svc_conn_req *conn_req,
+ struct ath10k_htc_svc_conn_resp *conn_resp);
+int ath10k_htc_send(struct ath10k_htc *htc, enum ath10k_htc_ep_id eid,
+ struct sk_buff *packet);
+struct sk_buff *ath10k_htc_alloc_skb(struct ath10k *ar, int size);
+void ath10k_htc_tx_completion_handler(struct ath10k *ar, struct sk_buff *skb);
+void ath10k_htc_rx_completion_handler(struct ath10k *ar, struct sk_buff *skb);
+void ath10k_htc_notify_tx_completion(struct ath10k_htc_ep *ep,
+ struct sk_buff *skb);
+int ath10k_htc_process_trailer(struct ath10k_htc *htc,
+ u8 *buffer,
+ int length,
+ enum ath10k_htc_ep_id src_eid,
+ void *next_lookaheads,
+ int *next_lookaheads_len);
+
+#endif
diff --git a/drivers/net/wireless/ath/ath10k/htt.c b/drivers/net/wireless/ath/ath10k/htt.c
new file mode 100644
index 000000000..21a67f82f
--- /dev/null
+++ b/drivers/net/wireless/ath/ath10k/htt.c
@@ -0,0 +1,281 @@
+/*
+ * Copyright (c) 2005-2011 Atheros Communications Inc.
+ * Copyright (c) 2011-2017 Qualcomm Atheros, Inc.
+ *
+ * Permission to use, copy, modify, and/or distribute this software for any
+ * purpose with or without fee is hereby granted, provided that the above
+ * copyright notice and this permission notice appear in all copies.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
+ * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
+ * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+ * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+ * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
+ * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+ */
+
+#include <linux/slab.h>
+#include <linux/if_ether.h>
+
+#include "htt.h"
+#include "core.h"
+#include "debug.h"
+
+static const enum htt_t2h_msg_type htt_main_t2h_msg_types[] = {
+ [HTT_MAIN_T2H_MSG_TYPE_VERSION_CONF] = HTT_T2H_MSG_TYPE_VERSION_CONF,
+ [HTT_MAIN_T2H_MSG_TYPE_RX_IND] = HTT_T2H_MSG_TYPE_RX_IND,
+ [HTT_MAIN_T2H_MSG_TYPE_RX_FLUSH] = HTT_T2H_MSG_TYPE_RX_FLUSH,
+ [HTT_MAIN_T2H_MSG_TYPE_PEER_MAP] = HTT_T2H_MSG_TYPE_PEER_MAP,
+ [HTT_MAIN_T2H_MSG_TYPE_PEER_UNMAP] = HTT_T2H_MSG_TYPE_PEER_UNMAP,
+ [HTT_MAIN_T2H_MSG_TYPE_RX_ADDBA] = HTT_T2H_MSG_TYPE_RX_ADDBA,
+ [HTT_MAIN_T2H_MSG_TYPE_RX_DELBA] = HTT_T2H_MSG_TYPE_RX_DELBA,
+ [HTT_MAIN_T2H_MSG_TYPE_TX_COMPL_IND] = HTT_T2H_MSG_TYPE_TX_COMPL_IND,
+ [HTT_MAIN_T2H_MSG_TYPE_PKTLOG] = HTT_T2H_MSG_TYPE_PKTLOG,
+ [HTT_MAIN_T2H_MSG_TYPE_STATS_CONF] = HTT_T2H_MSG_TYPE_STATS_CONF,
+ [HTT_MAIN_T2H_MSG_TYPE_RX_FRAG_IND] = HTT_T2H_MSG_TYPE_RX_FRAG_IND,
+ [HTT_MAIN_T2H_MSG_TYPE_SEC_IND] = HTT_T2H_MSG_TYPE_SEC_IND,
+ [HTT_MAIN_T2H_MSG_TYPE_TX_INSPECT_IND] =
+ HTT_T2H_MSG_TYPE_TX_INSPECT_IND,
+ [HTT_MAIN_T2H_MSG_TYPE_MGMT_TX_COMPL_IND] =
+ HTT_T2H_MSG_TYPE_MGMT_TX_COMPLETION,
+ [HTT_MAIN_T2H_MSG_TYPE_TX_CREDIT_UPDATE_IND] =
+ HTT_T2H_MSG_TYPE_TX_CREDIT_UPDATE_IND,
+ [HTT_MAIN_T2H_MSG_TYPE_RX_PN_IND] = HTT_T2H_MSG_TYPE_RX_PN_IND,
+ [HTT_MAIN_T2H_MSG_TYPE_RX_OFFLOAD_DELIVER_IND] =
+ HTT_T2H_MSG_TYPE_RX_OFFLOAD_DELIVER_IND,
+ [HTT_MAIN_T2H_MSG_TYPE_TEST] = HTT_T2H_MSG_TYPE_TEST,
+};
+
+static const enum htt_t2h_msg_type htt_10x_t2h_msg_types[] = {
+ [HTT_10X_T2H_MSG_TYPE_VERSION_CONF] = HTT_T2H_MSG_TYPE_VERSION_CONF,
+ [HTT_10X_T2H_MSG_TYPE_RX_IND] = HTT_T2H_MSG_TYPE_RX_IND,
+ [HTT_10X_T2H_MSG_TYPE_RX_FLUSH] = HTT_T2H_MSG_TYPE_RX_FLUSH,
+ [HTT_10X_T2H_MSG_TYPE_PEER_MAP] = HTT_T2H_MSG_TYPE_PEER_MAP,
+ [HTT_10X_T2H_MSG_TYPE_PEER_UNMAP] = HTT_T2H_MSG_TYPE_PEER_UNMAP,
+ [HTT_10X_T2H_MSG_TYPE_RX_ADDBA] = HTT_T2H_MSG_TYPE_RX_ADDBA,
+ [HTT_10X_T2H_MSG_TYPE_RX_DELBA] = HTT_T2H_MSG_TYPE_RX_DELBA,
+ [HTT_10X_T2H_MSG_TYPE_TX_COMPL_IND] = HTT_T2H_MSG_TYPE_TX_COMPL_IND,
+ [HTT_10X_T2H_MSG_TYPE_PKTLOG] = HTT_T2H_MSG_TYPE_PKTLOG,
+ [HTT_10X_T2H_MSG_TYPE_STATS_CONF] = HTT_T2H_MSG_TYPE_STATS_CONF,
+ [HTT_10X_T2H_MSG_TYPE_RX_FRAG_IND] = HTT_T2H_MSG_TYPE_RX_FRAG_IND,
+ [HTT_10X_T2H_MSG_TYPE_SEC_IND] = HTT_T2H_MSG_TYPE_SEC_IND,
+ [HTT_10X_T2H_MSG_TYPE_RC_UPDATE_IND] = HTT_T2H_MSG_TYPE_RC_UPDATE_IND,
+ [HTT_10X_T2H_MSG_TYPE_TX_INSPECT_IND] = HTT_T2H_MSG_TYPE_TX_INSPECT_IND,
+ [HTT_10X_T2H_MSG_TYPE_TEST] = HTT_T2H_MSG_TYPE_TEST,
+ [HTT_10X_T2H_MSG_TYPE_CHAN_CHANGE] = HTT_T2H_MSG_TYPE_CHAN_CHANGE,
+ [HTT_10X_T2H_MSG_TYPE_AGGR_CONF] = HTT_T2H_MSG_TYPE_AGGR_CONF,
+ [HTT_10X_T2H_MSG_TYPE_STATS_NOUPLOAD] = HTT_T2H_MSG_TYPE_STATS_NOUPLOAD,
+ [HTT_10X_T2H_MSG_TYPE_MGMT_TX_COMPL_IND] =
+ HTT_T2H_MSG_TYPE_MGMT_TX_COMPLETION,
+};
+
+static const enum htt_t2h_msg_type htt_tlv_t2h_msg_types[] = {
+ [HTT_TLV_T2H_MSG_TYPE_VERSION_CONF] = HTT_T2H_MSG_TYPE_VERSION_CONF,
+ [HTT_TLV_T2H_MSG_TYPE_RX_IND] = HTT_T2H_MSG_TYPE_RX_IND,
+ [HTT_TLV_T2H_MSG_TYPE_RX_FLUSH] = HTT_T2H_MSG_TYPE_RX_FLUSH,
+ [HTT_TLV_T2H_MSG_TYPE_PEER_MAP] = HTT_T2H_MSG_TYPE_PEER_MAP,
+ [HTT_TLV_T2H_MSG_TYPE_PEER_UNMAP] = HTT_T2H_MSG_TYPE_PEER_UNMAP,
+ [HTT_TLV_T2H_MSG_TYPE_RX_ADDBA] = HTT_T2H_MSG_TYPE_RX_ADDBA,
+ [HTT_TLV_T2H_MSG_TYPE_RX_DELBA] = HTT_T2H_MSG_TYPE_RX_DELBA,
+ [HTT_TLV_T2H_MSG_TYPE_TX_COMPL_IND] = HTT_T2H_MSG_TYPE_TX_COMPL_IND,
+ [HTT_TLV_T2H_MSG_TYPE_PKTLOG] = HTT_T2H_MSG_TYPE_PKTLOG,
+ [HTT_TLV_T2H_MSG_TYPE_STATS_CONF] = HTT_T2H_MSG_TYPE_STATS_CONF,
+ [HTT_TLV_T2H_MSG_TYPE_RX_FRAG_IND] = HTT_T2H_MSG_TYPE_RX_FRAG_IND,
+ [HTT_TLV_T2H_MSG_TYPE_SEC_IND] = HTT_T2H_MSG_TYPE_SEC_IND,
+ [HTT_TLV_T2H_MSG_TYPE_RC_UPDATE_IND] = HTT_T2H_MSG_TYPE_RC_UPDATE_IND,
+ [HTT_TLV_T2H_MSG_TYPE_TX_INSPECT_IND] = HTT_T2H_MSG_TYPE_TX_INSPECT_IND,
+ [HTT_TLV_T2H_MSG_TYPE_MGMT_TX_COMPL_IND] =
+ HTT_T2H_MSG_TYPE_MGMT_TX_COMPLETION,
+ [HTT_TLV_T2H_MSG_TYPE_TX_CREDIT_UPDATE_IND] =
+ HTT_T2H_MSG_TYPE_TX_CREDIT_UPDATE_IND,
+ [HTT_TLV_T2H_MSG_TYPE_RX_PN_IND] = HTT_T2H_MSG_TYPE_RX_PN_IND,
+ [HTT_TLV_T2H_MSG_TYPE_RX_OFFLOAD_DELIVER_IND] =
+ HTT_T2H_MSG_TYPE_RX_OFFLOAD_DELIVER_IND,
+ [HTT_TLV_T2H_MSG_TYPE_RX_IN_ORD_PADDR_IND] =
+ HTT_T2H_MSG_TYPE_RX_IN_ORD_PADDR_IND,
+ [HTT_TLV_T2H_MSG_TYPE_WDI_IPA_OP_RESPONSE] =
+ HTT_T2H_MSG_TYPE_WDI_IPA_OP_RESPONSE,
+ [HTT_TLV_T2H_MSG_TYPE_CHAN_CHANGE] = HTT_T2H_MSG_TYPE_CHAN_CHANGE,
+ [HTT_TLV_T2H_MSG_TYPE_RX_OFLD_PKT_ERR] =
+ HTT_T2H_MSG_TYPE_RX_OFLD_PKT_ERR,
+ [HTT_TLV_T2H_MSG_TYPE_TEST] = HTT_T2H_MSG_TYPE_TEST,
+};
+
+static const enum htt_t2h_msg_type htt_10_4_t2h_msg_types[] = {
+ [HTT_10_4_T2H_MSG_TYPE_VERSION_CONF] = HTT_T2H_MSG_TYPE_VERSION_CONF,
+ [HTT_10_4_T2H_MSG_TYPE_RX_IND] = HTT_T2H_MSG_TYPE_RX_IND,
+ [HTT_10_4_T2H_MSG_TYPE_RX_FLUSH] = HTT_T2H_MSG_TYPE_RX_FLUSH,
+ [HTT_10_4_T2H_MSG_TYPE_PEER_MAP] = HTT_T2H_MSG_TYPE_PEER_MAP,
+ [HTT_10_4_T2H_MSG_TYPE_PEER_UNMAP] = HTT_T2H_MSG_TYPE_PEER_UNMAP,
+ [HTT_10_4_T2H_MSG_TYPE_RX_ADDBA] = HTT_T2H_MSG_TYPE_RX_ADDBA,
+ [HTT_10_4_T2H_MSG_TYPE_RX_DELBA] = HTT_T2H_MSG_TYPE_RX_DELBA,
+ [HTT_10_4_T2H_MSG_TYPE_TX_COMPL_IND] = HTT_T2H_MSG_TYPE_TX_COMPL_IND,
+ [HTT_10_4_T2H_MSG_TYPE_PKTLOG] = HTT_T2H_MSG_TYPE_PKTLOG,
+ [HTT_10_4_T2H_MSG_TYPE_STATS_CONF] = HTT_T2H_MSG_TYPE_STATS_CONF,
+ [HTT_10_4_T2H_MSG_TYPE_RX_FRAG_IND] = HTT_T2H_MSG_TYPE_RX_FRAG_IND,
+ [HTT_10_4_T2H_MSG_TYPE_SEC_IND] = HTT_T2H_MSG_TYPE_SEC_IND,
+ [HTT_10_4_T2H_MSG_TYPE_RC_UPDATE_IND] = HTT_T2H_MSG_TYPE_RC_UPDATE_IND,
+ [HTT_10_4_T2H_MSG_TYPE_TX_INSPECT_IND] =
+ HTT_T2H_MSG_TYPE_TX_INSPECT_IND,
+ [HTT_10_4_T2H_MSG_TYPE_MGMT_TX_COMPL_IND] =
+ HTT_T2H_MSG_TYPE_MGMT_TX_COMPLETION,
+ [HTT_10_4_T2H_MSG_TYPE_CHAN_CHANGE] = HTT_T2H_MSG_TYPE_CHAN_CHANGE,
+ [HTT_10_4_T2H_MSG_TYPE_TX_CREDIT_UPDATE_IND] =
+ HTT_T2H_MSG_TYPE_TX_CREDIT_UPDATE_IND,
+ [HTT_10_4_T2H_MSG_TYPE_RX_PN_IND] = HTT_T2H_MSG_TYPE_RX_PN_IND,
+ [HTT_10_4_T2H_MSG_TYPE_RX_OFFLOAD_DELIVER_IND] =
+ HTT_T2H_MSG_TYPE_RX_OFFLOAD_DELIVER_IND,
+ [HTT_10_4_T2H_MSG_TYPE_TEST] = HTT_T2H_MSG_TYPE_TEST,
+ [HTT_10_4_T2H_MSG_TYPE_EN_STATS] = HTT_T2H_MSG_TYPE_EN_STATS,
+ [HTT_10_4_T2H_MSG_TYPE_AGGR_CONF] = HTT_T2H_MSG_TYPE_AGGR_CONF,
+ [HTT_10_4_T2H_MSG_TYPE_TX_FETCH_IND] =
+ HTT_T2H_MSG_TYPE_TX_FETCH_IND,
+ [HTT_10_4_T2H_MSG_TYPE_TX_FETCH_CONFIRM] =
+ HTT_T2H_MSG_TYPE_TX_FETCH_CONFIRM,
+ [HTT_10_4_T2H_MSG_TYPE_STATS_NOUPLOAD] =
+ HTT_T2H_MSG_TYPE_STATS_NOUPLOAD,
+ [HTT_10_4_T2H_MSG_TYPE_TX_MODE_SWITCH_IND] =
+ HTT_T2H_MSG_TYPE_TX_MODE_SWITCH_IND,
+ [HTT_10_4_T2H_MSG_TYPE_PEER_STATS] =
+ HTT_T2H_MSG_TYPE_PEER_STATS,
+};
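
Each firmware branch numbers its T2H messages differently, so the rx path remaps the raw in-band id through whichever table ath10k_htt_init() below selects. A hedged sketch of that bounds-checked lookup (the real lookup lives in the htt_rx.c message handler):

/* Sketch of the per-firmware T2H message-type translation. */
static inline enum htt_t2h_msg_type
example_translate_t2h(struct ath10k *ar, u8 raw_type)
{
	if (raw_type >= ar->htt.t2h_msg_types_max)
		return HTT_T2H_NUM_MSGS;	/* unknown: caller drops it */

	return ar->htt.t2h_msg_types[raw_type];
}
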
+
+int ath10k_htt_connect(struct ath10k_htt *htt)
+{
+ struct ath10k_htc_svc_conn_req conn_req;
+ struct ath10k_htc_svc_conn_resp conn_resp;
+ int status;
+
+ memset(&conn_req, 0, sizeof(conn_req));
+ memset(&conn_resp, 0, sizeof(conn_resp));
+
+ conn_req.ep_ops.ep_tx_complete = ath10k_htt_htc_tx_complete;
+ conn_req.ep_ops.ep_rx_complete = ath10k_htt_htc_t2h_msg_handler;
+
+ /* connect to control service */
+ conn_req.service_id = ATH10K_HTC_SVC_ID_HTT_DATA_MSG;
+
+ status = ath10k_htc_connect_service(&htt->ar->htc, &conn_req,
+ &conn_resp);
+
+ if (status)
+ return status;
+
+ htt->eid = conn_resp.eid;
+
+ return 0;
+}
+
+int ath10k_htt_init(struct ath10k *ar)
+{
+ struct ath10k_htt *htt = &ar->htt;
+
+ htt->ar = ar;
+
+	/*
+	 * Prefetch enough data to satisfy the target
+	 * classification engine.
+	 * This is for LL chips. HL chips will probably
+	 * transfer the whole frame in the tx fragment.
+	 */
+ htt->prefetch_len =
+ 36 + /* 802.11 + qos + ht */
+ 4 + /* 802.1q */
+ 8 + /* llc snap */
+ 2; /* ip4 dscp or ip6 priority */
+
+ switch (ar->running_fw->fw_file.htt_op_version) {
+ case ATH10K_FW_HTT_OP_VERSION_10_4:
+ ar->htt.t2h_msg_types = htt_10_4_t2h_msg_types;
+ ar->htt.t2h_msg_types_max = HTT_10_4_T2H_NUM_MSGS;
+ break;
+ case ATH10K_FW_HTT_OP_VERSION_10_1:
+ ar->htt.t2h_msg_types = htt_10x_t2h_msg_types;
+ ar->htt.t2h_msg_types_max = HTT_10X_T2H_NUM_MSGS;
+ break;
+ case ATH10K_FW_HTT_OP_VERSION_TLV:
+ ar->htt.t2h_msg_types = htt_tlv_t2h_msg_types;
+ ar->htt.t2h_msg_types_max = HTT_TLV_T2H_NUM_MSGS;
+ break;
+ case ATH10K_FW_HTT_OP_VERSION_MAIN:
+ ar->htt.t2h_msg_types = htt_main_t2h_msg_types;
+ ar->htt.t2h_msg_types_max = HTT_MAIN_T2H_NUM_MSGS;
+ break;
+ case ATH10K_FW_HTT_OP_VERSION_MAX:
+ case ATH10K_FW_HTT_OP_VERSION_UNSET:
+ WARN_ON(1);
+ return -EINVAL;
+ }
+ ath10k_htt_set_tx_ops(htt);
+ ath10k_htt_set_rx_ops(htt);
+
+ return 0;
+}
+
+#define HTT_TARGET_VERSION_TIMEOUT_HZ (3 * HZ)
+
+static int ath10k_htt_verify_version(struct ath10k_htt *htt)
+{
+ struct ath10k *ar = htt->ar;
+
+ ath10k_dbg(ar, ATH10K_DBG_BOOT, "htt target version %d.%d\n",
+ htt->target_version_major, htt->target_version_minor);
+
+ if (htt->target_version_major != 2 &&
+ htt->target_version_major != 3) {
+ ath10k_err(ar, "unsupported htt major version %d. supported versions are 2 and 3\n",
+ htt->target_version_major);
+ return -ENOTSUPP;
+ }
+
+ return 0;
+}
+
+int ath10k_htt_setup(struct ath10k_htt *htt)
+{
+ struct ath10k *ar = htt->ar;
+ int status;
+
+ init_completion(&htt->target_version_received);
+
+ status = ath10k_htt_h2t_ver_req_msg(htt);
+ if (status)
+ return status;
+
+ status = wait_for_completion_timeout(&htt->target_version_received,
+ HTT_TARGET_VERSION_TIMEOUT_HZ);
+ if (status == 0) {
+ ath10k_warn(ar, "htt version request timed out\n");
+ return -ETIMEDOUT;
+ }
+
+ status = ath10k_htt_verify_version(htt);
+ if (status) {
+ ath10k_warn(ar, "failed to verify htt version: %d\n",
+ status);
+ return status;
+ }
+
+ status = ath10k_htt_send_frag_desc_bank_cfg(htt);
+ if (status)
+ return status;
+
+ status = ath10k_htt_send_rx_ring_cfg(htt);
+ if (status) {
+ ath10k_warn(ar, "failed to setup rx ring: %d\n",
+ status);
+ return status;
+ }
+
+ status = ath10k_htt_h2t_aggr_cfg_msg(htt,
+ htt->max_num_ampdu,
+ htt->max_num_amsdu);
+ if (status) {
+ ath10k_warn(ar, "failed to setup amsdu/ampdu limit: %d\n",
+ status);
+ return status;
+ }
+
+ return 0;
+}
diff --git a/drivers/net/wireless/ath/ath10k/htt.h b/drivers/net/wireless/ath/ath10k/htt.h
new file mode 100644
index 000000000..5d3ff80f3
--- /dev/null
+++ b/drivers/net/wireless/ath/ath10k/htt.h
@@ -0,0 +1,2075 @@
+/*
+ * Copyright (c) 2005-2011 Atheros Communications Inc.
+ * Copyright (c) 2011-2017 Qualcomm Atheros, Inc.
+ * Copyright (c) 2018, The Linux Foundation. All rights reserved.
+ *
+ * Permission to use, copy, modify, and/or distribute this software for any
+ * purpose with or without fee is hereby granted, provided that the above
+ * copyright notice and this permission notice appear in all copies.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
+ * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
+ * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+ * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+ * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
+ * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+ */
+
+#ifndef _HTT_H_
+#define _HTT_H_
+
+#include <linux/bug.h>
+#include <linux/interrupt.h>
+#include <linux/dmapool.h>
+#include <linux/hashtable.h>
+#include <linux/kfifo.h>
+#include <net/mac80211.h>
+
+#include "htc.h"
+#include "hw.h"
+#include "rx_desc.h"
+#include "hw.h"
+
+enum htt_dbg_stats_type {
+ HTT_DBG_STATS_WAL_PDEV_TXRX = 1 << 0,
+ HTT_DBG_STATS_RX_REORDER = 1 << 1,
+ HTT_DBG_STATS_RX_RATE_INFO = 1 << 2,
+ HTT_DBG_STATS_TX_PPDU_LOG = 1 << 3,
+ HTT_DBG_STATS_TX_RATE_INFO = 1 << 4,
+ /* bits 5-23 currently reserved */
+
+ HTT_DBG_NUM_STATS /* keep this last */
+};
+
+enum htt_h2t_msg_type { /* host-to-target */
+ HTT_H2T_MSG_TYPE_VERSION_REQ = 0,
+ HTT_H2T_MSG_TYPE_TX_FRM = 1,
+ HTT_H2T_MSG_TYPE_RX_RING_CFG = 2,
+ HTT_H2T_MSG_TYPE_STATS_REQ = 3,
+ HTT_H2T_MSG_TYPE_SYNC = 4,
+ HTT_H2T_MSG_TYPE_AGGR_CFG = 5,
+ HTT_H2T_MSG_TYPE_FRAG_DESC_BANK_CFG = 6,
+
+ /* This command is used for sending management frames in HTT < 3.0.
+ * HTT >= 3.0 uses TX_FRM for everything.
+ */
+ HTT_H2T_MSG_TYPE_MGMT_TX = 7,
+ HTT_H2T_MSG_TYPE_TX_FETCH_RESP = 11,
+
+ HTT_H2T_NUM_MSGS /* keep this last */
+};
+
+struct htt_cmd_hdr {
+ u8 msg_type;
+} __packed;
+
+struct htt_ver_req {
+ u8 pad[sizeof(u32) - sizeof(struct htt_cmd_hdr)];
+} __packed;
+
+/*
+ * HTT tx MSDU descriptor
+ *
+ * The HTT tx MSDU descriptor is created by the host HTT SW for each
+ * tx MSDU. The HTT tx MSDU descriptor contains the information that
+ * the target firmware needs for the FW's tx processing, particularly
+ * for creating the HW msdu descriptor.
+ * The same HTT tx descriptor is used for HL and LL systems, though
+ * a few fields within the tx descriptor are used only by LL or
+ * only by HL.
+ * The HTT tx descriptor is defined in two manners: by a struct with
+ * bitfields, and by a series of [dword offset, bit mask, bit shift]
+ * definitions.
+ * The target should use the struct def, for simplicity and clarity,
+ * but the host shall use the bit-mask + bit-shift defs, to be endian-
+ * neutral. Specifically, the host shall use the get/set macros built
+ * around the mask + shift defs.
+ */
+struct htt_data_tx_desc_frag {
+ union {
+ struct double_word_addr {
+ __le32 paddr;
+ __le32 len;
+ } __packed dword_addr;
+ struct triple_word_addr {
+ __le32 paddr_lo;
+ __le16 paddr_hi;
+ __le16 len_16;
+ } __packed tword_addr;
+ } __packed;
+} __packed;
+
+struct htt_msdu_ext_desc {
+ __le32 tso_flag[3];
+ __le16 ip_identification;
+ u8 flags;
+ u8 reserved;
+ struct htt_data_tx_desc_frag frags[6];
+};
+
+struct htt_msdu_ext_desc_64 {
+ __le32 tso_flag[5];
+ __le16 ip_identification;
+ u8 flags;
+ u8 reserved;
+ struct htt_data_tx_desc_frag frags[6];
+};
+
+#define HTT_MSDU_EXT_DESC_FLAG_IPV4_CSUM_ENABLE BIT(0)
+#define HTT_MSDU_EXT_DESC_FLAG_UDP_IPV4_CSUM_ENABLE BIT(1)
+#define HTT_MSDU_EXT_DESC_FLAG_UDP_IPV6_CSUM_ENABLE BIT(2)
+#define HTT_MSDU_EXT_DESC_FLAG_TCP_IPV4_CSUM_ENABLE BIT(3)
+#define HTT_MSDU_EXT_DESC_FLAG_TCP_IPV6_CSUM_ENABLE BIT(4)
+
+#define HTT_MSDU_CHECKSUM_ENABLE (HTT_MSDU_EXT_DESC_FLAG_IPV4_CSUM_ENABLE \
+ | HTT_MSDU_EXT_DESC_FLAG_UDP_IPV4_CSUM_ENABLE \
+ | HTT_MSDU_EXT_DESC_FLAG_UDP_IPV6_CSUM_ENABLE \
+ | HTT_MSDU_EXT_DESC_FLAG_TCP_IPV4_CSUM_ENABLE \
+ | HTT_MSDU_EXT_DESC_FLAG_TCP_IPV6_CSUM_ENABLE)
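+
+/* Illustrative sketch, not part of the original patch: requests full
+ * checksum offload for one tx MSDU by setting all five *_CSUM_ENABLE
+ * bits of the (32-bit addressing) extension descriptor. The helper
+ * name is ours.
+ */
+static inline void
+htt_msdu_ext_desc_enable_csum_example(struct htt_msdu_ext_desc *desc)
+{
+	desc->flags |= HTT_MSDU_CHECKSUM_ENABLE;
+}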
+
+#define HTT_MSDU_EXT_DESC_FLAG_IPV4_CSUM_ENABLE_64 BIT(16)
+#define HTT_MSDU_EXT_DESC_FLAG_UDP_IPV4_CSUM_ENABLE_64 BIT(17)
+#define HTT_MSDU_EXT_DESC_FLAG_UDP_IPV6_CSUM_ENABLE_64 BIT(18)
+#define HTT_MSDU_EXT_DESC_FLAG_TCP_IPV4_CSUM_ENABLE_64 BIT(19)
+#define HTT_MSDU_EXT_DESC_FLAG_TCP_IPV6_CSUM_ENABLE_64 BIT(20)
+#define HTT_MSDU_EXT_DESC_FLAG_PARTIAL_CSUM_ENABLE_64 BIT(21)
+
+#define HTT_MSDU_CHECKSUM_ENABLE_64 (HTT_MSDU_EXT_DESC_FLAG_IPV4_CSUM_ENABLE_64 \
+ | HTT_MSDU_EXT_DESC_FLAG_UDP_IPV4_CSUM_ENABLE_64 \
+ | HTT_MSDU_EXT_DESC_FLAG_UDP_IPV6_CSUM_ENABLE_64 \
+ | HTT_MSDU_EXT_DESC_FLAG_TCP_IPV4_CSUM_ENABLE_64 \
+ | HTT_MSDU_EXT_DESC_FLAG_TCP_IPV6_CSUM_ENABLE_64)
+
+enum htt_data_tx_desc_flags0 {
+ HTT_DATA_TX_DESC_FLAGS0_MAC_HDR_PRESENT = 1 << 0,
+ HTT_DATA_TX_DESC_FLAGS0_NO_AGGR = 1 << 1,
+ HTT_DATA_TX_DESC_FLAGS0_NO_ENCRYPT = 1 << 2,
+ HTT_DATA_TX_DESC_FLAGS0_NO_CLASSIFY = 1 << 3,
+ HTT_DATA_TX_DESC_FLAGS0_RSVD0 = 1 << 4
+#define HTT_DATA_TX_DESC_FLAGS0_PKT_TYPE_MASK 0xE0
+#define HTT_DATA_TX_DESC_FLAGS0_PKT_TYPE_LSB 5
+};
+
+enum htt_data_tx_desc_flags1 {
+#define HTT_DATA_TX_DESC_FLAGS1_VDEV_ID_BITS 6
+#define HTT_DATA_TX_DESC_FLAGS1_VDEV_ID_MASK 0x003F
+#define HTT_DATA_TX_DESC_FLAGS1_VDEV_ID_LSB 0
+#define HTT_DATA_TX_DESC_FLAGS1_EXT_TID_BITS 5
+#define HTT_DATA_TX_DESC_FLAGS1_EXT_TID_MASK 0x07C0
+#define HTT_DATA_TX_DESC_FLAGS1_EXT_TID_LSB 6
+ HTT_DATA_TX_DESC_FLAGS1_POSTPONED = 1 << 11,
+ HTT_DATA_TX_DESC_FLAGS1_MORE_IN_BATCH = 1 << 12,
+ HTT_DATA_TX_DESC_FLAGS1_CKSUM_L3_OFFLOAD = 1 << 13,
+ HTT_DATA_TX_DESC_FLAGS1_CKSUM_L4_OFFLOAD = 1 << 14,
+ HTT_DATA_TX_DESC_FLAGS1_RSVD1 = 1 << 15
+};
+
+enum htt_data_tx_ext_tid {
+ HTT_DATA_TX_EXT_TID_NON_QOS_MCAST_BCAST = 16,
+ HTT_DATA_TX_EXT_TID_MGMT = 17,
+ HTT_DATA_TX_EXT_TID_INVALID = 31
+};
+
+#define HTT_INVALID_PEERID 0xFFFF
+
+/*
+ * htt_data_tx_desc - used for data tx path
+ *
+ * Note: vdev_id irrelevant for pkt_type == raw and no_classify == 1.
+ * ext_tid: for qos-data frames (0-15), see %HTT_DATA_TX_EXT_TID_
+ * for special kinds of tids
+ * postponed: only for HL hosts. indicates if this is a resend
+ *            (HL hosts manage queues on the host)
+ * more_in_batch: only for HL hosts. indicates if more packets are
+ * pending. this allows target to wait and aggregate
+ * freq: 0 means home channel of given vdev. intended for offchannel
+ */
+struct htt_data_tx_desc {
+ u8 flags0; /* %HTT_DATA_TX_DESC_FLAGS0_ */
+ __le16 flags1; /* %HTT_DATA_TX_DESC_FLAGS1_ */
+ __le16 len;
+ __le16 id;
+ __le32 frags_paddr;
+ union {
+ __le32 peerid;
+ struct {
+ __le16 peerid;
+ __le16 freq;
+ } __packed offchan_tx;
+ } __packed;
+ u8 prefetch[0]; /* start of frame, for FW classification engine */
+} __packed;
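+
+/* Illustrative sketch, not part of the original patch: builds the flags1
+ * word in the endian-neutral mask + shift style that the descriptor
+ * comment above mandates for the host. The helper name is ours.
+ */
+static inline __le16 htt_data_tx_desc_flags1_pack_example(u8 vdev_id,
+							  u8 ext_tid)
+{
+	u16 flags1 = 0;
+
+	flags1 |= (vdev_id << HTT_DATA_TX_DESC_FLAGS1_VDEV_ID_LSB) &
+		  HTT_DATA_TX_DESC_FLAGS1_VDEV_ID_MASK;
+	flags1 |= (ext_tid << HTT_DATA_TX_DESC_FLAGS1_EXT_TID_LSB) &
+		  HTT_DATA_TX_DESC_FLAGS1_EXT_TID_MASK;
+
+	return __cpu_to_le16(flags1);
+}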
+
+struct htt_data_tx_desc_64 {
+ u8 flags0; /* %HTT_DATA_TX_DESC_FLAGS0_ */
+ __le16 flags1; /* %HTT_DATA_TX_DESC_FLAGS1_ */
+ __le16 len;
+ __le16 id;
+ __le64 frags_paddr;
+ union {
+ __le32 peerid;
+ struct {
+ __le16 peerid;
+ __le16 freq;
+ } __packed offchan_tx;
+ } __packed;
+ u8 prefetch[0]; /* start of frame, for FW classification engine */
+} __packed;
+
+enum htt_rx_ring_flags {
+ HTT_RX_RING_FLAGS_MAC80211_HDR = 1 << 0,
+ HTT_RX_RING_FLAGS_MSDU_PAYLOAD = 1 << 1,
+ HTT_RX_RING_FLAGS_PPDU_START = 1 << 2,
+ HTT_RX_RING_FLAGS_PPDU_END = 1 << 3,
+ HTT_RX_RING_FLAGS_MPDU_START = 1 << 4,
+ HTT_RX_RING_FLAGS_MPDU_END = 1 << 5,
+ HTT_RX_RING_FLAGS_MSDU_START = 1 << 6,
+ HTT_RX_RING_FLAGS_MSDU_END = 1 << 7,
+ HTT_RX_RING_FLAGS_RX_ATTENTION = 1 << 8,
+ HTT_RX_RING_FLAGS_FRAG_INFO = 1 << 9,
+ HTT_RX_RING_FLAGS_UNICAST_RX = 1 << 10,
+ HTT_RX_RING_FLAGS_MULTICAST_RX = 1 << 11,
+ HTT_RX_RING_FLAGS_CTRL_RX = 1 << 12,
+ HTT_RX_RING_FLAGS_MGMT_RX = 1 << 13,
+ HTT_RX_RING_FLAGS_NULL_RX = 1 << 14,
+ HTT_RX_RING_FLAGS_PHY_DATA_RX = 1 << 15
+};
+
+#define HTT_RX_RING_SIZE_MIN 128
+#define HTT_RX_RING_SIZE_MAX 2048
+#define HTT_RX_RING_SIZE HTT_RX_RING_SIZE_MAX
+#define HTT_RX_RING_FILL_LEVEL (((HTT_RX_RING_SIZE) / 2) - 1)
+#define HTT_RX_RING_FILL_LEVEL_DUAL_MAC (HTT_RX_RING_SIZE - 1)
+
+struct htt_rx_ring_setup_ring32 {
+ __le32 fw_idx_shadow_reg_paddr;
+ __le32 rx_ring_base_paddr;
+ __le16 rx_ring_len; /* in 4-byte words */
+ __le16 rx_ring_bufsize; /* rx skb size - in bytes */
+ __le16 flags; /* %HTT_RX_RING_FLAGS_ */
+ __le16 fw_idx_init_val;
+
+ /* the following offsets are in 4-byte units */
+ __le16 mac80211_hdr_offset;
+ __le16 msdu_payload_offset;
+ __le16 ppdu_start_offset;
+ __le16 ppdu_end_offset;
+ __le16 mpdu_start_offset;
+ __le16 mpdu_end_offset;
+ __le16 msdu_start_offset;
+ __le16 msdu_end_offset;
+ __le16 rx_attention_offset;
+ __le16 frag_info_offset;
+} __packed;
+
+struct htt_rx_ring_setup_ring64 {
+ __le64 fw_idx_shadow_reg_paddr;
+ __le64 rx_ring_base_paddr;
+ __le16 rx_ring_len; /* in 4-byte words */
+ __le16 rx_ring_bufsize; /* rx skb size - in bytes */
+ __le16 flags; /* %HTT_RX_RING_FLAGS_ */
+ __le16 fw_idx_init_val;
+
+ /* the following offsets are in 4-byte units */
+ __le16 mac80211_hdr_offset;
+ __le16 msdu_payload_offset;
+ __le16 ppdu_start_offset;
+ __le16 ppdu_end_offset;
+ __le16 mpdu_start_offset;
+ __le16 mpdu_end_offset;
+ __le16 msdu_start_offset;
+ __le16 msdu_end_offset;
+ __le16 rx_attention_offset;
+ __le16 frag_info_offset;
+} __packed;
+
+struct htt_rx_ring_setup_hdr {
+ u8 num_rings; /* supported values: 1, 2 */
+ __le16 rsvd0;
+} __packed;
+
+struct htt_rx_ring_setup_32 {
+ struct htt_rx_ring_setup_hdr hdr;
+ struct htt_rx_ring_setup_ring32 rings[0];
+} __packed;
+
+struct htt_rx_ring_setup_64 {
+ struct htt_rx_ring_setup_hdr hdr;
+ struct htt_rx_ring_setup_ring64 rings[0];
+} __packed;
+
+/*
+ * htt_stats_req - request target to send specified statistics
+ *
+ * @msg_type: hardcoded %HTT_H2T_MSG_TYPE_STATS_REQ
+ * @upload_types: see %htt_dbg_stats_type. this is actually a 24-bit field,
+ *	so make sure it's little-endian.
+ * @reset_types: see %htt_dbg_stats_type. this is actually a 24-bit field,
+ *	so make sure it's little-endian.
+ * @cfg_val: stat_type specific configuration
+ * @stat_type: see %htt_dbg_stats_type
+ * @cookie_lsb: used for confirmation message from target->host
+ * @cookie_msb: ditto as %cookie_lsb
+ */
+struct htt_stats_req {
+ u8 upload_types[3];
+ u8 rsvd0;
+ u8 reset_types[3];
+ struct {
+ u8 mpdu_bytes;
+ u8 mpdu_num_msdus;
+ u8 msdu_bytes;
+ } __packed;
+ u8 stat_type;
+ __le32 cookie_lsb;
+ __le32 cookie_msb;
+} __packed;
+
+#define HTT_STATS_REQ_CFG_STAT_TYPE_INVALID 0xff
+
+/*
+ * htt_oob_sync_req - request out-of-band sync
+ *
+ * The HTT SYNC tells the target to suspend processing of subsequent
+ * HTT host-to-target messages until some other target agent locally
+ * informs the target HTT FW that the current sync counter is equal to
+ * or greater than (in a modulo sense) the sync counter specified in
+ * the SYNC message.
+ *
+ * This allows other host-target components to synchronize their operation
+ * with HTT, e.g. to ensure that tx frames don't get transmitted until a
+ * security key has been downloaded to and activated by the target.
+ * In the absence of any explicit synchronization counter value
+ * specification, the target HTT FW will use zero as the default current
+ * sync value.
+ *
+ * The HTT target FW will suspend its host->target message processing as long
+ * as 0 < (in-band sync counter - out-of-band sync counter) & 0xff < 128.
+ */
+struct htt_oob_sync_req {
+ u8 sync_count;
+ __le16 rsvd0;
+} __packed;
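+
+/* Illustrative sketch, not part of the original patch: the suspend
+ * condition described above, i.e. the target stays suspended while the
+ * in-band counter is "ahead" of the out-of-band counter modulo 256.
+ * The helper name is ours.
+ */
+static inline bool htt_oob_sync_suspended_example(u8 inband_count,
+						  u8 oob_count)
+{
+	u8 delta = (inband_count - oob_count) & 0xff;
+
+	return delta > 0 && delta < 128;
+}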
+
+struct htt_aggr_conf {
+ u8 max_num_ampdu_subframes;
+ /* amsdu_subframes is limited by 0x1F mask */
+ u8 max_num_amsdu_subframes;
+} __packed;
+
+#define HTT_MGMT_FRM_HDR_DOWNLOAD_LEN 32
+struct htt_mgmt_tx_desc_qca99x0 {
+ __le32 rate;
+} __packed;
+
+struct htt_mgmt_tx_desc {
+ u8 pad[sizeof(u32) - sizeof(struct htt_cmd_hdr)];
+ __le32 msdu_paddr;
+ __le32 desc_id;
+ __le32 len;
+ __le32 vdev_id;
+ u8 hdr[HTT_MGMT_FRM_HDR_DOWNLOAD_LEN];
+ union {
+ struct htt_mgmt_tx_desc_qca99x0 qca99x0;
+ } __packed;
+} __packed;
+
+enum htt_mgmt_tx_status {
+ HTT_MGMT_TX_STATUS_OK = 0,
+ HTT_MGMT_TX_STATUS_RETRY = 1,
+ HTT_MGMT_TX_STATUS_DROP = 2
+};
+
+/*=== target -> host messages ===============================================*/
+
+enum htt_main_t2h_msg_type {
+ HTT_MAIN_T2H_MSG_TYPE_VERSION_CONF = 0x0,
+ HTT_MAIN_T2H_MSG_TYPE_RX_IND = 0x1,
+ HTT_MAIN_T2H_MSG_TYPE_RX_FLUSH = 0x2,
+ HTT_MAIN_T2H_MSG_TYPE_PEER_MAP = 0x3,
+ HTT_MAIN_T2H_MSG_TYPE_PEER_UNMAP = 0x4,
+ HTT_MAIN_T2H_MSG_TYPE_RX_ADDBA = 0x5,
+ HTT_MAIN_T2H_MSG_TYPE_RX_DELBA = 0x6,
+ HTT_MAIN_T2H_MSG_TYPE_TX_COMPL_IND = 0x7,
+ HTT_MAIN_T2H_MSG_TYPE_PKTLOG = 0x8,
+ HTT_MAIN_T2H_MSG_TYPE_STATS_CONF = 0x9,
+ HTT_MAIN_T2H_MSG_TYPE_RX_FRAG_IND = 0xa,
+ HTT_MAIN_T2H_MSG_TYPE_SEC_IND = 0xb,
+ HTT_MAIN_T2H_MSG_TYPE_TX_INSPECT_IND = 0xd,
+ HTT_MAIN_T2H_MSG_TYPE_MGMT_TX_COMPL_IND = 0xe,
+ HTT_MAIN_T2H_MSG_TYPE_TX_CREDIT_UPDATE_IND = 0xf,
+ HTT_MAIN_T2H_MSG_TYPE_RX_PN_IND = 0x10,
+ HTT_MAIN_T2H_MSG_TYPE_RX_OFFLOAD_DELIVER_IND = 0x11,
+ HTT_MAIN_T2H_MSG_TYPE_TEST,
+ /* keep this last */
+ HTT_MAIN_T2H_NUM_MSGS
+};
+
+enum htt_10x_t2h_msg_type {
+ HTT_10X_T2H_MSG_TYPE_VERSION_CONF = 0x0,
+ HTT_10X_T2H_MSG_TYPE_RX_IND = 0x1,
+ HTT_10X_T2H_MSG_TYPE_RX_FLUSH = 0x2,
+ HTT_10X_T2H_MSG_TYPE_PEER_MAP = 0x3,
+ HTT_10X_T2H_MSG_TYPE_PEER_UNMAP = 0x4,
+ HTT_10X_T2H_MSG_TYPE_RX_ADDBA = 0x5,
+ HTT_10X_T2H_MSG_TYPE_RX_DELBA = 0x6,
+ HTT_10X_T2H_MSG_TYPE_TX_COMPL_IND = 0x7,
+ HTT_10X_T2H_MSG_TYPE_PKTLOG = 0x8,
+ HTT_10X_T2H_MSG_TYPE_STATS_CONF = 0x9,
+ HTT_10X_T2H_MSG_TYPE_RX_FRAG_IND = 0xa,
+ HTT_10X_T2H_MSG_TYPE_SEC_IND = 0xb,
+ HTT_10X_T2H_MSG_TYPE_RC_UPDATE_IND = 0xc,
+ HTT_10X_T2H_MSG_TYPE_TX_INSPECT_IND = 0xd,
+ HTT_10X_T2H_MSG_TYPE_TEST = 0xe,
+ HTT_10X_T2H_MSG_TYPE_CHAN_CHANGE = 0xf,
+ HTT_10X_T2H_MSG_TYPE_AGGR_CONF = 0x11,
+ HTT_10X_T2H_MSG_TYPE_STATS_NOUPLOAD = 0x12,
+ HTT_10X_T2H_MSG_TYPE_MGMT_TX_COMPL_IND = 0x13,
+ /* keep this last */
+ HTT_10X_T2H_NUM_MSGS
+};
+
+enum htt_tlv_t2h_msg_type {
+ HTT_TLV_T2H_MSG_TYPE_VERSION_CONF = 0x0,
+ HTT_TLV_T2H_MSG_TYPE_RX_IND = 0x1,
+ HTT_TLV_T2H_MSG_TYPE_RX_FLUSH = 0x2,
+ HTT_TLV_T2H_MSG_TYPE_PEER_MAP = 0x3,
+ HTT_TLV_T2H_MSG_TYPE_PEER_UNMAP = 0x4,
+ HTT_TLV_T2H_MSG_TYPE_RX_ADDBA = 0x5,
+ HTT_TLV_T2H_MSG_TYPE_RX_DELBA = 0x6,
+ HTT_TLV_T2H_MSG_TYPE_TX_COMPL_IND = 0x7,
+ HTT_TLV_T2H_MSG_TYPE_PKTLOG = 0x8,
+ HTT_TLV_T2H_MSG_TYPE_STATS_CONF = 0x9,
+ HTT_TLV_T2H_MSG_TYPE_RX_FRAG_IND = 0xa,
+ HTT_TLV_T2H_MSG_TYPE_SEC_IND = 0xb,
+ HTT_TLV_T2H_MSG_TYPE_RC_UPDATE_IND = 0xc, /* deprecated */
+ HTT_TLV_T2H_MSG_TYPE_TX_INSPECT_IND = 0xd,
+ HTT_TLV_T2H_MSG_TYPE_MGMT_TX_COMPL_IND = 0xe,
+ HTT_TLV_T2H_MSG_TYPE_TX_CREDIT_UPDATE_IND = 0xf,
+ HTT_TLV_T2H_MSG_TYPE_RX_PN_IND = 0x10,
+ HTT_TLV_T2H_MSG_TYPE_RX_OFFLOAD_DELIVER_IND = 0x11,
+ HTT_TLV_T2H_MSG_TYPE_RX_IN_ORD_PADDR_IND = 0x12,
+	/* 0x13 reserved */
+ HTT_TLV_T2H_MSG_TYPE_WDI_IPA_OP_RESPONSE = 0x14,
+ HTT_TLV_T2H_MSG_TYPE_CHAN_CHANGE = 0x15,
+ HTT_TLV_T2H_MSG_TYPE_RX_OFLD_PKT_ERR = 0x16,
+ HTT_TLV_T2H_MSG_TYPE_TEST,
+ /* keep this last */
+ HTT_TLV_T2H_NUM_MSGS
+};
+
+enum htt_10_4_t2h_msg_type {
+ HTT_10_4_T2H_MSG_TYPE_VERSION_CONF = 0x0,
+ HTT_10_4_T2H_MSG_TYPE_RX_IND = 0x1,
+ HTT_10_4_T2H_MSG_TYPE_RX_FLUSH = 0x2,
+ HTT_10_4_T2H_MSG_TYPE_PEER_MAP = 0x3,
+ HTT_10_4_T2H_MSG_TYPE_PEER_UNMAP = 0x4,
+ HTT_10_4_T2H_MSG_TYPE_RX_ADDBA = 0x5,
+ HTT_10_4_T2H_MSG_TYPE_RX_DELBA = 0x6,
+ HTT_10_4_T2H_MSG_TYPE_TX_COMPL_IND = 0x7,
+ HTT_10_4_T2H_MSG_TYPE_PKTLOG = 0x8,
+ HTT_10_4_T2H_MSG_TYPE_STATS_CONF = 0x9,
+ HTT_10_4_T2H_MSG_TYPE_RX_FRAG_IND = 0xa,
+ HTT_10_4_T2H_MSG_TYPE_SEC_IND = 0xb,
+ HTT_10_4_T2H_MSG_TYPE_RC_UPDATE_IND = 0xc,
+ HTT_10_4_T2H_MSG_TYPE_TX_INSPECT_IND = 0xd,
+ HTT_10_4_T2H_MSG_TYPE_MGMT_TX_COMPL_IND = 0xe,
+ HTT_10_4_T2H_MSG_TYPE_CHAN_CHANGE = 0xf,
+ HTT_10_4_T2H_MSG_TYPE_TX_CREDIT_UPDATE_IND = 0x10,
+ HTT_10_4_T2H_MSG_TYPE_RX_PN_IND = 0x11,
+ HTT_10_4_T2H_MSG_TYPE_RX_OFFLOAD_DELIVER_IND = 0x12,
+ HTT_10_4_T2H_MSG_TYPE_TEST = 0x13,
+ HTT_10_4_T2H_MSG_TYPE_EN_STATS = 0x14,
+ HTT_10_4_T2H_MSG_TYPE_AGGR_CONF = 0x15,
+ HTT_10_4_T2H_MSG_TYPE_TX_FETCH_IND = 0x16,
+ HTT_10_4_T2H_MSG_TYPE_TX_FETCH_CONFIRM = 0x17,
+ HTT_10_4_T2H_MSG_TYPE_STATS_NOUPLOAD = 0x18,
+ /* 0x19 to 0x2f are reserved */
+ HTT_10_4_T2H_MSG_TYPE_TX_MODE_SWITCH_IND = 0x30,
+ HTT_10_4_T2H_MSG_TYPE_PEER_STATS = 0x31,
+ /* keep this last */
+ HTT_10_4_T2H_NUM_MSGS
+};
+
+enum htt_t2h_msg_type {
+ HTT_T2H_MSG_TYPE_VERSION_CONF,
+ HTT_T2H_MSG_TYPE_RX_IND,
+ HTT_T2H_MSG_TYPE_RX_FLUSH,
+ HTT_T2H_MSG_TYPE_PEER_MAP,
+ HTT_T2H_MSG_TYPE_PEER_UNMAP,
+ HTT_T2H_MSG_TYPE_RX_ADDBA,
+ HTT_T2H_MSG_TYPE_RX_DELBA,
+ HTT_T2H_MSG_TYPE_TX_COMPL_IND,
+ HTT_T2H_MSG_TYPE_PKTLOG,
+ HTT_T2H_MSG_TYPE_STATS_CONF,
+ HTT_T2H_MSG_TYPE_RX_FRAG_IND,
+ HTT_T2H_MSG_TYPE_SEC_IND,
+ HTT_T2H_MSG_TYPE_RC_UPDATE_IND,
+ HTT_T2H_MSG_TYPE_TX_INSPECT_IND,
+ HTT_T2H_MSG_TYPE_MGMT_TX_COMPLETION,
+ HTT_T2H_MSG_TYPE_TX_CREDIT_UPDATE_IND,
+ HTT_T2H_MSG_TYPE_RX_PN_IND,
+ HTT_T2H_MSG_TYPE_RX_OFFLOAD_DELIVER_IND,
+ HTT_T2H_MSG_TYPE_RX_IN_ORD_PADDR_IND,
+ HTT_T2H_MSG_TYPE_WDI_IPA_OP_RESPONSE,
+ HTT_T2H_MSG_TYPE_CHAN_CHANGE,
+ HTT_T2H_MSG_TYPE_RX_OFLD_PKT_ERR,
+ HTT_T2H_MSG_TYPE_AGGR_CONF,
+ HTT_T2H_MSG_TYPE_STATS_NOUPLOAD,
+ HTT_T2H_MSG_TYPE_TEST,
+ HTT_T2H_MSG_TYPE_EN_STATS,
+ HTT_T2H_MSG_TYPE_TX_FETCH_IND,
+ HTT_T2H_MSG_TYPE_TX_FETCH_CONFIRM,
+ HTT_T2H_MSG_TYPE_TX_MODE_SWITCH_IND,
+ HTT_T2H_MSG_TYPE_PEER_STATS,
+ /* keep this last */
+ HTT_T2H_NUM_MSGS
+};
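+
+/* Illustrative sketch, not part of the original patch: shows how a raw
+ * firmware message type would be mapped onto the abstract enum above via
+ * the per-version lookup tables that ath10k_htt_init() installs. The
+ * helper name is ours; the real dispatch lives elsewhere in the driver.
+ */
+static inline enum htt_t2h_msg_type
+htt_t2h_msg_type_lookup_example(const enum htt_t2h_msg_type *table,
+				u32 table_len, u8 raw_type)
+{
+	if (raw_type >= table_len)
+		return HTT_T2H_NUM_MSGS; /* unknown/unsupported message */
+
+	return table[raw_type];
+}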
+
+/*
+ * htt_resp_hdr - header for target-to-host messages
+ *
+ * msg_type: see htt_t2h_msg_type
+ */
+struct htt_resp_hdr {
+ u8 msg_type;
+} __packed;
+
+#define HTT_RESP_HDR_MSG_TYPE_OFFSET 0
+#define HTT_RESP_HDR_MSG_TYPE_MASK 0xff
+#define HTT_RESP_HDR_MSG_TYPE_LSB 0
+
+/* htt_ver_resp - response sent for htt_ver_req */
+struct htt_ver_resp {
+ u8 minor;
+ u8 major;
+ u8 rsvd0;
+} __packed;
+
+#define HTT_MGMT_TX_CMPL_FLAG_ACK_RSSI BIT(0)
+
+#define HTT_MGMT_TX_CMPL_INFO_ACK_RSSI_MASK GENMASK(7, 0)
+
+struct htt_mgmt_tx_completion {
+ u8 rsvd0;
+ u8 rsvd1;
+ u8 flags;
+ __le32 desc_id;
+ __le32 status;
+ __le32 ppdu_id;
+ __le32 info;
+} __packed;
+
+#define HTT_RX_INDICATION_INFO0_EXT_TID_MASK (0x1F)
+#define HTT_RX_INDICATION_INFO0_EXT_TID_LSB (0)
+#define HTT_RX_INDICATION_INFO0_FLUSH_VALID (1 << 5)
+#define HTT_RX_INDICATION_INFO0_RELEASE_VALID (1 << 6)
+
+#define HTT_RX_INDICATION_INFO1_FLUSH_START_SEQNO_MASK 0x0000003F
+#define HTT_RX_INDICATION_INFO1_FLUSH_START_SEQNO_LSB 0
+#define HTT_RX_INDICATION_INFO1_FLUSH_END_SEQNO_MASK 0x00000FC0
+#define HTT_RX_INDICATION_INFO1_FLUSH_END_SEQNO_LSB 6
+#define HTT_RX_INDICATION_INFO1_RELEASE_START_SEQNO_MASK 0x0003F000
+#define HTT_RX_INDICATION_INFO1_RELEASE_START_SEQNO_LSB 12
+#define HTT_RX_INDICATION_INFO1_RELEASE_END_SEQNO_MASK 0x00FC0000
+#define HTT_RX_INDICATION_INFO1_RELEASE_END_SEQNO_LSB 18
+#define HTT_RX_INDICATION_INFO1_NUM_MPDU_RANGES_MASK 0xFF000000
+#define HTT_RX_INDICATION_INFO1_NUM_MPDU_RANGES_LSB 24
+
+struct htt_rx_indication_hdr {
+ u8 info0; /* %HTT_RX_INDICATION_INFO0_ */
+ __le16 peer_id;
+ __le32 info1; /* %HTT_RX_INDICATION_INFO1_ */
+} __packed;
+
+#define HTT_RX_INDICATION_INFO0_PHY_ERR_VALID (1 << 0)
+#define HTT_RX_INDICATION_INFO0_LEGACY_RATE_MASK (0x1E)
+#define HTT_RX_INDICATION_INFO0_LEGACY_RATE_LSB (1)
+#define HTT_RX_INDICATION_INFO0_LEGACY_RATE_CCK (1 << 5)
+#define HTT_RX_INDICATION_INFO0_END_VALID (1 << 6)
+#define HTT_RX_INDICATION_INFO0_START_VALID (1 << 7)
+
+#define HTT_RX_INDICATION_INFO1_VHT_SIG_A1_MASK 0x00FFFFFF
+#define HTT_RX_INDICATION_INFO1_VHT_SIG_A1_LSB 0
+#define HTT_RX_INDICATION_INFO1_PREAMBLE_TYPE_MASK 0xFF000000
+#define HTT_RX_INDICATION_INFO1_PREAMBLE_TYPE_LSB 24
+
+#define HTT_RX_INDICATION_INFO2_VHT_SIG_A2_MASK 0x00FFFFFF
+#define HTT_RX_INDICATION_INFO2_VHT_SIG_A2_LSB 0
+#define HTT_RX_INDICATION_INFO2_SERVICE_MASK 0xFF000000
+#define HTT_RX_INDICATION_INFO2_SERVICE_LSB 24
+
+enum htt_rx_legacy_rate {
+ HTT_RX_OFDM_48 = 0,
+ HTT_RX_OFDM_24 = 1,
+ HTT_RX_OFDM_12,
+ HTT_RX_OFDM_6,
+ HTT_RX_OFDM_54,
+ HTT_RX_OFDM_36,
+ HTT_RX_OFDM_18,
+ HTT_RX_OFDM_9,
+
+ /* long preamble */
+ HTT_RX_CCK_11_LP = 0,
+ HTT_RX_CCK_5_5_LP = 1,
+ HTT_RX_CCK_2_LP,
+ HTT_RX_CCK_1_LP,
+ /* short preamble */
+ HTT_RX_CCK_11_SP,
+ HTT_RX_CCK_5_5_SP,
+ HTT_RX_CCK_2_SP
+};
+
+enum htt_rx_legacy_rate_type {
+ HTT_RX_LEGACY_RATE_OFDM = 0,
+ HTT_RX_LEGACY_RATE_CCK
+};
+
+enum htt_rx_preamble_type {
+ HTT_RX_LEGACY = 0x4,
+ HTT_RX_HT = 0x8,
+ HTT_RX_HT_WITH_TXBF = 0x9,
+ HTT_RX_VHT = 0xC,
+ HTT_RX_VHT_WITH_TXBF = 0xD,
+};
+
+/*
+ * Fields: phy_err_valid, phy_err_code, tsf,
+ * usec_timestamp, sub_usec_timestamp
+ * ..are valid only if end_valid == 1.
+ *
+ * Fields: rssi_chains, legacy_rate_type,
+ * legacy_rate_cck, preamble_type, service,
+ * vht_sig_*
+ * ..are valid only if start_valid == 1;
+ */
+struct htt_rx_indication_ppdu {
+ u8 combined_rssi;
+ u8 sub_usec_timestamp;
+ u8 phy_err_code;
+ u8 info0; /* HTT_RX_INDICATION_INFO0_ */
+ struct {
+ u8 pri20_db;
+ u8 ext20_db;
+ u8 ext40_db;
+ u8 ext80_db;
+ } __packed rssi_chains[4];
+ __le32 tsf;
+ __le32 usec_timestamp;
+ __le32 info1; /* HTT_RX_INDICATION_INFO1_ */
+ __le32 info2; /* HTT_RX_INDICATION_INFO2_ */
+} __packed;
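+
+/* Illustrative sketch, not part of the original patch: guards for the
+ * start/end-qualified ppdu fields per the validity comment above. The
+ * helper names are ours.
+ */
+static inline bool
+htt_rx_ppdu_start_valid_example(const struct htt_rx_indication_ppdu *ppdu)
+{
+	return !!(ppdu->info0 & HTT_RX_INDICATION_INFO0_START_VALID);
+}
+
+static inline bool
+htt_rx_ppdu_end_valid_example(const struct htt_rx_indication_ppdu *ppdu)
+{
+	return !!(ppdu->info0 & HTT_RX_INDICATION_INFO0_END_VALID);
+}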
+
+enum htt_rx_mpdu_status {
+ HTT_RX_IND_MPDU_STATUS_UNKNOWN = 0x0,
+ HTT_RX_IND_MPDU_STATUS_OK,
+ HTT_RX_IND_MPDU_STATUS_ERR_FCS,
+ HTT_RX_IND_MPDU_STATUS_ERR_DUP,
+ HTT_RX_IND_MPDU_STATUS_ERR_REPLAY,
+ HTT_RX_IND_MPDU_STATUS_ERR_INV_PEER,
+ /* only accept EAPOL frames */
+ HTT_RX_IND_MPDU_STATUS_UNAUTH_PEER,
+ HTT_RX_IND_MPDU_STATUS_OUT_OF_SYNC,
+ /* Non-data in promiscuous mode */
+ HTT_RX_IND_MPDU_STATUS_MGMT_CTRL,
+ HTT_RX_IND_MPDU_STATUS_TKIP_MIC_ERR,
+ HTT_RX_IND_MPDU_STATUS_DECRYPT_ERR,
+ HTT_RX_IND_MPDU_STATUS_MPDU_LENGTH_ERR,
+ HTT_RX_IND_MPDU_STATUS_ENCRYPT_REQUIRED_ERR,
+ HTT_RX_IND_MPDU_STATUS_PRIVACY_ERR,
+
+ /*
+ * MISC: discard for unspecified reasons.
+ * Leave this enum value last.
+ */
+ HTT_RX_IND_MPDU_STATUS_ERR_MISC = 0xFF
+};
+
+struct htt_rx_indication_mpdu_range {
+ u8 mpdu_count;
+ u8 mpdu_range_status; /* %htt_rx_mpdu_status */
+ u8 pad0;
+ u8 pad1;
+} __packed;
+
+struct htt_rx_indication_prefix {
+ __le16 fw_rx_desc_bytes;
+ u8 pad0;
+ u8 pad1;
+};
+
+struct htt_rx_indication {
+ struct htt_rx_indication_hdr hdr;
+ struct htt_rx_indication_ppdu ppdu;
+ struct htt_rx_indication_prefix prefix;
+
+ /*
+ * the following fields are both dynamically sized, so
+ * take care addressing them
+ */
+
+ /* the size of this is %fw_rx_desc_bytes */
+ struct fw_rx_desc_base fw_desc;
+
+ /*
+ * %mpdu_ranges starts after &%prefix + roundup(%fw_rx_desc_bytes, 4)
+ * and has %num_mpdu_ranges elements.
+ */
+ struct htt_rx_indication_mpdu_range mpdu_ranges[0];
+} __packed;
+
+static inline struct htt_rx_indication_mpdu_range *
+ htt_rx_ind_get_mpdu_ranges(struct htt_rx_indication *rx_ind)
+{
+ void *ptr = rx_ind;
+
+ ptr += sizeof(rx_ind->hdr)
+ + sizeof(rx_ind->ppdu)
+ + sizeof(rx_ind->prefix)
+ + roundup(__le16_to_cpu(rx_ind->prefix.fw_rx_desc_bytes), 4);
+ return ptr;
+}
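+
+/* Illustrative companion sketch, not part of the original patch: the
+ * element count for the array returned above comes from the indication
+ * header's info1 word. The helper name is ours.
+ */
+static inline int
+htt_rx_ind_get_num_mpdu_ranges_example(const struct htt_rx_indication *rx_ind)
+{
+	return (__le32_to_cpu(rx_ind->hdr.info1) &
+		HTT_RX_INDICATION_INFO1_NUM_MPDU_RANGES_MASK) >>
+	       HTT_RX_INDICATION_INFO1_NUM_MPDU_RANGES_LSB;
+}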
+
+enum htt_rx_flush_mpdu_status {
+ HTT_RX_FLUSH_MPDU_DISCARD = 0,
+ HTT_RX_FLUSH_MPDU_REORDER = 1,
+};
+
+/*
+ * htt_rx_flush - discard or reorder given range of mpdus
+ *
+ * Note: host must check if all sequence numbers between
+ * [seq_num_start, seq_num_end-1] are valid.
+ */
+struct htt_rx_flush {
+ __le16 peer_id;
+ u8 tid;
+ u8 rsvd0;
+ u8 mpdu_status; /* %htt_rx_flush_mpdu_status */
+ u8 seq_num_start; /* it is 6 LSBs of 802.11 seq no */
+ u8 seq_num_end; /* it is 6 LSBs of 802.11 seq no */
+};
+
+struct htt_rx_peer_map {
+ u8 vdev_id;
+ __le16 peer_id;
+ u8 addr[6];
+ u8 rsvd0;
+ u8 rsvd1;
+} __packed;
+
+struct htt_rx_peer_unmap {
+ u8 rsvd0;
+ __le16 peer_id;
+} __packed;
+
+enum htt_security_types {
+ HTT_SECURITY_NONE,
+ HTT_SECURITY_WEP128,
+ HTT_SECURITY_WEP104,
+ HTT_SECURITY_WEP40,
+ HTT_SECURITY_TKIP,
+ HTT_SECURITY_TKIP_NOMIC,
+ HTT_SECURITY_AES_CCMP,
+ HTT_SECURITY_WAPI,
+
+ HTT_NUM_SECURITY_TYPES /* keep this last! */
+};
+
+enum htt_security_flags {
+#define HTT_SECURITY_TYPE_MASK 0x7F
+#define HTT_SECURITY_TYPE_LSB 0
+ HTT_SECURITY_IS_UNICAST = 1 << 7
+};
+
+struct htt_security_indication {
+ union {
+		/* don't use bitfields; undefined behaviour */
+ u8 flags; /* %htt_security_flags */
+ struct {
+ u8 security_type:7, /* %htt_security_types */
+ is_unicast:1;
+ } __packed;
+ } __packed;
+ __le16 peer_id;
+ u8 michael_key[8];
+ u8 wapi_rsc[16];
+} __packed;
+
+#define HTT_RX_BA_INFO0_TID_MASK 0x000F
+#define HTT_RX_BA_INFO0_TID_LSB 0
+#define HTT_RX_BA_INFO0_PEER_ID_MASK 0xFFF0
+#define HTT_RX_BA_INFO0_PEER_ID_LSB 4
+
+struct htt_rx_addba {
+ u8 window_size;
+ __le16 info0; /* %HTT_RX_BA_INFO0_ */
+} __packed;
+
+struct htt_rx_delba {
+ u8 rsvd0;
+ __le16 info0; /* %HTT_RX_BA_INFO0_ */
+} __packed;
+
+enum htt_data_tx_status {
+ HTT_DATA_TX_STATUS_OK = 0,
+ HTT_DATA_TX_STATUS_DISCARD = 1,
+ HTT_DATA_TX_STATUS_NO_ACK = 2,
+ HTT_DATA_TX_STATUS_POSTPONE = 3, /* HL only */
+ HTT_DATA_TX_STATUS_DOWNLOAD_FAIL = 128
+};
+
+enum htt_data_tx_flags {
+#define HTT_DATA_TX_STATUS_MASK 0x07
+#define HTT_DATA_TX_STATUS_LSB 0
+#define HTT_DATA_TX_TID_MASK 0x78
+#define HTT_DATA_TX_TID_LSB 3
+ HTT_DATA_TX_TID_INVALID = 1 << 7
+};
+
+#define HTT_TX_COMPL_INV_MSDU_ID 0xFFFF
+
+struct htt_data_tx_completion {
+ union {
+ u8 flags;
+ struct {
+ u8 status:3,
+ tid:4,
+ tid_invalid:1;
+ } __packed;
+ } __packed;
+ u8 num_msdus;
+ u8 rsvd0;
+ __le16 msdus[0]; /* variable length based on %num_msdus */
+} __packed;
+
+struct htt_tx_compl_ind_base {
+ u32 hdr;
+ u16 payload[1/*or more*/];
+} __packed;
+
+struct htt_rc_tx_done_params {
+ u32 rate_code;
+ u32 rate_code_flags;
+ u32 flags;
+ u32 num_enqued; /* 1 for non-AMPDU */
+ u32 num_retries;
+ u32 num_failed; /* for AMPDU */
+ u32 ack_rssi;
+ u32 time_stamp;
+ u32 is_probe;
+};
+
+struct htt_rc_update {
+ u8 vdev_id;
+ __le16 peer_id;
+ u8 addr[6];
+ u8 num_elems;
+ u8 rsvd0;
+ struct htt_rc_tx_done_params params[0]; /* variable length %num_elems */
+} __packed;
+
+/* see htt_rx_indication for similar fields and descriptions */
+struct htt_rx_fragment_indication {
+ union {
+ u8 info0; /* %HTT_RX_FRAG_IND_INFO0_ */
+ struct {
+ u8 ext_tid:5,
+ flush_valid:1;
+ } __packed;
+ } __packed;
+ __le16 peer_id;
+ __le32 info1; /* %HTT_RX_FRAG_IND_INFO1_ */
+ __le16 fw_rx_desc_bytes;
+ __le16 rsvd0;
+
+ u8 fw_msdu_rx_desc[0];
+} __packed;
+
+#define HTT_RX_FRAG_IND_INFO0_EXT_TID_MASK 0x1F
+#define HTT_RX_FRAG_IND_INFO0_EXT_TID_LSB 0
+#define HTT_RX_FRAG_IND_INFO0_FLUSH_VALID_MASK 0x20
+#define HTT_RX_FRAG_IND_INFO0_FLUSH_VALID_LSB 5
+
+#define HTT_RX_FRAG_IND_INFO1_FLUSH_SEQ_NUM_START_MASK 0x0000003F
+#define HTT_RX_FRAG_IND_INFO1_FLUSH_SEQ_NUM_START_LSB 0
+#define HTT_RX_FRAG_IND_INFO1_FLUSH_SEQ_NUM_END_MASK 0x00000FC0
+#define HTT_RX_FRAG_IND_INFO1_FLUSH_SEQ_NUM_END_LSB 6
+
+struct htt_rx_pn_ind {
+ __le16 peer_id;
+ u8 tid;
+ u8 seqno_start;
+ u8 seqno_end;
+ u8 pn_ie_count;
+ u8 reserved;
+ u8 pn_ies[0];
+} __packed;
+
+struct htt_rx_offload_msdu {
+ __le16 msdu_len;
+ __le16 peer_id;
+ u8 vdev_id;
+ u8 tid;
+ u8 fw_desc;
+ u8 payload[0];
+} __packed;
+
+struct htt_rx_offload_ind {
+ u8 reserved;
+ __le16 msdu_count;
+} __packed;
+
+struct htt_rx_in_ord_msdu_desc {
+ __le32 msdu_paddr;
+ __le16 msdu_len;
+ u8 fw_desc;
+ u8 reserved;
+} __packed;
+
+struct htt_rx_in_ord_msdu_desc_ext {
+ __le64 msdu_paddr;
+ __le16 msdu_len;
+ u8 fw_desc;
+ u8 reserved;
+} __packed;
+
+struct htt_rx_in_ord_ind {
+ u8 info;
+ __le16 peer_id;
+ u8 vdev_id;
+ u8 reserved;
+ __le16 msdu_count;
+ union {
+ struct htt_rx_in_ord_msdu_desc msdu_descs32[0];
+ struct htt_rx_in_ord_msdu_desc_ext msdu_descs64[0];
+ } __packed;
+} __packed;
+
+#define HTT_RX_IN_ORD_IND_INFO_TID_MASK 0x0000001f
+#define HTT_RX_IN_ORD_IND_INFO_TID_LSB 0
+#define HTT_RX_IN_ORD_IND_INFO_OFFLOAD_MASK 0x00000020
+#define HTT_RX_IN_ORD_IND_INFO_OFFLOAD_LSB 5
+#define HTT_RX_IN_ORD_IND_INFO_FRAG_MASK 0x00000040
+#define HTT_RX_IN_ORD_IND_INFO_FRAG_LSB 6
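+
+/* Illustrative sketch, not part of the original patch: unpacks the info
+ * byte of an in-order indication with the masks above. The helper names
+ * are ours.
+ */
+static inline u8
+htt_rx_in_ord_ind_get_tid_example(const struct htt_rx_in_ord_ind *ind)
+{
+	return (ind->info & HTT_RX_IN_ORD_IND_INFO_TID_MASK) >>
+	       HTT_RX_IN_ORD_IND_INFO_TID_LSB;
+}
+
+static inline bool
+htt_rx_in_ord_ind_is_frag_example(const struct htt_rx_in_ord_ind *ind)
+{
+	return !!(ind->info & HTT_RX_IN_ORD_IND_INFO_FRAG_MASK);
+}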
+
+/*
+ * target -> host test message definition
+ *
+ * The following field definitions describe the format of the test
+ * message sent from the target to the host.
+ * The message consists of a 4-octet header, followed by a variable
+ * number of 32-bit integer values, followed by a variable number
+ * of 8-bit character values.
+ *
+ * |31 16|15 8|7 0|
+ * |-----------------------------------------------------------|
+ * | num chars | num ints | msg type |
+ * |-----------------------------------------------------------|
+ * | int 0 |
+ * |-----------------------------------------------------------|
+ * | int 1 |
+ * |-----------------------------------------------------------|
+ * | ... |
+ * |-----------------------------------------------------------|
+ * | char 3 | char 2 | char 1 | char 0 |
+ * |-----------------------------------------------------------|
+ * | | | ... | char 4 |
+ * |-----------------------------------------------------------|
+ * - MSG_TYPE
+ * Bits 7:0
+ * Purpose: identifies this as a test message
+ * Value: HTT_MSG_TYPE_TEST
+ * - NUM_INTS
+ * Bits 15:8
+ * Purpose: indicate how many 32-bit integers follow the message header
+ * - NUM_CHARS
+ * Bits 31:16
+ * Purpose: indicate how many 8-bit characters follow the series of integers
+ */
+struct htt_rx_test {
+ u8 num_ints;
+ __le16 num_chars;
+
+ /* payload consists of 2 lists:
+ * a) num_ints * sizeof(__le32)
+ * b) num_chars * sizeof(u8) aligned to 4bytes
+ */
+ u8 payload[0];
+} __packed;
+
+static inline __le32 *htt_rx_test_get_ints(struct htt_rx_test *rx_test)
+{
+ return (__le32 *)rx_test->payload;
+}
+
+static inline u8 *htt_rx_test_get_chars(struct htt_rx_test *rx_test)
+{
+ return rx_test->payload + (rx_test->num_ints * sizeof(__le32));
+}
+
+/*
+ * target -> host packet log message
+ *
+ * The following field definitions describe the format of the packet log
+ * message sent from the target to the host.
+ * The message consists of a 4-octet header, followed by a variable number
+ * of 32-bit character values.
+ *
+ * |31 24|23 16|15 8|7 0|
+ * |-----------------------------------------------------------|
+ * | | | | msg type |
+ * |-----------------------------------------------------------|
+ * | payload |
+ * |-----------------------------------------------------------|
+ * - MSG_TYPE
+ * Bits 7:0
+ *    Purpose: identifies this as a packet log message
+ * Value: HTT_MSG_TYPE_PACKETLOG
+ */
+struct htt_pktlog_msg {
+ u8 pad[3];
+ u8 payload[0];
+} __packed;
+
+struct htt_dbg_stats_rx_reorder_stats {
+ /* Non QoS MPDUs received */
+ __le32 deliver_non_qos;
+
+ /* MPDUs received in-order */
+ __le32 deliver_in_order;
+
+ /* Flush due to reorder timer expired */
+ __le32 deliver_flush_timeout;
+
+ /* Flush due to move out of window */
+ __le32 deliver_flush_oow;
+
+ /* Flush due to DELBA */
+ __le32 deliver_flush_delba;
+
+ /* MPDUs dropped due to FCS error */
+ __le32 fcs_error;
+
+ /* MPDUs dropped due to monitor mode non-data packet */
+ __le32 mgmt_ctrl;
+
+ /* MPDUs dropped due to invalid peer */
+ __le32 invalid_peer;
+
+ /* MPDUs dropped due to duplication (non aggregation) */
+ __le32 dup_non_aggr;
+
+ /* MPDUs dropped due to processed before */
+ __le32 dup_past;
+
+ /* MPDUs dropped due to duplicate in reorder queue */
+ __le32 dup_in_reorder;
+
+ /* Reorder timeout happened */
+ __le32 reorder_timeout;
+
+ /* invalid bar ssn */
+ __le32 invalid_bar_ssn;
+
+ /* reorder reset due to bar ssn */
+ __le32 ssn_reset;
+};
+
+struct htt_dbg_stats_wal_tx_stats {
+ /* Num HTT cookies queued to dispatch list */
+ __le32 comp_queued;
+
+ /* Num HTT cookies dispatched */
+ __le32 comp_delivered;
+
+ /* Num MSDU queued to WAL */
+ __le32 msdu_enqued;
+
+ /* Num MPDU queue to WAL */
+ __le32 mpdu_enqued;
+
+ /* Num MSDUs dropped by WMM limit */
+ __le32 wmm_drop;
+
+ /* Num Local frames queued */
+ __le32 local_enqued;
+
+ /* Num Local frames done */
+ __le32 local_freed;
+
+ /* Num queued to HW */
+ __le32 hw_queued;
+
+ /* Num PPDU reaped from HW */
+ __le32 hw_reaped;
+
+ /* Num underruns */
+ __le32 underrun;
+
+ /* Num PPDUs cleaned up in TX abort */
+ __le32 tx_abort;
+
+	/* Num MPDUs requeued by SW */
+ __le32 mpdus_requed;
+
+ /* excessive retries */
+ __le32 tx_ko;
+
+ /* data hw rate code */
+ __le32 data_rc;
+
+ /* Scheduler self triggers */
+ __le32 self_triggers;
+
+ /* frames dropped due to excessive sw retries */
+ __le32 sw_retry_failure;
+
+ /* illegal rate phy errors */
+ __le32 illgl_rate_phy_err;
+
+ /* wal pdev continuous xretry */
+ __le32 pdev_cont_xretry;
+
+	/* wal pdev tx timeout */
+ __le32 pdev_tx_timeout;
+
+ /* wal pdev resets */
+ __le32 pdev_resets;
+
+ __le32 phy_underrun;
+
+ /* MPDU is more than txop limit */
+ __le32 txop_ovf;
+} __packed;
+
+struct htt_dbg_stats_wal_rx_stats {
+	/* Counts any change in ring routing mid-ppdu */
+ __le32 mid_ppdu_route_change;
+
+ /* Total number of statuses processed */
+ __le32 status_rcvd;
+
+ /* Extra frags on rings 0-3 */
+ __le32 r0_frags;
+ __le32 r1_frags;
+ __le32 r2_frags;
+ __le32 r3_frags;
+
+ /* MSDUs / MPDUs delivered to HTT */
+ __le32 htt_msdus;
+ __le32 htt_mpdus;
+
+ /* MSDUs / MPDUs delivered to local stack */
+ __le32 loc_msdus;
+ __le32 loc_mpdus;
+
+ /* AMSDUs that have more MSDUs than the status ring size */
+ __le32 oversize_amsdu;
+
+ /* Number of PHY errors */
+ __le32 phy_errs;
+
+ /* Number of PHY errors drops */
+ __le32 phy_err_drop;
+
+ /* Number of mpdu errors - FCS, MIC, ENC etc. */
+ __le32 mpdu_errs;
+} __packed;
+
+struct htt_dbg_stats_wal_peer_stats {
+ __le32 dummy; /* REMOVE THIS ONCE REAL PEER STAT COUNTERS ARE ADDED */
+} __packed;
+
+struct htt_dbg_stats_wal_pdev_txrx {
+ struct htt_dbg_stats_wal_tx_stats tx_stats;
+ struct htt_dbg_stats_wal_rx_stats rx_stats;
+ struct htt_dbg_stats_wal_peer_stats peer_stats;
+} __packed;
+
+struct htt_dbg_stats_rx_rate_info {
+ __le32 mcs[10];
+ __le32 sgi[10];
+ __le32 nss[4];
+ __le32 stbc[10];
+ __le32 bw[3];
+ __le32 pream[6];
+ __le32 ldpc;
+ __le32 txbf;
+};
+
+/*
+ * htt_dbg_stats_status -
+ * present - The requested stats have been delivered in full.
+ * This indicates that either the stats information was contained
+ * in its entirety within this message, or else this message
+ * completes the delivery of the requested stats info that was
+ * partially delivered through earlier STATS_CONF messages.
+ * partial - The requested stats have been delivered in part.
+ * One or more subsequent STATS_CONF messages with the same
+ * cookie value will be sent to deliver the remainder of the
+ * information.
+ * error - The requested stats could not be delivered, for example due
+ * to a shortage of memory to construct a message holding the
+ * requested stats.
+ * invalid - The requested stat type is either not recognized, or the
+ * target is configured to not gather the stats type in question.
+ * - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
+ * series_done - This special value indicates that no further stats info
+ * elements are present within a series of stats info elems
+ * (within a stats upload confirmation message).
+ */
+enum htt_dbg_stats_status {
+ HTT_DBG_STATS_STATUS_PRESENT = 0,
+ HTT_DBG_STATS_STATUS_PARTIAL = 1,
+ HTT_DBG_STATS_STATUS_ERROR = 2,
+ HTT_DBG_STATS_STATUS_INVALID = 3,
+ HTT_DBG_STATS_STATUS_SERIES_DONE = 7
+};
+
+/*
+ * target -> host statistics upload
+ *
+ * The following field definitions describe the format of the HTT target
+ * to host stats upload confirmation message.
+ * The message contains a cookie echoed from the HTT host->target stats
+ * upload request, which identifies which request the confirmation is
+ * for, and a series of tag-length-value stats information elements.
+ * The tag-length header for each stats info element also includes a
+ * status field, to indicate whether the request for the stat type in
+ * question was fully met, partially met, unable to be met, or invalid
+ * (if the stat type in question is disabled in the target).
+ * A special value of all 1's in this status field is used to indicate
+ * the end of the series of stats info elements.
+ *
+ *
+ * |31 16|15 8|7 5|4 0|
+ * |------------------------------------------------------------|
+ * | reserved | msg type |
+ * |------------------------------------------------------------|
+ * | cookie LSBs |
+ * |------------------------------------------------------------|
+ * | cookie MSBs |
+ * |------------------------------------------------------------|
+ * | stats entry length | reserved | S |stat type|
+ * |------------------------------------------------------------|
+ * | |
+ * | type-specific stats info |
+ * | |
+ * |------------------------------------------------------------|
+ * | stats entry length | reserved | S |stat type|
+ * |------------------------------------------------------------|
+ * | |
+ * | type-specific stats info |
+ * | |
+ * |------------------------------------------------------------|
+ * | n/a | reserved | 111 | n/a |
+ * |------------------------------------------------------------|
+ * Header fields:
+ * - MSG_TYPE
+ * Bits 7:0
+ * Purpose: identifies this is a statistics upload confirmation message
+ * Value: 0x9
+ * - COOKIE_LSBS
+ * Bits 31:0
+ * Purpose: Provide a mechanism to match a target->host stats confirmation
+ * message with its preceding host->target stats request message.
+ * Value: LSBs of the opaque cookie specified by the host-side requestor
+ * - COOKIE_MSBS
+ * Bits 31:0
+ * Purpose: Provide a mechanism to match a target->host stats confirmation
+ * message with its preceding host->target stats request message.
+ * Value: MSBs of the opaque cookie specified by the host-side requestor
+ *
+ * Stats Information Element tag-length header fields:
+ * - STAT_TYPE
+ * Bits 4:0
+ * Purpose: identifies the type of statistics info held in the
+ * following information element
+ * Value: htt_dbg_stats_type
+ * - STATUS
+ * Bits 7:5
+ * Purpose: indicate whether the requested stats are present
+ * Value: htt_dbg_stats_status, including a special value (0x7) to mark
+ * the completion of the stats entry series
+ * - LENGTH
+ * Bits 31:16
+ * Purpose: indicate the stats information size
+ * Value: This field specifies the number of bytes of stats information
+ * that follows the element tag-length header.
+ * It is expected but not required that this length is a multiple of
+ * 4 bytes. Even if the length is not an integer multiple of 4, the
+ * subsequent stats entry header will begin on a 4-byte aligned
+ * boundary.
+ */
+
+#define HTT_STATS_CONF_ITEM_INFO_STAT_TYPE_MASK 0x1F
+#define HTT_STATS_CONF_ITEM_INFO_STAT_TYPE_LSB 0
+#define HTT_STATS_CONF_ITEM_INFO_STATUS_MASK 0xE0
+#define HTT_STATS_CONF_ITEM_INFO_STATUS_LSB 5
+
+struct htt_stats_conf_item {
+ union {
+ u8 info;
+ struct {
+ u8 stat_type:5; /* %HTT_DBG_STATS_ */
+ u8 status:3; /* %HTT_DBG_STATS_STATUS_ */
+ } __packed;
+ } __packed;
+ u8 pad;
+ __le16 length;
+ u8 payload[0]; /* roundup(length, 4) long */
+} __packed;
+
+struct htt_stats_conf {
+ u8 pad[3];
+ __le32 cookie_lsb;
+ __le32 cookie_msb;
+
+ /* each item has variable length! */
+ struct htt_stats_conf_item items[0];
+} __packed;
+
+static inline struct htt_stats_conf_item *htt_stats_conf_next_item(
+ const struct htt_stats_conf_item *item)
+{
+ return (void *)item + sizeof(*item) + roundup(item->length, 4);
+}
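+
+/* Illustrative sketch, not part of the original patch: walks the
+ * variable-length item series of a stats upload confirmation until the
+ * all-ones status marks the end of the series, as described in the big
+ * comment above. Per-item handling is intentionally left out.
+ */
+static inline void
+htt_stats_conf_for_each_item_example(const struct htt_stats_conf *conf)
+{
+	const struct htt_stats_conf_item *item = conf->items;
+
+	while (item->status != HTT_DBG_STATS_STATUS_SERIES_DONE) {
+		/* inspect item->stat_type and item->payload here */
+		item = htt_stats_conf_next_item(item);
+	}
+}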
+
+/*
+ * host -> target FRAG DESCRIPTOR/MSDU_EXT DESC bank
+ *
+ * The following field definitions describe the format of the HTT host
+ * to target frag_desc/msdu_ext bank configuration message.
+ * The message contains the base address and the min and max id of the
+ * MSDU_EXT/FRAG_DESC that will be used by the HTT to map MSDU DESC and
+ * MSDU_EXT/FRAG_DESC.
+ * HTT will use the id in the HTT descriptor instead of sending the
+ * frag_desc_ptr. For QCA988X HW the firmware will use fragment_desc_ptr
+ * but in WIFI2.0 the hardware does the mapping/translation.
+ *
+ * A total of 16 banks can be configured.
+ *
+ * This should be called before any TX has been initiated by the HTT
+ *
+ * |31 16|15 8|7 5|4 0|
+ * |------------------------------------------------------------|
+ * | DESC_SIZE | NUM_BANKS | RES |SWP|pdev| msg type |
+ * |------------------------------------------------------------|
+ * | BANK0_BASE_ADDRESS |
+ * |------------------------------------------------------------|
+ * | ... |
+ * |------------------------------------------------------------|
+ * | BANK15_BASE_ADDRESS |
+ * |------------------------------------------------------------|
+ * | BANK0_MAX_ID | BANK0_MIN_ID |
+ * |------------------------------------------------------------|
+ * | ... |
+ * |------------------------------------------------------------|
+ * | BANK15_MAX_ID | BANK15_MIN_ID |
+ * |------------------------------------------------------------|
+ * Header fields:
+ * - MSG_TYPE
+ * Bits 7:0
+ * Value: 0x6
+ * - BANKx_BASE_ADDRESS
+ * Bits 31:0
+ * Purpose: Provide a mechanism to specify the base address of the MSDU_EXT
+ * bank physical/bus address.
+ * - BANKx_MIN_ID
+ * Bits 15:0
+ *     Purpose: Provide a mechanism to specify the min index that needs to
+ *            be mapped.
+ *  - BANKx_MAX_ID
+ *     Bits 31:16
+ *     Purpose: Provide a mechanism to specify the max index that needs to
+ *            be mapped.
+ */
+struct htt_frag_desc_bank_id {
+ __le16 bank_min_id;
+ __le16 bank_max_id;
+} __packed;
+
+/* real is 16 but it wouldn't fit in the max htt message size
+ * so we use a conservatively safe value for now
+ */
+#define HTT_FRAG_DESC_BANK_MAX 4
+
+#define HTT_FRAG_DESC_BANK_CFG_INFO_PDEV_ID_MASK 0x03
+#define HTT_FRAG_DESC_BANK_CFG_INFO_PDEV_ID_LSB 0
+#define HTT_FRAG_DESC_BANK_CFG_INFO_SWAP BIT(2)
+#define HTT_FRAG_DESC_BANK_CFG_INFO_Q_STATE_VALID BIT(3)
+#define HTT_FRAG_DESC_BANK_CFG_INFO_Q_STATE_DEPTH_TYPE_MASK BIT(4)
+#define HTT_FRAG_DESC_BANK_CFG_INFO_Q_STATE_DEPTH_TYPE_LSB 4
+
+enum htt_q_depth_type {
+ HTT_Q_DEPTH_TYPE_BYTES = 0,
+ HTT_Q_DEPTH_TYPE_MSDUS = 1,
+};
+
+#define HTT_TX_Q_STATE_NUM_PEERS (TARGET_10_4_NUM_QCACHE_PEERS_MAX + \
+ TARGET_10_4_NUM_VDEVS)
+#define HTT_TX_Q_STATE_NUM_TIDS 8
+#define HTT_TX_Q_STATE_ENTRY_SIZE 1
+#define HTT_TX_Q_STATE_ENTRY_MULTIPLIER 0
+
+/**
+ * htt_q_state_conf - part of htt_frag_desc_bank_cfg for host q state config
+ *
+ * Defines host q state format and behavior. See htt_q_state.
+ *
+ * @record_size: Defines the size of each host q entry in bytes. In practice
+ * however firmware (at least 10.4.3-00191) ignores this host
+ *	configuration value and uses a hardcoded value of 1.
+ * @record_multiplier: This is valid only when q depth type is MSDUs. It
+ * defines the exponent for the power of 2 multiplication.
+ */
+struct htt_q_state_conf {
+ __le32 paddr;
+ __le16 num_peers;
+ __le16 num_tids;
+ u8 record_size;
+ u8 record_multiplier;
+ u8 pad[2];
+} __packed;
+
+struct htt_frag_desc_bank_cfg32 {
+ u8 info; /* HTT_FRAG_DESC_BANK_CFG_INFO_ */
+ u8 num_banks;
+ u8 desc_size;
+ __le32 bank_base_addrs[HTT_FRAG_DESC_BANK_MAX];
+ struct htt_frag_desc_bank_id bank_id[HTT_FRAG_DESC_BANK_MAX];
+ struct htt_q_state_conf q_state;
+} __packed;
+
+struct htt_frag_desc_bank_cfg64 {
+ u8 info; /* HTT_FRAG_DESC_BANK_CFG_INFO_ */
+ u8 num_banks;
+ u8 desc_size;
+ __le64 bank_base_addrs[HTT_FRAG_DESC_BANK_MAX];
+ struct htt_frag_desc_bank_id bank_id[HTT_FRAG_DESC_BANK_MAX];
+ struct htt_q_state_conf q_state;
+} __packed;
+
+#define HTT_TX_Q_STATE_ENTRY_COEFFICIENT 128
+#define HTT_TX_Q_STATE_ENTRY_FACTOR_MASK 0x3f
+#define HTT_TX_Q_STATE_ENTRY_FACTOR_LSB 0
+#define HTT_TX_Q_STATE_ENTRY_EXP_MASK 0xc0
+#define HTT_TX_Q_STATE_ENTRY_EXP_LSB 6
+
+/**
+ * htt_q_state - shared between host and firmware via DMA
+ *
+ * This structure is used by the host to expose its software queue state to
+ * firmware so that its rate control can schedule fetch requests for optimized
+ * performance. This is most notably used for MU-MIMO aggregation when multiple
+ * MU clients are connected.
+ *
+ * @count: Each element defines the host queue depth. When q depth type was
+ * configured as HTT_Q_DEPTH_TYPE_BYTES then each entry is defined as:
+ * FACTOR * 128 * 8^EXP (see HTT_TX_Q_STATE_ENTRY_FACTOR_MASK and
+ * HTT_TX_Q_STATE_ENTRY_EXP_MASK). When q depth type was configured as
+ * HTT_Q_DEPTH_TYPE_MSDUS the number of packets is scaled by 2 **
+ * record_multiplier (see htt_q_state_conf).
+ * @map: Used by firmware to quickly check which host queues are not empty.
+ *	It is a bitmap, one bit per queue, with a set bit meaning the queue
+ *	is non-empty.
+ * @seq: Used by firmware to quickly check if the host queues were updated
+ * since it last checked.
+ *
+ * FIXME: Is the q_state map[] size calculation really correct?
+ */
+struct htt_q_state {
+ u8 count[HTT_TX_Q_STATE_NUM_TIDS][HTT_TX_Q_STATE_NUM_PEERS];
+ u32 map[HTT_TX_Q_STATE_NUM_TIDS][(HTT_TX_Q_STATE_NUM_PEERS + 31) / 32];
+ __le32 seq;
+} __packed;
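+
+/* Illustrative sketch, not part of the original patch: decodes one
+ * count[] entry into bytes for the HTT_Q_DEPTH_TYPE_BYTES case using the
+ * FACTOR * 128 * 8^EXP formula documented above. The helper name is ours.
+ */
+static inline u32 htt_q_state_count_to_bytes_example(u8 value)
+{
+	u32 factor = (value & HTT_TX_Q_STATE_ENTRY_FACTOR_MASK) >>
+		     HTT_TX_Q_STATE_ENTRY_FACTOR_LSB;
+	u32 exp = (value & HTT_TX_Q_STATE_ENTRY_EXP_MASK) >>
+		  HTT_TX_Q_STATE_ENTRY_EXP_LSB;
+
+	return factor * 128 << (3 * exp); /* 8^exp == 1 << (3 * exp) */
+}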
+
+#define HTT_TX_FETCH_RECORD_INFO_PEER_ID_MASK 0x0fff
+#define HTT_TX_FETCH_RECORD_INFO_PEER_ID_LSB 0
+#define HTT_TX_FETCH_RECORD_INFO_TID_MASK 0xf000
+#define HTT_TX_FETCH_RECORD_INFO_TID_LSB 12
+
+struct htt_tx_fetch_record {
+	__le16 info; /* HTT_TX_FETCH_RECORD_INFO_ */
+ __le16 num_msdus;
+ __le32 num_bytes;
+} __packed;
+
+struct htt_tx_fetch_ind {
+ u8 pad0;
+ __le16 fetch_seq_num;
+ __le32 token;
+ __le16 num_resp_ids;
+ __le16 num_records;
+ struct htt_tx_fetch_record records[0];
+ __le32 resp_ids[0]; /* ath10k_htt_get_tx_fetch_ind_resp_ids() */
+} __packed;
+
+static inline void *
+ath10k_htt_get_tx_fetch_ind_resp_ids(struct htt_tx_fetch_ind *ind)
+{
+ return (void *)&ind->records[le16_to_cpu(ind->num_records)];
+}
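+
+/* Illustrative sketch, not part of the original patch: decodes peer id
+ * and tid from a fetch record's info word using the masks above. The
+ * helper name is ours.
+ */
+static inline void
+htt_tx_fetch_record_unpack_example(const struct htt_tx_fetch_record *record,
+				   u16 *peer_id, u16 *tid)
+{
+	u16 info = __le16_to_cpu(record->info);
+
+	*peer_id = (info & HTT_TX_FETCH_RECORD_INFO_PEER_ID_MASK) >>
+		   HTT_TX_FETCH_RECORD_INFO_PEER_ID_LSB;
+	*tid = (info & HTT_TX_FETCH_RECORD_INFO_TID_MASK) >>
+	       HTT_TX_FETCH_RECORD_INFO_TID_LSB;
+}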
+
+struct htt_tx_fetch_resp {
+ u8 pad0;
+ __le16 resp_id;
+ __le16 fetch_seq_num;
+ __le16 num_records;
+ __le32 token;
+ struct htt_tx_fetch_record records[0];
+} __packed;
+
+struct htt_tx_fetch_confirm {
+ u8 pad0;
+ __le16 num_resp_ids;
+ __le32 resp_ids[0];
+} __packed;
+
+enum htt_tx_mode_switch_mode {
+ HTT_TX_MODE_SWITCH_PUSH = 0,
+ HTT_TX_MODE_SWITCH_PUSH_PULL = 1,
+};
+
+#define HTT_TX_MODE_SWITCH_IND_INFO0_ENABLE BIT(0)
+#define HTT_TX_MODE_SWITCH_IND_INFO0_NUM_RECORDS_MASK 0xfffe
+#define HTT_TX_MODE_SWITCH_IND_INFO0_NUM_RECORDS_LSB 1
+
+#define HTT_TX_MODE_SWITCH_IND_INFO1_MODE_MASK 0x0003
+#define HTT_TX_MODE_SWITCH_IND_INFO1_MODE_LSB 0
+#define HTT_TX_MODE_SWITCH_IND_INFO1_THRESHOLD_MASK 0xfffc
+#define HTT_TX_MODE_SWITCH_IND_INFO1_THRESHOLD_LSB 2
+
+#define HTT_TX_MODE_SWITCH_RECORD_INFO0_PEER_ID_MASK 0x0fff
+#define HTT_TX_MODE_SWITCH_RECORD_INFO0_PEER_ID_LSB 0
+#define HTT_TX_MODE_SWITCH_RECORD_INFO0_TID_MASK 0xf000
+#define HTT_TX_MODE_SWITCH_RECORD_INFO0_TID_LSB 12
+
+struct htt_tx_mode_switch_record {
+ __le16 info0; /* HTT_TX_MODE_SWITCH_RECORD_INFO0_ */
+ __le16 num_max_msdus;
+} __packed;
+
+struct htt_tx_mode_switch_ind {
+ u8 pad0;
+ __le16 info0; /* HTT_TX_MODE_SWITCH_IND_INFO0_ */
+ __le16 info1; /* HTT_TX_MODE_SWITCH_IND_INFO1_ */
+ u8 pad1[2];
+ struct htt_tx_mode_switch_record records[0];
+} __packed;
+
+struct htt_channel_change {
+ u8 pad[3];
+ __le32 freq;
+ __le32 center_freq1;
+ __le32 center_freq2;
+ __le32 phymode;
+} __packed;
+
+struct htt_per_peer_tx_stats_ind {
+ __le32 succ_bytes;
+ __le32 retry_bytes;
+ __le32 failed_bytes;
+ u8 ratecode;
+ u8 flags;
+ __le16 peer_id;
+ __le16 succ_pkts;
+ __le16 retry_pkts;
+ __le16 failed_pkts;
+ __le16 tx_duration;
+ __le32 reserved1;
+ __le32 reserved2;
+} __packed;
+
+struct htt_peer_tx_stats {
+ u8 num_ppdu;
+ u8 ppdu_len;
+ u8 version;
+ u8 payload[0];
+} __packed;
+
+#define ATH10K_10_2_TX_STATS_OFFSET 136
+#define PEER_STATS_FOR_NO_OF_PPDUS 4
+
+struct ath10k_10_2_peer_tx_stats {
+ u8 ratecode[PEER_STATS_FOR_NO_OF_PPDUS];
+ u8 success_pkts[PEER_STATS_FOR_NO_OF_PPDUS];
+ __le16 success_bytes[PEER_STATS_FOR_NO_OF_PPDUS];
+ u8 retry_pkts[PEER_STATS_FOR_NO_OF_PPDUS];
+ __le16 retry_bytes[PEER_STATS_FOR_NO_OF_PPDUS];
+ u8 failed_pkts[PEER_STATS_FOR_NO_OF_PPDUS];
+ __le16 failed_bytes[PEER_STATS_FOR_NO_OF_PPDUS];
+ u8 flags[PEER_STATS_FOR_NO_OF_PPDUS];
+ __le32 tx_duration;
+ u8 tx_ppdu_cnt;
+ u8 peer_id;
+} __packed;
+
+union htt_rx_pn_t {
+ /* WEP: 24-bit PN */
+ u32 pn24;
+
+ /* TKIP or CCMP: 48-bit PN */
+ u64 pn48;
+
+ /* WAPI: 128-bit PN */
+ u64 pn128[2];
+};
+
+struct htt_cmd {
+ struct htt_cmd_hdr hdr;
+ union {
+ struct htt_ver_req ver_req;
+ struct htt_mgmt_tx_desc mgmt_tx;
+ struct htt_data_tx_desc data_tx;
+ struct htt_rx_ring_setup_32 rx_setup_32;
+ struct htt_rx_ring_setup_64 rx_setup_64;
+ struct htt_stats_req stats_req;
+ struct htt_oob_sync_req oob_sync_req;
+ struct htt_aggr_conf aggr_conf;
+ struct htt_frag_desc_bank_cfg32 frag_desc_bank_cfg32;
+ struct htt_frag_desc_bank_cfg64 frag_desc_bank_cfg64;
+ struct htt_tx_fetch_resp tx_fetch_resp;
+ };
+} __packed;
+
+struct htt_resp {
+ struct htt_resp_hdr hdr;
+ union {
+ struct htt_ver_resp ver_resp;
+ struct htt_mgmt_tx_completion mgmt_tx_completion;
+ struct htt_data_tx_completion data_tx_completion;
+ struct htt_rx_indication rx_ind;
+ struct htt_rx_fragment_indication rx_frag_ind;
+ struct htt_rx_peer_map peer_map;
+ struct htt_rx_peer_unmap peer_unmap;
+ struct htt_rx_flush rx_flush;
+ struct htt_rx_addba rx_addba;
+ struct htt_rx_delba rx_delba;
+ struct htt_security_indication security_indication;
+ struct htt_rc_update rc_update;
+ struct htt_rx_test rx_test;
+ struct htt_pktlog_msg pktlog_msg;
+ struct htt_stats_conf stats_conf;
+ struct htt_rx_pn_ind rx_pn_ind;
+ struct htt_rx_offload_ind rx_offload_ind;
+ struct htt_rx_in_ord_ind rx_in_ord_ind;
+ struct htt_tx_fetch_ind tx_fetch_ind;
+ struct htt_tx_fetch_confirm tx_fetch_confirm;
+ struct htt_tx_mode_switch_ind tx_mode_switch_ind;
+ struct htt_channel_change chan_change;
+ struct htt_peer_tx_stats peer_tx_stats;
+ };
+} __packed;
+
+/*** host side structures follow ***/
+
+struct htt_tx_done {
+ u16 msdu_id;
+ u16 status;
+ u8 ack_rssi;
+};
+
+enum htt_tx_compl_state {
+ HTT_TX_COMPL_STATE_NONE,
+ HTT_TX_COMPL_STATE_ACK,
+ HTT_TX_COMPL_STATE_NOACK,
+ HTT_TX_COMPL_STATE_DISCARD,
+};
+
+struct htt_peer_map_event {
+ u8 vdev_id;
+ u16 peer_id;
+ u8 addr[ETH_ALEN];
+};
+
+struct htt_peer_unmap_event {
+ u16 peer_id;
+};
+
+struct ath10k_htt_txbuf_32 {
+ struct htt_data_tx_desc_frag frags[2];
+ struct ath10k_htc_hdr htc_hdr;
+ struct htt_cmd_hdr cmd_hdr;
+ struct htt_data_tx_desc cmd_tx;
+} __packed;
+
+struct ath10k_htt_txbuf_64 {
+ struct htt_data_tx_desc_frag frags[2];
+ struct ath10k_htc_hdr htc_hdr;
+ struct htt_cmd_hdr cmd_hdr;
+ struct htt_data_tx_desc_64 cmd_tx;
+} __packed;
+
+struct ath10k_htt {
+ struct ath10k *ar;
+ enum ath10k_htc_ep_id eid;
+
+ u8 target_version_major;
+ u8 target_version_minor;
+ struct completion target_version_received;
+ u8 max_num_amsdu;
+ u8 max_num_ampdu;
+
+ const enum htt_t2h_msg_type *t2h_msg_types;
+ u32 t2h_msg_types_max;
+
+ struct {
+ /*
+ * Ring of network buffer objects - This ring is
+ * used exclusively by the host SW. This ring
+ * mirrors the dev_addrs_ring that is shared
+ * between the host SW and the MAC HW. The host SW
+ * uses this netbufs ring to locate the network
+ * buffer objects whose data buffers the HW has
+ * filled.
+ */
+ struct sk_buff **netbufs_ring;
+
+ /* This is used only with firmware supporting IN_ORD_IND.
+ *
+ * With Full Rx Reorder the HTT Rx Ring is more of a temporary
+ * buffer ring from which buffer addresses are copied by the
+ * firmware to MAC Rx ring. Firmware then delivers IN_ORD_IND
+ * pointing to specific (re-ordered) buffers.
+ *
+		 * FIXME: With kernel generic hashing functions there are a
+		 * lot of hash collisions for sk_buffs.
+ */
+ bool in_ord_rx;
+ DECLARE_HASHTABLE(skb_table, 4);
+
+ /*
+ * Ring of buffer addresses -
+ * This ring holds the "physical" device address of the
+ * rx buffers the host SW provides for the MAC HW to
+ * fill.
+ */
+ union {
+ __le64 *paddrs_ring_64;
+ __le32 *paddrs_ring_32;
+ };
+
+ /*
+ * Base address of ring, as a "physical" device address
+ * rather than a CPU address.
+ */
+ dma_addr_t base_paddr;
+
+ /* how many elems in the ring (power of 2) */
+ int size;
+
+ /* size - 1 */
+ unsigned int size_mask;
+
+ /* how many rx buffers to keep in the ring */
+ int fill_level;
+
+ /* how many rx buffers (full+empty) are in the ring */
+ int fill_cnt;
+
+ /*
+ * alloc_idx - where HTT SW has deposited empty buffers
+ * This is allocated in consistent mem, so that the FW can
+ * read this variable, and program the HW's FW_IDX reg with
+ * the value of this shadow register.
+ */
+ struct {
+ __le32 *vaddr;
+ dma_addr_t paddr;
+ } alloc_idx;
+
+ /* where HTT SW has processed bufs filled by rx MAC DMA */
+ struct {
+ unsigned int msdu_payld;
+ } sw_rd_idx;
+
+ /*
+ * refill_retry_timer - timer triggered when the ring is
+ * not refilled to the level expected
+ */
+ struct timer_list refill_retry_timer;
+
+ /* Protects access to all rx ring buffer state variables */
+ spinlock_t lock;
+ } rx_ring;
+
+ unsigned int prefetch_len;
+
+ /* Protects access to pending_tx, num_pending_tx */
+ spinlock_t tx_lock;
+ int max_num_pending_tx;
+ int num_pending_tx;
+ int num_pending_mgmt_tx;
+ struct idr pending_tx;
+ wait_queue_head_t empty_tx_wq;
+
+ /* FIFO for storing tx done status {ack, no-ack, discard} and msdu id */
+ DECLARE_KFIFO_PTR(txdone_fifo, struct htt_tx_done);
+
+ /* set if host-fw communication goes haywire
+ * used to avoid further failures
+ */
+ bool rx_confused;
+ atomic_t num_mpdus_ready;
+
+ /* This is used to group tx/rx completions separately and process them
+ * in batches to reduce cache stalls
+ */
+ struct sk_buff_head rx_msdus_q;
+ struct sk_buff_head rx_in_ord_compl_q;
+ struct sk_buff_head tx_fetch_ind_q;
+
+ /* rx_status template */
+ struct ieee80211_rx_status rx_status;
+
+ struct {
+ dma_addr_t paddr;
+ union {
+ struct htt_msdu_ext_desc *vaddr_desc_32;
+ struct htt_msdu_ext_desc_64 *vaddr_desc_64;
+ };
+ size_t size;
+ } frag_desc;
+
+ struct {
+ dma_addr_t paddr;
+ union {
+ struct ath10k_htt_txbuf_32 *vaddr_txbuff_32;
+ struct ath10k_htt_txbuf_64 *vaddr_txbuff_64;
+ };
+ size_t size;
+ } txbuf;
+
+ struct {
+ bool enabled;
+ struct htt_q_state *vaddr;
+ dma_addr_t paddr;
+ u16 num_push_allowed;
+ u16 num_peers;
+ u16 num_tids;
+ enum htt_tx_mode_switch_mode mode;
+ enum htt_q_depth_type type;
+ } tx_q_state;
+
+ bool tx_mem_allocated;
+ const struct ath10k_htt_tx_ops *tx_ops;
+ const struct ath10k_htt_rx_ops *rx_ops;
+};
+
+struct ath10k_htt_tx_ops {
+ int (*htt_send_rx_ring_cfg)(struct ath10k_htt *htt);
+ int (*htt_send_frag_desc_bank_cfg)(struct ath10k_htt *htt);
+ int (*htt_alloc_frag_desc)(struct ath10k_htt *htt);
+ void (*htt_free_frag_desc)(struct ath10k_htt *htt);
+ int (*htt_tx)(struct ath10k_htt *htt, enum ath10k_hw_txrx_mode txmode,
+ struct sk_buff *msdu);
+ int (*htt_alloc_txbuff)(struct ath10k_htt *htt);
+ void (*htt_free_txbuff)(struct ath10k_htt *htt);
+};
+
+static inline int ath10k_htt_send_rx_ring_cfg(struct ath10k_htt *htt)
+{
+ if (!htt->tx_ops->htt_send_rx_ring_cfg)
+ return -EOPNOTSUPP;
+
+ return htt->tx_ops->htt_send_rx_ring_cfg(htt);
+}
+
+static inline int ath10k_htt_send_frag_desc_bank_cfg(struct ath10k_htt *htt)
+{
+ if (!htt->tx_ops->htt_send_frag_desc_bank_cfg)
+ return -EOPNOTSUPP;
+
+ return htt->tx_ops->htt_send_frag_desc_bank_cfg(htt);
+}
+
+static inline int ath10k_htt_alloc_frag_desc(struct ath10k_htt *htt)
+{
+ if (!htt->tx_ops->htt_alloc_frag_desc)
+ return -EOPNOTSUPP;
+
+ return htt->tx_ops->htt_alloc_frag_desc(htt);
+}
+
+static inline void ath10k_htt_free_frag_desc(struct ath10k_htt *htt)
+{
+ if (htt->tx_ops->htt_free_frag_desc)
+ htt->tx_ops->htt_free_frag_desc(htt);
+}
+
+static inline int ath10k_htt_tx(struct ath10k_htt *htt,
+ enum ath10k_hw_txrx_mode txmode,
+ struct sk_buff *msdu)
+{
+ return htt->tx_ops->htt_tx(htt, txmode, msdu);
+}
+
+static inline int ath10k_htt_alloc_txbuff(struct ath10k_htt *htt)
+{
+ if (!htt->tx_ops->htt_alloc_txbuff)
+ return -EOPNOTSUPP;
+
+ return htt->tx_ops->htt_alloc_txbuff(htt);
+}
+
+static inline void ath10k_htt_free_txbuff(struct ath10k_htt *htt)
+{
+ if (htt->tx_ops->htt_free_txbuff)
+ htt->tx_ops->htt_free_txbuff(htt);
+}
+
+struct ath10k_htt_rx_ops {
+ size_t (*htt_get_rx_ring_size)(struct ath10k_htt *htt);
+ void (*htt_config_paddrs_ring)(struct ath10k_htt *htt, void *vaddr);
+ void (*htt_set_paddrs_ring)(struct ath10k_htt *htt, dma_addr_t paddr,
+ int idx);
+ void* (*htt_get_vaddr_ring)(struct ath10k_htt *htt);
+ void (*htt_reset_paddrs_ring)(struct ath10k_htt *htt, int idx);
+};
+
+static inline size_t ath10k_htt_get_rx_ring_size(struct ath10k_htt *htt)
+{
+ if (!htt->rx_ops->htt_get_rx_ring_size)
+ return 0;
+
+ return htt->rx_ops->htt_get_rx_ring_size(htt);
+}
+
+static inline void ath10k_htt_config_paddrs_ring(struct ath10k_htt *htt,
+ void *vaddr)
+{
+ if (htt->rx_ops->htt_config_paddrs_ring)
+ htt->rx_ops->htt_config_paddrs_ring(htt, vaddr);
+}
+
+static inline void ath10k_htt_set_paddrs_ring(struct ath10k_htt *htt,
+ dma_addr_t paddr,
+ int idx)
+{
+ if (htt->rx_ops->htt_set_paddrs_ring)
+ htt->rx_ops->htt_set_paddrs_ring(htt, paddr, idx);
+}
+
+static inline void *ath10k_htt_get_vaddr_ring(struct ath10k_htt *htt)
+{
+ if (!htt->rx_ops->htt_get_vaddr_ring)
+ return NULL;
+
+ return htt->rx_ops->htt_get_vaddr_ring(htt);
+}
+
+static inline void ath10k_htt_reset_paddrs_ring(struct ath10k_htt *htt, int idx)
+{
+ if (htt->rx_ops->htt_reset_paddrs_ring)
+ htt->rx_ops->htt_reset_paddrs_ring(htt, idx);
+}
+
+#define RX_HTT_HDR_STATUS_LEN 64
+
+/* This structure layout is programmed via rx ring setup
+ * so that FW knows how to transfer the rx descriptor to the host.
+ * Buffers like this are placed on the rx ring.
+ */
+struct htt_rx_desc {
+ union {
+ /* This field is filled on the host using the msdu buffer
+ * from htt_rx_indication
+ */
+ struct fw_rx_desc_base fw_desc;
+ u32 pad;
+ } __packed;
+ struct {
+ struct rx_attention attention;
+ struct rx_frag_info frag_info;
+ struct rx_mpdu_start mpdu_start;
+ struct rx_msdu_start msdu_start;
+ struct rx_msdu_end msdu_end;
+ struct rx_mpdu_end mpdu_end;
+ struct rx_ppdu_start ppdu_start;
+ struct rx_ppdu_end ppdu_end;
+ } __packed;
+ u8 rx_hdr_status[RX_HTT_HDR_STATUS_LEN];
+ u8 msdu_payload[0];
+};
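+
+/* The descriptor itself is never delivered up the stack: rx handlers skip
+ * over it to reach the payload, e.g. with
+ * skb_pull(msdu, offsetof(struct htt_rx_desc, msdu_payload)).
+ */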
+
+#define HTT_RX_DESC_ALIGN 8
+
+#define HTT_MAC_ADDR_LEN 6
+
+/*
+ * FIX THIS
+ * Should be: sizeof(struct htt_host_rx_desc) + max rx MSDU size,
+ * rounded up to a cache line size.
+ */
+#define HTT_RX_BUF_SIZE 1920
+#define HTT_RX_MSDU_SIZE (HTT_RX_BUF_SIZE - (int)sizeof(struct htt_rx_desc))
+
+/* Refill a bunch of RX buffers for each refill round so that FW/HW can handle
+ * aggregated traffic more nicely.
+ */
+#define ATH10K_HTT_MAX_NUM_REFILL 100
+
+/*
+ * DMA_MAP expects the buffer to be an integral number of cache lines.
+ * Rather than checking the actual cache line size, this code makes a
+ * conservative estimate of what the cache line size could be.
+ */
+#define HTT_LOG2_MAX_CACHE_LINE_SIZE 7 /* 2^7 = 128 */
+#define HTT_MAX_CACHE_LINE_SIZE_MASK ((1 << HTT_LOG2_MAX_CACHE_LINE_SIZE) - 1)
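+
+/* Rounding a length up to this conservative cache line estimate is then a
+ * simple mask operation:
+ *
+ *	len = (len + HTT_MAX_CACHE_LINE_SIZE_MASK) &
+ *	      ~HTT_MAX_CACHE_LINE_SIZE_MASK;
+ *
+ * e.g. 1000 becomes 1024, while multiples of 128 are left untouched.
+ */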
+
+/* These values are the defaults in most firmware revisions and apparently
+ * are a sweet spot performance-wise.
+ */
+#define ATH10K_HTT_MAX_NUM_AMSDU_DEFAULT 3
+#define ATH10K_HTT_MAX_NUM_AMPDU_DEFAULT 64
+
+int ath10k_htt_connect(struct ath10k_htt *htt);
+int ath10k_htt_init(struct ath10k *ar);
+int ath10k_htt_setup(struct ath10k_htt *htt);
+
+int ath10k_htt_tx_start(struct ath10k_htt *htt);
+void ath10k_htt_tx_stop(struct ath10k_htt *htt);
+void ath10k_htt_tx_destroy(struct ath10k_htt *htt);
+void ath10k_htt_tx_free(struct ath10k_htt *htt);
+
+int ath10k_htt_rx_alloc(struct ath10k_htt *htt);
+int ath10k_htt_rx_ring_refill(struct ath10k *ar);
+void ath10k_htt_rx_free(struct ath10k_htt *htt);
+
+void ath10k_htt_htc_tx_complete(struct ath10k *ar, struct sk_buff *skb);
+void ath10k_htt_htc_t2h_msg_handler(struct ath10k *ar, struct sk_buff *skb);
+bool ath10k_htt_t2h_msg_handler(struct ath10k *ar, struct sk_buff *skb);
+int ath10k_htt_h2t_ver_req_msg(struct ath10k_htt *htt);
+int ath10k_htt_h2t_stats_req(struct ath10k_htt *htt, u8 mask, u64 cookie);
+int ath10k_htt_h2t_aggr_cfg_msg(struct ath10k_htt *htt,
+ u8 max_subfrms_ampdu,
+ u8 max_subfrms_amsdu);
+void ath10k_htt_hif_tx_complete(struct ath10k *ar, struct sk_buff *skb);
+int ath10k_htt_tx_fetch_resp(struct ath10k *ar,
+ __le32 token,
+ __le16 fetch_seq_num,
+ struct htt_tx_fetch_record *records,
+ size_t num_records);
+
+void ath10k_htt_tx_txq_update(struct ieee80211_hw *hw,
+ struct ieee80211_txq *txq);
+void ath10k_htt_tx_txq_recalc(struct ieee80211_hw *hw,
+ struct ieee80211_txq *txq);
+void ath10k_htt_tx_txq_sync(struct ath10k *ar);
+void ath10k_htt_tx_dec_pending(struct ath10k_htt *htt);
+int ath10k_htt_tx_inc_pending(struct ath10k_htt *htt);
+void ath10k_htt_tx_mgmt_dec_pending(struct ath10k_htt *htt);
+int ath10k_htt_tx_mgmt_inc_pending(struct ath10k_htt *htt, bool is_mgmt,
+ bool is_presp);
+
+int ath10k_htt_tx_alloc_msdu_id(struct ath10k_htt *htt, struct sk_buff *skb);
+void ath10k_htt_tx_free_msdu_id(struct ath10k_htt *htt, u16 msdu_id);
+int ath10k_htt_mgmt_tx(struct ath10k_htt *htt, struct sk_buff *msdu);
+void ath10k_htt_rx_pktlog_completion_handler(struct ath10k *ar,
+ struct sk_buff *skb);
+int ath10k_htt_txrx_compl_task(struct ath10k *ar, int budget);
+void ath10k_htt_set_tx_ops(struct ath10k_htt *htt);
+void ath10k_htt_set_rx_ops(struct ath10k_htt *htt);
+#endif
diff --git a/drivers/net/wireless/ath/ath10k/htt_rx.c b/drivers/net/wireless/ath/ath10k/htt_rx.c
new file mode 100644
index 000000000..0a7551dc0
--- /dev/null
+++ b/drivers/net/wireless/ath/ath10k/htt_rx.c
@@ -0,0 +1,3064 @@
+/*
+ * Copyright (c) 2005-2011 Atheros Communications Inc.
+ * Copyright (c) 2011-2017 Qualcomm Atheros, Inc.
+ * Copyright (c) 2018, The Linux Foundation. All rights reserved.
+ *
+ * Permission to use, copy, modify, and/or distribute this software for any
+ * purpose with or without fee is hereby granted, provided that the above
+ * copyright notice and this permission notice appear in all copies.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
+ * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
+ * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+ * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+ * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
+ * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+ */
+
+#include "core.h"
+#include "htc.h"
+#include "htt.h"
+#include "txrx.h"
+#include "debug.h"
+#include "trace.h"
+#include "mac.h"
+
+#include <linux/log2.h>
+#include <linux/bitfield.h>
+
+/* when under memory pressure rx ring refill may fail and needs a retry */
+#define HTT_RX_RING_REFILL_RETRY_MS 50
+
+#define HTT_RX_RING_REFILL_RESCHED_MS 5
+
+static int ath10k_htt_rx_get_csum_state(struct sk_buff *skb);
+
+static struct sk_buff *
+ath10k_htt_rx_find_skb_paddr(struct ath10k *ar, u64 paddr)
+{
+ struct ath10k_skb_rxcb *rxcb;
+
+ hash_for_each_possible(ar->htt.rx_ring.skb_table, rxcb, hlist, paddr)
+ if (rxcb->paddr == paddr)
+ return ATH10K_RXCB_SKB(rxcb);
+
+ WARN_ON_ONCE(1);
+ return NULL;
+}
+
+static void ath10k_htt_rx_ring_free(struct ath10k_htt *htt)
+{
+ struct sk_buff *skb;
+ struct ath10k_skb_rxcb *rxcb;
+ struct hlist_node *n;
+ int i;
+
+ if (htt->rx_ring.in_ord_rx) {
+ hash_for_each_safe(htt->rx_ring.skb_table, i, n, rxcb, hlist) {
+ skb = ATH10K_RXCB_SKB(rxcb);
+ dma_unmap_single(htt->ar->dev, rxcb->paddr,
+ skb->len + skb_tailroom(skb),
+ DMA_FROM_DEVICE);
+ hash_del(&rxcb->hlist);
+ dev_kfree_skb_any(skb);
+ }
+ } else {
+ for (i = 0; i < htt->rx_ring.size; i++) {
+ skb = htt->rx_ring.netbufs_ring[i];
+ if (!skb)
+ continue;
+
+ rxcb = ATH10K_SKB_RXCB(skb);
+ dma_unmap_single(htt->ar->dev, rxcb->paddr,
+ skb->len + skb_tailroom(skb),
+ DMA_FROM_DEVICE);
+ dev_kfree_skb_any(skb);
+ }
+ }
+
+ htt->rx_ring.fill_cnt = 0;
+ hash_init(htt->rx_ring.skb_table);
+ memset(htt->rx_ring.netbufs_ring, 0,
+ htt->rx_ring.size * sizeof(htt->rx_ring.netbufs_ring[0]));
+}
+
+static size_t ath10k_htt_get_rx_ring_size_32(struct ath10k_htt *htt)
+{
+	return htt->rx_ring.size * sizeof(*htt->rx_ring.paddrs_ring_32);
+}
+
+static size_t ath10k_htt_get_rx_ring_size_64(struct ath10k_htt *htt)
+{
+	return htt->rx_ring.size * sizeof(*htt->rx_ring.paddrs_ring_64);
+}
+
+static void ath10k_htt_config_paddrs_ring_32(struct ath10k_htt *htt,
+ void *vaddr)
+{
+ htt->rx_ring.paddrs_ring_32 = vaddr;
+}
+
+static void ath10k_htt_config_paddrs_ring_64(struct ath10k_htt *htt,
+ void *vaddr)
+{
+ htt->rx_ring.paddrs_ring_64 = vaddr;
+}
+
+static void ath10k_htt_set_paddrs_ring_32(struct ath10k_htt *htt,
+ dma_addr_t paddr, int idx)
+{
+ htt->rx_ring.paddrs_ring_32[idx] = __cpu_to_le32(paddr);
+}
+
+static void ath10k_htt_set_paddrs_ring_64(struct ath10k_htt *htt,
+ dma_addr_t paddr, int idx)
+{
+ htt->rx_ring.paddrs_ring_64[idx] = __cpu_to_le64(paddr);
+}
+
+static void ath10k_htt_reset_paddrs_ring_32(struct ath10k_htt *htt, int idx)
+{
+ htt->rx_ring.paddrs_ring_32[idx] = 0;
+}
+
+static void ath10k_htt_reset_paddrs_ring_64(struct ath10k_htt *htt, int idx)
+{
+ htt->rx_ring.paddrs_ring_64[idx] = 0;
+}
+
+static void *ath10k_htt_get_vaddr_ring_32(struct ath10k_htt *htt)
+{
+ return (void *)htt->rx_ring.paddrs_ring_32;
+}
+
+static void *ath10k_htt_get_vaddr_ring_64(struct ath10k_htt *htt)
+{
+ return (void *)htt->rx_ring.paddrs_ring_64;
+}
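+
+/* The 32/64-bit accessors above are presumably gathered into
+ * ath10k_htt_rx_ops tables and bound per target word size via
+ * ath10k_htt_set_rx_ops(). A minimal sketch of such a table (the table
+ * name is an assumption here):
+ *
+ *	static const struct ath10k_htt_rx_ops htt_rx_ops_32 = {
+ *		.htt_get_rx_ring_size	= ath10k_htt_get_rx_ring_size_32,
+ *		.htt_config_paddrs_ring	= ath10k_htt_config_paddrs_ring_32,
+ *		.htt_set_paddrs_ring	= ath10k_htt_set_paddrs_ring_32,
+ *		.htt_get_vaddr_ring	= ath10k_htt_get_vaddr_ring_32,
+ *		.htt_reset_paddrs_ring	= ath10k_htt_reset_paddrs_ring_32,
+ *	};
+ */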
+
+static int __ath10k_htt_rx_ring_fill_n(struct ath10k_htt *htt, int num)
+{
+ struct htt_rx_desc *rx_desc;
+ struct ath10k_skb_rxcb *rxcb;
+ struct sk_buff *skb;
+ dma_addr_t paddr;
+ int ret = 0, idx;
+
+	/* The Full Rx Reorder firmware has no way of telling the host
+	 * implicitly when it has copied HTT Rx Ring buffers to the MAC Rx
+	 * Ring. To keep things simple, make sure the ring is never more
+	 * than half full; this guarantees that replenishment can never
+	 * overrun.
+	 */
+ BUILD_BUG_ON(HTT_RX_RING_FILL_LEVEL >= HTT_RX_RING_SIZE / 2);
+
+ idx = __le32_to_cpu(*htt->rx_ring.alloc_idx.vaddr);
+
+ if (idx < 0 || idx >= htt->rx_ring.size) {
+ ath10k_err(htt->ar, "rx ring index is not valid, firmware malfunctioning?\n");
+ idx &= htt->rx_ring.size_mask;
+ ret = -ENOMEM;
+ goto fail;
+ }
+
+ while (num > 0) {
+ skb = dev_alloc_skb(HTT_RX_BUF_SIZE + HTT_RX_DESC_ALIGN);
+ if (!skb) {
+ ret = -ENOMEM;
+ goto fail;
+ }
+
+ if (!IS_ALIGNED((unsigned long)skb->data, HTT_RX_DESC_ALIGN))
+ skb_pull(skb,
+ PTR_ALIGN(skb->data, HTT_RX_DESC_ALIGN) -
+ skb->data);
+
+ /* Clear rx_desc attention word before posting to Rx ring */
+ rx_desc = (struct htt_rx_desc *)skb->data;
+ rx_desc->attention.flags = __cpu_to_le32(0);
+
+ paddr = dma_map_single(htt->ar->dev, skb->data,
+ skb->len + skb_tailroom(skb),
+ DMA_FROM_DEVICE);
+
+ if (unlikely(dma_mapping_error(htt->ar->dev, paddr))) {
+ dev_kfree_skb_any(skb);
+ ret = -ENOMEM;
+ goto fail;
+ }
+
+ rxcb = ATH10K_SKB_RXCB(skb);
+ rxcb->paddr = paddr;
+ htt->rx_ring.netbufs_ring[idx] = skb;
+ ath10k_htt_set_paddrs_ring(htt, paddr, idx);
+ htt->rx_ring.fill_cnt++;
+
+ if (htt->rx_ring.in_ord_rx) {
+ hash_add(htt->rx_ring.skb_table,
+ &ATH10K_SKB_RXCB(skb)->hlist,
+ paddr);
+ }
+
+ num--;
+ idx++;
+ idx &= htt->rx_ring.size_mask;
+ }
+
+fail:
+ /*
+ * Make sure the rx buffer is updated before available buffer
+ * index to avoid any potential rx ring corruption.
+ */
+ mb();
+ *htt->rx_ring.alloc_idx.vaddr = __cpu_to_le32(idx);
+ return ret;
+}
+
+static int ath10k_htt_rx_ring_fill_n(struct ath10k_htt *htt, int num)
+{
+ lockdep_assert_held(&htt->rx_ring.lock);
+ return __ath10k_htt_rx_ring_fill_n(htt, num);
+}
+
+static void ath10k_htt_rx_msdu_buff_replenish(struct ath10k_htt *htt)
+{
+ int ret, num_deficit, num_to_fill;
+
+	/* Refilling the whole RX ring buffer proves to be a bad idea. The
+	 * reason is RX may take up a significant amount of CPU cycles and
+	 * starve other tasks, e.g. TX on an ethernet device while acting as
+	 * a bridge with the ath10k wlan interface. This ended up with very
+	 * poor performance once the host system's CPU was overwhelmed with
+	 * RX on ath10k.
+	 *
+	 * By limiting the number of refills the replenishing occurs
+	 * progressively. This in turn makes use of the fact that tasklets
+	 * are processed in FIFO order. This means actual RX processing can
+	 * starve out refilling. If there are not enough buffers on the RX
+	 * ring the FW will not report RX until the ring is refilled with
+	 * enough buffers. This automatically balances the load with respect
+	 * to CPU power.
+	 *
+	 * This probably comes at a cost of lower maximum throughput but
+	 * improves the average and stability.
+	 */
+ spin_lock_bh(&htt->rx_ring.lock);
+ num_deficit = htt->rx_ring.fill_level - htt->rx_ring.fill_cnt;
+ num_to_fill = min(ATH10K_HTT_MAX_NUM_REFILL, num_deficit);
+ num_deficit -= num_to_fill;
+ ret = ath10k_htt_rx_ring_fill_n(htt, num_to_fill);
+ if (ret == -ENOMEM) {
+ /*
+ * Failed to fill it to the desired level -
+ * we'll start a timer and try again next time.
+ * As long as enough buffers are left in the ring for
+ * another A-MPDU rx, no special recovery is needed.
+ */
+ mod_timer(&htt->rx_ring.refill_retry_timer, jiffies +
+ msecs_to_jiffies(HTT_RX_RING_REFILL_RETRY_MS));
+ } else if (num_deficit > 0) {
+ mod_timer(&htt->rx_ring.refill_retry_timer, jiffies +
+ msecs_to_jiffies(HTT_RX_RING_REFILL_RESCHED_MS));
+ }
+ spin_unlock_bh(&htt->rx_ring.lock);
+}
+
+static void ath10k_htt_rx_ring_refill_retry(struct timer_list *t)
+{
+ struct ath10k_htt *htt = from_timer(htt, t, rx_ring.refill_retry_timer);
+
+ ath10k_htt_rx_msdu_buff_replenish(htt);
+}
+
+int ath10k_htt_rx_ring_refill(struct ath10k *ar)
+{
+ struct ath10k_htt *htt = &ar->htt;
+ int ret;
+
+ spin_lock_bh(&htt->rx_ring.lock);
+ ret = ath10k_htt_rx_ring_fill_n(htt, (htt->rx_ring.fill_level -
+ htt->rx_ring.fill_cnt));
+
+ if (ret)
+ ath10k_htt_rx_ring_free(htt);
+
+ spin_unlock_bh(&htt->rx_ring.lock);
+
+ return ret;
+}
+
+void ath10k_htt_rx_free(struct ath10k_htt *htt)
+{
+ del_timer_sync(&htt->rx_ring.refill_retry_timer);
+
+ skb_queue_purge(&htt->rx_msdus_q);
+ skb_queue_purge(&htt->rx_in_ord_compl_q);
+ skb_queue_purge(&htt->tx_fetch_ind_q);
+
+ spin_lock_bh(&htt->rx_ring.lock);
+ ath10k_htt_rx_ring_free(htt);
+ spin_unlock_bh(&htt->rx_ring.lock);
+
+ dma_free_coherent(htt->ar->dev,
+ ath10k_htt_get_rx_ring_size(htt),
+ ath10k_htt_get_vaddr_ring(htt),
+ htt->rx_ring.base_paddr);
+
+ dma_free_coherent(htt->ar->dev,
+ sizeof(*htt->rx_ring.alloc_idx.vaddr),
+ htt->rx_ring.alloc_idx.vaddr,
+ htt->rx_ring.alloc_idx.paddr);
+
+ kfree(htt->rx_ring.netbufs_ring);
+}
+
+static inline struct sk_buff *ath10k_htt_rx_netbuf_pop(struct ath10k_htt *htt)
+{
+ struct ath10k *ar = htt->ar;
+ int idx;
+ struct sk_buff *msdu;
+
+ lockdep_assert_held(&htt->rx_ring.lock);
+
+ if (htt->rx_ring.fill_cnt == 0) {
+ ath10k_warn(ar, "tried to pop sk_buff from an empty rx ring\n");
+ return NULL;
+ }
+
+ idx = htt->rx_ring.sw_rd_idx.msdu_payld;
+ msdu = htt->rx_ring.netbufs_ring[idx];
+ htt->rx_ring.netbufs_ring[idx] = NULL;
+ ath10k_htt_reset_paddrs_ring(htt, idx);
+
+ idx++;
+ idx &= htt->rx_ring.size_mask;
+ htt->rx_ring.sw_rd_idx.msdu_payld = idx;
+ htt->rx_ring.fill_cnt--;
+
+ dma_unmap_single(htt->ar->dev,
+ ATH10K_SKB_RXCB(msdu)->paddr,
+ msdu->len + skb_tailroom(msdu),
+ DMA_FROM_DEVICE);
+ ath10k_dbg_dump(ar, ATH10K_DBG_HTT_DUMP, NULL, "htt rx netbuf pop: ",
+ msdu->data, msdu->len + skb_tailroom(msdu));
+
+ return msdu;
+}
+
+/* return: < 0 fatal error, 0 non-chained msdu, 1 chained msdu */
+static int ath10k_htt_rx_amsdu_pop(struct ath10k_htt *htt,
+ struct sk_buff_head *amsdu)
+{
+ struct ath10k *ar = htt->ar;
+ int msdu_len, msdu_chaining = 0;
+ struct sk_buff *msdu;
+ struct htt_rx_desc *rx_desc;
+
+ lockdep_assert_held(&htt->rx_ring.lock);
+
+ for (;;) {
+ int last_msdu, msdu_len_invalid, msdu_chained;
+
+ msdu = ath10k_htt_rx_netbuf_pop(htt);
+ if (!msdu) {
+ __skb_queue_purge(amsdu);
+ return -ENOENT;
+ }
+
+ __skb_queue_tail(amsdu, msdu);
+
+ rx_desc = (struct htt_rx_desc *)msdu->data;
+
+ /* FIXME: we must report msdu payload since this is what caller
+ * expects now
+ */
+ skb_put(msdu, offsetof(struct htt_rx_desc, msdu_payload));
+ skb_pull(msdu, offsetof(struct htt_rx_desc, msdu_payload));
+
+		/*
+		 * Sanity check - confirm the HW is finished filling in the
+		 * rx data.
+		 * If the HW and SW are working correctly, then it's guaranteed
+		 * that the HW's MAC DMA is done before this point in the SW.
+		 * To prevent handling a stale Rx descriptor, just bail out
+		 * here until there is a way to recover.
+		 */
+ if (!(__le32_to_cpu(rx_desc->attention.flags)
+ & RX_ATTENTION_FLAGS_MSDU_DONE)) {
+ __skb_queue_purge(amsdu);
+ return -EIO;
+ }
+
+ msdu_len_invalid = !!(__le32_to_cpu(rx_desc->attention.flags)
+ & (RX_ATTENTION_FLAGS_MPDU_LENGTH_ERR |
+ RX_ATTENTION_FLAGS_MSDU_LENGTH_ERR));
+ msdu_len = MS(__le32_to_cpu(rx_desc->msdu_start.common.info0),
+ RX_MSDU_START_INFO0_MSDU_LENGTH);
+ msdu_chained = rx_desc->frag_info.ring2_more_count;
+
+ if (msdu_len_invalid)
+ msdu_len = 0;
+
+ skb_trim(msdu, 0);
+ skb_put(msdu, min(msdu_len, HTT_RX_MSDU_SIZE));
+ msdu_len -= msdu->len;
+
+		/* Note: Chained buffers do not contain an rx descriptor */
+ while (msdu_chained--) {
+ msdu = ath10k_htt_rx_netbuf_pop(htt);
+ if (!msdu) {
+ __skb_queue_purge(amsdu);
+ return -ENOENT;
+ }
+
+ __skb_queue_tail(amsdu, msdu);
+ skb_trim(msdu, 0);
+ skb_put(msdu, min(msdu_len, HTT_RX_BUF_SIZE));
+ msdu_len -= msdu->len;
+ msdu_chaining = 1;
+ }
+
+ last_msdu = __le32_to_cpu(rx_desc->msdu_end.common.info0) &
+ RX_MSDU_END_INFO0_LAST_MSDU;
+
+ trace_ath10k_htt_rx_desc(ar, &rx_desc->attention,
+ sizeof(*rx_desc) - sizeof(u32));
+
+ if (last_msdu)
+ break;
+ }
+
+ if (skb_queue_empty(amsdu))
+ msdu_chaining = -1;
+
+ /*
+ * Don't refill the ring yet.
+ *
+ * First, the elements popped here are still in use - it is not
+ * safe to overwrite them until the matching call to
+ * mpdu_desc_list_next. Second, for efficiency it is preferable to
+ * refill the rx ring with 1 PPDU's worth of rx buffers (something
+ * like 32 x 3 buffers), rather than one MPDU's worth of rx buffers
+ * (something like 3 buffers). Consequently, we'll rely on the txrx
+ * SW to tell us when it is done pulling all the PPDU's rx buffers
+ * out of the rx ring, and then refill it just once.
+ */
+
+ return msdu_chaining;
+}
+
+static struct sk_buff *ath10k_htt_rx_pop_paddr(struct ath10k_htt *htt,
+ u64 paddr)
+{
+ struct ath10k *ar = htt->ar;
+ struct ath10k_skb_rxcb *rxcb;
+ struct sk_buff *msdu;
+
+ lockdep_assert_held(&htt->rx_ring.lock);
+
+ msdu = ath10k_htt_rx_find_skb_paddr(ar, paddr);
+ if (!msdu)
+ return NULL;
+
+ rxcb = ATH10K_SKB_RXCB(msdu);
+ hash_del(&rxcb->hlist);
+ htt->rx_ring.fill_cnt--;
+
+ dma_unmap_single(htt->ar->dev, rxcb->paddr,
+ msdu->len + skb_tailroom(msdu),
+ DMA_FROM_DEVICE);
+ ath10k_dbg_dump(ar, ATH10K_DBG_HTT_DUMP, NULL, "htt rx netbuf pop: ",
+ msdu->data, msdu->len + skb_tailroom(msdu));
+
+ return msdu;
+}
+
+static int ath10k_htt_rx_pop_paddr32_list(struct ath10k_htt *htt,
+ struct htt_rx_in_ord_ind *ev,
+ struct sk_buff_head *list)
+{
+ struct ath10k *ar = htt->ar;
+ struct htt_rx_in_ord_msdu_desc *msdu_desc = ev->msdu_descs32;
+ struct htt_rx_desc *rxd;
+ struct sk_buff *msdu;
+ int msdu_count;
+ bool is_offload;
+ u32 paddr;
+
+ lockdep_assert_held(&htt->rx_ring.lock);
+
+ msdu_count = __le16_to_cpu(ev->msdu_count);
+ is_offload = !!(ev->info & HTT_RX_IN_ORD_IND_INFO_OFFLOAD_MASK);
+
+ while (msdu_count--) {
+ paddr = __le32_to_cpu(msdu_desc->msdu_paddr);
+
+ msdu = ath10k_htt_rx_pop_paddr(htt, paddr);
+ if (!msdu) {
+ __skb_queue_purge(list);
+ return -ENOENT;
+ }
+
+ __skb_queue_tail(list, msdu);
+
+ if (!is_offload) {
+ rxd = (void *)msdu->data;
+
+ trace_ath10k_htt_rx_desc(ar, rxd, sizeof(*rxd));
+
+ skb_put(msdu, sizeof(*rxd));
+ skb_pull(msdu, sizeof(*rxd));
+ skb_put(msdu, __le16_to_cpu(msdu_desc->msdu_len));
+
+ if (!(__le32_to_cpu(rxd->attention.flags) &
+ RX_ATTENTION_FLAGS_MSDU_DONE)) {
+ ath10k_warn(htt->ar, "tried to pop an incomplete frame, oops!\n");
+ return -EIO;
+ }
+ }
+
+ msdu_desc++;
+ }
+
+ return 0;
+}
+
+static int ath10k_htt_rx_pop_paddr64_list(struct ath10k_htt *htt,
+ struct htt_rx_in_ord_ind *ev,
+ struct sk_buff_head *list)
+{
+ struct ath10k *ar = htt->ar;
+ struct htt_rx_in_ord_msdu_desc_ext *msdu_desc = ev->msdu_descs64;
+ struct htt_rx_desc *rxd;
+ struct sk_buff *msdu;
+ int msdu_count;
+ bool is_offload;
+ u64 paddr;
+
+ lockdep_assert_held(&htt->rx_ring.lock);
+
+ msdu_count = __le16_to_cpu(ev->msdu_count);
+ is_offload = !!(ev->info & HTT_RX_IN_ORD_IND_INFO_OFFLOAD_MASK);
+
+ while (msdu_count--) {
+ paddr = __le64_to_cpu(msdu_desc->msdu_paddr);
+ msdu = ath10k_htt_rx_pop_paddr(htt, paddr);
+ if (!msdu) {
+ __skb_queue_purge(list);
+ return -ENOENT;
+ }
+
+ __skb_queue_tail(list, msdu);
+
+ if (!is_offload) {
+ rxd = (void *)msdu->data;
+
+ trace_ath10k_htt_rx_desc(ar, rxd, sizeof(*rxd));
+
+ skb_put(msdu, sizeof(*rxd));
+ skb_pull(msdu, sizeof(*rxd));
+ skb_put(msdu, __le16_to_cpu(msdu_desc->msdu_len));
+
+ if (!(__le32_to_cpu(rxd->attention.flags) &
+ RX_ATTENTION_FLAGS_MSDU_DONE)) {
+ ath10k_warn(htt->ar, "tried to pop an incomplete frame, oops!\n");
+ return -EIO;
+ }
+ }
+
+ msdu_desc++;
+ }
+
+ return 0;
+}
+
+int ath10k_htt_rx_alloc(struct ath10k_htt *htt)
+{
+ struct ath10k *ar = htt->ar;
+ dma_addr_t paddr;
+ void *vaddr, *vaddr_ring;
+ size_t size;
+ struct timer_list *timer = &htt->rx_ring.refill_retry_timer;
+
+ htt->rx_confused = false;
+
+ /* XXX: The fill level could be changed during runtime in response to
+ * the host processing latency. Is this really worth it?
+ */
+ htt->rx_ring.size = HTT_RX_RING_SIZE;
+ htt->rx_ring.size_mask = htt->rx_ring.size - 1;
+ htt->rx_ring.fill_level = ar->hw_params.rx_ring_fill_level;
+
+ if (!is_power_of_2(htt->rx_ring.size)) {
+ ath10k_warn(ar, "htt rx ring size is not power of 2\n");
+ return -EINVAL;
+ }
+
+ htt->rx_ring.netbufs_ring =
+ kcalloc(htt->rx_ring.size, sizeof(struct sk_buff *),
+ GFP_KERNEL);
+ if (!htt->rx_ring.netbufs_ring)
+ goto err_netbuf;
+
+ size = ath10k_htt_get_rx_ring_size(htt);
+
+ vaddr_ring = dma_alloc_coherent(htt->ar->dev, size, &paddr, GFP_KERNEL);
+ if (!vaddr_ring)
+ goto err_dma_ring;
+
+ ath10k_htt_config_paddrs_ring(htt, vaddr_ring);
+ htt->rx_ring.base_paddr = paddr;
+
+ vaddr = dma_alloc_coherent(htt->ar->dev,
+ sizeof(*htt->rx_ring.alloc_idx.vaddr),
+ &paddr, GFP_KERNEL);
+ if (!vaddr)
+ goto err_dma_idx;
+
+ htt->rx_ring.alloc_idx.vaddr = vaddr;
+ htt->rx_ring.alloc_idx.paddr = paddr;
+ htt->rx_ring.sw_rd_idx.msdu_payld = htt->rx_ring.size_mask;
+ *htt->rx_ring.alloc_idx.vaddr = 0;
+
+ /* Initialize the Rx refill retry timer */
+ timer_setup(timer, ath10k_htt_rx_ring_refill_retry, 0);
+
+ spin_lock_init(&htt->rx_ring.lock);
+
+ htt->rx_ring.fill_cnt = 0;
+ htt->rx_ring.sw_rd_idx.msdu_payld = 0;
+ hash_init(htt->rx_ring.skb_table);
+
+ skb_queue_head_init(&htt->rx_msdus_q);
+ skb_queue_head_init(&htt->rx_in_ord_compl_q);
+ skb_queue_head_init(&htt->tx_fetch_ind_q);
+ atomic_set(&htt->num_mpdus_ready, 0);
+
+ ath10k_dbg(ar, ATH10K_DBG_BOOT, "htt rx ring size %d fill_level %d\n",
+ htt->rx_ring.size, htt->rx_ring.fill_level);
+ return 0;
+
+err_dma_idx:
+ dma_free_coherent(htt->ar->dev,
+ ath10k_htt_get_rx_ring_size(htt),
+ vaddr_ring,
+ htt->rx_ring.base_paddr);
+err_dma_ring:
+ kfree(htt->rx_ring.netbufs_ring);
+err_netbuf:
+ return -ENOMEM;
+}
+
+static int ath10k_htt_rx_crypto_param_len(struct ath10k *ar,
+ enum htt_rx_mpdu_encrypt_type type)
+{
+ switch (type) {
+ case HTT_RX_MPDU_ENCRYPT_NONE:
+ return 0;
+ case HTT_RX_MPDU_ENCRYPT_WEP40:
+ case HTT_RX_MPDU_ENCRYPT_WEP104:
+ return IEEE80211_WEP_IV_LEN;
+ case HTT_RX_MPDU_ENCRYPT_TKIP_WITHOUT_MIC:
+ case HTT_RX_MPDU_ENCRYPT_TKIP_WPA:
+ return IEEE80211_TKIP_IV_LEN;
+ case HTT_RX_MPDU_ENCRYPT_AES_CCM_WPA2:
+ return IEEE80211_CCMP_HDR_LEN;
+ case HTT_RX_MPDU_ENCRYPT_AES_CCM256_WPA2:
+ return IEEE80211_CCMP_256_HDR_LEN;
+ case HTT_RX_MPDU_ENCRYPT_AES_GCMP_WPA2:
+ case HTT_RX_MPDU_ENCRYPT_AES_GCMP256_WPA2:
+ return IEEE80211_GCMP_HDR_LEN;
+ case HTT_RX_MPDU_ENCRYPT_WEP128:
+ case HTT_RX_MPDU_ENCRYPT_WAPI:
+ break;
+ }
+
+ ath10k_warn(ar, "unsupported encryption type %d\n", type);
+ return 0;
+}
+
+#define MICHAEL_MIC_LEN 8
+
+static int ath10k_htt_rx_crypto_mic_len(struct ath10k *ar,
+ enum htt_rx_mpdu_encrypt_type type)
+{
+ switch (type) {
+ case HTT_RX_MPDU_ENCRYPT_NONE:
+ case HTT_RX_MPDU_ENCRYPT_WEP40:
+ case HTT_RX_MPDU_ENCRYPT_WEP104:
+ case HTT_RX_MPDU_ENCRYPT_TKIP_WITHOUT_MIC:
+ case HTT_RX_MPDU_ENCRYPT_TKIP_WPA:
+ return 0;
+ case HTT_RX_MPDU_ENCRYPT_AES_CCM_WPA2:
+ return IEEE80211_CCMP_MIC_LEN;
+ case HTT_RX_MPDU_ENCRYPT_AES_CCM256_WPA2:
+ return IEEE80211_CCMP_256_MIC_LEN;
+ case HTT_RX_MPDU_ENCRYPT_AES_GCMP_WPA2:
+ case HTT_RX_MPDU_ENCRYPT_AES_GCMP256_WPA2:
+ return IEEE80211_GCMP_MIC_LEN;
+ case HTT_RX_MPDU_ENCRYPT_WEP128:
+ case HTT_RX_MPDU_ENCRYPT_WAPI:
+ break;
+ }
+
+ ath10k_warn(ar, "unsupported encryption type %d\n", type);
+ return 0;
+}
+
+static int ath10k_htt_rx_crypto_icv_len(struct ath10k *ar,
+ enum htt_rx_mpdu_encrypt_type type)
+{
+ switch (type) {
+ case HTT_RX_MPDU_ENCRYPT_NONE:
+ case HTT_RX_MPDU_ENCRYPT_AES_CCM_WPA2:
+ case HTT_RX_MPDU_ENCRYPT_AES_CCM256_WPA2:
+ case HTT_RX_MPDU_ENCRYPT_AES_GCMP_WPA2:
+ case HTT_RX_MPDU_ENCRYPT_AES_GCMP256_WPA2:
+ return 0;
+ case HTT_RX_MPDU_ENCRYPT_WEP40:
+ case HTT_RX_MPDU_ENCRYPT_WEP104:
+ return IEEE80211_WEP_ICV_LEN;
+ case HTT_RX_MPDU_ENCRYPT_TKIP_WITHOUT_MIC:
+ case HTT_RX_MPDU_ENCRYPT_TKIP_WPA:
+ return IEEE80211_TKIP_ICV_LEN;
+ case HTT_RX_MPDU_ENCRYPT_WEP128:
+ case HTT_RX_MPDU_ENCRYPT_WAPI:
+ break;
+ }
+
+ ath10k_warn(ar, "unsupported encryption type %d\n", type);
+ return 0;
+}
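+
+/* Taken together, the three helpers above describe the per-enctype crypto
+ * overhead around a decapped frame:
+ *
+ *	[802.11 hdr][IV/param][payload][MIC][ICV]
+ *
+ * e.g. CCMP adds an 8 byte header and an 8 byte MIC with no ICV, while WEP
+ * adds a 4 byte IV and a 4 byte ICV with no MIC.
+ */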
+
+struct amsdu_subframe_hdr {
+ u8 dst[ETH_ALEN];
+ u8 src[ETH_ALEN];
+ __be16 len;
+} __packed;
+
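+/* In a VHT preamble a Group ID of 0 or 63 denotes an SU-MIMO PPDU; values
+ * 1..62 are MU-MIMO groups (IEEE 802.11ac).
+ */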
+#define GROUP_ID_IS_SU_MIMO(x) ((x) == 0 || (x) == 63)
+
+static inline u8 ath10k_bw_to_mac80211_bw(u8 bw)
+{
+ u8 ret = 0;
+
+ switch (bw) {
+ case 0:
+ ret = RATE_INFO_BW_20;
+ break;
+ case 1:
+ ret = RATE_INFO_BW_40;
+ break;
+ case 2:
+ ret = RATE_INFO_BW_80;
+ break;
+ case 3:
+ ret = RATE_INFO_BW_160;
+ break;
+ }
+
+ return ret;
+}
+
+static void ath10k_htt_rx_h_rates(struct ath10k *ar,
+ struct ieee80211_rx_status *status,
+ struct htt_rx_desc *rxd)
+{
+ struct ieee80211_supported_band *sband;
+ u8 cck, rate, bw, sgi, mcs, nss;
+ u8 preamble = 0;
+ u8 group_id;
+ u32 info1, info2, info3;
+ u32 stbc, nsts_su;
+
+ info1 = __le32_to_cpu(rxd->ppdu_start.info1);
+ info2 = __le32_to_cpu(rxd->ppdu_start.info2);
+ info3 = __le32_to_cpu(rxd->ppdu_start.info3);
+
+ preamble = MS(info1, RX_PPDU_START_INFO1_PREAMBLE_TYPE);
+
+ switch (preamble) {
+ case HTT_RX_LEGACY:
+		/* The band is required to get the legacy rate index. Since
+		 * the band can't be undefined, check that freq is non-zero.
+		 */
+ if (!status->freq)
+ return;
+
+ cck = info1 & RX_PPDU_START_INFO1_L_SIG_RATE_SELECT;
+ rate = MS(info1, RX_PPDU_START_INFO1_L_SIG_RATE);
+ rate &= ~RX_PPDU_START_RATE_FLAG;
+
+ sband = &ar->mac.sbands[status->band];
+ status->rate_idx = ath10k_mac_hw_rate_to_idx(sband, rate, cck);
+ break;
+ case HTT_RX_HT:
+ case HTT_RX_HT_WITH_TXBF:
+ /* HT-SIG - Table 20-11 in info2 and info3 */
+ mcs = info2 & 0x1F;
+ nss = mcs >> 3;
+ bw = (info2 >> 7) & 1;
+ sgi = (info3 >> 7) & 1;
+
+ status->rate_idx = mcs;
+ status->encoding = RX_ENC_HT;
+ if (sgi)
+ status->enc_flags |= RX_ENC_FLAG_SHORT_GI;
+ if (bw)
+ status->bw = RATE_INFO_BW_40;
+ break;
+ case HTT_RX_VHT:
+ case HTT_RX_VHT_WITH_TXBF:
+ /* VHT-SIG-A1 in info2, VHT-SIG-A2 in info3
+ * TODO check this
+ */
+ bw = info2 & 3;
+ sgi = info3 & 1;
+ stbc = (info2 >> 3) & 1;
+ group_id = (info2 >> 4) & 0x3F;
+
+ if (GROUP_ID_IS_SU_MIMO(group_id)) {
+ mcs = (info3 >> 4) & 0x0F;
+ nsts_su = ((info2 >> 10) & 0x07);
+ if (stbc)
+ nss = (nsts_su >> 2) + 1;
+ else
+ nss = (nsts_su + 1);
+ } else {
+			/* Hardware doesn't decode VHT-SIG-B into the Rx
+			 * descriptor, so it's impossible to decode the MCS.
+			 * Also, since firmware consumes Group Id Management
+			 * frames, the host has no knowledge of the group/user
+			 * position mapping and can't pick the correct Nsts
+			 * from VHT-SIG-A1.
+			 *
+			 * Bandwidth and SGI are valid so report the rateinfo
+			 * on a best-effort basis.
+			 */
+ mcs = 0;
+ nss = 1;
+ }
+
+ if (mcs > 0x09) {
+ ath10k_warn(ar, "invalid MCS received %u\n", mcs);
+ ath10k_warn(ar, "rxd %08x mpdu start %08x %08x msdu start %08x %08x ppdu start %08x %08x %08x %08x %08x\n",
+ __le32_to_cpu(rxd->attention.flags),
+ __le32_to_cpu(rxd->mpdu_start.info0),
+ __le32_to_cpu(rxd->mpdu_start.info1),
+ __le32_to_cpu(rxd->msdu_start.common.info0),
+ __le32_to_cpu(rxd->msdu_start.common.info1),
+ rxd->ppdu_start.info0,
+ __le32_to_cpu(rxd->ppdu_start.info1),
+ __le32_to_cpu(rxd->ppdu_start.info2),
+ __le32_to_cpu(rxd->ppdu_start.info3),
+ __le32_to_cpu(rxd->ppdu_start.info4));
+
+ ath10k_warn(ar, "msdu end %08x mpdu end %08x\n",
+ __le32_to_cpu(rxd->msdu_end.common.info0),
+ __le32_to_cpu(rxd->mpdu_end.info0));
+
+ ath10k_dbg_dump(ar, ATH10K_DBG_HTT_DUMP, NULL,
+ "rx desc msdu payload: ",
+ rxd->msdu_payload, 50);
+ }
+
+ status->rate_idx = mcs;
+ status->nss = nss;
+
+ if (sgi)
+ status->enc_flags |= RX_ENC_FLAG_SHORT_GI;
+
+ status->bw = ath10k_bw_to_mac80211_bw(bw);
+ status->encoding = RX_ENC_VHT;
+ break;
+ default:
+ break;
+ }
+}
+
+static struct ieee80211_channel *
+ath10k_htt_rx_h_peer_channel(struct ath10k *ar, struct htt_rx_desc *rxd)
+{
+ struct ath10k_peer *peer;
+ struct ath10k_vif *arvif;
+ struct cfg80211_chan_def def;
+ u16 peer_id;
+
+ lockdep_assert_held(&ar->data_lock);
+
+ if (!rxd)
+ return NULL;
+
+ if (rxd->attention.flags &
+ __cpu_to_le32(RX_ATTENTION_FLAGS_PEER_IDX_INVALID))
+ return NULL;
+
+ if (!(rxd->msdu_end.common.info0 &
+ __cpu_to_le32(RX_MSDU_END_INFO0_FIRST_MSDU)))
+ return NULL;
+
+ peer_id = MS(__le32_to_cpu(rxd->mpdu_start.info0),
+ RX_MPDU_START_INFO0_PEER_IDX);
+
+ peer = ath10k_peer_find_by_id(ar, peer_id);
+ if (!peer)
+ return NULL;
+
+ arvif = ath10k_get_arvif(ar, peer->vdev_id);
+ if (WARN_ON_ONCE(!arvif))
+ return NULL;
+
+ if (ath10k_mac_vif_chan(arvif->vif, &def))
+ return NULL;
+
+ return def.chan;
+}
+
+static struct ieee80211_channel *
+ath10k_htt_rx_h_vdev_channel(struct ath10k *ar, u32 vdev_id)
+{
+ struct ath10k_vif *arvif;
+ struct cfg80211_chan_def def;
+
+ lockdep_assert_held(&ar->data_lock);
+
+ list_for_each_entry(arvif, &ar->arvifs, list) {
+ if (arvif->vdev_id == vdev_id &&
+ ath10k_mac_vif_chan(arvif->vif, &def) == 0)
+ return def.chan;
+ }
+
+ return NULL;
+}
+
+static void
+ath10k_htt_rx_h_any_chan_iter(struct ieee80211_hw *hw,
+ struct ieee80211_chanctx_conf *conf,
+ void *data)
+{
+ struct cfg80211_chan_def *def = data;
+
+ *def = conf->def;
+}
+
+static struct ieee80211_channel *
+ath10k_htt_rx_h_any_channel(struct ath10k *ar)
+{
+ struct cfg80211_chan_def def = {};
+
+ ieee80211_iter_chan_contexts_atomic(ar->hw,
+ ath10k_htt_rx_h_any_chan_iter,
+ &def);
+
+ return def.chan;
+}
+
+static bool ath10k_htt_rx_h_channel(struct ath10k *ar,
+ struct ieee80211_rx_status *status,
+ struct htt_rx_desc *rxd,
+ u32 vdev_id)
+{
+ struct ieee80211_channel *ch;
+
+ spin_lock_bh(&ar->data_lock);
+ ch = ar->scan_channel;
+ if (!ch)
+ ch = ar->rx_channel;
+ if (!ch)
+ ch = ath10k_htt_rx_h_peer_channel(ar, rxd);
+ if (!ch)
+ ch = ath10k_htt_rx_h_vdev_channel(ar, vdev_id);
+ if (!ch)
+ ch = ath10k_htt_rx_h_any_channel(ar);
+ if (!ch)
+ ch = ar->tgt_oper_chan;
+ spin_unlock_bh(&ar->data_lock);
+
+ if (!ch)
+ return false;
+
+ status->band = ch->band;
+ status->freq = ch->center_freq;
+
+ return true;
+}
+
+static void ath10k_htt_rx_h_signal(struct ath10k *ar,
+ struct ieee80211_rx_status *status,
+ struct htt_rx_desc *rxd)
+{
+ int i;
+
+	for (i = 0; i < IEEE80211_MAX_CHAINS; i++) {
+ status->chains &= ~BIT(i);
+
+ if (rxd->ppdu_start.rssi_chains[i].pri20_mhz != 0x80) {
+ status->chain_signal[i] = ATH10K_DEFAULT_NOISE_FLOOR +
+ rxd->ppdu_start.rssi_chains[i].pri20_mhz;
+
+ status->chains |= BIT(i);
+ }
+ }
+
+ /* FIXME: Get real NF */
+ status->signal = ATH10K_DEFAULT_NOISE_FLOOR +
+ rxd->ppdu_start.rssi_comb;
+ status->flag &= ~RX_FLAG_NO_SIGNAL_VAL;
+}
+
+static void ath10k_htt_rx_h_mactime(struct ath10k *ar,
+ struct ieee80211_rx_status *status,
+ struct htt_rx_desc *rxd)
+{
+ /* FIXME: TSF is known only at the end of PPDU, in the last MPDU. This
+ * means all prior MSDUs in a PPDU are reported to mac80211 without the
+ * TSF. Is it worth holding frames until end of PPDU is known?
+ *
+ * FIXME: Can we get/compute 64bit TSF?
+ */
+ status->mactime = __le32_to_cpu(rxd->ppdu_end.common.tsf_timestamp);
+ status->flag |= RX_FLAG_MACTIME_END;
+}
+
+static void ath10k_htt_rx_h_ppdu(struct ath10k *ar,
+ struct sk_buff_head *amsdu,
+ struct ieee80211_rx_status *status,
+ u32 vdev_id)
+{
+ struct sk_buff *first;
+ struct htt_rx_desc *rxd;
+ bool is_first_ppdu;
+ bool is_last_ppdu;
+
+ if (skb_queue_empty(amsdu))
+ return;
+
+ first = skb_peek(amsdu);
+ rxd = (void *)first->data - sizeof(*rxd);
+
+ is_first_ppdu = !!(rxd->attention.flags &
+ __cpu_to_le32(RX_ATTENTION_FLAGS_FIRST_MPDU));
+ is_last_ppdu = !!(rxd->attention.flags &
+ __cpu_to_le32(RX_ATTENTION_FLAGS_LAST_MPDU));
+
+ if (is_first_ppdu) {
+ /* New PPDU starts so clear out the old per-PPDU status. */
+ status->freq = 0;
+ status->rate_idx = 0;
+ status->nss = 0;
+ status->encoding = RX_ENC_LEGACY;
+ status->bw = RATE_INFO_BW_20;
+
+ status->flag &= ~RX_FLAG_MACTIME_END;
+ status->flag |= RX_FLAG_NO_SIGNAL_VAL;
+
+ status->flag &= ~(RX_FLAG_AMPDU_IS_LAST);
+ status->flag |= RX_FLAG_AMPDU_DETAILS | RX_FLAG_AMPDU_LAST_KNOWN;
+ status->ampdu_reference = ar->ampdu_reference;
+
+ ath10k_htt_rx_h_signal(ar, status, rxd);
+ ath10k_htt_rx_h_channel(ar, status, rxd, vdev_id);
+ ath10k_htt_rx_h_rates(ar, status, rxd);
+ }
+
+ if (is_last_ppdu) {
+ ath10k_htt_rx_h_mactime(ar, status, rxd);
+
+ /* set ampdu last segment flag */
+ status->flag |= RX_FLAG_AMPDU_IS_LAST;
+ ar->ampdu_reference++;
+ }
+}
+
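+/* IEEE 802.11 User Priority (TID 0..7) to WMM access category names; used
+ * only for the debug print in ath10k_get_tid() below.
+ */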
+static const char * const tid_to_ac[] = {
+ "BE",
+ "BK",
+ "BK",
+ "BE",
+ "VI",
+ "VI",
+ "VO",
+ "VO",
+};
+
+static char *ath10k_get_tid(struct ieee80211_hdr *hdr, char *out, size_t size)
+{
+ u8 *qc;
+ int tid;
+
+ if (!ieee80211_is_data_qos(hdr->frame_control))
+ return "";
+
+ qc = ieee80211_get_qos_ctl(hdr);
+ tid = *qc & IEEE80211_QOS_CTL_TID_MASK;
+ if (tid < 8)
+ snprintf(out, size, "tid %d (%s)", tid, tid_to_ac[tid]);
+ else
+ snprintf(out, size, "tid %d", tid);
+
+ return out;
+}
+
+static void ath10k_htt_rx_h_queue_msdu(struct ath10k *ar,
+ struct ieee80211_rx_status *rx_status,
+ struct sk_buff *skb)
+{
+ struct ieee80211_rx_status *status;
+
+ status = IEEE80211_SKB_RXCB(skb);
+ *status = *rx_status;
+
+ skb_queue_tail(&ar->htt.rx_msdus_q, skb);
+}
+
+static void ath10k_process_rx(struct ath10k *ar, struct sk_buff *skb)
+{
+ struct ieee80211_rx_status *status;
+ struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data;
+ char tid[32];
+
+ status = IEEE80211_SKB_RXCB(skb);
+
+ ath10k_dbg(ar, ATH10K_DBG_DATA,
+ "rx skb %pK len %u peer %pM %s %s sn %u %s%s%s%s%s%s %srate_idx %u vht_nss %u freq %u band %u flag 0x%x fcs-err %i mic-err %i amsdu-more %i\n",
+ skb,
+ skb->len,
+ ieee80211_get_SA(hdr),
+ ath10k_get_tid(hdr, tid, sizeof(tid)),
+ is_multicast_ether_addr(ieee80211_get_DA(hdr)) ?
+ "mcast" : "ucast",
+ (__le16_to_cpu(hdr->seq_ctrl) & IEEE80211_SCTL_SEQ) >> 4,
+ (status->encoding == RX_ENC_LEGACY) ? "legacy" : "",
+ (status->encoding == RX_ENC_HT) ? "ht" : "",
+ (status->encoding == RX_ENC_VHT) ? "vht" : "",
+ (status->bw == RATE_INFO_BW_40) ? "40" : "",
+ (status->bw == RATE_INFO_BW_80) ? "80" : "",
+ (status->bw == RATE_INFO_BW_160) ? "160" : "",
+ status->enc_flags & RX_ENC_FLAG_SHORT_GI ? "sgi " : "",
+ status->rate_idx,
+ status->nss,
+ status->freq,
+ status->band, status->flag,
+ !!(status->flag & RX_FLAG_FAILED_FCS_CRC),
+ !!(status->flag & RX_FLAG_MMIC_ERROR),
+ !!(status->flag & RX_FLAG_AMSDU_MORE));
+ ath10k_dbg_dump(ar, ATH10K_DBG_HTT_DUMP, NULL, "rx skb: ",
+ skb->data, skb->len);
+ trace_ath10k_rx_hdr(ar, skb->data, skb->len);
+ trace_ath10k_rx_payload(ar, skb->data, skb->len);
+
+ ieee80211_rx_napi(ar->hw, NULL, skb, &ar->napi);
+}
+
+static int ath10k_htt_rx_nwifi_hdrlen(struct ath10k *ar,
+ struct ieee80211_hdr *hdr)
+{
+ int len = ieee80211_hdrlen(hdr->frame_control);
+
+ if (!test_bit(ATH10K_FW_FEATURE_NO_NWIFI_DECAP_4ADDR_PADDING,
+ ar->running_fw->fw_file.fw_features))
+ len = round_up(len, 4);
+
+ return len;
+}
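+
+/* For example, without the feature flag a 30 byte 4-addr nwifi header is
+ * assumed to be padded to 32 bytes by the firmware's decap engine, while a
+ * 24 byte 3-addr header is already 4 byte aligned and unaffected.
+ */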
+
+static void ath10k_htt_rx_h_undecap_raw(struct ath10k *ar,
+ struct sk_buff *msdu,
+ struct ieee80211_rx_status *status,
+ enum htt_rx_mpdu_encrypt_type enctype,
+ bool is_decrypted)
+{
+ struct ieee80211_hdr *hdr;
+ struct htt_rx_desc *rxd;
+ size_t hdr_len;
+ size_t crypto_len;
+ bool is_first;
+ bool is_last;
+
+ rxd = (void *)msdu->data - sizeof(*rxd);
+ is_first = !!(rxd->msdu_end.common.info0 &
+ __cpu_to_le32(RX_MSDU_END_INFO0_FIRST_MSDU));
+ is_last = !!(rxd->msdu_end.common.info0 &
+ __cpu_to_le32(RX_MSDU_END_INFO0_LAST_MSDU));
+
+ /* Delivered decapped frame:
+ * [802.11 header]
+ * [crypto param] <-- can be trimmed if !fcs_err &&
+ * !decrypt_err && !peer_idx_invalid
+ * [amsdu header] <-- only if A-MSDU
+ * [rfc1042/llc]
+ * [payload]
+ * [FCS] <-- at end, needs to be trimmed
+ */
+
+ /* This probably shouldn't happen but warn just in case */
+ if (unlikely(WARN_ON_ONCE(!is_first)))
+ return;
+
+ /* This probably shouldn't happen but warn just in case */
+ if (unlikely(WARN_ON_ONCE(!(is_first && is_last))))
+ return;
+
+ skb_trim(msdu, msdu->len - FCS_LEN);
+
+ /* In most cases this will be true for sniffed frames. It makes sense
+ * to deliver them as-is without stripping the crypto param. This is
+ * necessary for software based decryption.
+ *
+ * If there's no error then the frame is decrypted. At least that is
+ * the case for frames that come in via fragmented rx indication.
+ */
+ if (!is_decrypted)
+ return;
+
+ /* The payload is decrypted so strip crypto params. Start from tail
+ * since hdr is used to compute some stuff.
+ */
+
+ hdr = (void *)msdu->data;
+
+ /* Tail */
+ if (status->flag & RX_FLAG_IV_STRIPPED) {
+ skb_trim(msdu, msdu->len -
+ ath10k_htt_rx_crypto_mic_len(ar, enctype));
+
+ skb_trim(msdu, msdu->len -
+ ath10k_htt_rx_crypto_icv_len(ar, enctype));
+ } else {
+ /* MIC */
+ if (status->flag & RX_FLAG_MIC_STRIPPED)
+ skb_trim(msdu, msdu->len -
+ ath10k_htt_rx_crypto_mic_len(ar, enctype));
+
+ /* ICV */
+ if (status->flag & RX_FLAG_ICV_STRIPPED)
+ skb_trim(msdu, msdu->len -
+ ath10k_htt_rx_crypto_icv_len(ar, enctype));
+ }
+
+ /* MMIC */
+ if ((status->flag & RX_FLAG_MMIC_STRIPPED) &&
+ !ieee80211_has_morefrags(hdr->frame_control) &&
+ enctype == HTT_RX_MPDU_ENCRYPT_TKIP_WPA)
+ skb_trim(msdu, msdu->len - MICHAEL_MIC_LEN);
+
+ /* Head */
+ if (status->flag & RX_FLAG_IV_STRIPPED) {
+ hdr_len = ieee80211_hdrlen(hdr->frame_control);
+ crypto_len = ath10k_htt_rx_crypto_param_len(ar, enctype);
+
+ memmove((void *)msdu->data + crypto_len,
+ (void *)msdu->data, hdr_len);
+ skb_pull(msdu, crypto_len);
+ }
+}
+
+static void ath10k_htt_rx_h_undecap_nwifi(struct ath10k *ar,
+ struct sk_buff *msdu,
+ struct ieee80211_rx_status *status,
+ const u8 first_hdr[64],
+ enum htt_rx_mpdu_encrypt_type enctype)
+{
+ struct ieee80211_hdr *hdr;
+ struct htt_rx_desc *rxd;
+ size_t hdr_len;
+ u8 da[ETH_ALEN];
+ u8 sa[ETH_ALEN];
+ int l3_pad_bytes;
+ int bytes_aligned = ar->hw_params.decap_align_bytes;
+
+ /* Delivered decapped frame:
+ * [nwifi 802.11 header] <-- replaced with 802.11 hdr
+ * [rfc1042/llc]
+ *
+ * Note: The nwifi header doesn't have QoS Control and is
+ * (always?) a 3addr frame.
+ *
+ * Note2: There's no A-MSDU subframe header. Even if it's part
+ * of an A-MSDU.
+ */
+
+ /* pull decapped header and copy SA & DA */
+ rxd = (void *)msdu->data - sizeof(*rxd);
+
+ l3_pad_bytes = ath10k_rx_desc_get_l3_pad_bytes(&ar->hw_params, rxd);
+ skb_put(msdu, l3_pad_bytes);
+
+ hdr = (struct ieee80211_hdr *)(msdu->data + l3_pad_bytes);
+
+ hdr_len = ath10k_htt_rx_nwifi_hdrlen(ar, hdr);
+ ether_addr_copy(da, ieee80211_get_DA(hdr));
+ ether_addr_copy(sa, ieee80211_get_SA(hdr));
+ skb_pull(msdu, hdr_len);
+
+ /* push original 802.11 header */
+ hdr = (struct ieee80211_hdr *)first_hdr;
+ hdr_len = ieee80211_hdrlen(hdr->frame_control);
+
+ if (!(status->flag & RX_FLAG_IV_STRIPPED)) {
+ memcpy(skb_push(msdu,
+ ath10k_htt_rx_crypto_param_len(ar, enctype)),
+ (void *)hdr + round_up(hdr_len, bytes_aligned),
+ ath10k_htt_rx_crypto_param_len(ar, enctype));
+ }
+
+ memcpy(skb_push(msdu, hdr_len), hdr, hdr_len);
+
+ /* original 802.11 header has a different DA and in
+ * case of 4addr it may also have different SA
+ */
+ hdr = (struct ieee80211_hdr *)msdu->data;
+ ether_addr_copy(ieee80211_get_DA(hdr), da);
+ ether_addr_copy(ieee80211_get_SA(hdr), sa);
+}
+
+static void *ath10k_htt_rx_h_find_rfc1042(struct ath10k *ar,
+ struct sk_buff *msdu,
+ enum htt_rx_mpdu_encrypt_type enctype)
+{
+ struct ieee80211_hdr *hdr;
+ struct htt_rx_desc *rxd;
+ size_t hdr_len, crypto_len;
+ void *rfc1042;
+ bool is_first, is_last, is_amsdu;
+ int bytes_aligned = ar->hw_params.decap_align_bytes;
+
+ rxd = (void *)msdu->data - sizeof(*rxd);
+ hdr = (void *)rxd->rx_hdr_status;
+
+ is_first = !!(rxd->msdu_end.common.info0 &
+ __cpu_to_le32(RX_MSDU_END_INFO0_FIRST_MSDU));
+ is_last = !!(rxd->msdu_end.common.info0 &
+ __cpu_to_le32(RX_MSDU_END_INFO0_LAST_MSDU));
+ is_amsdu = !(is_first && is_last);
+
+ rfc1042 = hdr;
+
+ if (is_first) {
+ hdr_len = ieee80211_hdrlen(hdr->frame_control);
+ crypto_len = ath10k_htt_rx_crypto_param_len(ar, enctype);
+
+ rfc1042 += round_up(hdr_len, bytes_aligned) +
+ round_up(crypto_len, bytes_aligned);
+ }
+
+ if (is_amsdu)
+ rfc1042 += sizeof(struct amsdu_subframe_hdr);
+
+ return rfc1042;
+}
+
+static void ath10k_htt_rx_h_undecap_eth(struct ath10k *ar,
+ struct sk_buff *msdu,
+ struct ieee80211_rx_status *status,
+ const u8 first_hdr[64],
+ enum htt_rx_mpdu_encrypt_type enctype)
+{
+ struct ieee80211_hdr *hdr;
+ struct ethhdr *eth;
+ size_t hdr_len;
+ void *rfc1042;
+ u8 da[ETH_ALEN];
+ u8 sa[ETH_ALEN];
+ int l3_pad_bytes;
+ struct htt_rx_desc *rxd;
+ int bytes_aligned = ar->hw_params.decap_align_bytes;
+
+ /* Delivered decapped frame:
+ * [eth header] <-- replaced with 802.11 hdr & rfc1042/llc
+ * [payload]
+ */
+
+ rfc1042 = ath10k_htt_rx_h_find_rfc1042(ar, msdu, enctype);
+ if (WARN_ON_ONCE(!rfc1042))
+ return;
+
+ rxd = (void *)msdu->data - sizeof(*rxd);
+ l3_pad_bytes = ath10k_rx_desc_get_l3_pad_bytes(&ar->hw_params, rxd);
+ skb_put(msdu, l3_pad_bytes);
+ skb_pull(msdu, l3_pad_bytes);
+
+ /* pull decapped header and copy SA & DA */
+ eth = (struct ethhdr *)msdu->data;
+ ether_addr_copy(da, eth->h_dest);
+ ether_addr_copy(sa, eth->h_source);
+ skb_pull(msdu, sizeof(struct ethhdr));
+
+ /* push rfc1042/llc/snap */
+ memcpy(skb_push(msdu, sizeof(struct rfc1042_hdr)), rfc1042,
+ sizeof(struct rfc1042_hdr));
+
+ /* push original 802.11 header */
+ hdr = (struct ieee80211_hdr *)first_hdr;
+ hdr_len = ieee80211_hdrlen(hdr->frame_control);
+
+ if (!(status->flag & RX_FLAG_IV_STRIPPED)) {
+ memcpy(skb_push(msdu,
+ ath10k_htt_rx_crypto_param_len(ar, enctype)),
+ (void *)hdr + round_up(hdr_len, bytes_aligned),
+ ath10k_htt_rx_crypto_param_len(ar, enctype));
+ }
+
+ memcpy(skb_push(msdu, hdr_len), hdr, hdr_len);
+
+ /* original 802.11 header has a different DA and in
+ * case of 4addr it may also have different SA
+ */
+ hdr = (struct ieee80211_hdr *)msdu->data;
+ ether_addr_copy(ieee80211_get_DA(hdr), da);
+ ether_addr_copy(ieee80211_get_SA(hdr), sa);
+}
+
+static void ath10k_htt_rx_h_undecap_snap(struct ath10k *ar,
+ struct sk_buff *msdu,
+ struct ieee80211_rx_status *status,
+ const u8 first_hdr[64],
+ enum htt_rx_mpdu_encrypt_type enctype)
+{
+ struct ieee80211_hdr *hdr;
+ size_t hdr_len;
+ int l3_pad_bytes;
+ struct htt_rx_desc *rxd;
+ int bytes_aligned = ar->hw_params.decap_align_bytes;
+
+ /* Delivered decapped frame:
+ * [amsdu header] <-- replaced with 802.11 hdr
+ * [rfc1042/llc]
+ * [payload]
+ */
+
+ rxd = (void *)msdu->data - sizeof(*rxd);
+ l3_pad_bytes = ath10k_rx_desc_get_l3_pad_bytes(&ar->hw_params, rxd);
+
+ skb_put(msdu, l3_pad_bytes);
+ skb_pull(msdu, sizeof(struct amsdu_subframe_hdr) + l3_pad_bytes);
+
+ hdr = (struct ieee80211_hdr *)first_hdr;
+ hdr_len = ieee80211_hdrlen(hdr->frame_control);
+
+ if (!(status->flag & RX_FLAG_IV_STRIPPED)) {
+ memcpy(skb_push(msdu,
+ ath10k_htt_rx_crypto_param_len(ar, enctype)),
+ (void *)hdr + round_up(hdr_len, bytes_aligned),
+ ath10k_htt_rx_crypto_param_len(ar, enctype));
+ }
+
+ memcpy(skb_push(msdu, hdr_len), hdr, hdr_len);
+}
+
+static void ath10k_htt_rx_h_undecap(struct ath10k *ar,
+ struct sk_buff *msdu,
+ struct ieee80211_rx_status *status,
+ u8 first_hdr[64],
+ enum htt_rx_mpdu_encrypt_type enctype,
+ bool is_decrypted)
+{
+ struct htt_rx_desc *rxd;
+ enum rx_msdu_decap_format decap;
+
+ /* First msdu's decapped header:
+ * [802.11 header] <-- padded to 4 bytes long
+ * [crypto param] <-- padded to 4 bytes long
+ * [amsdu header] <-- only if A-MSDU
+ * [rfc1042/llc]
+ *
+ * Other (2nd, 3rd, ..) msdu's decapped header:
+ * [amsdu header] <-- only if A-MSDU
+ * [rfc1042/llc]
+ */
+
+ rxd = (void *)msdu->data - sizeof(*rxd);
+ decap = MS(__le32_to_cpu(rxd->msdu_start.common.info1),
+ RX_MSDU_START_INFO1_DECAP_FORMAT);
+
+ switch (decap) {
+ case RX_MSDU_DECAP_RAW:
+ ath10k_htt_rx_h_undecap_raw(ar, msdu, status, enctype,
+ is_decrypted);
+ break;
+ case RX_MSDU_DECAP_NATIVE_WIFI:
+ ath10k_htt_rx_h_undecap_nwifi(ar, msdu, status, first_hdr,
+ enctype);
+ break;
+ case RX_MSDU_DECAP_ETHERNET2_DIX:
+ ath10k_htt_rx_h_undecap_eth(ar, msdu, status, first_hdr, enctype);
+ break;
+ case RX_MSDU_DECAP_8023_SNAP_LLC:
+ ath10k_htt_rx_h_undecap_snap(ar, msdu, status, first_hdr,
+ enctype);
+ break;
+ }
+}
+
+static int ath10k_htt_rx_get_csum_state(struct sk_buff *skb)
+{
+ struct htt_rx_desc *rxd;
+ u32 flags, info;
+ bool is_ip4, is_ip6;
+ bool is_tcp, is_udp;
+ bool ip_csum_ok, tcpudp_csum_ok;
+
+ rxd = (void *)skb->data - sizeof(*rxd);
+ flags = __le32_to_cpu(rxd->attention.flags);
+ info = __le32_to_cpu(rxd->msdu_start.common.info1);
+
+ is_ip4 = !!(info & RX_MSDU_START_INFO1_IPV4_PROTO);
+ is_ip6 = !!(info & RX_MSDU_START_INFO1_IPV6_PROTO);
+ is_tcp = !!(info & RX_MSDU_START_INFO1_TCP_PROTO);
+ is_udp = !!(info & RX_MSDU_START_INFO1_UDP_PROTO);
+ ip_csum_ok = !(flags & RX_ATTENTION_FLAGS_IP_CHKSUM_FAIL);
+ tcpudp_csum_ok = !(flags & RX_ATTENTION_FLAGS_TCP_UDP_CHKSUM_FAIL);
+
+ if (!is_ip4 && !is_ip6)
+ return CHECKSUM_NONE;
+ if (!is_tcp && !is_udp)
+ return CHECKSUM_NONE;
+ if (!ip_csum_ok)
+ return CHECKSUM_NONE;
+ if (!tcpudp_csum_ok)
+ return CHECKSUM_NONE;
+
+ return CHECKSUM_UNNECESSARY;
+}
+
+static void ath10k_htt_rx_h_csum_offload(struct sk_buff *msdu)
+{
+ msdu->ip_summed = ath10k_htt_rx_get_csum_state(msdu);
+}
+
+static void ath10k_htt_rx_h_mpdu(struct ath10k *ar,
+ struct sk_buff_head *amsdu,
+ struct ieee80211_rx_status *status,
+ bool fill_crypt_header,
+ u8 *rx_hdr,
+ enum ath10k_pkt_rx_err *err)
+{
+ struct sk_buff *first;
+ struct sk_buff *last;
+ struct sk_buff *msdu;
+ struct htt_rx_desc *rxd;
+ struct ieee80211_hdr *hdr;
+ enum htt_rx_mpdu_encrypt_type enctype;
+ u8 first_hdr[64];
+ u8 *qos;
+ bool has_fcs_err;
+ bool has_crypto_err;
+ bool has_tkip_err;
+ bool has_peer_idx_invalid;
+ bool is_decrypted;
+ bool is_mgmt;
+ u32 attention;
+
+ if (skb_queue_empty(amsdu))
+ return;
+
+ first = skb_peek(amsdu);
+ rxd = (void *)first->data - sizeof(*rxd);
+
+ is_mgmt = !!(rxd->attention.flags &
+ __cpu_to_le32(RX_ATTENTION_FLAGS_MGMT_TYPE));
+
+ enctype = MS(__le32_to_cpu(rxd->mpdu_start.info0),
+ RX_MPDU_START_INFO0_ENCRYPT_TYPE);
+
+ /* First MSDU's Rx descriptor in an A-MSDU contains full 802.11
+ * decapped header. It'll be used for undecapping of each MSDU.
+ */
+ hdr = (void *)rxd->rx_hdr_status;
+ memcpy(first_hdr, hdr, RX_HTT_HDR_STATUS_LEN);
+
+ if (rx_hdr)
+ memcpy(rx_hdr, hdr, RX_HTT_HDR_STATUS_LEN);
+
+ /* Each A-MSDU subframe will use the original header as the base and be
+ * reported as a separate MSDU so strip the A-MSDU bit from QoS Ctl.
+ */
+ hdr = (void *)first_hdr;
+
+ if (ieee80211_is_data_qos(hdr->frame_control)) {
+ qos = ieee80211_get_qos_ctl(hdr);
+ qos[0] &= ~IEEE80211_QOS_CTL_A_MSDU_PRESENT;
+ }
+
+ /* Some attention flags are valid only in the last MSDU. */
+ last = skb_peek_tail(amsdu);
+ rxd = (void *)last->data - sizeof(*rxd);
+ attention = __le32_to_cpu(rxd->attention.flags);
+
+ has_fcs_err = !!(attention & RX_ATTENTION_FLAGS_FCS_ERR);
+ has_crypto_err = !!(attention & RX_ATTENTION_FLAGS_DECRYPT_ERR);
+ has_tkip_err = !!(attention & RX_ATTENTION_FLAGS_TKIP_MIC_ERR);
+ has_peer_idx_invalid = !!(attention & RX_ATTENTION_FLAGS_PEER_IDX_INVALID);
+
+ /* Note: If hardware captures an encrypted frame that it can't decrypt,
+ * e.g. due to fcs error, missing peer or invalid key data it will
+ * report the frame as raw.
+ */
+ is_decrypted = (enctype != HTT_RX_MPDU_ENCRYPT_NONE &&
+ !has_fcs_err &&
+ !has_crypto_err &&
+ !has_peer_idx_invalid);
+
+ /* Clear per-MPDU flags while leaving per-PPDU flags intact. */
+ status->flag &= ~(RX_FLAG_FAILED_FCS_CRC |
+ RX_FLAG_MMIC_ERROR |
+ RX_FLAG_DECRYPTED |
+ RX_FLAG_IV_STRIPPED |
+ RX_FLAG_ONLY_MONITOR |
+ RX_FLAG_MMIC_STRIPPED);
+
+ if (has_fcs_err)
+ status->flag |= RX_FLAG_FAILED_FCS_CRC;
+
+ if (has_tkip_err)
+ status->flag |= RX_FLAG_MMIC_ERROR;
+
+ if (err) {
+ if (has_fcs_err)
+ *err = ATH10K_PKT_RX_ERR_FCS;
+ else if (has_tkip_err)
+ *err = ATH10K_PKT_RX_ERR_TKIP;
+ else if (has_crypto_err)
+ *err = ATH10K_PKT_RX_ERR_CRYPT;
+ else if (has_peer_idx_invalid)
+ *err = ATH10K_PKT_RX_ERR_PEER_IDX_INVAL;
+ }
+
+ /* Firmware reports all necessary management frames via WMI already.
+ * They are not reported to monitor interfaces at all so pass the ones
+ * coming via HTT to monitor interfaces instead. This simplifies
+ * matters a lot.
+ */
+ if (is_mgmt)
+ status->flag |= RX_FLAG_ONLY_MONITOR;
+
+ if (is_decrypted) {
+ status->flag |= RX_FLAG_DECRYPTED;
+
+ if (likely(!is_mgmt))
+ status->flag |= RX_FLAG_MMIC_STRIPPED;
+
+ if (fill_crypt_header)
+ status->flag |= RX_FLAG_MIC_STRIPPED |
+ RX_FLAG_ICV_STRIPPED;
+ else
+ status->flag |= RX_FLAG_IV_STRIPPED;
+ }
+
+ skb_queue_walk(amsdu, msdu) {
+ ath10k_htt_rx_h_csum_offload(msdu);
+ ath10k_htt_rx_h_undecap(ar, msdu, status, first_hdr, enctype,
+ is_decrypted);
+
+ /* Undecapping involves copying the original 802.11 header back
+ * to sk_buff. If frame is protected and hardware has decrypted
+ * it then remove the protected bit.
+ */
+ if (!is_decrypted)
+ continue;
+ if (is_mgmt)
+ continue;
+
+ if (fill_crypt_header)
+ continue;
+
+ hdr = (void *)msdu->data;
+ hdr->frame_control &= ~__cpu_to_le16(IEEE80211_FCTL_PROTECTED);
+ }
+}
+
+static void ath10k_htt_rx_h_enqueue(struct ath10k *ar,
+ struct sk_buff_head *amsdu,
+ struct ieee80211_rx_status *status)
+{
+ struct sk_buff *msdu;
+ struct sk_buff *first_subframe;
+
+ first_subframe = skb_peek(amsdu);
+
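+	/* All subframes of an A-MSDU share their MPDU's PN, so mac80211's
+	 * replay detection must accept a repeated PN on trailing subframes;
+	 * only the first subframe keeps the strict check.
+	 */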
+ while ((msdu = __skb_dequeue(amsdu))) {
+ /* Setup per-MSDU flags */
+ if (skb_queue_empty(amsdu))
+ status->flag &= ~RX_FLAG_AMSDU_MORE;
+ else
+ status->flag |= RX_FLAG_AMSDU_MORE;
+
+ if (msdu == first_subframe) {
+ first_subframe = NULL;
+ status->flag &= ~RX_FLAG_ALLOW_SAME_PN;
+ } else {
+ status->flag |= RX_FLAG_ALLOW_SAME_PN;
+ }
+
+ ath10k_htt_rx_h_queue_msdu(ar, status, msdu);
+ }
+}
+
+static int ath10k_unchain_msdu(struct sk_buff_head *amsdu,
+			       unsigned long *unchain_cnt)
+{
+ struct sk_buff *skb, *first;
+ int space;
+ int total_len = 0;
+ int amsdu_len = skb_queue_len(amsdu);
+
+	/* TODO: We might be able to optimize this by using
+	 * skb_try_coalesce or a similar method to decrease
+	 * copying, or maybe get mac80211 to provide a way to
+	 * just receive a list of skbs?
+	 */
+
+ first = __skb_dequeue(amsdu);
+
+	/* Compute the total length so the head can be expanded once. */
+ skb_queue_walk(amsdu, skb)
+ total_len += skb->len;
+
+ space = total_len - skb_tailroom(first);
+ if ((space > 0) &&
+ (pskb_expand_head(first, 0, space, GFP_ATOMIC) < 0)) {
+ /* TODO: bump some rx-oom error stat */
+ /* put it back together so we can free the
+ * whole list at once.
+ */
+ __skb_queue_head(amsdu, first);
+ return -1;
+ }
+
+	/* Walk the list again, copying the contents into the
+	 * first msdu.
+	 */
+ while ((skb = __skb_dequeue(amsdu))) {
+ skb_copy_from_linear_data(skb, skb_put(first, skb->len),
+ skb->len);
+ dev_kfree_skb_any(skb);
+ }
+
+ __skb_queue_head(amsdu, first);
+
+ *unchain_cnt += amsdu_len - 1;
+
+ return 0;
+}
+
+static void ath10k_htt_rx_h_unchain(struct ath10k *ar,
+ struct sk_buff_head *amsdu,
+				    unsigned long *drop_cnt,
+				    unsigned long *unchain_cnt)
+{
+ struct sk_buff *first;
+ struct htt_rx_desc *rxd;
+ enum rx_msdu_decap_format decap;
+
+ first = skb_peek(amsdu);
+ rxd = (void *)first->data - sizeof(*rxd);
+ decap = MS(__le32_to_cpu(rxd->msdu_start.common.info1),
+ RX_MSDU_START_INFO1_DECAP_FORMAT);
+
+ /* FIXME: Current unchaining logic can only handle simple case of raw
+ * msdu chaining. If decapping is other than raw the chaining may be
+ * more complex and this isn't handled by the current code. Don't even
+ * try re-constructing such frames - it'll be pretty much garbage.
+ */
+ if (decap != RX_MSDU_DECAP_RAW ||
+ skb_queue_len(amsdu) != 1 + rxd->frag_info.ring2_more_count) {
+ *drop_cnt += skb_queue_len(amsdu);
+ __skb_queue_purge(amsdu);
+ return;
+ }
+
+ ath10k_unchain_msdu(amsdu, unchain_cnt);
+}
+
+static bool ath10k_htt_rx_validate_amsdu(struct ath10k *ar,
+ struct sk_buff_head *amsdu)
+{
+ u8 *subframe_hdr;
+ struct sk_buff *first;
+ bool is_first, is_last;
+ struct htt_rx_desc *rxd;
+ struct ieee80211_hdr *hdr;
+ size_t hdr_len, crypto_len;
+ enum htt_rx_mpdu_encrypt_type enctype;
+ int bytes_aligned = ar->hw_params.decap_align_bytes;
+
+ first = skb_peek(amsdu);
+
+ rxd = (void *)first->data - sizeof(*rxd);
+ hdr = (void *)rxd->rx_hdr_status;
+
+ is_first = !!(rxd->msdu_end.common.info0 &
+ __cpu_to_le32(RX_MSDU_END_INFO0_FIRST_MSDU));
+ is_last = !!(rxd->msdu_end.common.info0 &
+ __cpu_to_le32(RX_MSDU_END_INFO0_LAST_MSDU));
+
+ /* Return in case of non-aggregated msdu */
+ if (is_first && is_last)
+ return true;
+
+	/* Reject the list if the FIRST_MSDU flag is not set on its first
+	 * msdu.
+	 */
+	if (!is_first)
+		return false;
+
+ enctype = MS(__le32_to_cpu(rxd->mpdu_start.info0),
+ RX_MPDU_START_INFO0_ENCRYPT_TYPE);
+
+ hdr_len = ieee80211_hdrlen(hdr->frame_control);
+ crypto_len = ath10k_htt_rx_crypto_param_len(ar, enctype);
+
+ subframe_hdr = (u8 *)hdr + round_up(hdr_len, bytes_aligned) +
+ crypto_len;
+
+	/* Validate that the amsdu has a proper first subframe. There is a
+	 * chance that a single msdu can be received as an amsdu when the
+	 * unauthenticated amsdu flag of a QoS header gets flipped in
+	 * non-SPP A-MSDUs. In such cases the first subframe contains an
+	 * llc/snap header in place of a valid DA, so return false if the
+	 * DA matches the rfc1042 pattern.
+	 */
+ if (ether_addr_equal(subframe_hdr, rfc1042_header))
+ return false;
+
+ return true;
+}
+
+static bool ath10k_htt_rx_amsdu_allowed(struct ath10k *ar,
+ struct sk_buff_head *amsdu,
+ struct ieee80211_rx_status *rx_status)
+{
+ if (!rx_status->freq) {
+ ath10k_dbg(ar, ATH10K_DBG_HTT, "no channel configured; ignoring frame(s)!\n");
+ return false;
+ }
+
+ if (test_bit(ATH10K_CAC_RUNNING, &ar->dev_flags)) {
+ ath10k_dbg(ar, ATH10K_DBG_HTT, "htt rx cac running\n");
+ return false;
+ }
+
+ if (!ath10k_htt_rx_validate_amsdu(ar, amsdu)) {
+ ath10k_dbg(ar, ATH10K_DBG_HTT, "invalid amsdu received\n");
+ return false;
+ }
+
+ return true;
+}
+
+static void ath10k_htt_rx_h_filter(struct ath10k *ar,
+ struct sk_buff_head *amsdu,
+ struct ieee80211_rx_status *rx_status,
+ unsigned long *drop_cnt)
+{
+ if (skb_queue_empty(amsdu))
+ return;
+
+ if (ath10k_htt_rx_amsdu_allowed(ar, amsdu, rx_status))
+ return;
+
+ if (drop_cnt)
+ *drop_cnt += skb_queue_len(amsdu);
+
+ __skb_queue_purge(amsdu);
+}
+
+static int ath10k_htt_rx_handle_amsdu(struct ath10k_htt *htt)
+{
+ struct ath10k *ar = htt->ar;
+ struct ieee80211_rx_status *rx_status = &htt->rx_status;
+ struct sk_buff_head amsdu;
+ int ret;
+ unsigned long drop_cnt = 0;
+ unsigned long unchain_cnt = 0;
+ unsigned long drop_cnt_filter = 0;
+ unsigned long msdus_to_queue, num_msdus;
+ enum ath10k_pkt_rx_err err = ATH10K_PKT_RX_ERR_MAX;
+ u8 first_hdr[RX_HTT_HDR_STATUS_LEN];
+
+ __skb_queue_head_init(&amsdu);
+
+ spin_lock_bh(&htt->rx_ring.lock);
+ if (htt->rx_confused) {
+ spin_unlock_bh(&htt->rx_ring.lock);
+ return -EIO;
+ }
+ ret = ath10k_htt_rx_amsdu_pop(htt, &amsdu);
+ spin_unlock_bh(&htt->rx_ring.lock);
+
+ if (ret < 0) {
+ ath10k_warn(ar, "rx ring became corrupted: %d\n", ret);
+ __skb_queue_purge(&amsdu);
+ /* FIXME: It's probably a good idea to reboot the
+ * device instead of leaving it inoperable.
+ */
+ htt->rx_confused = true;
+ return ret;
+ }
+
+ num_msdus = skb_queue_len(&amsdu);
+
+ ath10k_htt_rx_h_ppdu(ar, &amsdu, rx_status, 0xffff);
+
+ /* ret > 0 indicates chained msdus */
+ if (ret > 0)
+ ath10k_htt_rx_h_unchain(ar, &amsdu, &drop_cnt, &unchain_cnt);
+
+ ath10k_htt_rx_h_filter(ar, &amsdu, rx_status, &drop_cnt_filter);
+ ath10k_htt_rx_h_mpdu(ar, &amsdu, rx_status, true, first_hdr, &err);
+ msdus_to_queue = skb_queue_len(&amsdu);
+ ath10k_htt_rx_h_enqueue(ar, &amsdu, rx_status);
+
+ ath10k_sta_update_rx_tid_stats(ar, first_hdr, num_msdus, err,
+ unchain_cnt, drop_cnt, drop_cnt_filter,
+ msdus_to_queue);
+
+ return 0;
+}
+
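+/* Rx indications are not processed here directly; the handler only adds
+ * the number of announced mpdus to num_mpdus_ready, and the actual amsdu
+ * processing is deferred to the NAPI poll loop in
+ * ath10k_htt_txrx_compl_task().
+ */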
+static void ath10k_htt_rx_proc_rx_ind(struct ath10k_htt *htt,
+ struct htt_rx_indication *rx)
+{
+ struct ath10k *ar = htt->ar;
+ struct htt_rx_indication_mpdu_range *mpdu_ranges;
+ int num_mpdu_ranges;
+ int i, mpdu_count = 0;
+ u16 peer_id;
+ u8 tid;
+
+ num_mpdu_ranges = MS(__le32_to_cpu(rx->hdr.info1),
+ HTT_RX_INDICATION_INFO1_NUM_MPDU_RANGES);
+ peer_id = __le16_to_cpu(rx->hdr.peer_id);
+ tid = MS(rx->hdr.info0, HTT_RX_INDICATION_INFO0_EXT_TID);
+
+ mpdu_ranges = htt_rx_ind_get_mpdu_ranges(rx);
+
+ ath10k_dbg_dump(ar, ATH10K_DBG_HTT_DUMP, NULL, "htt rx ind: ",
+ rx, sizeof(*rx) +
+ (sizeof(struct htt_rx_indication_mpdu_range) *
+ num_mpdu_ranges));
+
+ for (i = 0; i < num_mpdu_ranges; i++)
+ mpdu_count += mpdu_ranges[i].mpdu_count;
+
+ atomic_add(mpdu_count, &htt->num_mpdus_ready);
+
+ ath10k_sta_update_rx_tid_stats_ampdu(ar, peer_id, tid, mpdu_ranges,
+ num_mpdu_ranges);
+}
+
+static void ath10k_htt_rx_tx_compl_ind(struct ath10k *ar,
+ struct sk_buff *skb)
+{
+ struct ath10k_htt *htt = &ar->htt;
+ struct htt_resp *resp = (struct htt_resp *)skb->data;
+ struct htt_tx_done tx_done = {};
+ int status = MS(resp->data_tx_completion.flags, HTT_DATA_TX_STATUS);
+ __le16 msdu_id;
+ int i;
+
+ switch (status) {
+ case HTT_DATA_TX_STATUS_NO_ACK:
+ tx_done.status = HTT_TX_COMPL_STATE_NOACK;
+ break;
+ case HTT_DATA_TX_STATUS_OK:
+ tx_done.status = HTT_TX_COMPL_STATE_ACK;
+ break;
+ case HTT_DATA_TX_STATUS_DISCARD:
+ case HTT_DATA_TX_STATUS_POSTPONE:
+ case HTT_DATA_TX_STATUS_DOWNLOAD_FAIL:
+ tx_done.status = HTT_TX_COMPL_STATE_DISCARD;
+ break;
+ default:
+ ath10k_warn(ar, "unhandled tx completion status %d\n", status);
+ tx_done.status = HTT_TX_COMPL_STATE_DISCARD;
+ break;
+ }
+
+ ath10k_dbg(ar, ATH10K_DBG_HTT, "htt tx completion num_msdus %d\n",
+ resp->data_tx_completion.num_msdus);
+
+ for (i = 0; i < resp->data_tx_completion.num_msdus; i++) {
+ msdu_id = resp->data_tx_completion.msdus[i];
+ tx_done.msdu_id = __le16_to_cpu(msdu_id);
+
+ /* kfifo_put: In practice firmware shouldn't fire off per-CE
+ * interrupt and main interrupt (MSI/-X range case) for the same
+ * HTC service so it should be safe to use kfifo_put w/o lock.
+ *
+ * From kfifo_put() documentation:
+ * Note that with only one concurrent reader and one concurrent
+ * writer, you don't need extra locking to use these macros.
+ */
+ if (!kfifo_put(&htt->txdone_fifo, tx_done)) {
+ ath10k_warn(ar, "txdone fifo overrun, msdu_id %d status %d\n",
+ tx_done.msdu_id, tx_done.status);
+ ath10k_txrx_tx_unref(htt, &tx_done);
+ }
+ }
+}
+
+static void ath10k_htt_rx_addba(struct ath10k *ar, struct htt_resp *resp)
+{
+ struct htt_rx_addba *ev = &resp->rx_addba;
+ struct ath10k_peer *peer;
+ struct ath10k_vif *arvif;
+ u16 info0, tid, peer_id;
+
+ info0 = __le16_to_cpu(ev->info0);
+ tid = MS(info0, HTT_RX_BA_INFO0_TID);
+ peer_id = MS(info0, HTT_RX_BA_INFO0_PEER_ID);
+
+ ath10k_dbg(ar, ATH10K_DBG_HTT,
+ "htt rx addba tid %hu peer_id %hu size %hhu\n",
+ tid, peer_id, ev->window_size);
+
+ spin_lock_bh(&ar->data_lock);
+ peer = ath10k_peer_find_by_id(ar, peer_id);
+ if (!peer) {
+ ath10k_warn(ar, "received addba event for invalid peer_id: %hu\n",
+ peer_id);
+ spin_unlock_bh(&ar->data_lock);
+ return;
+ }
+
+ arvif = ath10k_get_arvif(ar, peer->vdev_id);
+ if (!arvif) {
+ ath10k_warn(ar, "received addba event for invalid vdev_id: %u\n",
+ peer->vdev_id);
+ spin_unlock_bh(&ar->data_lock);
+ return;
+ }
+
+ ath10k_dbg(ar, ATH10K_DBG_HTT,
+ "htt rx start rx ba session sta %pM tid %hu size %hhu\n",
+ peer->addr, tid, ev->window_size);
+
+ ieee80211_start_rx_ba_session_offl(arvif->vif, peer->addr, tid);
+ spin_unlock_bh(&ar->data_lock);
+}
+
+static void ath10k_htt_rx_delba(struct ath10k *ar, struct htt_resp *resp)
+{
+ struct htt_rx_delba *ev = &resp->rx_delba;
+ struct ath10k_peer *peer;
+ struct ath10k_vif *arvif;
+ u16 info0, tid, peer_id;
+
+ info0 = __le16_to_cpu(ev->info0);
+ tid = MS(info0, HTT_RX_BA_INFO0_TID);
+ peer_id = MS(info0, HTT_RX_BA_INFO0_PEER_ID);
+
+ ath10k_dbg(ar, ATH10K_DBG_HTT,
+ "htt rx delba tid %hu peer_id %hu\n",
+ tid, peer_id);
+
+ spin_lock_bh(&ar->data_lock);
+ peer = ath10k_peer_find_by_id(ar, peer_id);
+ if (!peer) {
+ ath10k_warn(ar, "received addba event for invalid peer_id: %hu\n",
+ peer_id);
+ spin_unlock_bh(&ar->data_lock);
+ return;
+ }
+
+ arvif = ath10k_get_arvif(ar, peer->vdev_id);
+ if (!arvif) {
+ ath10k_warn(ar, "received addba event for invalid vdev_id: %u\n",
+ peer->vdev_id);
+ spin_unlock_bh(&ar->data_lock);
+ return;
+ }
+
+ ath10k_dbg(ar, ATH10K_DBG_HTT,
+ "htt rx stop rx ba session sta %pM tid %hu\n",
+ peer->addr, tid);
+
+ ieee80211_stop_rx_ba_session_offl(arvif->vif, peer->addr, tid);
+ spin_unlock_bh(&ar->data_lock);
+}
+
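+/* Split the next A-MSDU off the head of @list into @amsdu by dequeueing
+ * msdus until one carries the LAST_MSDU flag. If @list runs out before
+ * the last msdu is seen, the partial A-MSDU is spliced back and -EAGAIN
+ * is returned.
+ */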
+static int ath10k_htt_rx_extract_amsdu(struct sk_buff_head *list,
+ struct sk_buff_head *amsdu)
+{
+ struct sk_buff *msdu;
+ struct htt_rx_desc *rxd;
+
+ if (skb_queue_empty(list))
+ return -ENOBUFS;
+
+ if (WARN_ON(!skb_queue_empty(amsdu)))
+ return -EINVAL;
+
+ while ((msdu = __skb_dequeue(list))) {
+ __skb_queue_tail(amsdu, msdu);
+
+ rxd = (void *)msdu->data - sizeof(*rxd);
+ if (rxd->msdu_end.common.info0 &
+ __cpu_to_le32(RX_MSDU_END_INFO0_LAST_MSDU))
+ break;
+ }
+
+ msdu = skb_peek_tail(amsdu);
+ rxd = (void *)msdu->data - sizeof(*rxd);
+ if (!(rxd->msdu_end.common.info0 &
+ __cpu_to_le32(RX_MSDU_END_INFO0_LAST_MSDU))) {
+ skb_queue_splice_init(amsdu, list);
+ return -EAGAIN;
+ }
+
+ return 0;
+}
+
+static void ath10k_htt_rx_h_rx_offload_prot(struct ieee80211_rx_status *status,
+ struct sk_buff *skb)
+{
+ struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data;
+
+ if (!ieee80211_has_protected(hdr->frame_control))
+ return;
+
+ /* Offloaded frames are already decrypted but firmware insists they are
+ * protected in the 802.11 header. Strip the flag. Otherwise mac80211
+ * will drop the frame.
+ */
+
+ hdr->frame_control &= ~__cpu_to_le16(IEEE80211_FCTL_PROTECTED);
+ status->flag |= RX_FLAG_DECRYPTED |
+ RX_FLAG_IV_STRIPPED |
+ RX_FLAG_MMIC_STRIPPED;
+}
+
+static void ath10k_htt_rx_h_rx_offload(struct ath10k *ar,
+ struct sk_buff_head *list)
+{
+ struct ath10k_htt *htt = &ar->htt;
+ struct ieee80211_rx_status *status = &htt->rx_status;
+ struct htt_rx_offload_msdu *rx;
+ struct sk_buff *msdu;
+ size_t offset;
+
+ while ((msdu = __skb_dequeue(list))) {
+ /* Offloaded frames don't have Rx descriptor. Instead they have
+ * a short meta information header.
+ */
+
+ rx = (void *)msdu->data;
+
+ skb_put(msdu, sizeof(*rx));
+ skb_pull(msdu, sizeof(*rx));
+
+ if (skb_tailroom(msdu) < __le16_to_cpu(rx->msdu_len)) {
+ ath10k_warn(ar, "dropping frame: offloaded rx msdu is too long!\n");
+ dev_kfree_skb_any(msdu);
+ continue;
+ }
+
+ skb_put(msdu, __le16_to_cpu(rx->msdu_len));
+
+ /* Offloaded rx header length isn't multiple of 2 nor 4 so the
+ * actual payload is unaligned. Align the frame. Otherwise
+ * mac80211 complains. This shouldn't reduce performance much
+ * because these offloaded frames are rare.
+ */
+ offset = 4 - ((unsigned long)msdu->data & 3);
+ skb_put(msdu, offset);
+ memmove(msdu->data + offset, msdu->data, msdu->len);
+ skb_pull(msdu, offset);
+
+ /* FIXME: The frame is NWifi. Re-construct QoS Control
+ * if possible later.
+ */
+
+ memset(status, 0, sizeof(*status));
+ status->flag |= RX_FLAG_NO_SIGNAL_VAL;
+
+ ath10k_htt_rx_h_rx_offload_prot(status, msdu);
+ ath10k_htt_rx_h_channel(ar, status, NULL, rx->vdev_id);
+ ath10k_htt_rx_h_queue_msdu(ar, status, msdu);
+ }
+}
+
+static int ath10k_htt_rx_in_ord_ind(struct ath10k *ar, struct sk_buff *skb)
+{
+ struct ath10k_htt *htt = &ar->htt;
+ struct htt_resp *resp = (void *)skb->data;
+ struct ieee80211_rx_status *status = &htt->rx_status;
+ struct sk_buff_head list;
+ struct sk_buff_head amsdu;
+ u16 peer_id;
+ u16 msdu_count;
+ u8 vdev_id;
+ u8 tid;
+ bool offload;
+ bool frag;
+ int ret;
+
+ lockdep_assert_held(&htt->rx_ring.lock);
+
+ if (htt->rx_confused)
+ return -EIO;
+
+ skb_pull(skb, sizeof(resp->hdr));
+ skb_pull(skb, sizeof(resp->rx_in_ord_ind));
+
+ peer_id = __le16_to_cpu(resp->rx_in_ord_ind.peer_id);
+ msdu_count = __le16_to_cpu(resp->rx_in_ord_ind.msdu_count);
+ vdev_id = resp->rx_in_ord_ind.vdev_id;
+ tid = SM(resp->rx_in_ord_ind.info, HTT_RX_IN_ORD_IND_INFO_TID);
+ offload = !!(resp->rx_in_ord_ind.info &
+ HTT_RX_IN_ORD_IND_INFO_OFFLOAD_MASK);
+ frag = !!(resp->rx_in_ord_ind.info & HTT_RX_IN_ORD_IND_INFO_FRAG_MASK);
+
+ ath10k_dbg(ar, ATH10K_DBG_HTT,
+ "htt rx in ord vdev %i peer %i tid %i offload %i frag %i msdu count %i\n",
+ vdev_id, peer_id, tid, offload, frag, msdu_count);
+
+ if (skb->len < msdu_count * sizeof(*resp->rx_in_ord_ind.msdu_descs32)) {
+ ath10k_warn(ar, "dropping invalid in order rx indication\n");
+ return -EINVAL;
+ }
+
+ /* The event can deliver more than 1 A-MSDU. Each A-MSDU is later
+ * extracted and processed.
+ */
+ __skb_queue_head_init(&list);
+ if (ar->hw_params.target_64bit)
+ ret = ath10k_htt_rx_pop_paddr64_list(htt, &resp->rx_in_ord_ind,
+ &list);
+ else
+ ret = ath10k_htt_rx_pop_paddr32_list(htt, &resp->rx_in_ord_ind,
+ &list);
+
+ if (ret < 0) {
+ ath10k_warn(ar, "failed to pop paddr list: %d\n", ret);
+ htt->rx_confused = true;
+ return -EIO;
+ }
+
+ /* Offloaded frames are very different and need to be handled
+ * separately.
+ */
+ if (offload)
+ ath10k_htt_rx_h_rx_offload(ar, &list);
+
+ while (!skb_queue_empty(&list)) {
+ __skb_queue_head_init(&amsdu);
+ ret = ath10k_htt_rx_extract_amsdu(&list, &amsdu);
+ switch (ret) {
+ case 0:
+ /* Note: The in-order indication may report interleaved
+ * frames from different PPDUs meaning reported rx rate
+ * to mac80211 isn't accurate/reliable. It's still
+ * better to report something than nothing though. This
+ * should still give an idea about rx rate to the user.
+ */
+ ath10k_htt_rx_h_ppdu(ar, &amsdu, status, vdev_id);
+ ath10k_htt_rx_h_filter(ar, &amsdu, status, NULL);
+ ath10k_htt_rx_h_mpdu(ar, &amsdu, status, false, NULL,
+ NULL);
+ ath10k_htt_rx_h_enqueue(ar, &amsdu, status);
+ break;
+ case -EAGAIN:
+ /* fall through */
+ default:
+ /* Should not happen. */
+ ath10k_warn(ar, "failed to extract amsdu: %d\n", ret);
+ htt->rx_confused = true;
+ __skb_queue_purge(&list);
+ return -EIO;
+ }
+ }
+ return ret;
+}
+
+static void ath10k_htt_rx_tx_fetch_resp_id_confirm(struct ath10k *ar,
+ const __le32 *resp_ids,
+ int num_resp_ids)
+{
+ int i;
+ u32 resp_id;
+
+ ath10k_dbg(ar, ATH10K_DBG_HTT, "htt rx tx fetch confirm num_resp_ids %d\n",
+ num_resp_ids);
+
+ for (i = 0; i < num_resp_ids; i++) {
+ resp_id = le32_to_cpu(resp_ids[i]);
+
+ ath10k_dbg(ar, ATH10K_DBG_HTT, "htt rx tx fetch confirm resp_id %u\n",
+ resp_id);
+
+ /* TODO: free resp_id */
+ }
+}
+
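+/* In pull mode the firmware solicits transmissions with tx fetch
+ * indications: each record names a peer/tid queue and an upper bound on
+ * msdus and bytes to push. The host transmits up to those bounds, writes
+ * the actual counts back into the records and returns them in a tx fetch
+ * response.
+ */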
+static void ath10k_htt_rx_tx_fetch_ind(struct ath10k *ar, struct sk_buff *skb)
+{
+ struct ieee80211_hw *hw = ar->hw;
+ struct ieee80211_txq *txq;
+ struct htt_resp *resp = (struct htt_resp *)skb->data;
+ struct htt_tx_fetch_record *record;
+ size_t len;
+ size_t max_num_bytes;
+ size_t max_num_msdus;
+ size_t num_bytes;
+ size_t num_msdus;
+ const __le32 *resp_ids;
+ u16 num_records;
+ u16 num_resp_ids;
+ u16 peer_id;
+ u8 tid;
+ int ret;
+ int i;
+
+ ath10k_dbg(ar, ATH10K_DBG_HTT, "htt rx tx fetch ind\n");
+
+ len = sizeof(resp->hdr) + sizeof(resp->tx_fetch_ind);
+ if (unlikely(skb->len < len)) {
+ ath10k_warn(ar, "received corrupted tx_fetch_ind event: buffer too short\n");
+ return;
+ }
+
+ num_records = le16_to_cpu(resp->tx_fetch_ind.num_records);
+ num_resp_ids = le16_to_cpu(resp->tx_fetch_ind.num_resp_ids);
+
+ len += sizeof(resp->tx_fetch_ind.records[0]) * num_records;
+ len += sizeof(resp->tx_fetch_ind.resp_ids[0]) * num_resp_ids;
+
+ if (unlikely(skb->len < len)) {
+ ath10k_warn(ar, "received corrupted tx_fetch_ind event: too many records/resp_ids\n");
+ return;
+ }
+
+ ath10k_dbg(ar, ATH10K_DBG_HTT, "htt rx tx fetch ind num records %hu num resps %hu seq %hu\n",
+ num_records, num_resp_ids,
+ le16_to_cpu(resp->tx_fetch_ind.fetch_seq_num));
+
+ if (!ar->htt.tx_q_state.enabled) {
+ ath10k_warn(ar, "received unexpected tx_fetch_ind event: not enabled\n");
+ return;
+ }
+
+ if (ar->htt.tx_q_state.mode == HTT_TX_MODE_SWITCH_PUSH) {
+ ath10k_warn(ar, "received unexpected tx_fetch_ind event: in push mode\n");
+ return;
+ }
+
+ rcu_read_lock();
+
+ for (i = 0; i < num_records; i++) {
+ record = &resp->tx_fetch_ind.records[i];
+ peer_id = MS(le16_to_cpu(record->info),
+ HTT_TX_FETCH_RECORD_INFO_PEER_ID);
+ tid = MS(le16_to_cpu(record->info),
+ HTT_TX_FETCH_RECORD_INFO_TID);
+ max_num_msdus = le16_to_cpu(record->num_msdus);
+ max_num_bytes = le32_to_cpu(record->num_bytes);
+
+ ath10k_dbg(ar, ATH10K_DBG_HTT, "htt rx tx fetch record %i peer_id %hu tid %hhu msdus %zu bytes %zu\n",
+ i, peer_id, tid, max_num_msdus, max_num_bytes);
+
+ if (unlikely(peer_id >= ar->htt.tx_q_state.num_peers) ||
+ unlikely(tid >= ar->htt.tx_q_state.num_tids)) {
+ ath10k_warn(ar, "received out of range peer_id %hu tid %hhu\n",
+ peer_id, tid);
+ continue;
+ }
+
+ spin_lock_bh(&ar->data_lock);
+ txq = ath10k_mac_txq_lookup(ar, peer_id, tid);
+ spin_unlock_bh(&ar->data_lock);
+
+ /* It is okay to release the lock and use txq because RCU read
+ * lock is held.
+ */
+
+ if (unlikely(!txq)) {
+ ath10k_warn(ar, "failed to lookup txq for peer_id %hu tid %hhu\n",
+ peer_id, tid);
+ continue;
+ }
+
+ num_msdus = 0;
+ num_bytes = 0;
+
+ while (num_msdus < max_num_msdus &&
+ num_bytes < max_num_bytes) {
+ ret = ath10k_mac_tx_push_txq(hw, txq);
+ if (ret < 0)
+ break;
+
+ num_msdus++;
+ num_bytes += ret;
+ }
+
+ record->num_msdus = cpu_to_le16(num_msdus);
+ record->num_bytes = cpu_to_le32(num_bytes);
+
+ ath10k_htt_tx_txq_recalc(hw, txq);
+ }
+
+ rcu_read_unlock();
+
+ resp_ids = ath10k_htt_get_tx_fetch_ind_resp_ids(&resp->tx_fetch_ind);
+ ath10k_htt_rx_tx_fetch_resp_id_confirm(ar, resp_ids, num_resp_ids);
+
+ ret = ath10k_htt_tx_fetch_resp(ar,
+ resp->tx_fetch_ind.token,
+ resp->tx_fetch_ind.fetch_seq_num,
+ resp->tx_fetch_ind.records,
+ num_records);
+ if (unlikely(ret)) {
+ ath10k_warn(ar, "failed to submit tx fetch resp for token 0x%08x: %d\n",
+ le32_to_cpu(resp->tx_fetch_ind.token), ret);
+ /* FIXME: request fw restart */
+ }
+
+ ath10k_htt_tx_txq_sync(ar);
+}
+
+static void ath10k_htt_rx_tx_fetch_confirm(struct ath10k *ar,
+ struct sk_buff *skb)
+{
+ const struct htt_resp *resp = (void *)skb->data;
+ size_t len;
+ int num_resp_ids;
+
+ ath10k_dbg(ar, ATH10K_DBG_HTT, "htt rx tx fetch confirm\n");
+
+ len = sizeof(resp->hdr) + sizeof(resp->tx_fetch_confirm);
+ if (unlikely(skb->len < len)) {
+ ath10k_warn(ar, "received corrupted tx_fetch_confirm event: buffer too short\n");
+ return;
+ }
+
+ num_resp_ids = le16_to_cpu(resp->tx_fetch_confirm.num_resp_ids);
+ len += sizeof(resp->tx_fetch_confirm.resp_ids[0]) * num_resp_ids;
+
+ if (unlikely(skb->len < len)) {
+ ath10k_warn(ar, "received corrupted tx_fetch_confirm event: resp_ids buffer overflow\n");
+ return;
+ }
+
+ ath10k_htt_rx_tx_fetch_resp_id_confirm(ar,
+ resp->tx_fetch_confirm.resp_ids,
+ num_resp_ids);
+}
+
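+/* The firmware switches the host between two queuing modes: in push mode
+ * the host transmits on its own, while in push-pull mode it advertises
+ * queue depth via the shared tx queue state (see htt_tx.c) and pushes at
+ * most num_push_allowed frames until the firmware pulls more with tx
+ * fetch indications.
+ */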
+static void ath10k_htt_rx_tx_mode_switch_ind(struct ath10k *ar,
+ struct sk_buff *skb)
+{
+ const struct htt_resp *resp = (void *)skb->data;
+ const struct htt_tx_mode_switch_record *record;
+ struct ieee80211_txq *txq;
+ struct ath10k_txq *artxq;
+ size_t len;
+ size_t num_records;
+ enum htt_tx_mode_switch_mode mode;
+ bool enable;
+ u16 info0;
+ u16 info1;
+ u16 threshold;
+ u16 peer_id;
+ u8 tid;
+ int i;
+
+ ath10k_dbg(ar, ATH10K_DBG_HTT, "htt rx tx mode switch ind\n");
+
+ len = sizeof(resp->hdr) + sizeof(resp->tx_mode_switch_ind);
+ if (unlikely(skb->len < len)) {
+ ath10k_warn(ar, "received corrupted tx_mode_switch_ind event: buffer too short\n");
+ return;
+ }
+
+ info0 = le16_to_cpu(resp->tx_mode_switch_ind.info0);
+ info1 = le16_to_cpu(resp->tx_mode_switch_ind.info1);
+
+ enable = !!(info0 & HTT_TX_MODE_SWITCH_IND_INFO0_ENABLE);
+ num_records = MS(info0, HTT_TX_MODE_SWITCH_IND_INFO0_NUM_RECORDS);
+ mode = MS(info1, HTT_TX_MODE_SWITCH_IND_INFO1_MODE);
+ threshold = MS(info1, HTT_TX_MODE_SWITCH_IND_INFO1_THRESHOLD);
+
+ ath10k_dbg(ar, ATH10K_DBG_HTT,
+ "htt rx tx mode switch ind info0 0x%04hx info1 0x%04hx enable %d num records %zd mode %d threshold %hu\n",
+ info0, info1, enable, num_records, mode, threshold);
+
+ len += sizeof(resp->tx_mode_switch_ind.records[0]) * num_records;
+
+ if (unlikely(skb->len < len)) {
+ ath10k_warn(ar, "received corrupted tx_mode_switch_mode_ind event: too many records\n");
+ return;
+ }
+
+ switch (mode) {
+ case HTT_TX_MODE_SWITCH_PUSH:
+ case HTT_TX_MODE_SWITCH_PUSH_PULL:
+ break;
+ default:
+ ath10k_warn(ar, "received invalid tx_mode_switch_mode_ind mode %d, ignoring\n",
+ mode);
+ return;
+ }
+
+ if (!enable)
+ return;
+
+ ar->htt.tx_q_state.enabled = enable;
+ ar->htt.tx_q_state.mode = mode;
+ ar->htt.tx_q_state.num_push_allowed = threshold;
+
+ rcu_read_lock();
+
+ for (i = 0; i < num_records; i++) {
+ record = &resp->tx_mode_switch_ind.records[i];
+ info0 = le16_to_cpu(record->info0);
+ peer_id = MS(info0, HTT_TX_MODE_SWITCH_RECORD_INFO0_PEER_ID);
+ tid = MS(info0, HTT_TX_MODE_SWITCH_RECORD_INFO0_TID);
+
+ if (unlikely(peer_id >= ar->htt.tx_q_state.num_peers) ||
+ unlikely(tid >= ar->htt.tx_q_state.num_tids)) {
+ ath10k_warn(ar, "received out of range peer_id %hu tid %hhu\n",
+ peer_id, tid);
+ continue;
+ }
+
+ spin_lock_bh(&ar->data_lock);
+ txq = ath10k_mac_txq_lookup(ar, peer_id, tid);
+ spin_unlock_bh(&ar->data_lock);
+
+ /* It is okay to release the lock and use txq because RCU read
+ * lock is held.
+ */
+
+ if (unlikely(!txq)) {
+ ath10k_warn(ar, "failed to lookup txq for peer_id %hu tid %hhu\n",
+ peer_id, tid);
+ continue;
+ }
+
+ spin_lock_bh(&ar->htt.tx_lock);
+ artxq = (void *)txq->drv_priv;
+ artxq->num_push_allowed = le16_to_cpu(record->num_max_msdus);
+ spin_unlock_bh(&ar->htt.tx_lock);
+ }
+
+ rcu_read_unlock();
+
+ ath10k_mac_tx_push_pending(ar);
+}
+
+void ath10k_htt_htc_t2h_msg_handler(struct ath10k *ar, struct sk_buff *skb)
+{
+ bool release;
+
+ release = ath10k_htt_t2h_msg_handler(ar, skb);
+
+ /* Free the indication buffer */
+ if (release)
+ dev_kfree_skb_any(skb);
+}
+
+static inline bool is_valid_legacy_rate(u8 rate)
+{
+ static const u8 legacy_rates[] = {1, 2, 5, 11, 6, 9, 12,
+ 18, 24, 36, 48, 54};
+ int i;
+
+ for (i = 0; i < ARRAY_SIZE(legacy_rates); i++) {
+ if (rate == legacy_rates[i])
+ return true;
+ }
+
+ return false;
+}
+
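+/* For HT the reported rate_info mcs index is flattened across spatial
+ * streams as mcs + 8 * (nss - 1), e.g. nss 2 mcs 7 becomes HT MCS 15;
+ * VHT keeps mcs and nss separate.
+ */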
+static void
+ath10k_update_per_peer_tx_stats(struct ath10k *ar,
+ struct ieee80211_sta *sta,
+ struct ath10k_per_peer_tx_stats *peer_stats)
+{
+ struct ath10k_sta *arsta = (struct ath10k_sta *)sta->drv_priv;
+ u8 rate = 0, sgi;
+ struct rate_info txrate;
+
+ lockdep_assert_held(&ar->data_lock);
+
+ txrate.flags = ATH10K_HW_PREAMBLE(peer_stats->ratecode);
+ txrate.bw = ATH10K_HW_BW(peer_stats->flags);
+ txrate.nss = ATH10K_HW_NSS(peer_stats->ratecode);
+ txrate.mcs = ATH10K_HW_MCS_RATE(peer_stats->ratecode);
+ sgi = ATH10K_HW_GI(peer_stats->flags);
+
+ if (txrate.flags == WMI_RATE_PREAMBLE_VHT && txrate.mcs > 9) {
+ ath10k_warn(ar, "Invalid VHT mcs %hhd peer stats", txrate.mcs);
+ return;
+ }
+
+ if (txrate.flags == WMI_RATE_PREAMBLE_HT &&
+ (txrate.mcs > 7 || txrate.nss < 1)) {
+ ath10k_warn(ar, "Invalid HT mcs %hhd nss %hhd peer stats",
+ txrate.mcs, txrate.nss);
+ return;
+ }
+
+ memset(&arsta->txrate, 0, sizeof(arsta->txrate));
+
+ if (txrate.flags == WMI_RATE_PREAMBLE_CCK ||
+ txrate.flags == WMI_RATE_PREAMBLE_OFDM) {
+ rate = ATH10K_HW_LEGACY_RATE(peer_stats->ratecode);
+
+ if (!is_valid_legacy_rate(rate)) {
+ ath10k_warn(ar, "Invalid legacy rate %hhd peer stats",
+ rate);
+ return;
+ }
+
+ /* This is hacky, FW sends CCK rate 5.5Mbps as 6 */
+ rate *= 10;
+ if (rate == 60 && txrate.flags == WMI_RATE_PREAMBLE_CCK)
+ rate = rate - 5;
+ arsta->txrate.legacy = rate;
+ } else if (txrate.flags == WMI_RATE_PREAMBLE_HT) {
+ arsta->txrate.flags = RATE_INFO_FLAGS_MCS;
+ arsta->txrate.mcs = txrate.mcs + 8 * (txrate.nss - 1);
+ } else {
+ arsta->txrate.flags = RATE_INFO_FLAGS_VHT_MCS;
+ arsta->txrate.mcs = txrate.mcs;
+ }
+
+ if (sgi)
+ arsta->txrate.flags |= RATE_INFO_FLAGS_SHORT_GI;
+
+ arsta->txrate.nss = txrate.nss;
+ arsta->txrate.bw = ath10k_bw_to_mac80211_bw(txrate.bw);
+}
+
+static void ath10k_htt_fetch_peer_stats(struct ath10k *ar,
+ struct sk_buff *skb)
+{
+ struct htt_resp *resp = (struct htt_resp *)skb->data;
+ struct ath10k_per_peer_tx_stats *p_tx_stats = &ar->peer_tx_stats;
+ struct htt_per_peer_tx_stats_ind *tx_stats;
+ struct ieee80211_sta *sta;
+ struct ath10k_peer *peer;
+ int peer_id, i;
+ u8 ppdu_len, num_ppdu;
+
+ num_ppdu = resp->peer_tx_stats.num_ppdu;
+ ppdu_len = resp->peer_tx_stats.ppdu_len * sizeof(__le32);
+
+ if (skb->len < sizeof(struct htt_resp_hdr) + num_ppdu * ppdu_len) {
+ ath10k_warn(ar, "Invalid peer stats buf length %d\n", skb->len);
+ return;
+ }
+
+ tx_stats = (struct htt_per_peer_tx_stats_ind *)
+ (resp->peer_tx_stats.payload);
+ peer_id = __le16_to_cpu(tx_stats->peer_id);
+
+ rcu_read_lock();
+ spin_lock_bh(&ar->data_lock);
+ peer = ath10k_peer_find_by_id(ar, peer_id);
+ if (!peer || !peer->sta) {
+ ath10k_warn(ar, "Invalid peer id %d peer stats buffer\n",
+ peer_id);
+ goto out;
+ }
+
+ sta = peer->sta;
+ for (i = 0; i < num_ppdu; i++) {
+ tx_stats = (struct htt_per_peer_tx_stats_ind *)
+ (resp->peer_tx_stats.payload + i * ppdu_len);
+
+ p_tx_stats->succ_bytes = __le32_to_cpu(tx_stats->succ_bytes);
+ p_tx_stats->retry_bytes = __le32_to_cpu(tx_stats->retry_bytes);
+ p_tx_stats->failed_bytes =
+ __le32_to_cpu(tx_stats->failed_bytes);
+ p_tx_stats->ratecode = tx_stats->ratecode;
+ p_tx_stats->flags = tx_stats->flags;
+ p_tx_stats->succ_pkts = __le16_to_cpu(tx_stats->succ_pkts);
+ p_tx_stats->retry_pkts = __le16_to_cpu(tx_stats->retry_pkts);
+ p_tx_stats->failed_pkts = __le16_to_cpu(tx_stats->failed_pkts);
+
+ ath10k_update_per_peer_tx_stats(ar, sta, p_tx_stats);
+ }
+
+out:
+ spin_unlock_bh(&ar->data_lock);
+ rcu_read_unlock();
+}
+
+static void ath10k_fetch_10_2_tx_stats(struct ath10k *ar, u8 *data)
+{
+ struct ath10k_pktlog_hdr *hdr = (struct ath10k_pktlog_hdr *)data;
+ struct ath10k_per_peer_tx_stats *p_tx_stats = &ar->peer_tx_stats;
+ struct ath10k_10_2_peer_tx_stats *tx_stats;
+ struct ieee80211_sta *sta;
+ struct ath10k_peer *peer;
+ u16 log_type = __le16_to_cpu(hdr->log_type);
+ u32 peer_id = 0, i;
+
+ if (log_type != ATH_PKTLOG_TYPE_TX_STAT)
+ return;
+
+ tx_stats = (struct ath10k_10_2_peer_tx_stats *)((hdr->payload) +
+ ATH10K_10_2_TX_STATS_OFFSET);
+
+ if (!tx_stats->tx_ppdu_cnt)
+ return;
+
+ peer_id = tx_stats->peer_id;
+
+ rcu_read_lock();
+ spin_lock_bh(&ar->data_lock);
+ peer = ath10k_peer_find_by_id(ar, peer_id);
+ if (!peer || !peer->sta) {
+ ath10k_warn(ar, "Invalid peer id %d in peer stats buffer\n",
+ peer_id);
+ goto out;
+ }
+
+ sta = peer->sta;
+ for (i = 0; i < tx_stats->tx_ppdu_cnt; i++) {
+ p_tx_stats->succ_bytes =
+ __le16_to_cpu(tx_stats->success_bytes[i]);
+ p_tx_stats->retry_bytes =
+ __le16_to_cpu(tx_stats->retry_bytes[i]);
+ p_tx_stats->failed_bytes =
+ __le16_to_cpu(tx_stats->failed_bytes[i]);
+ p_tx_stats->ratecode = tx_stats->ratecode[i];
+ p_tx_stats->flags = tx_stats->flags[i];
+ p_tx_stats->succ_pkts = tx_stats->success_pkts[i];
+ p_tx_stats->retry_pkts = tx_stats->retry_pkts[i];
+ p_tx_stats->failed_pkts = tx_stats->failed_pkts[i];
+
+ ath10k_update_per_peer_tx_stats(ar, sta, p_tx_stats);
+ }
+ spin_unlock_bh(&ar->data_lock);
+ rcu_read_unlock();
+
+ return;
+
+out:
+ spin_unlock_bh(&ar->data_lock);
+ rcu_read_unlock();
+}
+
+bool ath10k_htt_t2h_msg_handler(struct ath10k *ar, struct sk_buff *skb)
+{
+ struct ath10k_htt *htt = &ar->htt;
+ struct htt_resp *resp = (struct htt_resp *)skb->data;
+ enum htt_t2h_msg_type type;
+
+ /* confirm alignment */
+ if (!IS_ALIGNED((unsigned long)skb->data, 4))
+ ath10k_warn(ar, "unaligned htt message, expect trouble\n");
+
+ ath10k_dbg(ar, ATH10K_DBG_HTT, "htt rx, msg_type: 0x%0X\n",
+ resp->hdr.msg_type);
+
+ if (resp->hdr.msg_type >= ar->htt.t2h_msg_types_max) {
+ ath10k_dbg(ar, ATH10K_DBG_HTT, "htt rx, unsupported msg_type: 0x%0X\n max: 0x%0X",
+ resp->hdr.msg_type, ar->htt.t2h_msg_types_max);
+ return true;
+ }
+ type = ar->htt.t2h_msg_types[resp->hdr.msg_type];
+
+ switch (type) {
+ case HTT_T2H_MSG_TYPE_VERSION_CONF: {
+ htt->target_version_major = resp->ver_resp.major;
+ htt->target_version_minor = resp->ver_resp.minor;
+ complete(&htt->target_version_received);
+ break;
+ }
+ case HTT_T2H_MSG_TYPE_RX_IND:
+ ath10k_htt_rx_proc_rx_ind(htt, &resp->rx_ind);
+ break;
+ case HTT_T2H_MSG_TYPE_PEER_MAP: {
+ struct htt_peer_map_event ev = {
+ .vdev_id = resp->peer_map.vdev_id,
+ .peer_id = __le16_to_cpu(resp->peer_map.peer_id),
+ };
+ memcpy(ev.addr, resp->peer_map.addr, sizeof(ev.addr));
+ ath10k_peer_map_event(htt, &ev);
+ break;
+ }
+ case HTT_T2H_MSG_TYPE_PEER_UNMAP: {
+ struct htt_peer_unmap_event ev = {
+ .peer_id = __le16_to_cpu(resp->peer_unmap.peer_id),
+ };
+ ath10k_peer_unmap_event(htt, &ev);
+ break;
+ }
+ case HTT_T2H_MSG_TYPE_MGMT_TX_COMPLETION: {
+ struct htt_tx_done tx_done = {};
+ int status = __le32_to_cpu(resp->mgmt_tx_completion.status);
+ int info = __le32_to_cpu(resp->mgmt_tx_completion.info);
+
+ tx_done.msdu_id = __le32_to_cpu(resp->mgmt_tx_completion.desc_id);
+
+ switch (status) {
+ case HTT_MGMT_TX_STATUS_OK:
+ tx_done.status = HTT_TX_COMPL_STATE_ACK;
+ if (test_bit(WMI_SERVICE_HTT_MGMT_TX_COMP_VALID_FLAGS,
+ ar->wmi.svc_map) &&
+ (resp->mgmt_tx_completion.flags &
+ HTT_MGMT_TX_CMPL_FLAG_ACK_RSSI)) {
+ tx_done.ack_rssi =
+ FIELD_GET(HTT_MGMT_TX_CMPL_INFO_ACK_RSSI_MASK,
+ info);
+ }
+ break;
+ case HTT_MGMT_TX_STATUS_RETRY:
+ tx_done.status = HTT_TX_COMPL_STATE_NOACK;
+ break;
+ case HTT_MGMT_TX_STATUS_DROP:
+ tx_done.status = HTT_TX_COMPL_STATE_DISCARD;
+ break;
+ }
+
+ status = ath10k_txrx_tx_unref(htt, &tx_done);
+ if (!status) {
+ spin_lock_bh(&htt->tx_lock);
+ ath10k_htt_tx_mgmt_dec_pending(htt);
+ spin_unlock_bh(&htt->tx_lock);
+ }
+ break;
+ }
+ case HTT_T2H_MSG_TYPE_TX_COMPL_IND:
+ ath10k_htt_rx_tx_compl_ind(htt->ar, skb);
+ break;
+ case HTT_T2H_MSG_TYPE_SEC_IND: {
+ struct ath10k *ar = htt->ar;
+ struct htt_security_indication *ev = &resp->security_indication;
+
+ ath10k_dbg(ar, ATH10K_DBG_HTT,
+ "sec ind peer_id %d unicast %d type %d\n",
+ __le16_to_cpu(ev->peer_id),
+ !!(ev->flags & HTT_SECURITY_IS_UNICAST),
+ MS(ev->flags, HTT_SECURITY_TYPE));
+ complete(&ar->install_key_done);
+ break;
+ }
+ case HTT_T2H_MSG_TYPE_RX_FRAG_IND: {
+ ath10k_dbg_dump(ar, ATH10K_DBG_HTT_DUMP, NULL, "htt event: ",
+ skb->data, skb->len);
+ atomic_inc(&htt->num_mpdus_ready);
+ break;
+ }
+ case HTT_T2H_MSG_TYPE_TEST:
+ break;
+ case HTT_T2H_MSG_TYPE_STATS_CONF:
+ trace_ath10k_htt_stats(ar, skb->data, skb->len);
+ break;
+ case HTT_T2H_MSG_TYPE_TX_INSPECT_IND:
+ /* Firmware can return tx frames if it's unable to fully
+ * process them and suspects host may be able to fix it. ath10k
+ * sends all tx frames as already inspected so this shouldn't
+ * happen unless fw has a bug.
+ */
+ ath10k_warn(ar, "received an unexpected htt tx inspect event\n");
+ break;
+ case HTT_T2H_MSG_TYPE_RX_ADDBA:
+ ath10k_htt_rx_addba(ar, resp);
+ break;
+ case HTT_T2H_MSG_TYPE_RX_DELBA:
+ ath10k_htt_rx_delba(ar, resp);
+ break;
+ case HTT_T2H_MSG_TYPE_PKTLOG: {
+ trace_ath10k_htt_pktlog(ar, resp->pktlog_msg.payload,
+ skb->len -
+ offsetof(struct htt_resp,
+ pktlog_msg.payload));
+
+ if (ath10k_peer_stats_enabled(ar))
+ ath10k_fetch_10_2_tx_stats(ar,
+ resp->pktlog_msg.payload);
+ break;
+ }
+ case HTT_T2H_MSG_TYPE_RX_FLUSH: {
+ /* Ignore this event because mac80211 takes care of Rx
+ * aggregation reordering.
+ */
+ break;
+ }
+ case HTT_T2H_MSG_TYPE_RX_IN_ORD_PADDR_IND: {
+ skb_queue_tail(&htt->rx_in_ord_compl_q, skb);
+ return false;
+ }
+ case HTT_T2H_MSG_TYPE_TX_CREDIT_UPDATE_IND:
+ break;
+ case HTT_T2H_MSG_TYPE_CHAN_CHANGE: {
+ u32 phymode = __le32_to_cpu(resp->chan_change.phymode);
+ u32 freq = __le32_to_cpu(resp->chan_change.freq);
+
+ ar->tgt_oper_chan = ieee80211_get_channel(ar->hw->wiphy, freq);
+ ath10k_dbg(ar, ATH10K_DBG_HTT,
+ "htt chan change freq %u phymode %s\n",
+ freq, ath10k_wmi_phymode_str(phymode));
+ break;
+ }
+ case HTT_T2H_MSG_TYPE_AGGR_CONF:
+ break;
+ case HTT_T2H_MSG_TYPE_TX_FETCH_IND: {
+ struct sk_buff *tx_fetch_ind = skb_copy(skb, GFP_ATOMIC);
+
+ if (!tx_fetch_ind) {
+ ath10k_warn(ar, "failed to copy htt tx fetch ind\n");
+ break;
+ }
+ skb_queue_tail(&htt->tx_fetch_ind_q, tx_fetch_ind);
+ break;
+ }
+ case HTT_T2H_MSG_TYPE_TX_FETCH_CONFIRM:
+ ath10k_htt_rx_tx_fetch_confirm(ar, skb);
+ break;
+ case HTT_T2H_MSG_TYPE_TX_MODE_SWITCH_IND:
+ ath10k_htt_rx_tx_mode_switch_ind(ar, skb);
+ break;
+ case HTT_T2H_MSG_TYPE_PEER_STATS:
+ ath10k_htt_fetch_peer_stats(ar, skb);
+ break;
+ case HTT_T2H_MSG_TYPE_EN_STATS:
+ default:
+ ath10k_warn(ar, "htt event (%d) not handled\n",
+ resp->hdr.msg_type);
+ ath10k_dbg_dump(ar, ATH10K_DBG_HTT_DUMP, NULL, "htt event: ",
+ skb->data, skb->len);
+ break;
+ }
+ return true;
+}
+EXPORT_SYMBOL(ath10k_htt_t2h_msg_handler);
+
+void ath10k_htt_rx_pktlog_completion_handler(struct ath10k *ar,
+ struct sk_buff *skb)
+{
+ trace_ath10k_htt_pktlog(ar, skb->data, skb->len);
+ dev_kfree_skb_any(skb);
+}
+EXPORT_SYMBOL(ath10k_htt_rx_pktlog_completion_handler);
+
+static int ath10k_htt_rx_deliver_msdu(struct ath10k *ar, int quota, int budget)
+{
+ struct sk_buff *skb;
+
+ while (quota < budget) {
+ if (skb_queue_empty(&ar->htt.rx_msdus_q))
+ break;
+
+ skb = skb_dequeue(&ar->htt.rx_msdus_q);
+ if (!skb)
+ break;
+ ath10k_process_rx(ar, skb);
+ quota++;
+ }
+
+ return quota;
+}
+
+int ath10k_htt_txrx_compl_task(struct ath10k *ar, int budget)
+{
+ struct ath10k_htt *htt = &ar->htt;
+ struct htt_tx_done tx_done = {};
+ struct sk_buff_head tx_ind_q;
+ struct sk_buff *skb;
+ unsigned long flags;
+ int quota = 0, done, ret;
+ bool resched_napi = false;
+
+ __skb_queue_head_init(&tx_ind_q);
+
+ /* Process pending frames before dequeuing more data
+ * from hardware.
+ */
+ quota = ath10k_htt_rx_deliver_msdu(ar, quota, budget);
+ if (quota == budget) {
+ resched_napi = true;
+ goto exit;
+ }
+
+ while ((skb = skb_dequeue(&htt->rx_in_ord_compl_q))) {
+ spin_lock_bh(&htt->rx_ring.lock);
+ ret = ath10k_htt_rx_in_ord_ind(ar, skb);
+ spin_unlock_bh(&htt->rx_ring.lock);
+
+ dev_kfree_skb_any(skb);
+ if (ret == -EIO) {
+ resched_napi = true;
+ goto exit;
+ }
+ }
+
+ while (atomic_read(&htt->num_mpdus_ready)) {
+ ret = ath10k_htt_rx_handle_amsdu(htt);
+ if (ret == -EIO) {
+ resched_napi = true;
+ goto exit;
+ }
+ atomic_dec(&htt->num_mpdus_ready);
+ }
+
+ /* Deliver received data after processing data from hardware */
+ quota = ath10k_htt_rx_deliver_msdu(ar, quota, budget);
+
+ /* From NAPI documentation:
+ * The napi poll() function may also process TX completions, in which
+ * case if it processes the entire TX ring then it should count that
+ * work as the rest of the budget.
+ */
+ if ((quota < budget) && !kfifo_is_empty(&htt->txdone_fifo))
+ quota = budget;
+
+ /* kfifo_get: called only within txrx_tasklet so it's neatly serialized.
+ * From kfifo_get() documentation:
+ * Note that with only one concurrent reader and one concurrent writer,
+ * you don't need extra locking to use these macros.
+ */
+ while (kfifo_get(&htt->txdone_fifo, &tx_done))
+ ath10k_txrx_tx_unref(htt, &tx_done);
+
+ ath10k_mac_tx_push_pending(ar);
+
+ spin_lock_irqsave(&htt->tx_fetch_ind_q.lock, flags);
+ skb_queue_splice_init(&htt->tx_fetch_ind_q, &tx_ind_q);
+ spin_unlock_irqrestore(&htt->tx_fetch_ind_q.lock, flags);
+
+ while ((skb = __skb_dequeue(&tx_ind_q))) {
+ ath10k_htt_rx_tx_fetch_ind(ar, skb);
+ dev_kfree_skb_any(skb);
+ }
+
+exit:
+ ath10k_htt_rx_msdu_buff_replenish(htt);
+ /* In case of rx failure or more data to read, report budget
+ * to reschedule NAPI poll
+ */
+ done = resched_napi ? budget : quota;
+
+ return done;
+}
+EXPORT_SYMBOL(ath10k_htt_txrx_compl_task);
+
+static const struct ath10k_htt_rx_ops htt_rx_ops_32 = {
+ .htt_get_rx_ring_size = ath10k_htt_get_rx_ring_size_32,
+ .htt_config_paddrs_ring = ath10k_htt_config_paddrs_ring_32,
+ .htt_set_paddrs_ring = ath10k_htt_set_paddrs_ring_32,
+ .htt_get_vaddr_ring = ath10k_htt_get_vaddr_ring_32,
+ .htt_reset_paddrs_ring = ath10k_htt_reset_paddrs_ring_32,
+};
+
+static const struct ath10k_htt_rx_ops htt_rx_ops_64 = {
+ .htt_get_rx_ring_size = ath10k_htt_get_rx_ring_size_64,
+ .htt_config_paddrs_ring = ath10k_htt_config_paddrs_ring_64,
+ .htt_set_paddrs_ring = ath10k_htt_set_paddrs_ring_64,
+ .htt_get_vaddr_ring = ath10k_htt_get_vaddr_ring_64,
+ .htt_reset_paddrs_ring = ath10k_htt_reset_paddrs_ring_64,
+};
+
+void ath10k_htt_set_rx_ops(struct ath10k_htt *htt)
+{
+ struct ath10k *ar = htt->ar;
+
+ if (ar->hw_params.target_64bit)
+ htt->rx_ops = &htt_rx_ops_64;
+ else
+ htt->rx_ops = &htt_rx_ops_32;
+}
diff --git a/drivers/net/wireless/ath/ath10k/htt_tx.c b/drivers/net/wireless/ath/ath10k/htt_tx.c
new file mode 100644
index 000000000..3718d4dfc
--- /dev/null
+++ b/drivers/net/wireless/ath/ath10k/htt_tx.c
@@ -0,0 +1,1579 @@
+/*
+ * Copyright (c) 2005-2011 Atheros Communications Inc.
+ * Copyright (c) 2011-2017 Qualcomm Atheros, Inc.
+ *
+ * Permission to use, copy, modify, and/or distribute this software for any
+ * purpose with or without fee is hereby granted, provided that the above
+ * copyright notice and this permission notice appear in all copies.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
+ * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
+ * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+ * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+ * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
+ * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+ */
+
+#include <linux/etherdevice.h>
+#include "htt.h"
+#include "mac.h"
+#include "hif.h"
+#include "txrx.h"
+#include "debug.h"
+
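+/* The queue depth advertised to firmware is a compact floating point
+ * encoding of roughly factor * 128 * 8^exp bytes, with the two components
+ * packed into the ENTRY_FACTOR and ENTRY_EXP fields. For example 100000
+ * bytes yields factor 781 -> 97 (exp 1) -> 12 (exp 2), i.e. about
+ * 12 * 128 * 64 = 98304 bytes; 0xff marks a depth too large to encode.
+ */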
+static u8 ath10k_htt_tx_txq_calc_size(size_t count)
+{
+ int exp;
+ int factor;
+
+ exp = 0;
+ factor = count >> 7;
+
+ while (factor >= 64 && exp < 4) {
+ factor >>= 3;
+ exp++;
+ }
+
+ if (exp == 4)
+ return 0xff;
+
+ if (count > 0)
+ factor = max(1, factor);
+
+ return SM(exp, HTT_TX_Q_STATE_ENTRY_EXP) |
+ SM(factor, HTT_TX_Q_STATE_ENTRY_FACTOR);
+}
+
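+/* In push-pull mode the firmware polls a host-resident tx queue state
+ * structure: count[tid][peer_id] holds the encoded queue depth and
+ * map[tid][] is a per-peer bitmap of non-empty queues. Updates are made
+ * visible to the device by __ath10k_htt_tx_txq_sync().
+ */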
+static void __ath10k_htt_tx_txq_recalc(struct ieee80211_hw *hw,
+ struct ieee80211_txq *txq)
+{
+ struct ath10k *ar = hw->priv;
+ struct ath10k_sta *arsta;
+ struct ath10k_vif *arvif = (void *)txq->vif->drv_priv;
+ unsigned long frame_cnt;
+ unsigned long byte_cnt;
+ int idx;
+ u32 bit;
+ u16 peer_id;
+ u8 tid;
+ u8 count;
+
+ lockdep_assert_held(&ar->htt.tx_lock);
+
+ if (!ar->htt.tx_q_state.enabled)
+ return;
+
+ if (ar->htt.tx_q_state.mode != HTT_TX_MODE_SWITCH_PUSH_PULL)
+ return;
+
+ if (txq->sta) {
+ arsta = (void *)txq->sta->drv_priv;
+ peer_id = arsta->peer_id;
+ } else {
+ peer_id = arvif->peer_id;
+ }
+
+ tid = txq->tid;
+ bit = BIT(peer_id % 32);
+ idx = peer_id / 32;
+
+ ieee80211_txq_get_depth(txq, &frame_cnt, &byte_cnt);
+ count = ath10k_htt_tx_txq_calc_size(byte_cnt);
+
+ if (unlikely(peer_id >= ar->htt.tx_q_state.num_peers) ||
+ unlikely(tid >= ar->htt.tx_q_state.num_tids)) {
+ ath10k_warn(ar, "refusing to update txq for peer_id %hu tid %hhu due to out of bounds\n",
+ peer_id, tid);
+ return;
+ }
+
+ ar->htt.tx_q_state.vaddr->count[tid][peer_id] = count;
+ ar->htt.tx_q_state.vaddr->map[tid][idx] &= ~bit;
+ ar->htt.tx_q_state.vaddr->map[tid][idx] |= count ? bit : 0;
+
+ ath10k_dbg(ar, ATH10K_DBG_HTT, "htt tx txq state update peer_id %hu tid %hhu count %hhu\n",
+ peer_id, tid, count);
+}
+
+static void __ath10k_htt_tx_txq_sync(struct ath10k *ar)
+{
+ u32 seq;
+ size_t size;
+
+ lockdep_assert_held(&ar->htt.tx_lock);
+
+ if (!ar->htt.tx_q_state.enabled)
+ return;
+
+ if (ar->htt.tx_q_state.mode != HTT_TX_MODE_SWITCH_PUSH_PULL)
+ return;
+
+ seq = le32_to_cpu(ar->htt.tx_q_state.vaddr->seq);
+ seq++;
+ ar->htt.tx_q_state.vaddr->seq = cpu_to_le32(seq);
+
+ ath10k_dbg(ar, ATH10K_DBG_HTT, "htt tx txq state update commit seq %u\n",
+ seq);
+
+ size = sizeof(*ar->htt.tx_q_state.vaddr);
+ dma_sync_single_for_device(ar->dev,
+ ar->htt.tx_q_state.paddr,
+ size,
+ DMA_TO_DEVICE);
+}
+
+void ath10k_htt_tx_txq_recalc(struct ieee80211_hw *hw,
+ struct ieee80211_txq *txq)
+{
+ struct ath10k *ar = hw->priv;
+
+ spin_lock_bh(&ar->htt.tx_lock);
+ __ath10k_htt_tx_txq_recalc(hw, txq);
+ spin_unlock_bh(&ar->htt.tx_lock);
+}
+
+void ath10k_htt_tx_txq_sync(struct ath10k *ar)
+{
+ spin_lock_bh(&ar->htt.tx_lock);
+ __ath10k_htt_tx_txq_sync(ar);
+ spin_unlock_bh(&ar->htt.tx_lock);
+}
+
+void ath10k_htt_tx_txq_update(struct ieee80211_hw *hw,
+ struct ieee80211_txq *txq)
+{
+ struct ath10k *ar = hw->priv;
+
+ spin_lock_bh(&ar->htt.tx_lock);
+ __ath10k_htt_tx_txq_recalc(hw, txq);
+ __ath10k_htt_tx_txq_sync(ar);
+ spin_unlock_bh(&ar->htt.tx_lock);
+}
+
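+/* Pending-tx accounting doubles as flow control: once num_pending_tx
+ * reaches max_num_pending_tx the mac queues are stopped with
+ * ATH10K_TX_PAUSE_Q_FULL and restarted when a completion drops the count
+ * back below the limit.
+ */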
+void ath10k_htt_tx_dec_pending(struct ath10k_htt *htt)
+{
+ lockdep_assert_held(&htt->tx_lock);
+
+ htt->num_pending_tx--;
+ if (htt->num_pending_tx == htt->max_num_pending_tx - 1)
+ ath10k_mac_tx_unlock(htt->ar, ATH10K_TX_PAUSE_Q_FULL);
+
+ if (htt->num_pending_tx == 0)
+ wake_up(&htt->empty_tx_wq);
+}
+
+int ath10k_htt_tx_inc_pending(struct ath10k_htt *htt)
+{
+ lockdep_assert_held(&htt->tx_lock);
+
+ if (htt->num_pending_tx >= htt->max_num_pending_tx)
+ return -EBUSY;
+
+ htt->num_pending_tx++;
+ if (htt->num_pending_tx == htt->max_num_pending_tx)
+ ath10k_mac_tx_lock(htt->ar, ATH10K_TX_PAUSE_Q_FULL);
+
+ return 0;
+}
+
+int ath10k_htt_tx_mgmt_inc_pending(struct ath10k_htt *htt, bool is_mgmt,
+ bool is_presp)
+{
+ struct ath10k *ar = htt->ar;
+
+ lockdep_assert_held(&htt->tx_lock);
+
+ if (!is_mgmt || !ar->hw_params.max_probe_resp_desc_thres)
+ return 0;
+
+ if (is_presp &&
+ ar->hw_params.max_probe_resp_desc_thres < htt->num_pending_mgmt_tx)
+ return -EBUSY;
+
+ htt->num_pending_mgmt_tx++;
+
+ return 0;
+}
+
+void ath10k_htt_tx_mgmt_dec_pending(struct ath10k_htt *htt)
+{
+ lockdep_assert_held(&htt->tx_lock);
+
+ if (!htt->ar->hw_params.max_probe_resp_desc_thres)
+ return;
+
+ htt->num_pending_mgmt_tx--;
+}
+
+int ath10k_htt_tx_alloc_msdu_id(struct ath10k_htt *htt, struct sk_buff *skb)
+{
+ struct ath10k *ar = htt->ar;
+ int ret;
+
+ spin_lock_bh(&htt->tx_lock);
+ ret = idr_alloc(&htt->pending_tx, skb, 0,
+ htt->max_num_pending_tx, GFP_ATOMIC);
+ spin_unlock_bh(&htt->tx_lock);
+
+ ath10k_dbg(ar, ATH10K_DBG_HTT, "htt tx alloc msdu_id %d\n", ret);
+
+ return ret;
+}
+
+void ath10k_htt_tx_free_msdu_id(struct ath10k_htt *htt, u16 msdu_id)
+{
+ struct ath10k *ar = htt->ar;
+
+ lockdep_assert_held(&htt->tx_lock);
+
+ ath10k_dbg(ar, ATH10K_DBG_HTT, "htt tx free msdu_id %hu\n", msdu_id);
+
+ idr_remove(&htt->pending_tx, msdu_id);
+}
+
+static void ath10k_htt_tx_free_cont_txbuf_32(struct ath10k_htt *htt)
+{
+ struct ath10k *ar = htt->ar;
+ size_t size;
+
+ if (!htt->txbuf.vaddr_txbuff_32)
+ return;
+
+ size = htt->txbuf.size;
+ dma_free_coherent(ar->dev, size, htt->txbuf.vaddr_txbuff_32,
+ htt->txbuf.paddr);
+ htt->txbuf.vaddr_txbuff_32 = NULL;
+}
+
+static int ath10k_htt_tx_alloc_cont_txbuf_32(struct ath10k_htt *htt)
+{
+ struct ath10k *ar = htt->ar;
+ size_t size;
+
+ size = htt->max_num_pending_tx *
+ sizeof(struct ath10k_htt_txbuf_32);
+
+ htt->txbuf.vaddr_txbuff_32 = dma_alloc_coherent(ar->dev, size,
+ &htt->txbuf.paddr,
+ GFP_KERNEL);
+ if (!htt->txbuf.vaddr_txbuff_32)
+ return -ENOMEM;
+
+ htt->txbuf.size = size;
+
+ return 0;
+}
+
+static void ath10k_htt_tx_free_cont_txbuf_64(struct ath10k_htt *htt)
+{
+ struct ath10k *ar = htt->ar;
+ size_t size;
+
+ if (!htt->txbuf.vaddr_txbuff_64)
+ return;
+
+ size = htt->txbuf.size;
+ dma_free_coherent(ar->dev, size, htt->txbuf.vaddr_txbuff_64,
+ htt->txbuf.paddr);
+ htt->txbuf.vaddr_txbuff_64 = NULL;
+}
+
+static int ath10k_htt_tx_alloc_cont_txbuf_64(struct ath10k_htt *htt)
+{
+ struct ath10k *ar = htt->ar;
+ size_t size;
+
+ size = htt->max_num_pending_tx *
+ sizeof(struct ath10k_htt_txbuf_64);
+
+ htt->txbuf.vaddr_txbuff_64 = dma_alloc_coherent(ar->dev, size,
+ &htt->txbuf.paddr,
+ GFP_KERNEL);
+ if (!htt->txbuf.vaddr_txbuff_64)
+ return -ENOMEM;
+
+ htt->txbuf.size = size;
+
+ return 0;
+}
+
+static void ath10k_htt_tx_free_cont_frag_desc_32(struct ath10k_htt *htt)
+{
+ size_t size;
+
+ if (!htt->frag_desc.vaddr_desc_32)
+ return;
+
+ size = htt->max_num_pending_tx *
+ sizeof(struct htt_msdu_ext_desc);
+
+ dma_free_coherent(htt->ar->dev,
+ size,
+ htt->frag_desc.vaddr_desc_32,
+ htt->frag_desc.paddr);
+
+ htt->frag_desc.vaddr_desc_32 = NULL;
+}
+
+static int ath10k_htt_tx_alloc_cont_frag_desc_32(struct ath10k_htt *htt)
+{
+ struct ath10k *ar = htt->ar;
+ size_t size;
+
+ if (!ar->hw_params.continuous_frag_desc)
+ return 0;
+
+ size = htt->max_num_pending_tx *
+ sizeof(struct htt_msdu_ext_desc);
+ htt->frag_desc.vaddr_desc_32 = dma_alloc_coherent(ar->dev, size,
+ &htt->frag_desc.paddr,
+ GFP_KERNEL);
+ if (!htt->frag_desc.vaddr_desc_32) {
+ ath10k_err(ar, "failed to alloc fragment desc memory\n");
+ return -ENOMEM;
+ }
+ htt->frag_desc.size = size;
+
+ return 0;
+}
+
+static void ath10k_htt_tx_free_cont_frag_desc_64(struct ath10k_htt *htt)
+{
+ size_t size;
+
+ if (!htt->frag_desc.vaddr_desc_64)
+ return;
+
+ size = htt->max_num_pending_tx *
+ sizeof(struct htt_msdu_ext_desc_64);
+
+ dma_free_coherent(htt->ar->dev,
+ size,
+ htt->frag_desc.vaddr_desc_64,
+ htt->frag_desc.paddr);
+
+ htt->frag_desc.vaddr_desc_64 = NULL;
+}
+
+static int ath10k_htt_tx_alloc_cont_frag_desc_64(struct ath10k_htt *htt)
+{
+ struct ath10k *ar = htt->ar;
+ size_t size;
+
+ if (!ar->hw_params.continuous_frag_desc)
+ return 0;
+
+ size = htt->max_num_pending_tx *
+ sizeof(struct htt_msdu_ext_desc_64);
+
+ htt->frag_desc.vaddr_desc_64 = dma_alloc_coherent(ar->dev, size,
+ &htt->frag_desc.paddr,
+ GFP_KERNEL);
+ if (!htt->frag_desc.vaddr_desc_64) {
+ ath10k_err(ar, "failed to alloc fragment desc memory\n");
+ return -ENOMEM;
+ }
+ htt->frag_desc.size = size;
+
+ return 0;
+}
+
+static void ath10k_htt_tx_free_txq(struct ath10k_htt *htt)
+{
+ struct ath10k *ar = htt->ar;
+ size_t size;
+
+ if (!test_bit(ATH10K_FW_FEATURE_PEER_FLOW_CONTROL,
+ ar->running_fw->fw_file.fw_features))
+ return;
+
+ size = sizeof(*htt->tx_q_state.vaddr);
+
+ dma_unmap_single(ar->dev, htt->tx_q_state.paddr, size, DMA_TO_DEVICE);
+ kfree(htt->tx_q_state.vaddr);
+}
+
+static int ath10k_htt_tx_alloc_txq(struct ath10k_htt *htt)
+{
+ struct ath10k *ar = htt->ar;
+ size_t size;
+ int ret;
+
+ if (!test_bit(ATH10K_FW_FEATURE_PEER_FLOW_CONTROL,
+ ar->running_fw->fw_file.fw_features))
+ return 0;
+
+ htt->tx_q_state.num_peers = HTT_TX_Q_STATE_NUM_PEERS;
+ htt->tx_q_state.num_tids = HTT_TX_Q_STATE_NUM_TIDS;
+ htt->tx_q_state.type = HTT_Q_DEPTH_TYPE_BYTES;
+
+ size = sizeof(*htt->tx_q_state.vaddr);
+ htt->tx_q_state.vaddr = kzalloc(size, GFP_KERNEL);
+ if (!htt->tx_q_state.vaddr)
+ return -ENOMEM;
+
+ htt->tx_q_state.paddr = dma_map_single(ar->dev, htt->tx_q_state.vaddr,
+ size, DMA_TO_DEVICE);
+ ret = dma_mapping_error(ar->dev, htt->tx_q_state.paddr);
+ if (ret) {
+ ath10k_warn(ar, "failed to dma map tx_q_state: %d\n", ret);
+ kfree(htt->tx_q_state.vaddr);
+ return -EIO;
+ }
+
+ return 0;
+}
+
+static void ath10k_htt_tx_free_txdone_fifo(struct ath10k_htt *htt)
+{
+ WARN_ON(!kfifo_is_empty(&htt->txdone_fifo));
+ kfifo_free(&htt->txdone_fifo);
+}
+
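+/* One fifo slot per potentially in-flight msdu; kfifo sizes must be a
+ * power of two, hence the round-up.
+ */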
+static int ath10k_htt_tx_alloc_txdone_fifo(struct ath10k_htt *htt)
+{
+ int ret;
+ size_t size;
+
+ size = roundup_pow_of_two(htt->max_num_pending_tx);
+ ret = kfifo_alloc(&htt->txdone_fifo, size, GFP_KERNEL);
+ return ret;
+}
+
+static int ath10k_htt_tx_alloc_buf(struct ath10k_htt *htt)
+{
+ struct ath10k *ar = htt->ar;
+ int ret;
+
+ ret = ath10k_htt_alloc_txbuff(htt);
+ if (ret) {
+ ath10k_err(ar, "failed to alloc cont tx buffer: %d\n", ret);
+ return ret;
+ }
+
+ ret = ath10k_htt_alloc_frag_desc(htt);
+ if (ret) {
+ ath10k_err(ar, "failed to alloc cont frag desc: %d\n", ret);
+ goto free_txbuf;
+ }
+
+ ret = ath10k_htt_tx_alloc_txq(htt);
+ if (ret) {
+ ath10k_err(ar, "failed to alloc txq: %d\n", ret);
+ goto free_frag_desc;
+ }
+
+ ret = ath10k_htt_tx_alloc_txdone_fifo(htt);
+ if (ret) {
+ ath10k_err(ar, "failed to alloc txdone fifo: %d\n", ret);
+ goto free_txq;
+ }
+
+ return 0;
+
+free_txq:
+ ath10k_htt_tx_free_txq(htt);
+
+free_frag_desc:
+ ath10k_htt_free_frag_desc(htt);
+
+free_txbuf:
+ ath10k_htt_free_txbuff(htt);
+
+ return ret;
+}
+
+int ath10k_htt_tx_start(struct ath10k_htt *htt)
+{
+ struct ath10k *ar = htt->ar;
+ int ret;
+
+ ath10k_dbg(ar, ATH10K_DBG_BOOT, "htt tx max num pending tx %d\n",
+ htt->max_num_pending_tx);
+
+ spin_lock_init(&htt->tx_lock);
+ idr_init(&htt->pending_tx);
+
+ if (htt->tx_mem_allocated)
+ return 0;
+
+ ret = ath10k_htt_tx_alloc_buf(htt);
+ if (ret)
+ goto free_idr_pending_tx;
+
+ htt->tx_mem_allocated = true;
+
+ return 0;
+
+free_idr_pending_tx:
+ idr_destroy(&htt->pending_tx);
+
+ return ret;
+}
+
+static int ath10k_htt_tx_clean_up_pending(int msdu_id, void *skb, void *ctx)
+{
+ struct ath10k *ar = ctx;
+ struct ath10k_htt *htt = &ar->htt;
+ struct htt_tx_done tx_done = {0};
+
+ ath10k_dbg(ar, ATH10K_DBG_HTT, "force cleanup msdu_id %hu\n", msdu_id);
+
+ tx_done.msdu_id = msdu_id;
+ tx_done.status = HTT_TX_COMPL_STATE_DISCARD;
+
+ ath10k_txrx_tx_unref(htt, &tx_done);
+
+ return 0;
+}
+
+void ath10k_htt_tx_destroy(struct ath10k_htt *htt)
+{
+ if (!htt->tx_mem_allocated)
+ return;
+
+ ath10k_htt_free_txbuff(htt);
+ ath10k_htt_tx_free_txq(htt);
+ ath10k_htt_free_frag_desc(htt);
+ ath10k_htt_tx_free_txdone_fifo(htt);
+ htt->tx_mem_allocated = false;
+}
+
+void ath10k_htt_tx_stop(struct ath10k_htt *htt)
+{
+ idr_for_each(&htt->pending_tx, ath10k_htt_tx_clean_up_pending, htt->ar);
+ idr_destroy(&htt->pending_tx);
+}
+
+void ath10k_htt_tx_free(struct ath10k_htt *htt)
+{
+ ath10k_htt_tx_stop(htt);
+ ath10k_htt_tx_destroy(htt);
+}
+
+void ath10k_htt_htc_tx_complete(struct ath10k *ar, struct sk_buff *skb)
+{
+ dev_kfree_skb_any(skb);
+}
+
+void ath10k_htt_hif_tx_complete(struct ath10k *ar, struct sk_buff *skb)
+{
+ dev_kfree_skb_any(skb);
+}
+EXPORT_SYMBOL(ath10k_htt_hif_tx_complete);
+
+int ath10k_htt_h2t_ver_req_msg(struct ath10k_htt *htt)
+{
+ struct ath10k *ar = htt->ar;
+ struct sk_buff *skb;
+ struct htt_cmd *cmd;
+ int len = 0;
+ int ret;
+
+ len += sizeof(cmd->hdr);
+ len += sizeof(cmd->ver_req);
+
+ skb = ath10k_htc_alloc_skb(ar, len);
+ if (!skb)
+ return -ENOMEM;
+
+ skb_put(skb, len);
+ cmd = (struct htt_cmd *)skb->data;
+ cmd->hdr.msg_type = HTT_H2T_MSG_TYPE_VERSION_REQ;
+
+ ret = ath10k_htc_send(&htt->ar->htc, htt->eid, skb);
+ if (ret) {
+ dev_kfree_skb_any(skb);
+ return ret;
+ }
+
+ return 0;
+}
+
+int ath10k_htt_h2t_stats_req(struct ath10k_htt *htt, u8 mask, u64 cookie)
+{
+ struct ath10k *ar = htt->ar;
+ struct htt_stats_req *req;
+ struct sk_buff *skb;
+ struct htt_cmd *cmd;
+ int len = 0, ret;
+
+ len += sizeof(cmd->hdr);
+ len += sizeof(cmd->stats_req);
+
+ skb = ath10k_htc_alloc_skb(ar, len);
+ if (!skb)
+ return -ENOMEM;
+
+ skb_put(skb, len);
+ cmd = (struct htt_cmd *)skb->data;
+ cmd->hdr.msg_type = HTT_H2T_MSG_TYPE_STATS_REQ;
+
+ req = &cmd->stats_req;
+
+ memset(req, 0, sizeof(*req));
+
+ /* currently only 8-bit masks are supported, so there is no need to
+ * worry about endianness
+ */
+ req->upload_types[0] = mask;
+ req->reset_types[0] = mask;
+ req->stat_type = HTT_STATS_REQ_CFG_STAT_TYPE_INVALID;
+ req->cookie_lsb = cpu_to_le32(cookie & 0xffffffff);
+ req->cookie_msb = cpu_to_le32((cookie & 0xffffffff00000000ULL) >> 32);
+
+ ret = ath10k_htc_send(&htt->ar->htc, htt->eid, skb);
+ if (ret) {
+ ath10k_warn(ar, "failed to send htt type stats request: %d",
+ ret);
+ dev_kfree_skb_any(skb);
+ return ret;
+ }
+
+ return 0;
+}
+
+static int ath10k_htt_send_frag_desc_bank_cfg_32(struct ath10k_htt *htt)
+{
+ struct ath10k *ar = htt->ar;
+ struct sk_buff *skb;
+ struct htt_cmd *cmd;
+ struct htt_frag_desc_bank_cfg32 *cfg;
+ int ret, size;
+ u8 info;
+
+ if (!ar->hw_params.continuous_frag_desc)
+ return 0;
+
+ if (!htt->frag_desc.paddr) {
+ ath10k_warn(ar, "invalid frag desc memory\n");
+ return -EINVAL;
+ }
+
+ size = sizeof(cmd->hdr) + sizeof(cmd->frag_desc_bank_cfg32);
+ skb = ath10k_htc_alloc_skb(ar, size);
+ if (!skb)
+ return -ENOMEM;
+
+ skb_put(skb, size);
+ cmd = (struct htt_cmd *)skb->data;
+ cmd->hdr.msg_type = HTT_H2T_MSG_TYPE_FRAG_DESC_BANK_CFG;
+
+ info = 0;
+ info |= SM(htt->tx_q_state.type,
+ HTT_FRAG_DESC_BANK_CFG_INFO_Q_STATE_DEPTH_TYPE);
+
+ if (test_bit(ATH10K_FW_FEATURE_PEER_FLOW_CONTROL,
+ ar->running_fw->fw_file.fw_features))
+ info |= HTT_FRAG_DESC_BANK_CFG_INFO_Q_STATE_VALID;
+
+ cfg = &cmd->frag_desc_bank_cfg32;
+ cfg->info = info;
+ cfg->num_banks = 1;
+ cfg->desc_size = sizeof(struct htt_msdu_ext_desc);
+ cfg->bank_base_addrs[0] = __cpu_to_le32(htt->frag_desc.paddr);
+ cfg->bank_id[0].bank_min_id = 0;
+ cfg->bank_id[0].bank_max_id = __cpu_to_le16(htt->max_num_pending_tx -
+ 1);
+
+ cfg->q_state.paddr = cpu_to_le32(htt->tx_q_state.paddr);
+ cfg->q_state.num_peers = cpu_to_le16(htt->tx_q_state.num_peers);
+ cfg->q_state.num_tids = cpu_to_le16(htt->tx_q_state.num_tids);
+ cfg->q_state.record_size = HTT_TX_Q_STATE_ENTRY_SIZE;
+ cfg->q_state.record_multiplier = HTT_TX_Q_STATE_ENTRY_MULTIPLIER;
+
+ ath10k_dbg(ar, ATH10K_DBG_HTT, "htt frag desc bank cmd\n");
+
+ ret = ath10k_htc_send(&htt->ar->htc, htt->eid, skb);
+ if (ret) {
+ ath10k_warn(ar, "failed to send frag desc bank cfg request: %d\n",
+ ret);
+ dev_kfree_skb_any(skb);
+ return ret;
+ }
+
+ return 0;
+}
+
+static int ath10k_htt_send_frag_desc_bank_cfg_64(struct ath10k_htt *htt)
+{
+ struct ath10k *ar = htt->ar;
+ struct sk_buff *skb;
+ struct htt_cmd *cmd;
+ struct htt_frag_desc_bank_cfg64 *cfg;
+ int ret, size;
+ u8 info;
+
+ if (!ar->hw_params.continuous_frag_desc)
+ return 0;
+
+ if (!htt->frag_desc.paddr) {
+ ath10k_warn(ar, "invalid frag desc memory\n");
+ return -EINVAL;
+ }
+
+ size = sizeof(cmd->hdr) + sizeof(cmd->frag_desc_bank_cfg64);
+ skb = ath10k_htc_alloc_skb(ar, size);
+ if (!skb)
+ return -ENOMEM;
+
+ skb_put(skb, size);
+ cmd = (struct htt_cmd *)skb->data;
+ cmd->hdr.msg_type = HTT_H2T_MSG_TYPE_FRAG_DESC_BANK_CFG;
+
+ info = 0;
+ info |= SM(htt->tx_q_state.type,
+ HTT_FRAG_DESC_BANK_CFG_INFO_Q_STATE_DEPTH_TYPE);
+
+ if (test_bit(ATH10K_FW_FEATURE_PEER_FLOW_CONTROL,
+ ar->running_fw->fw_file.fw_features))
+ info |= HTT_FRAG_DESC_BANK_CFG_INFO_Q_STATE_VALID;
+
+ cfg = &cmd->frag_desc_bank_cfg64;
+ cfg->info = info;
+ cfg->num_banks = 1;
+ cfg->desc_size = sizeof(struct htt_msdu_ext_desc_64);
+ cfg->bank_base_addrs[0] = __cpu_to_le64(htt->frag_desc.paddr);
+ cfg->bank_id[0].bank_min_id = 0;
+ cfg->bank_id[0].bank_max_id = __cpu_to_le16(htt->max_num_pending_tx -
+ 1);
+
+ cfg->q_state.paddr = cpu_to_le32(htt->tx_q_state.paddr);
+ cfg->q_state.num_peers = cpu_to_le16(htt->tx_q_state.num_peers);
+ cfg->q_state.num_tids = cpu_to_le16(htt->tx_q_state.num_tids);
+ cfg->q_state.record_size = HTT_TX_Q_STATE_ENTRY_SIZE;
+ cfg->q_state.record_multiplier = HTT_TX_Q_STATE_ENTRY_MULTIPLIER;
+
+ ath10k_dbg(ar, ATH10K_DBG_HTT, "htt frag desc bank cmd\n");
+
+ ret = ath10k_htc_send(&htt->ar->htc, htt->eid, skb);
+ if (ret) {
+ ath10k_warn(ar, "failed to send frag desc bank cfg request: %d\n",
+ ret);
+ dev_kfree_skb_any(skb);
+ return ret;
+ }
+
+ return 0;
+}
+
+static void ath10k_htt_fill_rx_desc_offset_32(void *rx_ring)
+{
+ struct htt_rx_ring_setup_ring32 *ring =
+ (struct htt_rx_ring_setup_ring32 *)rx_ring;
+
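+ /* Offsets are expressed in units of 4-byte words, hence the
+ * division below.
+ */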
+#define desc_offset(x) (offsetof(struct htt_rx_desc, x) / 4)
+ ring->mac80211_hdr_offset = __cpu_to_le16(desc_offset(rx_hdr_status));
+ ring->msdu_payload_offset = __cpu_to_le16(desc_offset(msdu_payload));
+ ring->ppdu_start_offset = __cpu_to_le16(desc_offset(ppdu_start));
+ ring->ppdu_end_offset = __cpu_to_le16(desc_offset(ppdu_end));
+ ring->mpdu_start_offset = __cpu_to_le16(desc_offset(mpdu_start));
+ ring->mpdu_end_offset = __cpu_to_le16(desc_offset(mpdu_end));
+ ring->msdu_start_offset = __cpu_to_le16(desc_offset(msdu_start));
+ ring->msdu_end_offset = __cpu_to_le16(desc_offset(msdu_end));
+ ring->rx_attention_offset = __cpu_to_le16(desc_offset(attention));
+ ring->frag_info_offset = __cpu_to_le16(desc_offset(frag_info));
+#undef desc_offset
+}
+
+static void ath10k_htt_fill_rx_desc_offset_64(void *rx_ring)
+{
+ struct htt_rx_ring_setup_ring64 *ring =
+ (struct htt_rx_ring_setup_ring64 *)rx_ring;
+
+#define desc_offset(x) (offsetof(struct htt_rx_desc, x) / 4)
+ ring->mac80211_hdr_offset = __cpu_to_le16(desc_offset(rx_hdr_status));
+ ring->msdu_payload_offset = __cpu_to_le16(desc_offset(msdu_payload));
+ ring->ppdu_start_offset = __cpu_to_le16(desc_offset(ppdu_start));
+ ring->ppdu_end_offset = __cpu_to_le16(desc_offset(ppdu_end));
+ ring->mpdu_start_offset = __cpu_to_le16(desc_offset(mpdu_start));
+ ring->mpdu_end_offset = __cpu_to_le16(desc_offset(mpdu_end));
+ ring->msdu_start_offset = __cpu_to_le16(desc_offset(msdu_start));
+ ring->msdu_end_offset = __cpu_to_le16(desc_offset(msdu_end));
+ ring->rx_attention_offset = __cpu_to_le16(desc_offset(attention));
+ ring->frag_info_offset = __cpu_to_le16(desc_offset(frag_info));
+#undef desc_offset
+}
+
+static int ath10k_htt_send_rx_ring_cfg_32(struct ath10k_htt *htt)
+{
+ struct ath10k *ar = htt->ar;
+ struct sk_buff *skb;
+ struct htt_cmd *cmd;
+ struct htt_rx_ring_setup_ring32 *ring;
+ const int num_rx_ring = 1;
+ u16 flags;
+ u32 fw_idx;
+ int len;
+ int ret;
+
+ /* the HW expects the buffer to be an integral number of 4-byte
+ * "words"
+ */
+ BUILD_BUG_ON(!IS_ALIGNED(HTT_RX_BUF_SIZE, 4));
+ BUILD_BUG_ON((HTT_RX_BUF_SIZE & HTT_MAX_CACHE_LINE_SIZE_MASK) != 0);
+
+ len = sizeof(cmd->hdr) + sizeof(cmd->rx_setup_32.hdr)
+ + (sizeof(*ring) * num_rx_ring);
+ skb = ath10k_htc_alloc_skb(ar, len);
+ if (!skb)
+ return -ENOMEM;
+
+ skb_put(skb, len);
+
+ cmd = (struct htt_cmd *)skb->data;
+ ring = &cmd->rx_setup_32.rings[0];
+
+ cmd->hdr.msg_type = HTT_H2T_MSG_TYPE_RX_RING_CFG;
+ cmd->rx_setup_32.hdr.num_rings = 1;
+
+ /* FIXME: do we need all of this? */
+ flags = 0;
+ flags |= HTT_RX_RING_FLAGS_MAC80211_HDR;
+ flags |= HTT_RX_RING_FLAGS_MSDU_PAYLOAD;
+ flags |= HTT_RX_RING_FLAGS_PPDU_START;
+ flags |= HTT_RX_RING_FLAGS_PPDU_END;
+ flags |= HTT_RX_RING_FLAGS_MPDU_START;
+ flags |= HTT_RX_RING_FLAGS_MPDU_END;
+ flags |= HTT_RX_RING_FLAGS_MSDU_START;
+ flags |= HTT_RX_RING_FLAGS_MSDU_END;
+ flags |= HTT_RX_RING_FLAGS_RX_ATTENTION;
+ flags |= HTT_RX_RING_FLAGS_FRAG_INFO;
+ flags |= HTT_RX_RING_FLAGS_UNICAST_RX;
+ flags |= HTT_RX_RING_FLAGS_MULTICAST_RX;
+ flags |= HTT_RX_RING_FLAGS_CTRL_RX;
+ flags |= HTT_RX_RING_FLAGS_MGMT_RX;
+ flags |= HTT_RX_RING_FLAGS_NULL_RX;
+ flags |= HTT_RX_RING_FLAGS_PHY_DATA_RX;
+
+ fw_idx = __le32_to_cpu(*htt->rx_ring.alloc_idx.vaddr);
+
+ ring->fw_idx_shadow_reg_paddr =
+ __cpu_to_le32(htt->rx_ring.alloc_idx.paddr);
+ ring->rx_ring_base_paddr = __cpu_to_le32(htt->rx_ring.base_paddr);
+ ring->rx_ring_len = __cpu_to_le16(htt->rx_ring.size);
+ ring->rx_ring_bufsize = __cpu_to_le16(HTT_RX_BUF_SIZE);
+ ring->flags = __cpu_to_le16(flags);
+ ring->fw_idx_init_val = __cpu_to_le16(fw_idx);
+
+ ath10k_htt_fill_rx_desc_offset_32(ring);
+ ret = ath10k_htc_send(&htt->ar->htc, htt->eid, skb);
+ if (ret) {
+ dev_kfree_skb_any(skb);
+ return ret;
+ }
+
+ return 0;
+}
+
+static int ath10k_htt_send_rx_ring_cfg_64(struct ath10k_htt *htt)
+{
+ struct ath10k *ar = htt->ar;
+ struct sk_buff *skb;
+ struct htt_cmd *cmd;
+ struct htt_rx_ring_setup_ring64 *ring;
+ const int num_rx_ring = 1;
+ u16 flags;
+ u32 fw_idx;
+ int len;
+ int ret;
+
+ /* HW expects the buffer to be an integral number of 4-byte
+ * "words"
+ */
+ BUILD_BUG_ON(!IS_ALIGNED(HTT_RX_BUF_SIZE, 4));
+ BUILD_BUG_ON((HTT_RX_BUF_SIZE & HTT_MAX_CACHE_LINE_SIZE_MASK) != 0);
+
+ len = sizeof(cmd->hdr) + sizeof(cmd->rx_setup_64.hdr)
+ + (sizeof(*ring) * num_rx_ring);
+ skb = ath10k_htc_alloc_skb(ar, len);
+ if (!skb)
+ return -ENOMEM;
+
+ skb_put(skb, len);
+
+ cmd = (struct htt_cmd *)skb->data;
+ ring = &cmd->rx_setup_64.rings[0];
+
+ cmd->hdr.msg_type = HTT_H2T_MSG_TYPE_RX_RING_CFG;
+ cmd->rx_setup_64.hdr.num_rings = 1;
+
+ flags = 0;
+ flags |= HTT_RX_RING_FLAGS_MAC80211_HDR;
+ flags |= HTT_RX_RING_FLAGS_MSDU_PAYLOAD;
+ flags |= HTT_RX_RING_FLAGS_PPDU_START;
+ flags |= HTT_RX_RING_FLAGS_PPDU_END;
+ flags |= HTT_RX_RING_FLAGS_MPDU_START;
+ flags |= HTT_RX_RING_FLAGS_MPDU_END;
+ flags |= HTT_RX_RING_FLAGS_MSDU_START;
+ flags |= HTT_RX_RING_FLAGS_MSDU_END;
+ flags |= HTT_RX_RING_FLAGS_RX_ATTENTION;
+ flags |= HTT_RX_RING_FLAGS_FRAG_INFO;
+ flags |= HTT_RX_RING_FLAGS_UNICAST_RX;
+ flags |= HTT_RX_RING_FLAGS_MULTICAST_RX;
+ flags |= HTT_RX_RING_FLAGS_CTRL_RX;
+ flags |= HTT_RX_RING_FLAGS_MGMT_RX;
+ flags |= HTT_RX_RING_FLAGS_NULL_RX;
+ flags |= HTT_RX_RING_FLAGS_PHY_DATA_RX;
+
+ fw_idx = __le32_to_cpu(*htt->rx_ring.alloc_idx.vaddr);
+
+ ring->fw_idx_shadow_reg_paddr = __cpu_to_le64(htt->rx_ring.alloc_idx.paddr);
+ ring->rx_ring_base_paddr = __cpu_to_le64(htt->rx_ring.base_paddr);
+ ring->rx_ring_len = __cpu_to_le16(htt->rx_ring.size);
+ ring->rx_ring_bufsize = __cpu_to_le16(HTT_RX_BUF_SIZE);
+ ring->flags = __cpu_to_le16(flags);
+ ring->fw_idx_init_val = __cpu_to_le16(fw_idx);
+
+ ath10k_htt_fill_rx_desc_offset_64(ring);
+ ret = ath10k_htc_send(&htt->ar->htc, htt->eid, skb);
+ if (ret) {
+ dev_kfree_skb_any(skb);
+ return ret;
+ }
+
+ return 0;
+}
+
+int ath10k_htt_h2t_aggr_cfg_msg(struct ath10k_htt *htt,
+ u8 max_subfrms_ampdu,
+ u8 max_subfrms_amsdu)
+{
+ struct ath10k *ar = htt->ar;
+ struct htt_aggr_conf *aggr_conf;
+ struct sk_buff *skb;
+ struct htt_cmd *cmd;
+ int len;
+ int ret;
+
+ /* Firmware defaults are: amsdu = 3 and ampdu = 64 */
+
+ if (max_subfrms_ampdu == 0 || max_subfrms_ampdu > 64)
+ return -EINVAL;
+
+ if (max_subfrms_amsdu == 0 || max_subfrms_amsdu > 31)
+ return -EINVAL;
+
+ len = sizeof(cmd->hdr);
+ len += sizeof(cmd->aggr_conf);
+
+ skb = ath10k_htc_alloc_skb(ar, len);
+ if (!skb)
+ return -ENOMEM;
+
+ skb_put(skb, len);
+ cmd = (struct htt_cmd *)skb->data;
+ cmd->hdr.msg_type = HTT_H2T_MSG_TYPE_AGGR_CFG;
+
+ aggr_conf = &cmd->aggr_conf;
+ aggr_conf->max_num_ampdu_subframes = max_subfrms_ampdu;
+ aggr_conf->max_num_amsdu_subframes = max_subfrms_amsdu;
+
+	ath10k_dbg(ar, ATH10K_DBG_HTT, "htt h2t aggr cfg msg amsdu %d ampdu %d\n",
+ aggr_conf->max_num_amsdu_subframes,
+ aggr_conf->max_num_ampdu_subframes);
+
+ ret = ath10k_htc_send(&htt->ar->htc, htt->eid, skb);
+ if (ret) {
+ dev_kfree_skb_any(skb);
+ return ret;
+ }
+
+ return 0;
+}
+
+int ath10k_htt_tx_fetch_resp(struct ath10k *ar,
+ __le32 token,
+ __le16 fetch_seq_num,
+ struct htt_tx_fetch_record *records,
+ size_t num_records)
+{
+ struct sk_buff *skb;
+ struct htt_cmd *cmd;
+ const u16 resp_id = 0;
+ int len = 0;
+ int ret;
+
+	/* Response IDs are echoed back only for the host driver's
+	 * convenience. They aren't used for anything in the driver yet, so
+	 * use 0.
+ */
+
+ len += sizeof(cmd->hdr);
+ len += sizeof(cmd->tx_fetch_resp);
+ len += sizeof(cmd->tx_fetch_resp.records[0]) * num_records;
+
+ skb = ath10k_htc_alloc_skb(ar, len);
+ if (!skb)
+ return -ENOMEM;
+
+ skb_put(skb, len);
+ cmd = (struct htt_cmd *)skb->data;
+ cmd->hdr.msg_type = HTT_H2T_MSG_TYPE_TX_FETCH_RESP;
+ cmd->tx_fetch_resp.resp_id = cpu_to_le16(resp_id);
+ cmd->tx_fetch_resp.fetch_seq_num = fetch_seq_num;
+ cmd->tx_fetch_resp.num_records = cpu_to_le16(num_records);
+ cmd->tx_fetch_resp.token = token;
+
+ memcpy(cmd->tx_fetch_resp.records, records,
+ sizeof(records[0]) * num_records);
+
+ ret = ath10k_htc_send(&ar->htc, ar->htt.eid, skb);
+ if (ret) {
+ ath10k_warn(ar, "failed to submit htc command: %d\n", ret);
+ goto err_free_skb;
+ }
+
+ return 0;
+
+err_free_skb:
+ dev_kfree_skb_any(skb);
+
+ return ret;
+}
+
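+/* Pick the vdev a frame should be transmitted on: off-channel frames go
+ * to the scan vdev, frames with an associated vif use that vif's vdev,
+ * frames without a vif fall back to the monitor vdev while monitoring is
+ * active, and vdev 0 is used as a last resort.
+ */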
+static u8 ath10k_htt_tx_get_vdev_id(struct ath10k *ar, struct sk_buff *skb)
+{
+ struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
+ struct ath10k_skb_cb *cb = ATH10K_SKB_CB(skb);
+ struct ath10k_vif *arvif;
+
+ if (info->flags & IEEE80211_TX_CTL_TX_OFFCHAN) {
+ return ar->scan.vdev_id;
+ } else if (cb->vif) {
+ arvif = (void *)cb->vif->drv_priv;
+ return arvif->vdev_id;
+ } else if (ar->monitor_started) {
+ return ar->monitor_vdev_id;
+ } else {
+ return 0;
+ }
+}
+
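+/* Management frames use the dedicated management TID, QoS data frames
+ * take their TID from the skb priority bits, and everything else
+ * (including multicast/broadcast) uses the non-QoS TID.
+ */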
+static u8 ath10k_htt_tx_get_tid(struct sk_buff *skb, bool is_eth)
+{
+ struct ieee80211_hdr *hdr = (void *)skb->data;
+ struct ath10k_skb_cb *cb = ATH10K_SKB_CB(skb);
+
+ if (!is_eth && ieee80211_is_mgmt(hdr->frame_control))
+ return HTT_DATA_TX_EXT_TID_MGMT;
+ else if (cb->flags & ATH10K_SKB_F_QOS)
+ return skb->priority & IEEE80211_QOS_CTL_TID_MASK;
+ else
+ return HTT_DATA_TX_EXT_TID_NON_QOS_MCAST_BCAST;
+}
+
+int ath10k_htt_mgmt_tx(struct ath10k_htt *htt, struct sk_buff *msdu)
+{
+ struct ath10k *ar = htt->ar;
+ struct device *dev = ar->dev;
+ struct sk_buff *txdesc = NULL;
+ struct htt_cmd *cmd;
+ struct ath10k_skb_cb *skb_cb = ATH10K_SKB_CB(msdu);
+ u8 vdev_id = ath10k_htt_tx_get_vdev_id(ar, msdu);
+ int len = 0;
+ int msdu_id = -1;
+ int res;
+ struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)msdu->data;
+
+ len += sizeof(cmd->hdr);
+ len += sizeof(cmd->mgmt_tx);
+
+ res = ath10k_htt_tx_alloc_msdu_id(htt, msdu);
+ if (res < 0)
+ goto err;
+
+ msdu_id = res;
+
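+	/* Protected robust management frames (action, deauth, disassoc) are
+	 * encrypted by the hardware, so extend the mapped length to leave
+	 * room for the CCMP MIC that gets appended.
+	 */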
+ if ((ieee80211_is_action(hdr->frame_control) ||
+ ieee80211_is_deauth(hdr->frame_control) ||
+ ieee80211_is_disassoc(hdr->frame_control)) &&
+ ieee80211_has_protected(hdr->frame_control)) {
+ skb_put(msdu, IEEE80211_CCMP_MIC_LEN);
+ }
+
+ txdesc = ath10k_htc_alloc_skb(ar, len);
+ if (!txdesc) {
+ res = -ENOMEM;
+ goto err_free_msdu_id;
+ }
+
+ skb_cb->paddr = dma_map_single(dev, msdu->data, msdu->len,
+ DMA_TO_DEVICE);
+ res = dma_mapping_error(dev, skb_cb->paddr);
+ if (res) {
+ res = -EIO;
+ goto err_free_txdesc;
+ }
+
+ skb_put(txdesc, len);
+ cmd = (struct htt_cmd *)txdesc->data;
+ memset(cmd, 0, len);
+
+ cmd->hdr.msg_type = HTT_H2T_MSG_TYPE_MGMT_TX;
+ cmd->mgmt_tx.msdu_paddr = __cpu_to_le32(ATH10K_SKB_CB(msdu)->paddr);
+ cmd->mgmt_tx.len = __cpu_to_le32(msdu->len);
+ cmd->mgmt_tx.desc_id = __cpu_to_le32(msdu_id);
+ cmd->mgmt_tx.vdev_id = __cpu_to_le32(vdev_id);
+ memcpy(cmd->mgmt_tx.hdr, msdu->data,
+ min_t(int, msdu->len, HTT_MGMT_FRM_HDR_DOWNLOAD_LEN));
+
+ res = ath10k_htc_send(&htt->ar->htc, htt->eid, txdesc);
+ if (res)
+ goto err_unmap_msdu;
+
+ return 0;
+
+err_unmap_msdu:
+ dma_unmap_single(dev, skb_cb->paddr, msdu->len, DMA_TO_DEVICE);
+err_free_txdesc:
+ dev_kfree_skb_any(txdesc);
+err_free_msdu_id:
+ spin_lock_bh(&htt->tx_lock);
+ ath10k_htt_tx_free_msdu_id(htt, msdu_id);
+ spin_unlock_bh(&htt->tx_lock);
+err:
+ return res;
+}
+
+static int ath10k_htt_tx_32(struct ath10k_htt *htt,
+ enum ath10k_hw_txrx_mode txmode,
+ struct sk_buff *msdu)
+{
+ struct ath10k *ar = htt->ar;
+ struct device *dev = ar->dev;
+ struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)msdu->data;
+ struct ieee80211_tx_info *info = IEEE80211_SKB_CB(msdu);
+ struct ath10k_skb_cb *skb_cb = ATH10K_SKB_CB(msdu);
+ struct ath10k_hif_sg_item sg_items[2];
+ struct ath10k_htt_txbuf_32 *txbuf;
+ struct htt_data_tx_desc_frag *frags;
+ bool is_eth = (txmode == ATH10K_HW_TXRX_ETHERNET);
+ u8 vdev_id = ath10k_htt_tx_get_vdev_id(ar, msdu);
+ u8 tid = ath10k_htt_tx_get_tid(msdu, is_eth);
+ int prefetch_len;
+ int res;
+ u8 flags0 = 0;
+ u16 msdu_id, flags1 = 0;
+ u16 freq = 0;
+ u32 frags_paddr = 0;
+ u32 txbuf_paddr;
+ struct htt_msdu_ext_desc *ext_desc = NULL;
+ struct htt_msdu_ext_desc *ext_desc_t = NULL;
+
+ res = ath10k_htt_tx_alloc_msdu_id(htt, msdu);
+ if (res < 0)
+ goto err;
+
+ msdu_id = res;
+
+ prefetch_len = min(htt->prefetch_len, msdu->len);
+ prefetch_len = roundup(prefetch_len, 4);
+
+ txbuf = htt->txbuf.vaddr_txbuff_32 + msdu_id;
+ txbuf_paddr = htt->txbuf.paddr +
+ (sizeof(struct ath10k_htt_txbuf_32) * msdu_id);
+
+ if ((ieee80211_is_action(hdr->frame_control) ||
+ ieee80211_is_deauth(hdr->frame_control) ||
+ ieee80211_is_disassoc(hdr->frame_control)) &&
+ ieee80211_has_protected(hdr->frame_control)) {
+ skb_put(msdu, IEEE80211_CCMP_MIC_LEN);
+ } else if (!(skb_cb->flags & ATH10K_SKB_F_NO_HWCRYPT) &&
+ txmode == ATH10K_HW_TXRX_RAW &&
+ ieee80211_has_protected(hdr->frame_control)) {
+ skb_put(msdu, IEEE80211_CCMP_MIC_LEN);
+ }
+
+ skb_cb->paddr = dma_map_single(dev, msdu->data, msdu->len,
+ DMA_TO_DEVICE);
+ res = dma_mapping_error(dev, skb_cb->paddr);
+ if (res) {
+ res = -EIO;
+ goto err_free_msdu_id;
+ }
+
+ if (unlikely(info->flags & IEEE80211_TX_CTL_TX_OFFCHAN))
+ freq = ar->scan.roc_freq;
+
+ switch (txmode) {
+ case ATH10K_HW_TXRX_RAW:
+ case ATH10K_HW_TXRX_NATIVE_WIFI:
+ flags0 |= HTT_DATA_TX_DESC_FLAGS0_MAC_HDR_PRESENT;
+ /* fall through */
+ case ATH10K_HW_TXRX_ETHERNET:
+ if (ar->hw_params.continuous_frag_desc) {
+ ext_desc_t = htt->frag_desc.vaddr_desc_32;
+ memset(&ext_desc_t[msdu_id], 0,
+ sizeof(struct htt_msdu_ext_desc));
+ frags = (struct htt_data_tx_desc_frag *)
+ &ext_desc_t[msdu_id].frags;
+ ext_desc = &ext_desc_t[msdu_id];
+ frags[0].tword_addr.paddr_lo =
+ __cpu_to_le32(skb_cb->paddr);
+ frags[0].tword_addr.paddr_hi = 0;
+ frags[0].tword_addr.len_16 = __cpu_to_le16(msdu->len);
+
+ frags_paddr = htt->frag_desc.paddr +
+ (sizeof(struct htt_msdu_ext_desc) * msdu_id);
+ } else {
+ frags = txbuf->frags;
+ frags[0].dword_addr.paddr =
+ __cpu_to_le32(skb_cb->paddr);
+ frags[0].dword_addr.len = __cpu_to_le32(msdu->len);
+ frags[1].dword_addr.paddr = 0;
+ frags[1].dword_addr.len = 0;
+
+ frags_paddr = txbuf_paddr;
+ }
+ flags0 |= SM(txmode, HTT_DATA_TX_DESC_FLAGS0_PKT_TYPE);
+ break;
+ case ATH10K_HW_TXRX_MGMT:
+ flags0 |= SM(ATH10K_HW_TXRX_MGMT,
+ HTT_DATA_TX_DESC_FLAGS0_PKT_TYPE);
+ flags0 |= HTT_DATA_TX_DESC_FLAGS0_MAC_HDR_PRESENT;
+
+ frags_paddr = skb_cb->paddr;
+ break;
+ }
+
+ /* Normally all commands go through HTC which manages tx credits for
+ * each endpoint and notifies when tx is completed.
+ *
+ * HTT endpoint is creditless so there's no need to care about HTC
+ * flags. In that case it is trivial to fill the HTC header here.
+ *
+ * MSDU transmission is considered completed upon HTT event. This
+ * implies no relevant resources can be freed until after the event is
+ * received. That's why HTC tx completion handler itself is ignored by
+ * setting NULL to transfer_context for all sg items.
+ *
+ * There is simply no point in pushing HTT TX_FRM through HTC tx path
+ * as it's a waste of resources. By bypassing HTC it is possible to
+ * avoid extra memory allocations, compress data structures and thus
+ * improve performance.
+ */
+
+ txbuf->htc_hdr.eid = htt->eid;
+ txbuf->htc_hdr.len = __cpu_to_le16(sizeof(txbuf->cmd_hdr) +
+ sizeof(txbuf->cmd_tx) +
+ prefetch_len);
+ txbuf->htc_hdr.flags = 0;
+
+ if (skb_cb->flags & ATH10K_SKB_F_NO_HWCRYPT)
+ flags0 |= HTT_DATA_TX_DESC_FLAGS0_NO_ENCRYPT;
+
+ flags1 |= SM((u16)vdev_id, HTT_DATA_TX_DESC_FLAGS1_VDEV_ID);
+ flags1 |= SM((u16)tid, HTT_DATA_TX_DESC_FLAGS1_EXT_TID);
+ if (msdu->ip_summed == CHECKSUM_PARTIAL &&
+ !test_bit(ATH10K_FLAG_RAW_MODE, &ar->dev_flags)) {
+ flags1 |= HTT_DATA_TX_DESC_FLAGS1_CKSUM_L3_OFFLOAD;
+ flags1 |= HTT_DATA_TX_DESC_FLAGS1_CKSUM_L4_OFFLOAD;
+ if (ar->hw_params.continuous_frag_desc)
+ ext_desc->flags |= HTT_MSDU_CHECKSUM_ENABLE;
+ }
+
+ /* Prevent firmware from sending up tx inspection requests. There's
+ * nothing ath10k can do with frames requested for inspection so force
+	 * it to simply rely on a regular tx completion with discard status.
+ */
+ flags1 |= HTT_DATA_TX_DESC_FLAGS1_POSTPONED;
+
+ txbuf->cmd_hdr.msg_type = HTT_H2T_MSG_TYPE_TX_FRM;
+ txbuf->cmd_tx.flags0 = flags0;
+ txbuf->cmd_tx.flags1 = __cpu_to_le16(flags1);
+ txbuf->cmd_tx.len = __cpu_to_le16(msdu->len);
+ txbuf->cmd_tx.id = __cpu_to_le16(msdu_id);
+ txbuf->cmd_tx.frags_paddr = __cpu_to_le32(frags_paddr);
+ if (ath10k_mac_tx_frm_has_freq(ar)) {
+ txbuf->cmd_tx.offchan_tx.peerid =
+ __cpu_to_le16(HTT_INVALID_PEERID);
+ txbuf->cmd_tx.offchan_tx.freq =
+ __cpu_to_le16(freq);
+ } else {
+ txbuf->cmd_tx.peerid =
+ __cpu_to_le32(HTT_INVALID_PEERID);
+ }
+
+ trace_ath10k_htt_tx(ar, msdu_id, msdu->len, vdev_id, tid);
+ ath10k_dbg(ar, ATH10K_DBG_HTT,
+ "htt tx flags0 %hhu flags1 %hu len %d id %hu frags_paddr %pad, msdu_paddr %pad vdev %hhu tid %hhu freq %hu\n",
+ flags0, flags1, msdu->len, msdu_id, &frags_paddr,
+ &skb_cb->paddr, vdev_id, tid, freq);
+ ath10k_dbg_dump(ar, ATH10K_DBG_HTT_DUMP, NULL, "htt tx msdu: ",
+ msdu->data, msdu->len);
+ trace_ath10k_tx_hdr(ar, msdu->data, msdu->len);
+ trace_ath10k_tx_payload(ar, msdu->data, msdu->len);
+
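+	/* Two scatter-gather items are handed to HIF: the first covers the
+	 * HTC header and HTT command descriptors inside the pre-mapped txbuf
+	 * (its paddr skips the frags member sitting at the start of the
+	 * structure), the second covers the first prefetch_len bytes of the
+	 * MSDU itself.
+	 */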
+ sg_items[0].transfer_id = 0;
+ sg_items[0].transfer_context = NULL;
+ sg_items[0].vaddr = &txbuf->htc_hdr;
+ sg_items[0].paddr = txbuf_paddr +
+ sizeof(txbuf->frags);
+ sg_items[0].len = sizeof(txbuf->htc_hdr) +
+ sizeof(txbuf->cmd_hdr) +
+ sizeof(txbuf->cmd_tx);
+
+ sg_items[1].transfer_id = 0;
+ sg_items[1].transfer_context = NULL;
+ sg_items[1].vaddr = msdu->data;
+ sg_items[1].paddr = skb_cb->paddr;
+ sg_items[1].len = prefetch_len;
+
+ res = ath10k_hif_tx_sg(htt->ar,
+ htt->ar->htc.endpoint[htt->eid].ul_pipe_id,
+ sg_items, ARRAY_SIZE(sg_items));
+ if (res)
+ goto err_unmap_msdu;
+
+ return 0;
+
+err_unmap_msdu:
+ dma_unmap_single(dev, skb_cb->paddr, msdu->len, DMA_TO_DEVICE);
+err_free_msdu_id:
+ spin_lock_bh(&htt->tx_lock);
+ ath10k_htt_tx_free_msdu_id(htt, msdu_id);
+ spin_unlock_bh(&htt->tx_lock);
+err:
+ return res;
+}
+
+static int ath10k_htt_tx_64(struct ath10k_htt *htt,
+ enum ath10k_hw_txrx_mode txmode,
+ struct sk_buff *msdu)
+{
+ struct ath10k *ar = htt->ar;
+ struct device *dev = ar->dev;
+ struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)msdu->data;
+ struct ieee80211_tx_info *info = IEEE80211_SKB_CB(msdu);
+ struct ath10k_skb_cb *skb_cb = ATH10K_SKB_CB(msdu);
+ struct ath10k_hif_sg_item sg_items[2];
+ struct ath10k_htt_txbuf_64 *txbuf;
+ struct htt_data_tx_desc_frag *frags;
+ bool is_eth = (txmode == ATH10K_HW_TXRX_ETHERNET);
+ u8 vdev_id = ath10k_htt_tx_get_vdev_id(ar, msdu);
+ u8 tid = ath10k_htt_tx_get_tid(msdu, is_eth);
+ int prefetch_len;
+ int res;
+ u8 flags0 = 0;
+ u16 msdu_id, flags1 = 0;
+ u16 freq = 0;
+ dma_addr_t frags_paddr = 0;
+ u32 txbuf_paddr;
+ struct htt_msdu_ext_desc_64 *ext_desc = NULL;
+ struct htt_msdu_ext_desc_64 *ext_desc_t = NULL;
+
+ res = ath10k_htt_tx_alloc_msdu_id(htt, msdu);
+ if (res < 0)
+ goto err;
+
+ msdu_id = res;
+
+ prefetch_len = min(htt->prefetch_len, msdu->len);
+ prefetch_len = roundup(prefetch_len, 4);
+
+ txbuf = htt->txbuf.vaddr_txbuff_64 + msdu_id;
+ txbuf_paddr = htt->txbuf.paddr +
+ (sizeof(struct ath10k_htt_txbuf_64) * msdu_id);
+
+ if ((ieee80211_is_action(hdr->frame_control) ||
+ ieee80211_is_deauth(hdr->frame_control) ||
+ ieee80211_is_disassoc(hdr->frame_control)) &&
+ ieee80211_has_protected(hdr->frame_control)) {
+ skb_put(msdu, IEEE80211_CCMP_MIC_LEN);
+ } else if (!(skb_cb->flags & ATH10K_SKB_F_NO_HWCRYPT) &&
+ txmode == ATH10K_HW_TXRX_RAW &&
+ ieee80211_has_protected(hdr->frame_control)) {
+ skb_put(msdu, IEEE80211_CCMP_MIC_LEN);
+ }
+
+ skb_cb->paddr = dma_map_single(dev, msdu->data, msdu->len,
+ DMA_TO_DEVICE);
+ res = dma_mapping_error(dev, skb_cb->paddr);
+ if (res) {
+ res = -EIO;
+ goto err_free_msdu_id;
+ }
+
+ if (unlikely(info->flags & IEEE80211_TX_CTL_TX_OFFCHAN))
+ freq = ar->scan.roc_freq;
+
+ switch (txmode) {
+ case ATH10K_HW_TXRX_RAW:
+ case ATH10K_HW_TXRX_NATIVE_WIFI:
+ flags0 |= HTT_DATA_TX_DESC_FLAGS0_MAC_HDR_PRESENT;
+ /* fall through */
+ case ATH10K_HW_TXRX_ETHERNET:
+ if (ar->hw_params.continuous_frag_desc) {
+ ext_desc_t = htt->frag_desc.vaddr_desc_64;
+ memset(&ext_desc_t[msdu_id], 0,
+ sizeof(struct htt_msdu_ext_desc_64));
+ frags = (struct htt_data_tx_desc_frag *)
+ &ext_desc_t[msdu_id].frags;
+ ext_desc = &ext_desc_t[msdu_id];
+ frags[0].tword_addr.paddr_lo =
+ __cpu_to_le32(skb_cb->paddr);
+ frags[0].tword_addr.paddr_hi =
+ __cpu_to_le16(upper_32_bits(skb_cb->paddr));
+ frags[0].tword_addr.len_16 = __cpu_to_le16(msdu->len);
+
+ frags_paddr = htt->frag_desc.paddr +
+ (sizeof(struct htt_msdu_ext_desc_64) * msdu_id);
+ } else {
+ frags = txbuf->frags;
+ frags[0].tword_addr.paddr_lo =
+ __cpu_to_le32(skb_cb->paddr);
+ frags[0].tword_addr.paddr_hi =
+ __cpu_to_le16(upper_32_bits(skb_cb->paddr));
+ frags[0].tword_addr.len_16 = __cpu_to_le16(msdu->len);
+ frags[1].tword_addr.paddr_lo = 0;
+ frags[1].tword_addr.paddr_hi = 0;
+ frags[1].tword_addr.len_16 = 0;
+ }
+ flags0 |= SM(txmode, HTT_DATA_TX_DESC_FLAGS0_PKT_TYPE);
+ break;
+ case ATH10K_HW_TXRX_MGMT:
+ flags0 |= SM(ATH10K_HW_TXRX_MGMT,
+ HTT_DATA_TX_DESC_FLAGS0_PKT_TYPE);
+ flags0 |= HTT_DATA_TX_DESC_FLAGS0_MAC_HDR_PRESENT;
+
+ frags_paddr = skb_cb->paddr;
+ break;
+ }
+
+ /* Normally all commands go through HTC which manages tx credits for
+ * each endpoint and notifies when tx is completed.
+ *
+ * HTT endpoint is creditless so there's no need to care about HTC
+ * flags. In that case it is trivial to fill the HTC header here.
+ *
+ * MSDU transmission is considered completed upon HTT event. This
+ * implies no relevant resources can be freed until after the event is
+ * received. That's why HTC tx completion handler itself is ignored by
+ * setting NULL to transfer_context for all sg items.
+ *
+ * There is simply no point in pushing HTT TX_FRM through HTC tx path
+ * as it's a waste of resources. By bypassing HTC it is possible to
+ * avoid extra memory allocations, compress data structures and thus
+ * improve performance.
+ */
+
+ txbuf->htc_hdr.eid = htt->eid;
+ txbuf->htc_hdr.len = __cpu_to_le16(sizeof(txbuf->cmd_hdr) +
+ sizeof(txbuf->cmd_tx) +
+ prefetch_len);
+ txbuf->htc_hdr.flags = 0;
+
+ if (skb_cb->flags & ATH10K_SKB_F_NO_HWCRYPT)
+ flags0 |= HTT_DATA_TX_DESC_FLAGS0_NO_ENCRYPT;
+
+ flags1 |= SM((u16)vdev_id, HTT_DATA_TX_DESC_FLAGS1_VDEV_ID);
+ flags1 |= SM((u16)tid, HTT_DATA_TX_DESC_FLAGS1_EXT_TID);
+ if (msdu->ip_summed == CHECKSUM_PARTIAL &&
+ !test_bit(ATH10K_FLAG_RAW_MODE, &ar->dev_flags)) {
+ flags1 |= HTT_DATA_TX_DESC_FLAGS1_CKSUM_L3_OFFLOAD;
+ flags1 |= HTT_DATA_TX_DESC_FLAGS1_CKSUM_L4_OFFLOAD;
+ if (ar->hw_params.continuous_frag_desc) {
+ memset(ext_desc->tso_flag, 0, sizeof(ext_desc->tso_flag));
+ ext_desc->tso_flag[3] |=
+ __cpu_to_le32(HTT_MSDU_CHECKSUM_ENABLE_64);
+ }
+ }
+
+ /* Prevent firmware from sending up tx inspection requests. There's
+ * nothing ath10k can do with frames requested for inspection so force
+	 * it to simply rely on a regular tx completion with discard status.
+ */
+ flags1 |= HTT_DATA_TX_DESC_FLAGS1_POSTPONED;
+
+ txbuf->cmd_hdr.msg_type = HTT_H2T_MSG_TYPE_TX_FRM;
+ txbuf->cmd_tx.flags0 = flags0;
+ txbuf->cmd_tx.flags1 = __cpu_to_le16(flags1);
+ txbuf->cmd_tx.len = __cpu_to_le16(msdu->len);
+ txbuf->cmd_tx.id = __cpu_to_le16(msdu_id);
+
+ /* fill fragment descriptor */
+ txbuf->cmd_tx.frags_paddr = __cpu_to_le64(frags_paddr);
+ if (ath10k_mac_tx_frm_has_freq(ar)) {
+ txbuf->cmd_tx.offchan_tx.peerid =
+ __cpu_to_le16(HTT_INVALID_PEERID);
+ txbuf->cmd_tx.offchan_tx.freq =
+ __cpu_to_le16(freq);
+ } else {
+ txbuf->cmd_tx.peerid =
+ __cpu_to_le32(HTT_INVALID_PEERID);
+ }
+
+ trace_ath10k_htt_tx(ar, msdu_id, msdu->len, vdev_id, tid);
+ ath10k_dbg(ar, ATH10K_DBG_HTT,
+ "htt tx flags0 %hhu flags1 %hu len %d id %hu frags_paddr %pad, msdu_paddr %pad vdev %hhu tid %hhu freq %hu\n",
+ flags0, flags1, msdu->len, msdu_id, &frags_paddr,
+ &skb_cb->paddr, vdev_id, tid, freq);
+ ath10k_dbg_dump(ar, ATH10K_DBG_HTT_DUMP, NULL, "htt tx msdu: ",
+ msdu->data, msdu->len);
+ trace_ath10k_tx_hdr(ar, msdu->data, msdu->len);
+ trace_ath10k_tx_payload(ar, msdu->data, msdu->len);
+
+ sg_items[0].transfer_id = 0;
+ sg_items[0].transfer_context = NULL;
+ sg_items[0].vaddr = &txbuf->htc_hdr;
+ sg_items[0].paddr = txbuf_paddr +
+ sizeof(txbuf->frags);
+ sg_items[0].len = sizeof(txbuf->htc_hdr) +
+ sizeof(txbuf->cmd_hdr) +
+ sizeof(txbuf->cmd_tx);
+
+ sg_items[1].transfer_id = 0;
+ sg_items[1].transfer_context = NULL;
+ sg_items[1].vaddr = msdu->data;
+ sg_items[1].paddr = skb_cb->paddr;
+ sg_items[1].len = prefetch_len;
+
+ res = ath10k_hif_tx_sg(htt->ar,
+ htt->ar->htc.endpoint[htt->eid].ul_pipe_id,
+ sg_items, ARRAY_SIZE(sg_items));
+ if (res)
+ goto err_unmap_msdu;
+
+ return 0;
+
+err_unmap_msdu:
+ dma_unmap_single(dev, skb_cb->paddr, msdu->len, DMA_TO_DEVICE);
+err_free_msdu_id:
+ spin_lock_bh(&htt->tx_lock);
+ ath10k_htt_tx_free_msdu_id(htt, msdu_id);
+ spin_unlock_bh(&htt->tx_lock);
+err:
+ return res;
+}
+
+static const struct ath10k_htt_tx_ops htt_tx_ops_32 = {
+ .htt_send_rx_ring_cfg = ath10k_htt_send_rx_ring_cfg_32,
+ .htt_send_frag_desc_bank_cfg = ath10k_htt_send_frag_desc_bank_cfg_32,
+ .htt_alloc_frag_desc = ath10k_htt_tx_alloc_cont_frag_desc_32,
+ .htt_free_frag_desc = ath10k_htt_tx_free_cont_frag_desc_32,
+ .htt_tx = ath10k_htt_tx_32,
+ .htt_alloc_txbuff = ath10k_htt_tx_alloc_cont_txbuf_32,
+ .htt_free_txbuff = ath10k_htt_tx_free_cont_txbuf_32,
+};
+
+static const struct ath10k_htt_tx_ops htt_tx_ops_64 = {
+ .htt_send_rx_ring_cfg = ath10k_htt_send_rx_ring_cfg_64,
+ .htt_send_frag_desc_bank_cfg = ath10k_htt_send_frag_desc_bank_cfg_64,
+ .htt_alloc_frag_desc = ath10k_htt_tx_alloc_cont_frag_desc_64,
+ .htt_free_frag_desc = ath10k_htt_tx_free_cont_frag_desc_64,
+ .htt_tx = ath10k_htt_tx_64,
+ .htt_alloc_txbuff = ath10k_htt_tx_alloc_cont_txbuf_64,
+ .htt_free_txbuff = ath10k_htt_tx_free_cont_txbuf_64,
+};
+
+void ath10k_htt_set_tx_ops(struct ath10k_htt *htt)
+{
+ struct ath10k *ar = htt->ar;
+
+ if (ar->hw_params.target_64bit)
+ htt->tx_ops = &htt_tx_ops_64;
+ else
+ htt->tx_ops = &htt_tx_ops_32;
+}
diff --git a/drivers/net/wireless/ath/ath10k/hw.c b/drivers/net/wireless/ath/ath10k/hw.c
new file mode 100644
index 000000000..476e0535f
--- /dev/null
+++ b/drivers/net/wireless/ath/ath10k/hw.c
@@ -0,0 +1,940 @@
+/*
+ * Copyright (c) 2014-2017 Qualcomm Atheros, Inc.
+ *
+ * Permission to use, copy, modify, and/or distribute this software for any
+ * purpose with or without fee is hereby granted, provided that the above
+ * copyright notice and this permission notice appear in all copies.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
+ * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
+ * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+ * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+ * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
+ * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+ */
+
+#include <linux/types.h>
+#include <linux/bitops.h>
+#include "core.h"
+#include "hw.h"
+#include "hif.h"
+#include "wmi-ops.h"
+#include "bmi.h"
+
+const struct ath10k_hw_regs qca988x_regs = {
+ .rtc_soc_base_address = 0x00004000,
+ .rtc_wmac_base_address = 0x00005000,
+ .soc_core_base_address = 0x00009000,
+ .wlan_mac_base_address = 0x00020000,
+ .ce_wrapper_base_address = 0x00057000,
+ .ce0_base_address = 0x00057400,
+ .ce1_base_address = 0x00057800,
+ .ce2_base_address = 0x00057c00,
+ .ce3_base_address = 0x00058000,
+ .ce4_base_address = 0x00058400,
+ .ce5_base_address = 0x00058800,
+ .ce6_base_address = 0x00058c00,
+ .ce7_base_address = 0x00059000,
+ .soc_reset_control_si0_rst_mask = 0x00000001,
+ .soc_reset_control_ce_rst_mask = 0x00040000,
+ .soc_chip_id_address = 0x000000ec,
+ .scratch_3_address = 0x00000030,
+ .fw_indicator_address = 0x00009030,
+ .pcie_local_base_address = 0x00080000,
+ .ce_wrap_intr_sum_host_msi_lsb = 0x00000008,
+ .ce_wrap_intr_sum_host_msi_mask = 0x0000ff00,
+ .pcie_intr_fw_mask = 0x00000400,
+ .pcie_intr_ce_mask_all = 0x0007f800,
+ .pcie_intr_clr_address = 0x00000014,
+};
+
+const struct ath10k_hw_regs qca6174_regs = {
+ .rtc_soc_base_address = 0x00000800,
+ .rtc_wmac_base_address = 0x00001000,
+ .soc_core_base_address = 0x0003a000,
+ .wlan_mac_base_address = 0x00010000,
+ .ce_wrapper_base_address = 0x00034000,
+ .ce0_base_address = 0x00034400,
+ .ce1_base_address = 0x00034800,
+ .ce2_base_address = 0x00034c00,
+ .ce3_base_address = 0x00035000,
+ .ce4_base_address = 0x00035400,
+ .ce5_base_address = 0x00035800,
+ .ce6_base_address = 0x00035c00,
+ .ce7_base_address = 0x00036000,
+ .soc_reset_control_si0_rst_mask = 0x00000000,
+ .soc_reset_control_ce_rst_mask = 0x00000001,
+ .soc_chip_id_address = 0x000000f0,
+ .scratch_3_address = 0x00000028,
+ .fw_indicator_address = 0x0003a028,
+ .pcie_local_base_address = 0x00080000,
+ .ce_wrap_intr_sum_host_msi_lsb = 0x00000008,
+ .ce_wrap_intr_sum_host_msi_mask = 0x0000ff00,
+ .pcie_intr_fw_mask = 0x00000400,
+ .pcie_intr_ce_mask_all = 0x0007f800,
+ .pcie_intr_clr_address = 0x00000014,
+ .cpu_pll_init_address = 0x00404020,
+ .cpu_speed_address = 0x00404024,
+ .core_clk_div_address = 0x00404028,
+};
+
+const struct ath10k_hw_regs qca99x0_regs = {
+ .rtc_soc_base_address = 0x00080000,
+ .rtc_wmac_base_address = 0x00000000,
+ .soc_core_base_address = 0x00082000,
+ .wlan_mac_base_address = 0x00030000,
+ .ce_wrapper_base_address = 0x0004d000,
+ .ce0_base_address = 0x0004a000,
+ .ce1_base_address = 0x0004a400,
+ .ce2_base_address = 0x0004a800,
+ .ce3_base_address = 0x0004ac00,
+ .ce4_base_address = 0x0004b000,
+ .ce5_base_address = 0x0004b400,
+ .ce6_base_address = 0x0004b800,
+ .ce7_base_address = 0x0004bc00,
+	/* Note: qca99x0 supports up to 12 Copy Engines. Other than the
+	 * addresses of CE0 and CE1 no other copy engine is directly
+	 * referenced in the code, so it is not necessary to assign addresses
+	 * for the newly supported CEs in this table.
+ * Copy Engine Address
+ * CE8 0x0004c000
+ * CE9 0x0004c400
+ * CE10 0x0004c800
+ * CE11 0x0004cc00
+ */
+ .soc_reset_control_si0_rst_mask = 0x00000001,
+ .soc_reset_control_ce_rst_mask = 0x00000100,
+ .soc_chip_id_address = 0x000000ec,
+ .scratch_3_address = 0x00040050,
+ .fw_indicator_address = 0x00040050,
+ .pcie_local_base_address = 0x00000000,
+ .ce_wrap_intr_sum_host_msi_lsb = 0x0000000c,
+ .ce_wrap_intr_sum_host_msi_mask = 0x00fff000,
+ .pcie_intr_fw_mask = 0x00100000,
+ .pcie_intr_ce_mask_all = 0x000fff00,
+ .pcie_intr_clr_address = 0x00000010,
+};
+
+const struct ath10k_hw_regs qca4019_regs = {
+ .rtc_soc_base_address = 0x00080000,
+ .soc_core_base_address = 0x00082000,
+ .wlan_mac_base_address = 0x00030000,
+ .ce_wrapper_base_address = 0x0004d000,
+ .ce0_base_address = 0x0004a000,
+ .ce1_base_address = 0x0004a400,
+ .ce2_base_address = 0x0004a800,
+ .ce3_base_address = 0x0004ac00,
+ .ce4_base_address = 0x0004b000,
+ .ce5_base_address = 0x0004b400,
+ .ce6_base_address = 0x0004b800,
+ .ce7_base_address = 0x0004bc00,
+	/* qca4019 supports up to 12 copy engines. Since the base addresses
+	 * of CE8 to CE11 are not directly referenced in the code, there is
+	 * no need to keep them as separate members in this table.
+ * Copy Engine Address
+ * CE8 0x0004c000
+ * CE9 0x0004c400
+ * CE10 0x0004c800
+ * CE11 0x0004cc00
+ */
+ .soc_reset_control_si0_rst_mask = 0x00000001,
+ .soc_reset_control_ce_rst_mask = 0x00000100,
+ .soc_chip_id_address = 0x000000ec,
+ .fw_indicator_address = 0x0004f00c,
+ .ce_wrap_intr_sum_host_msi_lsb = 0x0000000c,
+ .ce_wrap_intr_sum_host_msi_mask = 0x00fff000,
+ .pcie_intr_fw_mask = 0x00100000,
+ .pcie_intr_ce_mask_all = 0x000fff00,
+ .pcie_intr_clr_address = 0x00000010,
+};
+
+const struct ath10k_hw_values qca988x_values = {
+ .rtc_state_val_on = 3,
+ .ce_count = 8,
+ .msi_assign_ce_max = 7,
+ .num_target_ce_config_wlan = 7,
+ .ce_desc_meta_data_mask = 0xFFFC,
+ .ce_desc_meta_data_lsb = 2,
+};
+
+const struct ath10k_hw_values qca6174_values = {
+ .rtc_state_val_on = 3,
+ .ce_count = 8,
+ .msi_assign_ce_max = 7,
+ .num_target_ce_config_wlan = 7,
+ .ce_desc_meta_data_mask = 0xFFFC,
+ .ce_desc_meta_data_lsb = 2,
+};
+
+const struct ath10k_hw_values qca99x0_values = {
+ .rtc_state_val_on = 7,
+ .ce_count = 12,
+ .msi_assign_ce_max = 12,
+ .num_target_ce_config_wlan = 10,
+ .ce_desc_meta_data_mask = 0xFFF0,
+ .ce_desc_meta_data_lsb = 4,
+};
+
+const struct ath10k_hw_values qca9888_values = {
+ .rtc_state_val_on = 3,
+ .ce_count = 12,
+ .msi_assign_ce_max = 12,
+ .num_target_ce_config_wlan = 10,
+ .ce_desc_meta_data_mask = 0xFFF0,
+ .ce_desc_meta_data_lsb = 4,
+};
+
+const struct ath10k_hw_values qca4019_values = {
+ .ce_count = 12,
+ .num_target_ce_config_wlan = 10,
+ .ce_desc_meta_data_mask = 0xFFF0,
+ .ce_desc_meta_data_lsb = 4,
+};
+
+const struct ath10k_hw_regs wcn3990_regs = {
+ .rtc_soc_base_address = 0x00000000,
+ .rtc_wmac_base_address = 0x00000000,
+ .soc_core_base_address = 0x00000000,
+ .ce_wrapper_base_address = 0x0024C000,
+ .ce0_base_address = 0x00240000,
+ .ce1_base_address = 0x00241000,
+ .ce2_base_address = 0x00242000,
+ .ce3_base_address = 0x00243000,
+ .ce4_base_address = 0x00244000,
+ .ce5_base_address = 0x00245000,
+ .ce6_base_address = 0x00246000,
+ .ce7_base_address = 0x00247000,
+ .ce8_base_address = 0x00248000,
+ .ce9_base_address = 0x00249000,
+ .ce10_base_address = 0x0024A000,
+ .ce11_base_address = 0x0024B000,
+ .soc_chip_id_address = 0x000000f0,
+ .soc_reset_control_si0_rst_mask = 0x00000001,
+ .soc_reset_control_ce_rst_mask = 0x00000100,
+ .ce_wrap_intr_sum_host_msi_lsb = 0x0000000c,
+ .ce_wrap_intr_sum_host_msi_mask = 0x00fff000,
+ .pcie_intr_fw_mask = 0x00100000,
+};
+
+static struct ath10k_hw_ce_regs_addr_map wcn3990_src_ring = {
+ .msb = 0x00000010,
+ .lsb = 0x00000010,
+ .mask = GENMASK(17, 17),
+};
+
+static struct ath10k_hw_ce_regs_addr_map wcn3990_dst_ring = {
+ .msb = 0x00000012,
+ .lsb = 0x00000012,
+ .mask = GENMASK(18, 18),
+};
+
+static struct ath10k_hw_ce_regs_addr_map wcn3990_dmax = {
+ .msb = 0x00000000,
+ .lsb = 0x00000000,
+ .mask = GENMASK(15, 0),
+};
+
+static struct ath10k_hw_ce_ctrl1 wcn3990_ctrl1 = {
+ .addr = 0x00000018,
+ .src_ring = &wcn3990_src_ring,
+ .dst_ring = &wcn3990_dst_ring,
+ .dmax = &wcn3990_dmax,
+};
+
+static struct ath10k_hw_ce_regs_addr_map wcn3990_host_ie_cc = {
+ .mask = GENMASK(0, 0),
+};
+
+static struct ath10k_hw_ce_host_ie wcn3990_host_ie = {
+ .copy_complete = &wcn3990_host_ie_cc,
+};
+
+static struct ath10k_hw_ce_host_wm_regs wcn3990_wm_reg = {
+ .dstr_lmask = 0x00000010,
+ .dstr_hmask = 0x00000008,
+ .srcr_lmask = 0x00000004,
+ .srcr_hmask = 0x00000002,
+ .cc_mask = 0x00000001,
+ .wm_mask = 0x0000001E,
+ .addr = 0x00000030,
+};
+
+static struct ath10k_hw_ce_misc_regs wcn3990_misc_reg = {
+ .axi_err = 0x00000100,
+ .dstr_add_err = 0x00000200,
+ .srcr_len_err = 0x00000100,
+ .dstr_mlen_vio = 0x00000080,
+ .dstr_overflow = 0x00000040,
+ .srcr_overflow = 0x00000020,
+ .err_mask = 0x000003E0,
+ .addr = 0x00000038,
+};
+
+static struct ath10k_hw_ce_regs_addr_map wcn3990_src_wm_low = {
+ .msb = 0x00000000,
+ .lsb = 0x00000010,
+ .mask = GENMASK(31, 16),
+};
+
+static struct ath10k_hw_ce_regs_addr_map wcn3990_src_wm_high = {
+ .msb = 0x0000000f,
+ .lsb = 0x00000000,
+ .mask = GENMASK(15, 0),
+};
+
+static struct ath10k_hw_ce_dst_src_wm_regs wcn3990_wm_src_ring = {
+ .addr = 0x0000004c,
+ .low_rst = 0x00000000,
+ .high_rst = 0x00000000,
+ .wm_low = &wcn3990_src_wm_low,
+ .wm_high = &wcn3990_src_wm_high,
+};
+
+static struct ath10k_hw_ce_regs_addr_map wcn3990_dst_wm_low = {
+ .lsb = 0x00000010,
+ .mask = GENMASK(31, 16),
+};
+
+static struct ath10k_hw_ce_regs_addr_map wcn3990_dst_wm_high = {
+ .msb = 0x0000000f,
+ .lsb = 0x00000000,
+ .mask = GENMASK(15, 0),
+};
+
+static struct ath10k_hw_ce_dst_src_wm_regs wcn3990_wm_dst_ring = {
+ .addr = 0x00000050,
+ .low_rst = 0x00000000,
+ .high_rst = 0x00000000,
+ .wm_low = &wcn3990_dst_wm_low,
+ .wm_high = &wcn3990_dst_wm_high,
+};
+
+static struct ath10k_hw_ce_ctrl1_upd wcn3990_ctrl1_upd = {
+ .shift = 19,
+ .mask = 0x00080000,
+ .enable = 0x00000000,
+};
+
+const struct ath10k_hw_ce_regs wcn3990_ce_regs = {
+ .sr_base_addr = 0x00000000,
+ .sr_size_addr = 0x00000008,
+ .dr_base_addr = 0x0000000c,
+ .dr_size_addr = 0x00000014,
+ .misc_ie_addr = 0x00000034,
+ .sr_wr_index_addr = 0x0000003c,
+ .dst_wr_index_addr = 0x00000040,
+ .current_srri_addr = 0x00000044,
+ .current_drri_addr = 0x00000048,
+ .ce_rri_low = 0x0024C004,
+ .ce_rri_high = 0x0024C008,
+ .host_ie_addr = 0x0000002c,
+ .ctrl1_regs = &wcn3990_ctrl1,
+ .host_ie = &wcn3990_host_ie,
+ .wm_regs = &wcn3990_wm_reg,
+ .misc_regs = &wcn3990_misc_reg,
+ .wm_srcr = &wcn3990_wm_src_ring,
+ .wm_dstr = &wcn3990_wm_dst_ring,
+ .upd = &wcn3990_ctrl1_upd,
+};
+
+const struct ath10k_hw_values wcn3990_values = {
+ .rtc_state_val_on = 5,
+ .ce_count = 12,
+ .msi_assign_ce_max = 12,
+ .num_target_ce_config_wlan = 12,
+ .ce_desc_meta_data_mask = 0xFFF0,
+ .ce_desc_meta_data_lsb = 4,
+};
+
+static struct ath10k_hw_ce_regs_addr_map qcax_src_ring = {
+ .msb = 0x00000010,
+ .lsb = 0x00000010,
+ .mask = GENMASK(16, 16),
+};
+
+static struct ath10k_hw_ce_regs_addr_map qcax_dst_ring = {
+ .msb = 0x00000011,
+ .lsb = 0x00000011,
+ .mask = GENMASK(17, 17),
+};
+
+static struct ath10k_hw_ce_regs_addr_map qcax_dmax = {
+ .msb = 0x0000000f,
+ .lsb = 0x00000000,
+ .mask = GENMASK(15, 0),
+};
+
+static struct ath10k_hw_ce_ctrl1 qcax_ctrl1 = {
+ .addr = 0x00000010,
+ .hw_mask = 0x0007ffff,
+ .sw_mask = 0x0007ffff,
+ .hw_wr_mask = 0x00000000,
+ .sw_wr_mask = 0x0007ffff,
+ .reset_mask = 0xffffffff,
+ .reset = 0x00000080,
+ .src_ring = &qcax_src_ring,
+ .dst_ring = &qcax_dst_ring,
+ .dmax = &qcax_dmax,
+};
+
+static struct ath10k_hw_ce_regs_addr_map qcax_cmd_halt_status = {
+ .msb = 0x00000003,
+ .lsb = 0x00000003,
+ .mask = GENMASK(3, 3),
+};
+
+static struct ath10k_hw_ce_cmd_halt qcax_cmd_halt = {
+ .msb = 0x00000000,
+ .mask = GENMASK(0, 0),
+ .status_reset = 0x00000000,
+ .status = &qcax_cmd_halt_status,
+};
+
+static struct ath10k_hw_ce_regs_addr_map qcax_host_ie_cc = {
+ .msb = 0x00000000,
+ .lsb = 0x00000000,
+ .mask = GENMASK(0, 0),
+};
+
+static struct ath10k_hw_ce_host_ie qcax_host_ie = {
+ .copy_complete_reset = 0x00000000,
+ .copy_complete = &qcax_host_ie_cc,
+};
+
+static struct ath10k_hw_ce_host_wm_regs qcax_wm_reg = {
+ .dstr_lmask = 0x00000010,
+ .dstr_hmask = 0x00000008,
+ .srcr_lmask = 0x00000004,
+ .srcr_hmask = 0x00000002,
+ .cc_mask = 0x00000001,
+ .wm_mask = 0x0000001E,
+ .addr = 0x00000030,
+};
+
+static struct ath10k_hw_ce_misc_regs qcax_misc_reg = {
+ .axi_err = 0x00000400,
+ .dstr_add_err = 0x00000200,
+ .srcr_len_err = 0x00000100,
+ .dstr_mlen_vio = 0x00000080,
+ .dstr_overflow = 0x00000040,
+ .srcr_overflow = 0x00000020,
+ .err_mask = 0x000007E0,
+ .addr = 0x00000038,
+};
+
+static struct ath10k_hw_ce_regs_addr_map qcax_src_wm_low = {
+ .msb = 0x0000001f,
+ .lsb = 0x00000010,
+ .mask = GENMASK(31, 16),
+};
+
+static struct ath10k_hw_ce_regs_addr_map qcax_src_wm_high = {
+ .msb = 0x0000000f,
+ .lsb = 0x00000000,
+ .mask = GENMASK(15, 0),
+};
+
+static struct ath10k_hw_ce_dst_src_wm_regs qcax_wm_src_ring = {
+ .addr = 0x0000004c,
+ .low_rst = 0x00000000,
+ .high_rst = 0x00000000,
+ .wm_low = &qcax_src_wm_low,
+ .wm_high = &qcax_src_wm_high,
+};
+
+static struct ath10k_hw_ce_regs_addr_map qcax_dst_wm_low = {
+ .lsb = 0x00000010,
+ .mask = GENMASK(31, 16),
+};
+
+static struct ath10k_hw_ce_regs_addr_map qcax_dst_wm_high = {
+ .msb = 0x0000000f,
+ .lsb = 0x00000000,
+ .mask = GENMASK(15, 0),
+};
+
+static struct ath10k_hw_ce_dst_src_wm_regs qcax_wm_dst_ring = {
+ .addr = 0x00000050,
+ .low_rst = 0x00000000,
+ .high_rst = 0x00000000,
+ .wm_low = &qcax_dst_wm_low,
+ .wm_high = &qcax_dst_wm_high,
+};
+
+const struct ath10k_hw_ce_regs qcax_ce_regs = {
+ .sr_base_addr = 0x00000000,
+ .sr_size_addr = 0x00000004,
+ .dr_base_addr = 0x00000008,
+ .dr_size_addr = 0x0000000c,
+ .ce_cmd_addr = 0x00000018,
+ .misc_ie_addr = 0x00000034,
+ .sr_wr_index_addr = 0x0000003c,
+ .dst_wr_index_addr = 0x00000040,
+ .current_srri_addr = 0x00000044,
+ .current_drri_addr = 0x00000048,
+ .host_ie_addr = 0x0000002c,
+ .ctrl1_regs = &qcax_ctrl1,
+ .cmd_halt = &qcax_cmd_halt,
+ .host_ie = &qcax_host_ie,
+ .wm_regs = &qcax_wm_reg,
+ .misc_regs = &qcax_misc_reg,
+ .wm_srcr = &qcax_wm_src_ring,
+ .wm_dstr = &qcax_wm_dst_ring,
+};
+
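+/* Per-refclk PLL parameters for qca6174. Note that settle_time scales
+ * linearly with the reference clock, settle_time = refclk / 20000,
+ * e.g. 48 MHz -> 2400 and 19.2 MHz -> 960.
+ */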
+const struct ath10k_hw_clk_params qca6174_clk[ATH10K_HW_REFCLK_COUNT] = {
+ {
+ .refclk = 48000000,
+ .div = 0xe,
+ .rnfrac = 0x2aaa8,
+ .settle_time = 2400,
+ .refdiv = 0,
+ .outdiv = 1,
+ },
+ {
+ .refclk = 19200000,
+ .div = 0x24,
+ .rnfrac = 0x2aaa8,
+ .settle_time = 960,
+ .refdiv = 0,
+ .outdiv = 1,
+ },
+ {
+ .refclk = 24000000,
+ .div = 0x1d,
+ .rnfrac = 0x15551,
+ .settle_time = 1200,
+ .refdiv = 0,
+ .outdiv = 1,
+ },
+ {
+ .refclk = 26000000,
+ .div = 0x1b,
+ .rnfrac = 0x4ec4,
+ .settle_time = 1300,
+ .refdiv = 0,
+ .outdiv = 1,
+ },
+ {
+ .refclk = 37400000,
+ .div = 0x12,
+ .rnfrac = 0x34b49,
+ .settle_time = 1870,
+ .refdiv = 0,
+ .outdiv = 1,
+ },
+ {
+ .refclk = 38400000,
+ .div = 0x12,
+ .rnfrac = 0x15551,
+ .settle_time = 1920,
+ .refdiv = 0,
+ .outdiv = 1,
+ },
+ {
+ .refclk = 40000000,
+ .div = 0x12,
+ .rnfrac = 0x26665,
+ .settle_time = 2000,
+ .refdiv = 0,
+ .outdiv = 1,
+ },
+ {
+ .refclk = 52000000,
+ .div = 0x1b,
+ .rnfrac = 0x4ec4,
+ .settle_time = 2600,
+ .refdiv = 0,
+ .outdiv = 1,
+ },
+};
+
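+/* On hardware with ATH10K_HW_CC_WRAP_SHIFTED_* behaviour the cycle
+ * counters are effectively 31 bits wide, so a sample reading lower than
+ * the previous one indicates a wraparound; adding 0x7fffffff
+ * approximately restores the elapsed count, e.g. cc_prev = 0x7ffffff0
+ * and cc = 0x10 yield 0x10 - 0x7ffffff0 + 0x7fffffff = 0x1f ticks.
+ */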
+void ath10k_hw_fill_survey_time(struct ath10k *ar, struct survey_info *survey,
+ u32 cc, u32 rcc, u32 cc_prev, u32 rcc_prev)
+{
+ u32 cc_fix = 0;
+ u32 rcc_fix = 0;
+ enum ath10k_hw_cc_wraparound_type wraparound_type;
+
+ survey->filled |= SURVEY_INFO_TIME |
+ SURVEY_INFO_TIME_BUSY;
+
+ wraparound_type = ar->hw_params.cc_wraparound_type;
+
+ if (cc < cc_prev || rcc < rcc_prev) {
+ switch (wraparound_type) {
+ case ATH10K_HW_CC_WRAP_SHIFTED_ALL:
+ if (cc < cc_prev) {
+ cc_fix = 0x7fffffff;
+ survey->filled &= ~SURVEY_INFO_TIME_BUSY;
+ }
+ break;
+ case ATH10K_HW_CC_WRAP_SHIFTED_EACH:
+ if (cc < cc_prev)
+ cc_fix = 0x7fffffff;
+
+ if (rcc < rcc_prev)
+ rcc_fix = 0x7fffffff;
+ break;
+ case ATH10K_HW_CC_WRAP_DISABLED:
+ break;
+ }
+ }
+
+ cc -= cc_prev - cc_fix;
+ rcc -= rcc_prev - rcc_fix;
+
+ survey->time = CCNT_TO_MSEC(ar, cc);
+ survey->time_busy = CCNT_TO_MSEC(ar, rcc);
+}
+
+/* The firmware does not support setting the coverage class. Instead this
+ * function monitors and modifies the corresponding MAC registers.
+ */
+static void ath10k_hw_qca988x_set_coverage_class(struct ath10k *ar,
+ s16 value)
+{
+ u32 slottime_reg;
+ u32 slottime;
+ u32 timeout_reg;
+ u32 ack_timeout;
+ u32 cts_timeout;
+ u32 phyclk_reg;
+ u32 phyclk;
+ u64 fw_dbglog_mask;
+ u32 fw_dbglog_level;
+
+ mutex_lock(&ar->conf_mutex);
+
+ /* Only modify registers if the core is started. */
+ if ((ar->state != ATH10K_STATE_ON) &&
+ (ar->state != ATH10K_STATE_RESTARTED)) {
+ spin_lock_bh(&ar->data_lock);
+ /* Store config value for when radio boots up */
+ ar->fw_coverage.coverage_class = value;
+ spin_unlock_bh(&ar->data_lock);
+ goto unlock;
+ }
+
+ /* Retrieve the current values of the two registers that need to be
+ * adjusted.
+ */
+ slottime_reg = ath10k_hif_read32(ar, WLAN_MAC_BASE_ADDRESS +
+ WAVE1_PCU_GBL_IFS_SLOT);
+ timeout_reg = ath10k_hif_read32(ar, WLAN_MAC_BASE_ADDRESS +
+ WAVE1_PCU_ACK_CTS_TIMEOUT);
+ phyclk_reg = ath10k_hif_read32(ar, WLAN_MAC_BASE_ADDRESS +
+ WAVE1_PHYCLK);
+ phyclk = MS(phyclk_reg, WAVE1_PHYCLK_USEC) + 1;
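+	/* phyclk now holds the number of MAC clock ticks per microsecond and
+	 * is used below to convert between register ticks and microseconds
+	 * (3 us of additional propagation delay per coverage class unit).
+	 */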
+
+ if (value < 0)
+ value = ar->fw_coverage.coverage_class;
+
+ /* Break out if the coverage class and registers have the expected
+ * value.
+ */
+ if (value == ar->fw_coverage.coverage_class &&
+ slottime_reg == ar->fw_coverage.reg_slottime_conf &&
+ timeout_reg == ar->fw_coverage.reg_ack_cts_timeout_conf &&
+ phyclk_reg == ar->fw_coverage.reg_phyclk)
+ goto unlock;
+
+ /* Store new initial register values from the firmware. */
+ if (slottime_reg != ar->fw_coverage.reg_slottime_conf)
+ ar->fw_coverage.reg_slottime_orig = slottime_reg;
+ if (timeout_reg != ar->fw_coverage.reg_ack_cts_timeout_conf)
+ ar->fw_coverage.reg_ack_cts_timeout_orig = timeout_reg;
+ ar->fw_coverage.reg_phyclk = phyclk_reg;
+
+ /* Calculate new value based on the (original) firmware calculation. */
+ slottime_reg = ar->fw_coverage.reg_slottime_orig;
+ timeout_reg = ar->fw_coverage.reg_ack_cts_timeout_orig;
+
+ /* Do some sanity checks on the slottime register. */
+ if (slottime_reg % phyclk) {
+ ath10k_warn(ar,
+ "failed to set coverage class: expected integer microsecond value in register\n");
+
+ goto store_regs;
+ }
+
+ slottime = MS(slottime_reg, WAVE1_PCU_GBL_IFS_SLOT);
+ slottime = slottime / phyclk;
+ if (slottime != 9 && slottime != 20) {
+ ath10k_warn(ar,
+ "failed to set coverage class: expected slot time of 9 or 20us in HW register. It is %uus.\n",
+ slottime);
+
+ goto store_regs;
+ }
+
+ /* Recalculate the register values by adding the additional propagation
+ * delay (3us per coverage class).
+ */
+
+ slottime = MS(slottime_reg, WAVE1_PCU_GBL_IFS_SLOT);
+ slottime += value * 3 * phyclk;
+ slottime = min_t(u32, slottime, WAVE1_PCU_GBL_IFS_SLOT_MAX);
+ slottime = SM(slottime, WAVE1_PCU_GBL_IFS_SLOT);
+ slottime_reg = (slottime_reg & ~WAVE1_PCU_GBL_IFS_SLOT_MASK) | slottime;
+
+ /* Update ack timeout (lower halfword). */
+ ack_timeout = MS(timeout_reg, WAVE1_PCU_ACK_CTS_TIMEOUT_ACK);
+ ack_timeout += 3 * value * phyclk;
+ ack_timeout = min_t(u32, ack_timeout, WAVE1_PCU_ACK_CTS_TIMEOUT_MAX);
+ ack_timeout = SM(ack_timeout, WAVE1_PCU_ACK_CTS_TIMEOUT_ACK);
+
+ /* Update cts timeout (upper halfword). */
+ cts_timeout = MS(timeout_reg, WAVE1_PCU_ACK_CTS_TIMEOUT_CTS);
+ cts_timeout += 3 * value * phyclk;
+ cts_timeout = min_t(u32, cts_timeout, WAVE1_PCU_ACK_CTS_TIMEOUT_MAX);
+ cts_timeout = SM(cts_timeout, WAVE1_PCU_ACK_CTS_TIMEOUT_CTS);
+
+ timeout_reg = ack_timeout | cts_timeout;
+
+ ath10k_hif_write32(ar,
+ WLAN_MAC_BASE_ADDRESS + WAVE1_PCU_GBL_IFS_SLOT,
+ slottime_reg);
+ ath10k_hif_write32(ar,
+ WLAN_MAC_BASE_ADDRESS + WAVE1_PCU_ACK_CTS_TIMEOUT,
+ timeout_reg);
+
+ /* Ensure we have a debug level of WARN set for the case that the
+ * coverage class is larger than 0. This is important as we need to
+	 * set the registers again if the firmware does an internal reset;
+	 * this way we will be notified of the event.
+ */
+ fw_dbglog_mask = ath10k_debug_get_fw_dbglog_mask(ar);
+ fw_dbglog_level = ath10k_debug_get_fw_dbglog_level(ar);
+
+ if (value > 0) {
+ if (fw_dbglog_level > ATH10K_DBGLOG_LEVEL_WARN)
+ fw_dbglog_level = ATH10K_DBGLOG_LEVEL_WARN;
+ fw_dbglog_mask = ~0;
+ }
+
+ ath10k_wmi_dbglog_cfg(ar, fw_dbglog_mask, fw_dbglog_level);
+
+store_regs:
+ /* After an error we will not retry setting the coverage class. */
+ spin_lock_bh(&ar->data_lock);
+ ar->fw_coverage.coverage_class = value;
+ spin_unlock_bh(&ar->data_lock);
+
+ ar->fw_coverage.reg_slottime_conf = slottime_reg;
+ ar->fw_coverage.reg_ack_cts_timeout_conf = timeout_reg;
+
+unlock:
+ mutex_unlock(&ar->conf_mutex);
+}
+
+/**
+ * ath10k_hw_qca6174_enable_pll_clock() - enable the qca6174 hw pll clock
+ * @ar: the ath10k device
+ *
+ * This function is very hardware specific: the clock initialization steps
+ * are sensitive and getting them wrong can lead to unpredictable crashes,
+ * so they must be performed in sequence.
+ *
+ * *** Be careful if you plan to refactor this code. ***
+ *
+ * Return: 0 if the PLL was enabled successfully, otherwise -EINVAL
+ */
+static int ath10k_hw_qca6174_enable_pll_clock(struct ath10k *ar)
+{
+ int ret, wait_limit;
+ u32 clk_div_addr, pll_init_addr, speed_addr;
+ u32 addr, reg_val, mem_val;
+ struct ath10k_hw_params *hw;
+ const struct ath10k_hw_clk_params *hw_clk;
+
+ hw = &ar->hw_params;
+
+ if (ar->regs->core_clk_div_address == 0 ||
+ ar->regs->cpu_pll_init_address == 0 ||
+ ar->regs->cpu_speed_address == 0)
+ return -EINVAL;
+
+ clk_div_addr = ar->regs->core_clk_div_address;
+ pll_init_addr = ar->regs->cpu_pll_init_address;
+ speed_addr = ar->regs->cpu_speed_address;
+
+ /* Read efuse register to find out the right hw clock configuration */
+ addr = (RTC_SOC_BASE_ADDRESS | EFUSE_OFFSET);
+ ret = ath10k_bmi_read_soc_reg(ar, addr, &reg_val);
+ if (ret)
+ return -EINVAL;
+
+	/* bail out if the hw refclk index is out of bounds; valid indices
+	 * are 0 .. ATH10K_HW_REFCLK_COUNT - 1
+	 */
+	if (MS(reg_val, EFUSE_XTAL_SEL) >= ATH10K_HW_REFCLK_COUNT)
+ return -EINVAL;
+
+ hw_clk = &hw->hw_clk[MS(reg_val, EFUSE_XTAL_SEL)];
+
+ /* Set the rnfrac and outdiv params to bb_pll register */
+ addr = (RTC_SOC_BASE_ADDRESS | BB_PLL_CONFIG_OFFSET);
+ ret = ath10k_bmi_read_soc_reg(ar, addr, &reg_val);
+ if (ret)
+ return -EINVAL;
+
+ reg_val &= ~(BB_PLL_CONFIG_FRAC_MASK | BB_PLL_CONFIG_OUTDIV_MASK);
+ reg_val |= (SM(hw_clk->rnfrac, BB_PLL_CONFIG_FRAC) |
+ SM(hw_clk->outdiv, BB_PLL_CONFIG_OUTDIV));
+ ret = ath10k_bmi_write_soc_reg(ar, addr, reg_val);
+ if (ret)
+ return -EINVAL;
+
+ /* Set the correct settle time value to pll_settle register */
+ addr = (RTC_WMAC_BASE_ADDRESS | WLAN_PLL_SETTLE_OFFSET);
+ ret = ath10k_bmi_read_soc_reg(ar, addr, &reg_val);
+ if (ret)
+ return -EINVAL;
+
+ reg_val &= ~WLAN_PLL_SETTLE_TIME_MASK;
+ reg_val |= SM(hw_clk->settle_time, WLAN_PLL_SETTLE_TIME);
+ ret = ath10k_bmi_write_soc_reg(ar, addr, reg_val);
+ if (ret)
+ return -EINVAL;
+
+ /* Set the clock_ctrl div to core_clk_ctrl register */
+ addr = (RTC_SOC_BASE_ADDRESS | SOC_CORE_CLK_CTRL_OFFSET);
+ ret = ath10k_bmi_read_soc_reg(ar, addr, &reg_val);
+ if (ret)
+ return -EINVAL;
+
+ reg_val &= ~SOC_CORE_CLK_CTRL_DIV_MASK;
+ reg_val |= SM(1, SOC_CORE_CLK_CTRL_DIV);
+ ret = ath10k_bmi_write_soc_reg(ar, addr, reg_val);
+ if (ret)
+ return -EINVAL;
+
+ /* Set the clock_div register */
+ mem_val = 1;
+ ret = ath10k_bmi_write_memory(ar, clk_div_addr, &mem_val,
+ sizeof(mem_val));
+ if (ret)
+ return -EINVAL;
+
+ /* Configure the pll_control register */
+ addr = (RTC_WMAC_BASE_ADDRESS | WLAN_PLL_CONTROL_OFFSET);
+ ret = ath10k_bmi_read_soc_reg(ar, addr, &reg_val);
+ if (ret)
+ return -EINVAL;
+
+ reg_val |= (SM(hw_clk->refdiv, WLAN_PLL_CONTROL_REFDIV) |
+ SM(hw_clk->div, WLAN_PLL_CONTROL_DIV) |
+ SM(1, WLAN_PLL_CONTROL_NOPWD));
+ ret = ath10k_bmi_write_soc_reg(ar, addr, reg_val);
+ if (ret)
+ return -EINVAL;
+
+	/* busy wait (max 1s) until the rtc_sync status register indicates
+	 * the PLL has stopped changing
+	 */
+ wait_limit = 100000;
+ addr = (RTC_WMAC_BASE_ADDRESS | RTC_SYNC_STATUS_OFFSET);
+ do {
+ ret = ath10k_bmi_read_soc_reg(ar, addr, &reg_val);
+ if (ret)
+ return -EINVAL;
+
+ if (!MS(reg_val, RTC_SYNC_STATUS_PLL_CHANGING))
+ break;
+
+ wait_limit--;
+ udelay(10);
+
+ } while (wait_limit > 0);
+
+ if (MS(reg_val, RTC_SYNC_STATUS_PLL_CHANGING))
+ return -EINVAL;
+
+ /* Unset the pll_bypass in pll_control register */
+ addr = (RTC_WMAC_BASE_ADDRESS | WLAN_PLL_CONTROL_OFFSET);
+ ret = ath10k_bmi_read_soc_reg(ar, addr, &reg_val);
+ if (ret)
+ return -EINVAL;
+
+ reg_val &= ~WLAN_PLL_CONTROL_BYPASS_MASK;
+ reg_val |= SM(0, WLAN_PLL_CONTROL_BYPASS);
+ ret = ath10k_bmi_write_soc_reg(ar, addr, reg_val);
+ if (ret)
+ return -EINVAL;
+
+	/* busy wait (max 1s) until the rtc_sync status register indicates
+	 * the PLL has stopped changing
+	 */
+ wait_limit = 100000;
+ addr = (RTC_WMAC_BASE_ADDRESS | RTC_SYNC_STATUS_OFFSET);
+ do {
+ ret = ath10k_bmi_read_soc_reg(ar, addr, &reg_val);
+ if (ret)
+ return -EINVAL;
+
+ if (!MS(reg_val, RTC_SYNC_STATUS_PLL_CHANGING))
+ break;
+
+ wait_limit--;
+ udelay(10);
+
+ } while (wait_limit > 0);
+
+ if (MS(reg_val, RTC_SYNC_STATUS_PLL_CHANGING))
+ return -EINVAL;
+
+ /* Enable the hardware cpu clock register */
+ addr = (RTC_SOC_BASE_ADDRESS | SOC_CPU_CLOCK_OFFSET);
+ ret = ath10k_bmi_read_soc_reg(ar, addr, &reg_val);
+ if (ret)
+ return -EINVAL;
+
+ reg_val &= ~SOC_CPU_CLOCK_STANDARD_MASK;
+ reg_val |= SM(1, SOC_CPU_CLOCK_STANDARD);
+ ret = ath10k_bmi_write_soc_reg(ar, addr, reg_val);
+ if (ret)
+ return -EINVAL;
+
+ /* unset the nopwd from pll_control register */
+ addr = (RTC_WMAC_BASE_ADDRESS | WLAN_PLL_CONTROL_OFFSET);
+ ret = ath10k_bmi_read_soc_reg(ar, addr, &reg_val);
+ if (ret)
+ return -EINVAL;
+
+ reg_val &= ~WLAN_PLL_CONTROL_NOPWD_MASK;
+ ret = ath10k_bmi_write_soc_reg(ar, addr, reg_val);
+ if (ret)
+ return -EINVAL;
+
+ /* enable the pll_init register */
+ mem_val = 1;
+ ret = ath10k_bmi_write_memory(ar, pll_init_addr, &mem_val,
+ sizeof(mem_val));
+ if (ret)
+ return -EINVAL;
+
+ /* set the target clock frequency to speed register */
+ ret = ath10k_bmi_write_memory(ar, speed_addr, &hw->target_cpu_freq,
+ sizeof(hw->target_cpu_freq));
+ if (ret)
+ return -EINVAL;
+
+ return 0;
+}
+
+const struct ath10k_hw_ops qca988x_ops = {
+ .set_coverage_class = ath10k_hw_qca988x_set_coverage_class,
+};
+
+static int ath10k_qca99x0_rx_desc_get_l3_pad_bytes(struct htt_rx_desc *rxd)
+{
+ return MS(__le32_to_cpu(rxd->msdu_end.qca99x0.info1),
+ RX_MSDU_END_INFO1_L3_HDR_PAD);
+}
+
+const struct ath10k_hw_ops qca99x0_ops = {
+ .rx_desc_get_l3_pad_bytes = ath10k_qca99x0_rx_desc_get_l3_pad_bytes,
+};
+
+const struct ath10k_hw_ops qca6174_ops = {
+ .set_coverage_class = ath10k_hw_qca988x_set_coverage_class,
+ .enable_pll_clk = ath10k_hw_qca6174_enable_pll_clock,
+};
+
+const struct ath10k_hw_ops wcn3990_ops = {};
diff --git a/drivers/net/wireless/ath/ath10k/hw.h b/drivers/net/wireless/ath/ath10k/hw.h
new file mode 100644
index 000000000..3ff65a0a8
--- /dev/null
+++ b/drivers/net/wireless/ath/ath10k/hw.h
@@ -0,0 +1,1132 @@
+/*
+ * Copyright (c) 2005-2011 Atheros Communications Inc.
+ * Copyright (c) 2011-2017 Qualcomm Atheros, Inc.
+ * Copyright (c) 2018 The Linux Foundation. All rights reserved.
+ *
+ * Permission to use, copy, modify, and/or distribute this software for any
+ * purpose with or without fee is hereby granted, provided that the above
+ * copyright notice and this permission notice appear in all copies.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
+ * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
+ * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+ * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+ * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
+ * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+ */
+
+#ifndef _HW_H_
+#define _HW_H_
+
+#include "targaddrs.h"
+
+#define ATH10K_FW_DIR "ath10k"
+
+#define QCA988X_2_0_DEVICE_ID_UBNT (0x11ac)
+#define QCA988X_2_0_DEVICE_ID (0x003c)
+#define QCA6164_2_1_DEVICE_ID (0x0041)
+#define QCA6174_2_1_DEVICE_ID (0x003e)
+#define QCA99X0_2_0_DEVICE_ID (0x0040)
+#define QCA9888_2_0_DEVICE_ID (0x0056)
+#define QCA9984_1_0_DEVICE_ID (0x0046)
+#define QCA9377_1_0_DEVICE_ID (0x0042)
+#define QCA9887_1_0_DEVICE_ID (0x0050)
+
+/* QCA988X 1.0 definitions (unsupported) */
+#define QCA988X_HW_1_0_CHIP_ID_REV 0x0
+
+/* QCA988X 2.0 definitions */
+#define QCA988X_HW_2_0_VERSION 0x4100016c
+#define QCA988X_HW_2_0_CHIP_ID_REV 0x2
+#define QCA988X_HW_2_0_FW_DIR ATH10K_FW_DIR "/QCA988X/hw2.0"
+#define QCA988X_HW_2_0_BOARD_DATA_FILE "board.bin"
+#define QCA988X_HW_2_0_PATCH_LOAD_ADDR 0x1234
+
+/* QCA9887 1.0 definitions */
+#define QCA9887_HW_1_0_VERSION 0x4100016d
+#define QCA9887_HW_1_0_CHIP_ID_REV 0
+#define QCA9887_HW_1_0_FW_DIR ATH10K_FW_DIR "/QCA9887/hw1.0"
+#define QCA9887_HW_1_0_BOARD_DATA_FILE "board.bin"
+#define QCA9887_HW_1_0_PATCH_LOAD_ADDR 0x1234
+
+/* QCA6174 target BMI version signatures */
+#define QCA6174_HW_1_0_VERSION 0x05000000
+#define QCA6174_HW_1_1_VERSION 0x05000001
+#define QCA6174_HW_1_3_VERSION 0x05000003
+#define QCA6174_HW_2_1_VERSION 0x05010000
+#define QCA6174_HW_3_0_VERSION 0x05020000
+#define QCA6174_HW_3_2_VERSION 0x05030000
+
+/* QCA9377 target BMI version signatures */
+#define QCA9377_HW_1_0_DEV_VERSION 0x05020000
+#define QCA9377_HW_1_1_DEV_VERSION 0x05020001
+
+enum qca6174_pci_rev {
+ QCA6174_PCI_REV_1_1 = 0x11,
+ QCA6174_PCI_REV_1_3 = 0x13,
+ QCA6174_PCI_REV_2_0 = 0x20,
+ QCA6174_PCI_REV_3_0 = 0x30,
+};
+
+enum qca6174_chip_id_rev {
+ QCA6174_HW_1_0_CHIP_ID_REV = 0,
+ QCA6174_HW_1_1_CHIP_ID_REV = 1,
+ QCA6174_HW_1_3_CHIP_ID_REV = 2,
+ QCA6174_HW_2_1_CHIP_ID_REV = 4,
+ QCA6174_HW_2_2_CHIP_ID_REV = 5,
+ QCA6174_HW_3_0_CHIP_ID_REV = 8,
+ QCA6174_HW_3_1_CHIP_ID_REV = 9,
+ QCA6174_HW_3_2_CHIP_ID_REV = 10,
+};
+
+enum qca9377_chip_id_rev {
+ QCA9377_HW_1_0_CHIP_ID_REV = 0x0,
+ QCA9377_HW_1_1_CHIP_ID_REV = 0x1,
+};
+
+#define QCA6174_HW_2_1_FW_DIR ATH10K_FW_DIR "/QCA6174/hw2.1"
+#define QCA6174_HW_2_1_BOARD_DATA_FILE "board.bin"
+#define QCA6174_HW_2_1_PATCH_LOAD_ADDR 0x1234
+
+#define QCA6174_HW_3_0_FW_DIR ATH10K_FW_DIR "/QCA6174/hw3.0"
+#define QCA6174_HW_3_0_BOARD_DATA_FILE "board.bin"
+#define QCA6174_HW_3_0_PATCH_LOAD_ADDR 0x1234
+
+/* QCA99X0 1.0 definitions (unsupported) */
+#define QCA99X0_HW_1_0_CHIP_ID_REV 0x0
+
+/* QCA99X0 2.0 definitions */
+#define QCA99X0_HW_2_0_DEV_VERSION 0x01000000
+#define QCA99X0_HW_2_0_CHIP_ID_REV 0x1
+#define QCA99X0_HW_2_0_FW_DIR ATH10K_FW_DIR "/QCA99X0/hw2.0"
+#define QCA99X0_HW_2_0_BOARD_DATA_FILE "board.bin"
+#define QCA99X0_HW_2_0_PATCH_LOAD_ADDR 0x1234
+
+/* QCA9984 1.0 defines */
+#define QCA9984_HW_1_0_DEV_VERSION 0x1000000
+#define QCA9984_HW_DEV_TYPE 0xa
+#define QCA9984_HW_1_0_CHIP_ID_REV 0x0
+#define QCA9984_HW_1_0_FW_DIR ATH10K_FW_DIR "/QCA9984/hw1.0"
+#define QCA9984_HW_1_0_BOARD_DATA_FILE "board.bin"
+#define QCA9984_HW_1_0_PATCH_LOAD_ADDR 0x1234
+
+/* QCA9888 2.0 defines */
+#define QCA9888_HW_2_0_DEV_VERSION 0x1000000
+#define QCA9888_HW_DEV_TYPE 0xc
+#define QCA9888_HW_2_0_CHIP_ID_REV 0x0
+#define QCA9888_HW_2_0_FW_DIR ATH10K_FW_DIR "/QCA9888/hw2.0"
+#define QCA9888_HW_2_0_BOARD_DATA_FILE "board.bin"
+#define QCA9888_HW_2_0_PATCH_LOAD_ADDR 0x1234
+
+/* QCA9377 1.0 definitions */
+#define QCA9377_HW_1_0_FW_DIR ATH10K_FW_DIR "/QCA9377/hw1.0"
+#define QCA9377_HW_1_0_BOARD_DATA_FILE "board.bin"
+#define QCA9377_HW_1_0_PATCH_LOAD_ADDR 0x1234
+
+/* QCA4019 1.0 definitions */
+#define QCA4019_HW_1_0_DEV_VERSION 0x01000000
+#define QCA4019_HW_1_0_FW_DIR ATH10K_FW_DIR "/QCA4019/hw1.0"
+#define QCA4019_HW_1_0_BOARD_DATA_FILE "board.bin"
+#define QCA4019_HW_1_0_PATCH_LOAD_ADDR 0x1234
+
+/* WCN3990 1.0 definitions */
+#define WCN3990_HW_1_0_DEV_VERSION ATH10K_HW_WCN3990
+#define WCN3990_HW_1_0_FW_DIR ATH10K_FW_DIR "/WCN3990/hw1.0"
+
+#define ATH10K_FW_FILE_BASE "firmware"
+#define ATH10K_FW_API_MAX 6
+#define ATH10K_FW_API_MIN 2
+
+#define ATH10K_FW_API2_FILE "firmware-2.bin"
+#define ATH10K_FW_API3_FILE "firmware-3.bin"
+
+/* added support for ATH10K_FW_IE_WMI_OP_VERSION */
+#define ATH10K_FW_API4_FILE "firmware-4.bin"
+
+/* HTT id conflict fix for management frames over HTT */
+#define ATH10K_FW_API5_FILE "firmware-5.bin"
+
+/* the firmware-6.bin blob */
+#define ATH10K_FW_API6_FILE "firmware-6.bin"
+
+#define ATH10K_FW_UTF_FILE "utf.bin"
+#define ATH10K_FW_UTF_API2_FILE "utf-2.bin"
+
+/* includes also the null byte */
+#define ATH10K_FIRMWARE_MAGIC "QCA-ATH10K"
+#define ATH10K_BOARD_MAGIC "QCA-ATH10K-BOARD"
+
+#define ATH10K_BOARD_API2_FILE "board-2.bin"
+
+#define REG_DUMP_COUNT_QCA988X 60
+
+struct ath10k_fw_ie {
+ __le32 id;
+ __le32 len;
+ u8 data[0];
+};
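+/* firmware-N.bin (N >= 2) is laid out as the ATH10K_FIRMWARE_MAGIC string
+ * (including its null byte, padded to a 4-byte boundary) followed by a
+ * sequence of these IEs; each data[] payload is likewise padded to 4
+ * bytes before the next IE begins.
+ */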
+
+enum ath10k_fw_ie_type {
+ ATH10K_FW_IE_FW_VERSION = 0,
+ ATH10K_FW_IE_TIMESTAMP = 1,
+ ATH10K_FW_IE_FEATURES = 2,
+ ATH10K_FW_IE_FW_IMAGE = 3,
+ ATH10K_FW_IE_OTP_IMAGE = 4,
+
+ /* WMI "operations" interface version, 32 bit value. Supported from
+ * FW API 4 and above.
+ */
+ ATH10K_FW_IE_WMI_OP_VERSION = 5,
+
+ /* HTT "operations" interface version, 32 bit value. Supported from
+ * FW API 5 and above.
+ */
+ ATH10K_FW_IE_HTT_OP_VERSION = 6,
+
+ /* Code swap image for firmware binary */
+ ATH10K_FW_IE_FW_CODE_SWAP_IMAGE = 7,
+};
+
+enum ath10k_fw_wmi_op_version {
+ ATH10K_FW_WMI_OP_VERSION_UNSET = 0,
+
+ ATH10K_FW_WMI_OP_VERSION_MAIN = 1,
+ ATH10K_FW_WMI_OP_VERSION_10_1 = 2,
+ ATH10K_FW_WMI_OP_VERSION_10_2 = 3,
+ ATH10K_FW_WMI_OP_VERSION_TLV = 4,
+ ATH10K_FW_WMI_OP_VERSION_10_2_4 = 5,
+ ATH10K_FW_WMI_OP_VERSION_10_4 = 6,
+
+ /* keep last */
+ ATH10K_FW_WMI_OP_VERSION_MAX,
+};
+
+enum ath10k_fw_htt_op_version {
+ ATH10K_FW_HTT_OP_VERSION_UNSET = 0,
+
+ ATH10K_FW_HTT_OP_VERSION_MAIN = 1,
+
+ /* also used in 10.2 and 10.2.4 branches */
+ ATH10K_FW_HTT_OP_VERSION_10_1 = 2,
+
+ ATH10K_FW_HTT_OP_VERSION_TLV = 3,
+
+ ATH10K_FW_HTT_OP_VERSION_10_4 = 4,
+
+ /* keep last */
+ ATH10K_FW_HTT_OP_VERSION_MAX,
+};
+
+enum ath10k_bd_ie_type {
+ /* contains sub IEs of enum ath10k_bd_ie_board_type */
+ ATH10K_BD_IE_BOARD = 0,
+};
+
+enum ath10k_bd_ie_board_type {
+ ATH10K_BD_IE_BOARD_NAME = 0,
+ ATH10K_BD_IE_BOARD_DATA = 1,
+};
+
+enum ath10k_hw_rev {
+ ATH10K_HW_QCA988X,
+ ATH10K_HW_QCA6174,
+ ATH10K_HW_QCA99X0,
+ ATH10K_HW_QCA9888,
+ ATH10K_HW_QCA9984,
+ ATH10K_HW_QCA9377,
+ ATH10K_HW_QCA4019,
+ ATH10K_HW_QCA9887,
+ ATH10K_HW_WCN3990,
+};
+
+struct ath10k_hw_regs {
+ u32 rtc_soc_base_address;
+ u32 rtc_wmac_base_address;
+ u32 soc_core_base_address;
+ u32 wlan_mac_base_address;
+ u32 ce_wrapper_base_address;
+ u32 ce0_base_address;
+ u32 ce1_base_address;
+ u32 ce2_base_address;
+ u32 ce3_base_address;
+ u32 ce4_base_address;
+ u32 ce5_base_address;
+ u32 ce6_base_address;
+ u32 ce7_base_address;
+ u32 ce8_base_address;
+ u32 ce9_base_address;
+ u32 ce10_base_address;
+ u32 ce11_base_address;
+ u32 soc_reset_control_si0_rst_mask;
+ u32 soc_reset_control_ce_rst_mask;
+ u32 soc_chip_id_address;
+ u32 scratch_3_address;
+ u32 fw_indicator_address;
+ u32 pcie_local_base_address;
+ u32 ce_wrap_intr_sum_host_msi_lsb;
+ u32 ce_wrap_intr_sum_host_msi_mask;
+ u32 pcie_intr_fw_mask;
+ u32 pcie_intr_ce_mask_all;
+ u32 pcie_intr_clr_address;
+ u32 cpu_pll_init_address;
+ u32 cpu_speed_address;
+ u32 core_clk_div_address;
+};
+
+extern const struct ath10k_hw_regs qca988x_regs;
+extern const struct ath10k_hw_regs qca6174_regs;
+extern const struct ath10k_hw_regs qca99x0_regs;
+extern const struct ath10k_hw_regs qca4019_regs;
+extern const struct ath10k_hw_regs wcn3990_regs;
+
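+/* Describes a single register bit field: the most/least significant bit
+ * positions of the field plus its pre-shifted mask.
+ */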
+struct ath10k_hw_ce_regs_addr_map {
+ u32 msb;
+ u32 lsb;
+ u32 mask;
+};
+
+struct ath10k_hw_ce_ctrl1 {
+ u32 addr;
+ u32 hw_mask;
+ u32 sw_mask;
+ u32 hw_wr_mask;
+ u32 sw_wr_mask;
+ u32 reset_mask;
+ u32 reset;
+ struct ath10k_hw_ce_regs_addr_map *src_ring;
+ struct ath10k_hw_ce_regs_addr_map *dst_ring;
+ struct ath10k_hw_ce_regs_addr_map *dmax;
+};
+
+struct ath10k_hw_ce_cmd_halt {
+ u32 status_reset;
+ u32 msb;
+ u32 mask;
+ struct ath10k_hw_ce_regs_addr_map *status;
+};
+
+struct ath10k_hw_ce_host_ie {
+ u32 copy_complete_reset;
+ struct ath10k_hw_ce_regs_addr_map *copy_complete;
+};
+
+struct ath10k_hw_ce_host_wm_regs {
+ u32 dstr_lmask;
+ u32 dstr_hmask;
+ u32 srcr_lmask;
+ u32 srcr_hmask;
+ u32 cc_mask;
+ u32 wm_mask;
+ u32 addr;
+};
+
+struct ath10k_hw_ce_misc_regs {
+ u32 axi_err;
+ u32 dstr_add_err;
+ u32 srcr_len_err;
+ u32 dstr_mlen_vio;
+ u32 dstr_overflow;
+ u32 srcr_overflow;
+ u32 err_mask;
+ u32 addr;
+};
+
+struct ath10k_hw_ce_dst_src_wm_regs {
+ u32 addr;
+ u32 low_rst;
+ u32 high_rst;
+ struct ath10k_hw_ce_regs_addr_map *wm_low;
+ struct ath10k_hw_ce_regs_addr_map *wm_high;
+};
+
+struct ath10k_hw_ce_ctrl1_upd {
+ u32 shift;
+ u32 mask;
+ u32 enable;
+};
+
+struct ath10k_hw_ce_regs {
+ u32 sr_base_addr;
+ u32 sr_size_addr;
+ u32 dr_base_addr;
+ u32 dr_size_addr;
+ u32 ce_cmd_addr;
+ u32 misc_ie_addr;
+ u32 sr_wr_index_addr;
+ u32 dst_wr_index_addr;
+ u32 current_srri_addr;
+ u32 current_drri_addr;
+ u32 ddr_addr_for_rri_low;
+ u32 ddr_addr_for_rri_high;
+ u32 ce_rri_low;
+ u32 ce_rri_high;
+ u32 host_ie_addr;
+ struct ath10k_hw_ce_host_wm_regs *wm_regs;
+ struct ath10k_hw_ce_misc_regs *misc_regs;
+ struct ath10k_hw_ce_ctrl1 *ctrl1_regs;
+ struct ath10k_hw_ce_cmd_halt *cmd_halt;
+ struct ath10k_hw_ce_host_ie *host_ie;
+ struct ath10k_hw_ce_dst_src_wm_regs *wm_srcr;
+ struct ath10k_hw_ce_dst_src_wm_regs *wm_dstr;
+ struct ath10k_hw_ce_ctrl1_upd *upd;
+};
+
+struct ath10k_hw_values {
+ u32 rtc_state_val_on;
+ u8 ce_count;
+ u8 msi_assign_ce_max;
+ u8 num_target_ce_config_wlan;
+ u16 ce_desc_meta_data_mask;
+ u8 ce_desc_meta_data_lsb;
+};
+
+extern const struct ath10k_hw_values qca988x_values;
+extern const struct ath10k_hw_values qca6174_values;
+extern const struct ath10k_hw_values qca99x0_values;
+extern const struct ath10k_hw_values qca9888_values;
+extern const struct ath10k_hw_values qca4019_values;
+extern const struct ath10k_hw_values wcn3990_values;
+extern const struct ath10k_hw_ce_regs wcn3990_ce_regs;
+extern const struct ath10k_hw_ce_regs qcax_ce_regs;
+
+void ath10k_hw_fill_survey_time(struct ath10k *ar, struct survey_info *survey,
+ u32 cc, u32 rcc, u32 cc_prev, u32 rcc_prev);
+
+#define QCA_REV_988X(ar) ((ar)->hw_rev == ATH10K_HW_QCA988X)
+#define QCA_REV_9887(ar) ((ar)->hw_rev == ATH10K_HW_QCA9887)
+#define QCA_REV_6174(ar) ((ar)->hw_rev == ATH10K_HW_QCA6174)
+#define QCA_REV_99X0(ar) ((ar)->hw_rev == ATH10K_HW_QCA99X0)
+#define QCA_REV_9888(ar) ((ar)->hw_rev == ATH10K_HW_QCA9888)
+#define QCA_REV_9984(ar) ((ar)->hw_rev == ATH10K_HW_QCA9984)
+#define QCA_REV_9377(ar) ((ar)->hw_rev == ATH10K_HW_QCA9377)
+#define QCA_REV_40XX(ar) ((ar)->hw_rev == ATH10K_HW_QCA4019)
+#define QCA_REV_WCN3990(ar) ((ar)->hw_rev == ATH10K_HW_WCN3990)
+
+/* Known peculiarities:
+ * - raw appears in nwifi decap, raw and nwifi appear in ethernet decap
+ * - raw frames have FCS, nwifi frames don't
+ * - ethernet frames have the 802.11 header decapped and the parts (base hdr,
+ * cipher param, llc/snap) are each aligned to 4-byte boundaries
+ */
+enum ath10k_hw_txrx_mode {
+ ATH10K_HW_TXRX_RAW = 0,
+
+ /* Native Wifi decap mode is used to align IP frames to 4-byte
+ * boundaries and avoid a very expensive re-alignment in mac80211.
+ */
+ ATH10K_HW_TXRX_NATIVE_WIFI = 1,
+ ATH10K_HW_TXRX_ETHERNET = 2,
+
+ /* Valid for HTT >= 3.0. Used for management frames in TX_FRM. */
+ ATH10K_HW_TXRX_MGMT = 3,
+};
+
+enum ath10k_mcast2ucast_mode {
+ ATH10K_MCAST2UCAST_DISABLED = 0,
+ ATH10K_MCAST2UCAST_ENABLED = 1,
+};
+
+enum ath10k_hw_rate_ofdm {
+ ATH10K_HW_RATE_OFDM_48M = 0,
+ ATH10K_HW_RATE_OFDM_24M,
+ ATH10K_HW_RATE_OFDM_12M,
+ ATH10K_HW_RATE_OFDM_6M,
+ ATH10K_HW_RATE_OFDM_54M,
+ ATH10K_HW_RATE_OFDM_36M,
+ ATH10K_HW_RATE_OFDM_18M,
+ ATH10K_HW_RATE_OFDM_9M,
+};
+
+enum ath10k_hw_rate_cck {
+ ATH10K_HW_RATE_CCK_LP_11M = 0,
+ ATH10K_HW_RATE_CCK_LP_5_5M,
+ ATH10K_HW_RATE_CCK_LP_2M,
+ ATH10K_HW_RATE_CCK_LP_1M,
+ ATH10K_HW_RATE_CCK_SP_11M,
+ ATH10K_HW_RATE_CCK_SP_5_5M,
+ ATH10K_HW_RATE_CCK_SP_2M,
+};
+
+enum ath10k_hw_rate_rev2_cck {
+ ATH10K_HW_RATE_REV2_CCK_LP_1M = 1,
+ ATH10K_HW_RATE_REV2_CCK_LP_2M,
+ ATH10K_HW_RATE_REV2_CCK_LP_5_5M,
+ ATH10K_HW_RATE_REV2_CCK_LP_11M,
+ ATH10K_HW_RATE_REV2_CCK_SP_2M,
+ ATH10K_HW_RATE_REV2_CCK_SP_5_5M,
+ ATH10K_HW_RATE_REV2_CCK_SP_11M,
+};
+
+enum ath10k_hw_cc_wraparound_type {
+ ATH10K_HW_CC_WRAP_DISABLED = 0,
+
+ /* This type is when the HW chip has a quirky Cycle Counter
+ * wraparound which resets to 0x7fffffff instead of 0. All
+ * other CC related counters (e.g. Rx Clear Count) are divided
+ * by 2 so they never wraparound themselves.
+ */
+ ATH10K_HW_CC_WRAP_SHIFTED_ALL = 1,
+
+ /* Each hw counter wraps around independently. When a
+ * counter overflows, the respective counter is right shifted
+ * by 1, i.e. reset to 0x7fffffff, while the other counters
+ * keep running unaffected. With this type of wraparound it
+ * should be possible to report accurate Rx busy time, unlike
+ * the first type.
+ */
+};
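+
+/* Illustrative sketch (not part of the driver): rebasing a wrapped cycle
+ * counter for the SHIFTED_EACH quirk above. A counter that restarts from
+ * 0x7fffffff instead of 0 makes the previous snapshot appear larger than
+ * the current value, so the restart value has to be added back before
+ * subtracting. The helper name is hypothetical; the in-tree handling is
+ * done by ath10k_hw_fill_survey_time() in hw.c.
+ */
+static inline u32 ath10k_cc_delta_sketch(u32 cc, u32 cc_prev)
+{
+ u32 cc_fix = 0;
+
+ if (cc < cc_prev) /* counter wrapped since the last snapshot */
+ cc_fix = 0x7fffffff;
+
+ return cc - (cc_prev - cc_fix);
+}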
+
+enum ath10k_hw_refclk_speed {
+ ATH10K_HW_REFCLK_UNKNOWN = -1,
+ ATH10K_HW_REFCLK_48_MHZ = 0,
+ ATH10K_HW_REFCLK_19_2_MHZ = 1,
+ ATH10K_HW_REFCLK_24_MHZ = 2,
+ ATH10K_HW_REFCLK_26_MHZ = 3,
+ ATH10K_HW_REFCLK_37_4_MHZ = 4,
+ ATH10K_HW_REFCLK_38_4_MHZ = 5,
+ ATH10K_HW_REFCLK_40_MHZ = 6,
+ ATH10K_HW_REFCLK_52_MHZ = 7,
+
+ /* must be the last one */
+ ATH10K_HW_REFCLK_COUNT,
+};
+
+struct ath10k_hw_clk_params {
+ u32 refclk;
+ u32 div;
+ u32 rnfrac;
+ u32 settle_time;
+ u32 refdiv;
+ u32 outdiv;
+};
+
+struct ath10k_hw_params {
+ u32 id;
+ u16 dev_id;
+ const char *name;
+ u32 patch_load_addr;
+ int uart_pin;
+ u32 otp_exe_param;
+
+ /* Type of hw cycle counter wraparound logic; for more info
+ * refer to enum ath10k_hw_cc_wraparound_type.
+ */
+ enum ath10k_hw_cc_wraparound_type cc_wraparound_type;
+
+ /* Some chips expect the fragment descriptor to be in contiguous
+ * memory for any TX operation. Set the continuous_frag_desc flag
+ * for hardware with such a requirement.
+ */
+ bool continuous_frag_desc;
+
+ /* The CCK hardware rate table mapping was revised for newer
+ * chipsets such as QCA99X0 and QCA4019; their CCK h/w rate
+ * values are properly ordered with respect to rate/preamble.
+ */
+ bool cck_rate_map_rev2;
+
+ u32 channel_counters_freq_hz;
+
+ /* Mgmt tx descriptors threshold for limiting probe response
+ * frames.
+ */
+ u32 max_probe_resp_desc_thres;
+
+ u32 tx_chain_mask;
+ u32 rx_chain_mask;
+ u32 max_spatial_stream;
+ u32 cal_data_len;
+
+ struct ath10k_hw_params_fw {
+ const char *dir;
+ const char *board;
+ size_t board_size;
+ size_t board_ext_size;
+ } fw;
+
+ /* qca99x0 family chips deliver broadcast/multicast management
+ * frames encrypted and expect software to do the decryption.
+ */
+ bool sw_decrypt_mcast_mgmt;
+
+ const struct ath10k_hw_ops *hw_ops;
+
+ /* Number of bytes used for alignment in rx_hdr_status of rx desc. */
+ int decap_align_bytes;
+
+ /* hw specific clock control parameters */
+ const struct ath10k_hw_clk_params *hw_clk;
+ int target_cpu_freq;
+
+ /* Number of bytes to be discarded for each FFT sample */
+ int spectral_bin_discard;
+
+ /* The board may have a restricted NSS for 160 or 80+80 MHz vs
+ * what it can do for 80 MHz.
+ */
+ int vht160_mcs_rx_highest;
+ int vht160_mcs_tx_highest;
+
+ /* Number of ciphers supported (i.e. the first N) in the cipher_suites array */
+ int n_cipher_suites;
+
+ u32 num_peers;
+ u32 ast_skid_limit;
+ u32 num_wds_entries;
+
+ /* Targets that support physical addressing above 32 bits */
+ bool target_64bit;
+
+ /* Target rx ring fill level */
+ u32 rx_ring_fill_level;
+
+ /* target supports per-CE IRQs */
+ bool per_ce_irq;
+
+ /* target supports shadow registers for CE writes */
+ bool shadow_reg_support;
+
+ /* target supports retention restore on DDR */
+ bool rri_on_ddr;
+
+ /* Number of bytes to be the offset for each FFT sample */
+ int spectral_bin_offset;
+
+ /* targets which require a hw filter reset during boot up,
+ * to avoid the target sending spurious acks.
+ */
+ bool hw_filter_reset_required;
+};
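+
+/* Illustrative sketch (not part of the driver): the rough shape of an
+ * entry in the hw_params table in core.c that fills this struct. Field
+ * values below are placeholders, not real chip data:
+ *
+ *	static const struct ath10k_hw_params example = {
+ *		.id = 0x4100016c,
+ *		.name = "example hw",
+ *		.patch_load_addr = 0x1234,
+ *		.uart_pin = 7,
+ *		.hw_ops = &qca988x_ops,
+ *		.fw = {
+ *			.dir = QCA99X0_HW_2_0_FW_DIR,
+ *			.board = QCA99X0_HW_2_0_BOARD_DATA_FILE,
+ *		},
+ *	};
+ */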
+
+struct htt_rx_desc;
+
+/* Defines needed for Rx descriptor abstraction */
+struct ath10k_hw_ops {
+ int (*rx_desc_get_l3_pad_bytes)(struct htt_rx_desc *rxd);
+ void (*set_coverage_class)(struct ath10k *ar, s16 value);
+ int (*enable_pll_clk)(struct ath10k *ar);
+};
+
+extern const struct ath10k_hw_ops qca988x_ops;
+extern const struct ath10k_hw_ops qca99x0_ops;
+extern const struct ath10k_hw_ops qca6174_ops;
+extern const struct ath10k_hw_ops wcn3990_ops;
+
+extern const struct ath10k_hw_clk_params qca6174_clk[];
+
+static inline int
+ath10k_rx_desc_get_l3_pad_bytes(struct ath10k_hw_params *hw,
+ struct htt_rx_desc *rxd)
+{
+ if (hw->hw_ops->rx_desc_get_l3_pad_bytes)
+ return hw->hw_ops->rx_desc_get_l3_pad_bytes(rxd);
+ return 0;
+}
+
+/* Target specific defines for MAIN firmware */
+#define TARGET_NUM_VDEVS 8
+#define TARGET_NUM_PEER_AST 2
+#define TARGET_NUM_WDS_ENTRIES 32
+#define TARGET_DMA_BURST_SIZE 0
+#define TARGET_MAC_AGGR_DELIM 0
+#define TARGET_AST_SKID_LIMIT 16
+#define TARGET_NUM_STATIONS 16
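+/* each vdev consumes a peer entry of its own, so peers = stations + vdevs */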
+#define TARGET_NUM_PEERS ((TARGET_NUM_STATIONS) + \
+ (TARGET_NUM_VDEVS))
+#define TARGET_NUM_OFFLOAD_PEERS 0
+#define TARGET_NUM_OFFLOAD_REORDER_BUFS 0
+#define TARGET_NUM_PEER_KEYS 2
+#define TARGET_NUM_TIDS ((TARGET_NUM_PEERS) * 2)
+#define TARGET_TX_CHAIN_MASK (BIT(0) | BIT(1) | BIT(2))
+#define TARGET_RX_CHAIN_MASK (BIT(0) | BIT(1) | BIT(2))
+#define TARGET_RX_TIMEOUT_LO_PRI 100
+#define TARGET_RX_TIMEOUT_HI_PRI 40
+
+#define TARGET_SCAN_MAX_PENDING_REQS 4
+#define TARGET_BMISS_OFFLOAD_MAX_VDEV 3
+#define TARGET_ROAM_OFFLOAD_MAX_VDEV 3
+#define TARGET_ROAM_OFFLOAD_MAX_AP_PROFILES 8
+#define TARGET_GTK_OFFLOAD_MAX_VDEV 3
+#define TARGET_NUM_MCAST_GROUPS 0
+#define TARGET_NUM_MCAST_TABLE_ELEMS 0
+#define TARGET_MCAST2UCAST_MODE ATH10K_MCAST2UCAST_DISABLED
+#define TARGET_TX_DBG_LOG_SIZE 1024
+#define TARGET_RX_SKIP_DEFRAG_TIMEOUT_DUP_DETECTION_CHECK 0
+#define TARGET_VOW_CONFIG 0
+#define TARGET_NUM_MSDU_DESC (1024 + 400)
+#define TARGET_MAX_FRAG_ENTRIES 0
+
+/* Target specific defines for 10.X firmware */
+#define TARGET_10X_NUM_VDEVS 16
+#define TARGET_10X_NUM_PEER_AST 2
+#define TARGET_10X_NUM_WDS_ENTRIES 32
+#define TARGET_10X_DMA_BURST_SIZE 0
+#define TARGET_10X_MAC_AGGR_DELIM 0
+#define TARGET_10X_AST_SKID_LIMIT 128
+#define TARGET_10X_NUM_STATIONS 128
+#define TARGET_10X_TX_STATS_NUM_STATIONS 118
+#define TARGET_10X_NUM_PEERS ((TARGET_10X_NUM_STATIONS) + \
+ (TARGET_10X_NUM_VDEVS))
+#define TARGET_10X_TX_STATS_NUM_PEERS ((TARGET_10X_TX_STATS_NUM_STATIONS) + \
+ (TARGET_10X_NUM_VDEVS))
+#define TARGET_10X_NUM_OFFLOAD_PEERS 0
+#define TARGET_10X_NUM_OFFLOAD_REORDER_BUFS 0
+#define TARGET_10X_NUM_PEER_KEYS 2
+#define TARGET_10X_NUM_TIDS_MAX 256
+#define TARGET_10X_NUM_TIDS min((TARGET_10X_NUM_TIDS_MAX), \
+ (TARGET_10X_NUM_PEERS) * 2)
+#define TARGET_10X_TX_STATS_NUM_TIDS min((TARGET_10X_NUM_TIDS_MAX), \
+ (TARGET_10X_TX_STATS_NUM_PEERS) * 2)
+#define TARGET_10X_TX_CHAIN_MASK (BIT(0) | BIT(1) | BIT(2))
+#define TARGET_10X_RX_CHAIN_MASK (BIT(0) | BIT(1) | BIT(2))
+#define TARGET_10X_RX_TIMEOUT_LO_PRI 100
+#define TARGET_10X_RX_TIMEOUT_HI_PRI 40
+#define TARGET_10X_SCAN_MAX_PENDING_REQS 4
+#define TARGET_10X_BMISS_OFFLOAD_MAX_VDEV 2
+#define TARGET_10X_ROAM_OFFLOAD_MAX_VDEV 2
+#define TARGET_10X_ROAM_OFFLOAD_MAX_AP_PROFILES 8
+#define TARGET_10X_GTK_OFFLOAD_MAX_VDEV 3
+#define TARGET_10X_NUM_MCAST_GROUPS 0
+#define TARGET_10X_NUM_MCAST_TABLE_ELEMS 0
+#define TARGET_10X_MCAST2UCAST_MODE ATH10K_MCAST2UCAST_DISABLED
+#define TARGET_10X_TX_DBG_LOG_SIZE 1024
+#define TARGET_10X_RX_SKIP_DEFRAG_TIMEOUT_DUP_DETECTION_CHECK 1
+#define TARGET_10X_VOW_CONFIG 0
+#define TARGET_10X_NUM_MSDU_DESC (1024 + 400)
+#define TARGET_10X_MAX_FRAG_ENTRIES 0
+
+/* 10.2 parameters */
+#define TARGET_10_2_DMA_BURST_SIZE 0
+
+/* Target specific defines for WMI-TLV firmware */
+#define TARGET_TLV_NUM_VDEVS 4
+#define TARGET_TLV_NUM_STATIONS 32
+#define TARGET_TLV_NUM_PEERS 33
+#define TARGET_TLV_NUM_TDLS_VDEVS 1
+#define TARGET_TLV_NUM_TIDS ((TARGET_TLV_NUM_PEERS) * 2)
+#define TARGET_TLV_NUM_MSDU_DESC (1024 + 32)
+#define TARGET_TLV_NUM_WOW_PATTERNS 22
+#define TARGET_TLV_MGMT_NUM_MSDU_DESC (50)
+
+/* Target specific defines for WMI-HL-1.0 firmware */
+#define TARGET_HL_10_TLV_NUM_PEERS 14
+#define TARGET_HL_10_TLV_AST_SKID_LIMIT 6
+#define TARGET_HL_10_TLV_NUM_WDS_ENTRIES 2
+
+/* Diagnostic Window */
+#define CE_DIAG_PIPE 7
+
+#define NUM_TARGET_CE_CONFIG_WLAN ar->hw_values->num_target_ce_config_wlan
+
+/* Target specific defines for 10.4 firmware */
+#define TARGET_10_4_NUM_VDEVS 16
+#define TARGET_10_4_NUM_STATIONS 32
+#define TARGET_10_4_NUM_PEERS ((TARGET_10_4_NUM_STATIONS) + \
+ (TARGET_10_4_NUM_VDEVS))
+#define TARGET_10_4_ACTIVE_PEERS 0
+
+#define TARGET_10_4_NUM_QCACHE_PEERS_MAX 512
+#define TARGET_10_4_QCACHE_ACTIVE_PEERS 50
+#define TARGET_10_4_QCACHE_ACTIVE_PEERS_PFC 35
+#define TARGET_10_4_NUM_OFFLOAD_PEERS 0
+#define TARGET_10_4_NUM_OFFLOAD_REORDER_BUFFS 0
+#define TARGET_10_4_NUM_PEER_KEYS 2
+#define TARGET_10_4_TGT_NUM_TIDS ((TARGET_10_4_NUM_PEERS) * 2)
+#define TARGET_10_4_NUM_MSDU_DESC (1024 + 400)
+#define TARGET_10_4_NUM_MSDU_DESC_PFC 2500
+#define TARGET_10_4_AST_SKID_LIMIT 32
+
+/* 100 ms for video, best-effort, and background */
+#define TARGET_10_4_RX_TIMEOUT_LO_PRI 100
+
+/* 40 ms for voice */
+#define TARGET_10_4_RX_TIMEOUT_HI_PRI 40
+
+#define TARGET_10_4_RX_DECAP_MODE ATH10K_HW_TXRX_NATIVE_WIFI
+#define TARGET_10_4_SCAN_MAX_REQS 4
+#define TARGET_10_4_BMISS_OFFLOAD_MAX_VDEV 3
+#define TARGET_10_4_ROAM_OFFLOAD_MAX_VDEV 3
+#define TARGET_10_4_ROAM_OFFLOAD_MAX_PROFILES 8
+
+/* Note: mcast to ucast is disabled by default */
+#define TARGET_10_4_NUM_MCAST_GROUPS 0
+#define TARGET_10_4_NUM_MCAST_TABLE_ELEMS 0
+#define TARGET_10_4_MCAST2UCAST_MODE 0
+
+#define TARGET_10_4_TX_DBG_LOG_SIZE 1024
+#define TARGET_10_4_NUM_WDS_ENTRIES 32
+#define TARGET_10_4_DMA_BURST_SIZE 1
+#define TARGET_10_4_MAC_AGGR_DELIM 0
+#define TARGET_10_4_RX_SKIP_DEFRAG_TIMEOUT_DUP_DETECTION_CHECK 1
+#define TARGET_10_4_VOW_CONFIG 0
+#define TARGET_10_4_GTK_OFFLOAD_MAX_VDEV 3
+#define TARGET_10_4_11AC_TX_MAX_FRAGS 2
+#define TARGET_10_4_MAX_PEER_EXT_STATS 16
+#define TARGET_10_4_SMART_ANT_CAP 0
+#define TARGET_10_4_BK_MIN_FREE 0
+#define TARGET_10_4_BE_MIN_FREE 0
+#define TARGET_10_4_VI_MIN_FREE 0
+#define TARGET_10_4_VO_MIN_FREE 0
+#define TARGET_10_4_RX_BATCH_MODE 1
+#define TARGET_10_4_THERMAL_THROTTLING_CONFIG 0
+#define TARGET_10_4_ATF_CONFIG 0
+#define TARGET_10_4_IPHDR_PAD_CONFIG 1
+#define TARGET_10_4_QWRAP_CONFIG 0
+
+/* TDLS config */
+#define TARGET_10_4_NUM_TDLS_VDEVS 1
+#define TARGET_10_4_NUM_TDLS_BUFFER_STA 1
+#define TARGET_10_4_NUM_TDLS_SLEEP_STA 1
+
+/* Maximum number of Copy Engines supported */
+#define CE_COUNT_MAX 12
+
+/* Number of Copy Engines supported */
+#define CE_COUNT ar->hw_values->ce_count
+
+/*
+ * Granted MSIs are assigned as follows:
+ * Firmware uses the first MSI
+ * Remaining MSIs, if any, are used by Copy Engines
+ * This mapping is known to both Target firmware and Host software.
+ * It may be changed as long as Host and Target are kept in sync.
+ */
+/* MSI for firmware (errors, etc.) */
+#define MSI_ASSIGN_FW 0
+
+/* MSIs for Copy Engines */
+#define MSI_ASSIGN_CE_INITIAL 1
+#define MSI_ASSIGN_CE_MAX ar->hw_values->msi_assign_ce_max
+
+/* as of IP3.7.1 */
+#define RTC_STATE_V_ON ar->hw_values->rtc_state_val_on
+
+#define RTC_STATE_V_LSB 0
+#define RTC_STATE_V_MASK 0x00000007
+#define RTC_STATE_ADDRESS 0x0000
+#define PCIE_SOC_WAKE_V_MASK 0x00000001
+#define PCIE_SOC_WAKE_ADDRESS 0x0004
+#define PCIE_SOC_WAKE_RESET 0x00000000
+#define SOC_GLOBAL_RESET_ADDRESS 0x0008
+
+#define RTC_SOC_BASE_ADDRESS ar->regs->rtc_soc_base_address
+#define RTC_WMAC_BASE_ADDRESS ar->regs->rtc_wmac_base_address
+#define MAC_COEX_BASE_ADDRESS 0x00006000
+#define BT_COEX_BASE_ADDRESS 0x00007000
+#define SOC_PCIE_BASE_ADDRESS 0x00008000
+#define SOC_CORE_BASE_ADDRESS ar->regs->soc_core_base_address
+#define WLAN_UART_BASE_ADDRESS 0x0000c000
+#define WLAN_SI_BASE_ADDRESS 0x00010000
+#define WLAN_GPIO_BASE_ADDRESS 0x00014000
+#define WLAN_ANALOG_INTF_BASE_ADDRESS 0x0001c000
+#define WLAN_MAC_BASE_ADDRESS ar->regs->wlan_mac_base_address
+#define EFUSE_BASE_ADDRESS 0x00030000
+#define FPGA_REG_BASE_ADDRESS 0x00039000
+#define WLAN_UART2_BASE_ADDRESS 0x00054c00
+#define CE_WRAPPER_BASE_ADDRESS ar->regs->ce_wrapper_base_address
+#define CE0_BASE_ADDRESS ar->regs->ce0_base_address
+#define CE1_BASE_ADDRESS ar->regs->ce1_base_address
+#define CE2_BASE_ADDRESS ar->regs->ce2_base_address
+#define CE3_BASE_ADDRESS ar->regs->ce3_base_address
+#define CE4_BASE_ADDRESS ar->regs->ce4_base_address
+#define CE5_BASE_ADDRESS ar->regs->ce5_base_address
+#define CE6_BASE_ADDRESS ar->regs->ce6_base_address
+#define CE7_BASE_ADDRESS ar->regs->ce7_base_address
+#define DBI_BASE_ADDRESS 0x00060000
+#define WLAN_ANALOG_INTF_PCIE_BASE_ADDRESS 0x0006c000
+#define PCIE_LOCAL_BASE_ADDRESS ar->regs->pcie_local_base_address
+
+#define SOC_RESET_CONTROL_ADDRESS 0x00000000
+#define SOC_RESET_CONTROL_OFFSET 0x00000000
+#define SOC_RESET_CONTROL_SI0_RST_MASK ar->regs->soc_reset_control_si0_rst_mask
+#define SOC_RESET_CONTROL_CE_RST_MASK ar->regs->soc_reset_control_ce_rst_mask
+#define SOC_RESET_CONTROL_CPU_WARM_RST_MASK 0x00000040
+#define SOC_CPU_CLOCK_OFFSET 0x00000020
+#define SOC_CPU_CLOCK_STANDARD_LSB 0
+#define SOC_CPU_CLOCK_STANDARD_MASK 0x00000003
+#define SOC_CLOCK_CONTROL_OFFSET 0x00000028
+#define SOC_CLOCK_CONTROL_SI0_CLK_MASK 0x00000001
+#define SOC_SYSTEM_SLEEP_OFFSET 0x000000c4
+#define SOC_LPO_CAL_OFFSET 0x000000e0
+#define SOC_LPO_CAL_ENABLE_LSB 20
+#define SOC_LPO_CAL_ENABLE_MASK 0x00100000
+#define SOC_LF_TIMER_CONTROL0_ADDRESS 0x00000050
+#define SOC_LF_TIMER_CONTROL0_ENABLE_MASK 0x00000004
+
+#define SOC_CHIP_ID_ADDRESS ar->regs->soc_chip_id_address
+#define SOC_CHIP_ID_REV_LSB 8
+#define SOC_CHIP_ID_REV_MASK 0x00000f00
+
+#define WLAN_RESET_CONTROL_COLD_RST_MASK 0x00000008
+#define WLAN_RESET_CONTROL_WARM_RST_MASK 0x00000004
+#define WLAN_SYSTEM_SLEEP_DISABLE_LSB 0
+#define WLAN_SYSTEM_SLEEP_DISABLE_MASK 0x00000001
+
+#define WLAN_GPIO_PIN0_ADDRESS 0x00000028
+#define WLAN_GPIO_PIN0_CONFIG_LSB 11
+#define WLAN_GPIO_PIN0_CONFIG_MASK 0x00007800
+#define WLAN_GPIO_PIN0_PAD_PULL_LSB 5
+#define WLAN_GPIO_PIN0_PAD_PULL_MASK 0x00000060
+#define WLAN_GPIO_PIN1_ADDRESS 0x0000002c
+#define WLAN_GPIO_PIN1_CONFIG_MASK 0x00007800
+#define WLAN_GPIO_PIN10_ADDRESS 0x00000050
+#define WLAN_GPIO_PIN11_ADDRESS 0x00000054
+#define WLAN_GPIO_PIN12_ADDRESS 0x00000058
+#define WLAN_GPIO_PIN13_ADDRESS 0x0000005c
+
+#define CLOCK_GPIO_OFFSET 0xffffffff
+#define CLOCK_GPIO_BT_CLK_OUT_EN_LSB 0
+#define CLOCK_GPIO_BT_CLK_OUT_EN_MASK 0
+
+#define SI_CONFIG_OFFSET 0x00000000
+#define SI_CONFIG_ERR_INT_LSB 19
+#define SI_CONFIG_ERR_INT_MASK 0x00080000
+#define SI_CONFIG_BIDIR_OD_DATA_LSB 18
+#define SI_CONFIG_BIDIR_OD_DATA_MASK 0x00040000
+#define SI_CONFIG_I2C_LSB 16
+#define SI_CONFIG_I2C_MASK 0x00010000
+#define SI_CONFIG_POS_SAMPLE_LSB 7
+#define SI_CONFIG_POS_SAMPLE_MASK 0x00000080
+#define SI_CONFIG_INACTIVE_DATA_LSB 5
+#define SI_CONFIG_INACTIVE_DATA_MASK 0x00000020
+#define SI_CONFIG_INACTIVE_CLK_LSB 4
+#define SI_CONFIG_INACTIVE_CLK_MASK 0x00000010
+#define SI_CONFIG_DIVIDER_LSB 0
+#define SI_CONFIG_DIVIDER_MASK 0x0000000f
+#define SI_CS_OFFSET 0x00000004
+#define SI_CS_DONE_ERR_LSB 10
+#define SI_CS_DONE_ERR_MASK 0x00000400
+#define SI_CS_DONE_INT_LSB 9
+#define SI_CS_DONE_INT_MASK 0x00000200
+#define SI_CS_START_LSB 8
+#define SI_CS_START_MASK 0x00000100
+#define SI_CS_RX_CNT_LSB 4
+#define SI_CS_RX_CNT_MASK 0x000000f0
+#define SI_CS_TX_CNT_LSB 0
+#define SI_CS_TX_CNT_MASK 0x0000000f
+
+#define SI_TX_DATA0_OFFSET 0x00000008
+#define SI_TX_DATA1_OFFSET 0x0000000c
+#define SI_RX_DATA0_OFFSET 0x00000010
+#define SI_RX_DATA1_OFFSET 0x00000014
+
+#define CORE_CTRL_CPU_INTR_MASK 0x00002000
+#define CORE_CTRL_PCIE_REG_31_MASK 0x00000800
+#define CORE_CTRL_ADDRESS 0x0000
+#define PCIE_INTR_ENABLE_ADDRESS 0x0008
+#define PCIE_INTR_CAUSE_ADDRESS 0x000c
+#define PCIE_INTR_CLR_ADDRESS ar->regs->pcie_intr_clr_address
+#define SCRATCH_3_ADDRESS ar->regs->scratch_3_address
+#define CPU_INTR_ADDRESS 0x0010
+#define FW_RAM_CONFIG_ADDRESS 0x0018
+
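+/* Note: despite the _hz suffix, channel_counters_freq_hz stores the
+ * counter rate per millisecond (e.g. 88000 for an 88 MHz clock), so the
+ * division below yields milliseconds.
+ */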
+#define CCNT_TO_MSEC(ar, x) ((x) / ar->hw_params.channel_counters_freq_hz)
+
+/* Firmware indications to the Host via SCRATCH_3 register. */
+#define FW_INDICATOR_ADDRESS ar->regs->fw_indicator_address
+#define FW_IND_EVENT_PENDING 1
+#define FW_IND_INITIALIZED 2
+#define FW_IND_HOST_READY 0x80000000
+
+/* HOST_REG interrupt from firmware */
+#define PCIE_INTR_FIRMWARE_MASK ar->regs->pcie_intr_fw_mask
+#define PCIE_INTR_CE_MASK_ALL ar->regs->pcie_intr_ce_mask_all
+
+#define DRAM_BASE_ADDRESS 0x00400000
+
+#define PCIE_BAR_REG_ADDRESS 0x40030
+
+#define MISSING 0
+
+#define SYSTEM_SLEEP_OFFSET SOC_SYSTEM_SLEEP_OFFSET
+#define WLAN_SYSTEM_SLEEP_OFFSET SOC_SYSTEM_SLEEP_OFFSET
+#define WLAN_RESET_CONTROL_OFFSET SOC_RESET_CONTROL_OFFSET
+#define CLOCK_CONTROL_OFFSET SOC_CLOCK_CONTROL_OFFSET
+#define CLOCK_CONTROL_SI0_CLK_MASK SOC_CLOCK_CONTROL_SI0_CLK_MASK
+#define RESET_CONTROL_MBOX_RST_MASK MISSING
+#define RESET_CONTROL_SI0_RST_MASK SOC_RESET_CONTROL_SI0_RST_MASK
+#define GPIO_BASE_ADDRESS WLAN_GPIO_BASE_ADDRESS
+#define GPIO_PIN0_OFFSET WLAN_GPIO_PIN0_ADDRESS
+#define GPIO_PIN1_OFFSET WLAN_GPIO_PIN1_ADDRESS
+#define GPIO_PIN0_CONFIG_LSB WLAN_GPIO_PIN0_CONFIG_LSB
+#define GPIO_PIN0_CONFIG_MASK WLAN_GPIO_PIN0_CONFIG_MASK
+#define GPIO_PIN0_PAD_PULL_LSB WLAN_GPIO_PIN0_PAD_PULL_LSB
+#define GPIO_PIN0_PAD_PULL_MASK WLAN_GPIO_PIN0_PAD_PULL_MASK
+#define GPIO_PIN1_CONFIG_MASK WLAN_GPIO_PIN1_CONFIG_MASK
+#define SI_BASE_ADDRESS WLAN_SI_BASE_ADDRESS
+#define SCRATCH_BASE_ADDRESS SOC_CORE_BASE_ADDRESS
+#define LOCAL_SCRATCH_OFFSET 0x18
+#define CPU_CLOCK_OFFSET SOC_CPU_CLOCK_OFFSET
+#define LPO_CAL_OFFSET SOC_LPO_CAL_OFFSET
+#define GPIO_PIN10_OFFSET WLAN_GPIO_PIN10_ADDRESS
+#define GPIO_PIN11_OFFSET WLAN_GPIO_PIN11_ADDRESS
+#define GPIO_PIN12_OFFSET WLAN_GPIO_PIN12_ADDRESS
+#define GPIO_PIN13_OFFSET WLAN_GPIO_PIN13_ADDRESS
+#define CPU_CLOCK_STANDARD_LSB SOC_CPU_CLOCK_STANDARD_LSB
+#define CPU_CLOCK_STANDARD_MASK SOC_CPU_CLOCK_STANDARD_MASK
+#define LPO_CAL_ENABLE_LSB SOC_LPO_CAL_ENABLE_LSB
+#define LPO_CAL_ENABLE_MASK SOC_LPO_CAL_ENABLE_MASK
+#define ANALOG_INTF_BASE_ADDRESS WLAN_ANALOG_INTF_BASE_ADDRESS
+#define MBOX_BASE_ADDRESS MISSING
+#define INT_STATUS_ENABLE_ERROR_LSB MISSING
+#define INT_STATUS_ENABLE_ERROR_MASK MISSING
+#define INT_STATUS_ENABLE_CPU_LSB MISSING
+#define INT_STATUS_ENABLE_CPU_MASK MISSING
+#define INT_STATUS_ENABLE_COUNTER_LSB MISSING
+#define INT_STATUS_ENABLE_COUNTER_MASK MISSING
+#define INT_STATUS_ENABLE_MBOX_DATA_LSB MISSING
+#define INT_STATUS_ENABLE_MBOX_DATA_MASK MISSING
+#define ERROR_STATUS_ENABLE_RX_UNDERFLOW_LSB MISSING
+#define ERROR_STATUS_ENABLE_RX_UNDERFLOW_MASK MISSING
+#define ERROR_STATUS_ENABLE_TX_OVERFLOW_LSB MISSING
+#define ERROR_STATUS_ENABLE_TX_OVERFLOW_MASK MISSING
+#define COUNTER_INT_STATUS_ENABLE_BIT_LSB MISSING
+#define COUNTER_INT_STATUS_ENABLE_BIT_MASK MISSING
+#define INT_STATUS_ENABLE_ADDRESS MISSING
+#define CPU_INT_STATUS_ENABLE_BIT_LSB MISSING
+#define CPU_INT_STATUS_ENABLE_BIT_MASK MISSING
+#define HOST_INT_STATUS_ADDRESS MISSING
+#define CPU_INT_STATUS_ADDRESS MISSING
+#define ERROR_INT_STATUS_ADDRESS MISSING
+#define ERROR_INT_STATUS_WAKEUP_MASK MISSING
+#define ERROR_INT_STATUS_WAKEUP_LSB MISSING
+#define ERROR_INT_STATUS_RX_UNDERFLOW_MASK MISSING
+#define ERROR_INT_STATUS_RX_UNDERFLOW_LSB MISSING
+#define ERROR_INT_STATUS_TX_OVERFLOW_MASK MISSING
+#define ERROR_INT_STATUS_TX_OVERFLOW_LSB MISSING
+#define COUNT_DEC_ADDRESS MISSING
+#define HOST_INT_STATUS_CPU_MASK MISSING
+#define HOST_INT_STATUS_CPU_LSB MISSING
+#define HOST_INT_STATUS_ERROR_MASK MISSING
+#define HOST_INT_STATUS_ERROR_LSB MISSING
+#define HOST_INT_STATUS_COUNTER_MASK MISSING
+#define HOST_INT_STATUS_COUNTER_LSB MISSING
+#define RX_LOOKAHEAD_VALID_ADDRESS MISSING
+#define WINDOW_DATA_ADDRESS MISSING
+#define WINDOW_READ_ADDR_ADDRESS MISSING
+#define WINDOW_WRITE_ADDR_ADDRESS MISSING
+
+#define QCA9887_1_0_I2C_SDA_GPIO_PIN 5
+#define QCA9887_1_0_I2C_SDA_PIN_CONFIG 3
+#define QCA9887_1_0_SI_CLK_GPIO_PIN 17
+#define QCA9887_1_0_SI_CLK_PIN_CONFIG 3
+#define QCA9887_1_0_GPIO_ENABLE_W1TS_LOW_ADDRESS 0x00000010
+
+#define QCA9887_EEPROM_SELECT_READ 0xa10000a0
+#define QCA9887_EEPROM_ADDR_HI_MASK 0x0000ff00
+#define QCA9887_EEPROM_ADDR_HI_LSB 8
+#define QCA9887_EEPROM_ADDR_LO_MASK 0x00ff0000
+#define QCA9887_EEPROM_ADDR_LO_LSB 16
+
+#define MBOX_RESET_CONTROL_ADDRESS 0x00000000
+#define MBOX_HOST_INT_STATUS_ADDRESS 0x00000800
+#define MBOX_HOST_INT_STATUS_ERROR_LSB 7
+#define MBOX_HOST_INT_STATUS_ERROR_MASK 0x00000080
+#define MBOX_HOST_INT_STATUS_CPU_LSB 6
+#define MBOX_HOST_INT_STATUS_CPU_MASK 0x00000040
+#define MBOX_HOST_INT_STATUS_COUNTER_LSB 4
+#define MBOX_HOST_INT_STATUS_COUNTER_MASK 0x00000010
+#define MBOX_CPU_INT_STATUS_ADDRESS 0x00000801
+#define MBOX_ERROR_INT_STATUS_ADDRESS 0x00000802
+#define MBOX_ERROR_INT_STATUS_WAKEUP_LSB 2
+#define MBOX_ERROR_INT_STATUS_WAKEUP_MASK 0x00000004
+#define MBOX_ERROR_INT_STATUS_RX_UNDERFLOW_LSB 1
+#define MBOX_ERROR_INT_STATUS_RX_UNDERFLOW_MASK 0x00000002
+#define MBOX_ERROR_INT_STATUS_TX_OVERFLOW_LSB 0
+#define MBOX_ERROR_INT_STATUS_TX_OVERFLOW_MASK 0x00000001
+#define MBOX_COUNTER_INT_STATUS_ADDRESS 0x00000803
+#define MBOX_COUNTER_INT_STATUS_COUNTER_LSB 0
+#define MBOX_COUNTER_INT_STATUS_COUNTER_MASK 0x000000ff
+#define MBOX_RX_LOOKAHEAD_VALID_ADDRESS 0x00000805
+#define MBOX_INT_STATUS_ENABLE_ADDRESS 0x00000828
+#define MBOX_INT_STATUS_ENABLE_ERROR_LSB 7
+#define MBOX_INT_STATUS_ENABLE_ERROR_MASK 0x00000080
+#define MBOX_INT_STATUS_ENABLE_CPU_LSB 6
+#define MBOX_INT_STATUS_ENABLE_CPU_MASK 0x00000040
+#define MBOX_INT_STATUS_ENABLE_INT_LSB 5
+#define MBOX_INT_STATUS_ENABLE_INT_MASK 0x00000020
+#define MBOX_INT_STATUS_ENABLE_COUNTER_LSB 4
+#define MBOX_INT_STATUS_ENABLE_COUNTER_MASK 0x00000010
+#define MBOX_INT_STATUS_ENABLE_MBOX_DATA_LSB 0
+#define MBOX_INT_STATUS_ENABLE_MBOX_DATA_MASK 0x0000000f
+#define MBOX_CPU_INT_STATUS_ENABLE_ADDRESS 0x00000819
+#define MBOX_CPU_INT_STATUS_ENABLE_BIT_LSB 0
+#define MBOX_CPU_INT_STATUS_ENABLE_BIT_MASK 0x000000ff
+#define MBOX_ERROR_STATUS_ENABLE_ADDRESS 0x0000081a
+#define MBOX_ERROR_STATUS_ENABLE_RX_UNDERFLOW_LSB 1
+#define MBOX_ERROR_STATUS_ENABLE_RX_UNDERFLOW_MASK 0x00000002
+#define MBOX_ERROR_STATUS_ENABLE_TX_OVERFLOW_LSB 0
+#define MBOX_ERROR_STATUS_ENABLE_TX_OVERFLOW_MASK 0x00000001
+#define MBOX_COUNTER_INT_STATUS_ENABLE_ADDRESS 0x0000081b
+#define MBOX_COUNTER_INT_STATUS_ENABLE_BIT_LSB 0
+#define MBOX_COUNTER_INT_STATUS_ENABLE_BIT_MASK 0x000000ff
+#define MBOX_COUNT_ADDRESS 0x00000820
+#define MBOX_COUNT_DEC_ADDRESS 0x00000840
+#define MBOX_WINDOW_DATA_ADDRESS 0x00000874
+#define MBOX_WINDOW_WRITE_ADDR_ADDRESS 0x00000878
+#define MBOX_WINDOW_READ_ADDR_ADDRESS 0x0000087c
+#define MBOX_CPU_DBG_SEL_ADDRESS 0x00000883
+#define MBOX_CPU_DBG_ADDRESS 0x00000884
+#define MBOX_RTC_BASE_ADDRESS 0x00000000
+#define MBOX_GPIO_BASE_ADDRESS 0x00005000
+#define MBOX_MBOX_BASE_ADDRESS 0x00008000
+
+#define RTC_STATE_V_GET(x) (((x) & RTC_STATE_V_MASK) >> RTC_STATE_V_LSB)
+
+/* Register definitions for first generation ath10k cards. These cards include
+ * a mac which has a register allocation similar to ath9k, and at least some
+ * registers, including the ones relevant for modifying the coverage class,
+ * are identical to the ath9k definitions.
+ * These registers are usually managed by the ath10k firmware. However, by
+ * overriding them it is possible to support coverage class modifications.
+ */
+#define WAVE1_PCU_ACK_CTS_TIMEOUT 0x8014
+#define WAVE1_PCU_ACK_CTS_TIMEOUT_MAX 0x00003FFF
+#define WAVE1_PCU_ACK_CTS_TIMEOUT_ACK_MASK 0x00003FFF
+#define WAVE1_PCU_ACK_CTS_TIMEOUT_ACK_LSB 0
+#define WAVE1_PCU_ACK_CTS_TIMEOUT_CTS_MASK 0x3FFF0000
+#define WAVE1_PCU_ACK_CTS_TIMEOUT_CTS_LSB 16
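+
+/* Illustrative sketch (not part of the driver): a read-modify-write of
+ * the ACK timeout field using the masks above; the in-tree coverage
+ * class handling in hw.c performs an equivalent update:
+ *
+ *	timeout_reg &= ~WAVE1_PCU_ACK_CTS_TIMEOUT_ACK_MASK;
+ *	timeout_reg |= (ack_val << WAVE1_PCU_ACK_CTS_TIMEOUT_ACK_LSB) &
+ *		       WAVE1_PCU_ACK_CTS_TIMEOUT_ACK_MASK;
+ */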
+
+#define WAVE1_PCU_GBL_IFS_SLOT 0x1070
+#define WAVE1_PCU_GBL_IFS_SLOT_MASK 0x0000FFFF
+#define WAVE1_PCU_GBL_IFS_SLOT_MAX 0x0000FFFF
+#define WAVE1_PCU_GBL_IFS_SLOT_LSB 0
+#define WAVE1_PCU_GBL_IFS_SLOT_RESV0 0xFFFF0000
+
+#define WAVE1_PHYCLK 0x801C
+#define WAVE1_PHYCLK_USEC_MASK 0x0000007F
+#define WAVE1_PHYCLK_USEC_LSB 0
+
+/* qca6174 PLL offset/mask */
+#define SOC_CORE_CLK_CTRL_OFFSET 0x00000114
+#define SOC_CORE_CLK_CTRL_DIV_LSB 0
+#define SOC_CORE_CLK_CTRL_DIV_MASK 0x00000007
+
+#define EFUSE_OFFSET 0x0000032c
+#define EFUSE_XTAL_SEL_LSB 8
+#define EFUSE_XTAL_SEL_MASK 0x00000700
+
+#define BB_PLL_CONFIG_OFFSET 0x000002f4
+#define BB_PLL_CONFIG_FRAC_LSB 0
+#define BB_PLL_CONFIG_FRAC_MASK 0x0003ffff
+#define BB_PLL_CONFIG_OUTDIV_LSB 18
+#define BB_PLL_CONFIG_OUTDIV_MASK 0x001c0000
+
+#define WLAN_PLL_SETTLE_OFFSET 0x0018
+#define WLAN_PLL_SETTLE_TIME_LSB 0
+#define WLAN_PLL_SETTLE_TIME_MASK 0x000007ff
+
+#define WLAN_PLL_CONTROL_OFFSET 0x0014
+#define WLAN_PLL_CONTROL_DIV_LSB 0
+#define WLAN_PLL_CONTROL_DIV_MASK 0x000003ff
+#define WLAN_PLL_CONTROL_REFDIV_LSB 10
+#define WLAN_PLL_CONTROL_REFDIV_MASK 0x00003c00
+#define WLAN_PLL_CONTROL_BYPASS_LSB 16
+#define WLAN_PLL_CONTROL_BYPASS_MASK 0x00010000
+#define WLAN_PLL_CONTROL_NOPWD_LSB 18
+#define WLAN_PLL_CONTROL_NOPWD_MASK 0x00040000
+
+#define RTC_SYNC_STATUS_OFFSET 0x0244
+#define RTC_SYNC_STATUS_PLL_CHANGING_LSB 5
+#define RTC_SYNC_STATUS_PLL_CHANGING_MASK 0x00000020
+/* qca6174 PLL offset/mask end */
+
+#endif /* _HW_H_ */
diff --git a/drivers/net/wireless/ath/ath10k/mac.c b/drivers/net/wireless/ath/ath10k/mac.c
new file mode 100644
index 000000000..6e4096fd6
--- /dev/null
+++ b/drivers/net/wireless/ath/ath10k/mac.c
@@ -0,0 +1,8622 @@
+/*
+ * Copyright (c) 2005-2011 Atheros Communications Inc.
+ * Copyright (c) 2011-2017 Qualcomm Atheros, Inc.
+ * Copyright (c) 2018-2019, The Linux Foundation. All rights reserved.
+ *
+ * Permission to use, copy, modify, and/or distribute this software for any
+ * purpose with or without fee is hereby granted, provided that the above
+ * copyright notice and this permission notice appear in all copies.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
+ * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
+ * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+ * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+ * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
+ * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+ */
+
+#include "mac.h"
+
+#include <net/cfg80211.h>
+#include <net/mac80211.h>
+#include <linux/etherdevice.h>
+#include <linux/acpi.h>
+
+#include "hif.h"
+#include "core.h"
+#include "debug.h"
+#include "wmi.h"
+#include "htt.h"
+#include "txrx.h"
+#include "testmode.h"
+#include "wmi.h"
+#include "wmi-tlv.h"
+#include "wmi-ops.h"
+#include "wow.h"
+
+/*********/
+/* Rates */
+/*********/
+
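+/* Note: mac80211 expresses struct ieee80211_rate bitrates in units of
+ * 100 kbit/s, so e.g. .bitrate = 55 below means 5.5 Mbit/s CCK.
+ */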
+static struct ieee80211_rate ath10k_rates[] = {
+ { .bitrate = 10,
+ .hw_value = ATH10K_HW_RATE_CCK_LP_1M },
+ { .bitrate = 20,
+ .hw_value = ATH10K_HW_RATE_CCK_LP_2M,
+ .hw_value_short = ATH10K_HW_RATE_CCK_SP_2M,
+ .flags = IEEE80211_RATE_SHORT_PREAMBLE },
+ { .bitrate = 55,
+ .hw_value = ATH10K_HW_RATE_CCK_LP_5_5M,
+ .hw_value_short = ATH10K_HW_RATE_CCK_SP_5_5M,
+ .flags = IEEE80211_RATE_SHORT_PREAMBLE },
+ { .bitrate = 110,
+ .hw_value = ATH10K_HW_RATE_CCK_LP_11M,
+ .hw_value_short = ATH10K_HW_RATE_CCK_SP_11M,
+ .flags = IEEE80211_RATE_SHORT_PREAMBLE },
+
+ { .bitrate = 60, .hw_value = ATH10K_HW_RATE_OFDM_6M },
+ { .bitrate = 90, .hw_value = ATH10K_HW_RATE_OFDM_9M },
+ { .bitrate = 120, .hw_value = ATH10K_HW_RATE_OFDM_12M },
+ { .bitrate = 180, .hw_value = ATH10K_HW_RATE_OFDM_18M },
+ { .bitrate = 240, .hw_value = ATH10K_HW_RATE_OFDM_24M },
+ { .bitrate = 360, .hw_value = ATH10K_HW_RATE_OFDM_36M },
+ { .bitrate = 480, .hw_value = ATH10K_HW_RATE_OFDM_48M },
+ { .bitrate = 540, .hw_value = ATH10K_HW_RATE_OFDM_54M },
+};
+
+static struct ieee80211_rate ath10k_rates_rev2[] = {
+ { .bitrate = 10,
+ .hw_value = ATH10K_HW_RATE_REV2_CCK_LP_1M },
+ { .bitrate = 20,
+ .hw_value = ATH10K_HW_RATE_REV2_CCK_LP_2M,
+ .hw_value_short = ATH10K_HW_RATE_REV2_CCK_SP_2M,
+ .flags = IEEE80211_RATE_SHORT_PREAMBLE },
+ { .bitrate = 55,
+ .hw_value = ATH10K_HW_RATE_REV2_CCK_LP_5_5M,
+ .hw_value_short = ATH10K_HW_RATE_REV2_CCK_SP_5_5M,
+ .flags = IEEE80211_RATE_SHORT_PREAMBLE },
+ { .bitrate = 110,
+ .hw_value = ATH10K_HW_RATE_REV2_CCK_LP_11M,
+ .hw_value_short = ATH10K_HW_RATE_REV2_CCK_SP_11M,
+ .flags = IEEE80211_RATE_SHORT_PREAMBLE },
+
+ { .bitrate = 60, .hw_value = ATH10K_HW_RATE_OFDM_6M },
+ { .bitrate = 90, .hw_value = ATH10K_HW_RATE_OFDM_9M },
+ { .bitrate = 120, .hw_value = ATH10K_HW_RATE_OFDM_12M },
+ { .bitrate = 180, .hw_value = ATH10K_HW_RATE_OFDM_18M },
+ { .bitrate = 240, .hw_value = ATH10K_HW_RATE_OFDM_24M },
+ { .bitrate = 360, .hw_value = ATH10K_HW_RATE_OFDM_36M },
+ { .bitrate = 480, .hw_value = ATH10K_HW_RATE_OFDM_48M },
+ { .bitrate = 540, .hw_value = ATH10K_HW_RATE_OFDM_54M },
+};
+
+#define ATH10K_MAC_FIRST_OFDM_RATE_IDX 4
+
+#define ath10k_a_rates (ath10k_rates + ATH10K_MAC_FIRST_OFDM_RATE_IDX)
+#define ath10k_a_rates_size (ARRAY_SIZE(ath10k_rates) - \
+ ATH10K_MAC_FIRST_OFDM_RATE_IDX)
+#define ath10k_g_rates (ath10k_rates + 0)
+#define ath10k_g_rates_size (ARRAY_SIZE(ath10k_rates))
+
+#define ath10k_g_rates_rev2 (ath10k_rates_rev2 + 0)
+#define ath10k_g_rates_rev2_size (ARRAY_SIZE(ath10k_rates_rev2))
+
+#define ath10k_wmi_legacy_rates ath10k_rates
+
+static bool ath10k_mac_bitrate_is_cck(int bitrate)
+{
+ switch (bitrate) {
+ case 10:
+ case 20:
+ case 55:
+ case 110:
+ return true;
+ }
+
+ return false;
+}
+
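+/* Convert a mac80211 bitrate (100 kbit/s units) to the firmware rate
+ * code: the rate in 500 kbit/s units, with BIT(7) flagging CCK rates.
+ */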
+static u8 ath10k_mac_bitrate_to_rate(int bitrate)
+{
+ return DIV_ROUND_UP(bitrate, 5) |
+ (ath10k_mac_bitrate_is_cck(bitrate) ? BIT(7) : 0);
+}
+
+u8 ath10k_mac_hw_rate_to_idx(const struct ieee80211_supported_band *sband,
+ u8 hw_rate, bool cck)
+{
+ const struct ieee80211_rate *rate;
+ int i;
+
+ for (i = 0; i < sband->n_bitrates; i++) {
+ rate = &sband->bitrates[i];
+
+ if (ath10k_mac_bitrate_is_cck(rate->bitrate) != cck)
+ continue;
+
+ if (rate->hw_value == hw_rate)
+ return i;
+ else if (rate->flags & IEEE80211_RATE_SHORT_PREAMBLE &&
+ rate->hw_value_short == hw_rate)
+ return i;
+ }
+
+ return 0;
+}
+
+u8 ath10k_mac_bitrate_to_idx(const struct ieee80211_supported_band *sband,
+ u32 bitrate)
+{
+ int i;
+
+ for (i = 0; i < sband->n_bitrates; i++)
+ if (sband->bitrates[i].bitrate == bitrate)
+ return i;
+
+ return 0;
+}
+
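+/* The 802.11ac VHT MCS map packs two bits per spatial stream:
+ * 0 = MCS 0-7, 1 = MCS 0-8, 2 = MCS 0-9, 3 = stream not supported.
+ * Return a bitmask of the MCS indices usable at the given NSS.
+ */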
+static int ath10k_mac_get_max_vht_mcs_map(u16 mcs_map, int nss)
+{
+ switch ((mcs_map >> (2 * nss)) & 0x3) {
+ case IEEE80211_VHT_MCS_SUPPORT_0_7: return BIT(8) - 1;
+ case IEEE80211_VHT_MCS_SUPPORT_0_8: return BIT(9) - 1;
+ case IEEE80211_VHT_MCS_SUPPORT_0_9: return BIT(10) - 1;
+ }
+ return 0;
+}
+
+static u32
+ath10k_mac_max_ht_nss(const u8 ht_mcs_mask[IEEE80211_HT_MCS_MASK_LEN])
+{
+ int nss;
+
+ for (nss = IEEE80211_HT_MCS_MASK_LEN - 1; nss >= 0; nss--)
+ if (ht_mcs_mask[nss])
+ return nss + 1;
+
+ return 1;
+}
+
+static u32
+ath10k_mac_max_vht_nss(const u16 vht_mcs_mask[NL80211_VHT_NSS_MAX])
+{
+ int nss;
+
+ for (nss = NL80211_VHT_NSS_MAX - 1; nss >= 0; nss--)
+ if (vht_mcs_mask[nss])
+ return nss + 1;
+
+ return 1;
+}
+
+int ath10k_mac_ext_resource_config(struct ath10k *ar, u32 val)
+{
+ enum wmi_host_platform_type platform_type;
+ int ret;
+
+ if (test_bit(WMI_SERVICE_TX_MODE_DYNAMIC, ar->wmi.svc_map))
+ platform_type = WMI_HOST_PLATFORM_LOW_PERF;
+ else
+ platform_type = WMI_HOST_PLATFORM_HIGH_PERF;
+
+ ret = ath10k_wmi_ext_resource_config(ar, platform_type, val);
+
+ if (ret && ret != -EOPNOTSUPP) {
+ ath10k_warn(ar, "failed to configure ext resource: %d\n", ret);
+ return ret;
+ }
+
+ return 0;
+}
+
+/**********/
+/* Crypto */
+/**********/
+
+static int ath10k_send_key(struct ath10k_vif *arvif,
+ struct ieee80211_key_conf *key,
+ enum set_key_cmd cmd,
+ const u8 *macaddr, u32 flags)
+{
+ struct ath10k *ar = arvif->ar;
+ struct wmi_vdev_install_key_arg arg = {
+ .vdev_id = arvif->vdev_id,
+ .key_idx = key->keyidx,
+ .key_len = key->keylen,
+ .key_data = key->key,
+ .key_flags = flags,
+ .macaddr = macaddr,
+ };
+
+ lockdep_assert_held(&arvif->ar->conf_mutex);
+
+ switch (key->cipher) {
+ case WLAN_CIPHER_SUITE_CCMP:
+ arg.key_cipher = WMI_CIPHER_AES_CCM;
+ key->flags |= IEEE80211_KEY_FLAG_GENERATE_IV_MGMT;
+ break;
+ case WLAN_CIPHER_SUITE_TKIP:
+ arg.key_cipher = WMI_CIPHER_TKIP;
+ arg.key_txmic_len = 8;
+ arg.key_rxmic_len = 8;
+ break;
+ case WLAN_CIPHER_SUITE_WEP40:
+ case WLAN_CIPHER_SUITE_WEP104:
+ arg.key_cipher = WMI_CIPHER_WEP;
+ break;
+ case WLAN_CIPHER_SUITE_CCMP_256:
+ arg.key_cipher = WMI_CIPHER_AES_CCM;
+ break;
+ case WLAN_CIPHER_SUITE_GCMP:
+ case WLAN_CIPHER_SUITE_GCMP_256:
+ arg.key_cipher = WMI_CIPHER_AES_GCM;
+ break;
+ case WLAN_CIPHER_SUITE_BIP_GMAC_128:
+ case WLAN_CIPHER_SUITE_BIP_GMAC_256:
+ case WLAN_CIPHER_SUITE_BIP_CMAC_256:
+ case WLAN_CIPHER_SUITE_AES_CMAC:
+ WARN_ON(1);
+ return -EINVAL;
+ default:
+ ath10k_warn(ar, "cipher %d is not supported\n", key->cipher);
+ return -EOPNOTSUPP;
+ }
+
+ if (test_bit(ATH10K_FLAG_RAW_MODE, &ar->dev_flags))
+ key->flags |= IEEE80211_KEY_FLAG_GENERATE_IV;
+
+ if (cmd == DISABLE_KEY) {
+ arg.key_cipher = WMI_CIPHER_NONE;
+ arg.key_data = NULL;
+ }
+
+ return ath10k_wmi_vdev_install_key(arvif->ar, &arg);
+}
+
+static int ath10k_install_key(struct ath10k_vif *arvif,
+ struct ieee80211_key_conf *key,
+ enum set_key_cmd cmd,
+ const u8 *macaddr, u32 flags)
+{
+ struct ath10k *ar = arvif->ar;
+ int ret;
+ unsigned long time_left;
+
+ lockdep_assert_held(&ar->conf_mutex);
+
+ reinit_completion(&ar->install_key_done);
+
+ if (arvif->nohwcrypt)
+ return 1;
+
+ ret = ath10k_send_key(arvif, key, cmd, macaddr, flags);
+ if (ret)
+ return ret;
+
+ time_left = wait_for_completion_timeout(&ar->install_key_done, 3 * HZ);
+ if (time_left == 0)
+ return -ETIMEDOUT;
+
+ return 0;
+}
+
+static int ath10k_install_peer_wep_keys(struct ath10k_vif *arvif,
+ const u8 *addr)
+{
+ struct ath10k *ar = arvif->ar;
+ struct ath10k_peer *peer;
+ int ret;
+ int i;
+ u32 flags;
+
+ lockdep_assert_held(&ar->conf_mutex);
+
+ if (WARN_ON(arvif->vif->type != NL80211_IFTYPE_AP &&
+ arvif->vif->type != NL80211_IFTYPE_ADHOC &&
+ arvif->vif->type != NL80211_IFTYPE_MESH_POINT))
+ return -EINVAL;
+
+ spin_lock_bh(&ar->data_lock);
+ peer = ath10k_peer_find(ar, arvif->vdev_id, addr);
+ spin_unlock_bh(&ar->data_lock);
+
+ if (!peer)
+ return -ENOENT;
+
+ for (i = 0; i < ARRAY_SIZE(arvif->wep_keys); i++) {
+ if (arvif->wep_keys[i] == NULL)
+ continue;
+
+ switch (arvif->vif->type) {
+ case NL80211_IFTYPE_AP:
+ flags = WMI_KEY_PAIRWISE;
+
+ if (arvif->def_wep_key_idx == i)
+ flags |= WMI_KEY_TX_USAGE;
+
+ ret = ath10k_install_key(arvif, arvif->wep_keys[i],
+ SET_KEY, addr, flags);
+ if (ret < 0)
+ return ret;
+ break;
+ case NL80211_IFTYPE_ADHOC:
+ ret = ath10k_install_key(arvif, arvif->wep_keys[i],
+ SET_KEY, addr,
+ WMI_KEY_PAIRWISE);
+ if (ret < 0)
+ return ret;
+
+ ret = ath10k_install_key(arvif, arvif->wep_keys[i],
+ SET_KEY, addr, WMI_KEY_GROUP);
+ if (ret < 0)
+ return ret;
+ break;
+ default:
+ WARN_ON(1);
+ return -EINVAL;
+ }
+
+ spin_lock_bh(&ar->data_lock);
+ peer->keys[i] = arvif->wep_keys[i];
+ spin_unlock_bh(&ar->data_lock);
+ }
+
+ /* In some cases (notably with static WEP IBSS with multiple keys)
+ * multicast Tx becomes broken. Both pairwise and groupwise keys are
+ * installed already. Using WMI_KEY_TX_USAGE in different combinations
+ * didn't seem to help. Using the def_keyid vdev parameter seems to be
+ * effective, so use that.
+ *
+ * FIXME: Revisit. Perhaps this can be done in a less hacky way.
+ */
+ if (arvif->vif->type != NL80211_IFTYPE_ADHOC)
+ return 0;
+
+ if (arvif->def_wep_key_idx == -1)
+ return 0;
+
+ ret = ath10k_wmi_vdev_set_param(arvif->ar,
+ arvif->vdev_id,
+ arvif->ar->wmi.vdev_param->def_keyid,
+ arvif->def_wep_key_idx);
+ if (ret) {
+ ath10k_warn(ar, "failed to re-set def wpa key idxon vdev %i: %d\n",
+ arvif->vdev_id, ret);
+ return ret;
+ }
+
+ return 0;
+}
+
+static int ath10k_clear_peer_keys(struct ath10k_vif *arvif,
+ const u8 *addr)
+{
+ struct ath10k *ar = arvif->ar;
+ struct ath10k_peer *peer;
+ int first_errno = 0;
+ int ret;
+ int i;
+ u32 flags = 0;
+
+ lockdep_assert_held(&ar->conf_mutex);
+
+ spin_lock_bh(&ar->data_lock);
+ peer = ath10k_peer_find(ar, arvif->vdev_id, addr);
+ spin_unlock_bh(&ar->data_lock);
+
+ if (!peer)
+ return -ENOENT;
+
+ for (i = 0; i < ARRAY_SIZE(peer->keys); i++) {
+ if (peer->keys[i] == NULL)
+ continue;
+
+ /* key flags are not required to delete the key */
+ ret = ath10k_install_key(arvif, peer->keys[i],
+ DISABLE_KEY, addr, flags);
+ if (ret < 0 && first_errno == 0)
+ first_errno = ret;
+
+ if (ret < 0)
+ ath10k_warn(ar, "failed to remove peer wep key %d: %d\n",
+ i, ret);
+
+ spin_lock_bh(&ar->data_lock);
+ peer->keys[i] = NULL;
+ spin_unlock_bh(&ar->data_lock);
+ }
+
+ return first_errno;
+}
+
+bool ath10k_mac_is_peer_wep_key_set(struct ath10k *ar, const u8 *addr,
+ u8 keyidx)
+{
+ struct ath10k_peer *peer;
+ int i;
+
+ lockdep_assert_held(&ar->data_lock);
+
+ /* We don't know which vdev this peer belongs to,
+ * since WMI doesn't give us that information.
+ *
+ * FIXME: multi-bss needs to be handled.
+ */
+ peer = ath10k_peer_find(ar, 0, addr);
+ if (!peer)
+ return false;
+
+ for (i = 0; i < ARRAY_SIZE(peer->keys); i++) {
+ if (peer->keys[i] && peer->keys[i]->keyidx == keyidx)
+ return true;
+ }
+
+ return false;
+}
+
+static int ath10k_clear_vdev_key(struct ath10k_vif *arvif,
+ struct ieee80211_key_conf *key)
+{
+ struct ath10k *ar = arvif->ar;
+ struct ath10k_peer *peer;
+ u8 addr[ETH_ALEN];
+ int first_errno = 0;
+ int ret;
+ int i;
+ u32 flags = 0;
+
+ lockdep_assert_held(&ar->conf_mutex);
+
+ for (;;) {
+ /* since ath10k_install_key() can sleep, we can't hold
+ * data_lock all the time, so we try to remove the keys
+ * incrementally
+ */
+ spin_lock_bh(&ar->data_lock);
+ i = 0;
+ list_for_each_entry(peer, &ar->peers, list) {
+ for (i = 0; i < ARRAY_SIZE(peer->keys); i++) {
+ if (peer->keys[i] == key) {
+ ether_addr_copy(addr, peer->addr);
+ peer->keys[i] = NULL;
+ break;
+ }
+ }
+
+ if (i < ARRAY_SIZE(peer->keys))
+ break;
+ }
+ spin_unlock_bh(&ar->data_lock);
+
+ if (i == ARRAY_SIZE(peer->keys))
+ break;
+ /* key flags are not required to delete the key */
+ ret = ath10k_install_key(arvif, key, DISABLE_KEY, addr, flags);
+ if (ret < 0 && first_errno == 0)
+ first_errno = ret;
+
+ if (ret)
+ ath10k_warn(ar, "failed to remove key for %pM: %d\n",
+ addr, ret);
+ }
+
+ return first_errno;
+}
+
+static int ath10k_mac_vif_update_wep_key(struct ath10k_vif *arvif,
+ struct ieee80211_key_conf *key)
+{
+ struct ath10k *ar = arvif->ar;
+ struct ath10k_peer *peer;
+ int ret;
+
+ lockdep_assert_held(&ar->conf_mutex);
+
+ list_for_each_entry(peer, &ar->peers, list) {
+ if (ether_addr_equal(peer->addr, arvif->vif->addr))
+ continue;
+
+ if (ether_addr_equal(peer->addr, arvif->bssid))
+ continue;
+
+ if (peer->keys[key->keyidx] == key)
+ continue;
+
+ ath10k_dbg(ar, ATH10K_DBG_MAC, "mac vif vdev %i update key %i needs update\n",
+ arvif->vdev_id, key->keyidx);
+
+ ret = ath10k_install_peer_wep_keys(arvif, peer->addr);
+ if (ret) {
+ ath10k_warn(ar, "failed to update wep keys on vdev %i for peer %pM: %d\n",
+ arvif->vdev_id, peer->addr, ret);
+ return ret;
+ }
+ }
+
+ return 0;
+}
+
+/*********************/
+/* General utilities */
+/*********************/
+
+static inline enum wmi_phy_mode
+chan_to_phymode(const struct cfg80211_chan_def *chandef)
+{
+ enum wmi_phy_mode phymode = MODE_UNKNOWN;
+
+ switch (chandef->chan->band) {
+ case NL80211_BAND_2GHZ:
+ switch (chandef->width) {
+ case NL80211_CHAN_WIDTH_20_NOHT:
+ if (chandef->chan->flags & IEEE80211_CHAN_NO_OFDM)
+ phymode = MODE_11B;
+ else
+ phymode = MODE_11G;
+ break;
+ case NL80211_CHAN_WIDTH_20:
+ phymode = MODE_11NG_HT20;
+ break;
+ case NL80211_CHAN_WIDTH_40:
+ phymode = MODE_11NG_HT40;
+ break;
+ case NL80211_CHAN_WIDTH_5:
+ case NL80211_CHAN_WIDTH_10:
+ case NL80211_CHAN_WIDTH_80:
+ case NL80211_CHAN_WIDTH_80P80:
+ case NL80211_CHAN_WIDTH_160:
+ phymode = MODE_UNKNOWN;
+ break;
+ }
+ break;
+ case NL80211_BAND_5GHZ:
+ switch (chandef->width) {
+ case NL80211_CHAN_WIDTH_20_NOHT:
+ phymode = MODE_11A;
+ break;
+ case NL80211_CHAN_WIDTH_20:
+ phymode = MODE_11NA_HT20;
+ break;
+ case NL80211_CHAN_WIDTH_40:
+ phymode = MODE_11NA_HT40;
+ break;
+ case NL80211_CHAN_WIDTH_80:
+ phymode = MODE_11AC_VHT80;
+ break;
+ case NL80211_CHAN_WIDTH_160:
+ phymode = MODE_11AC_VHT160;
+ break;
+ case NL80211_CHAN_WIDTH_80P80:
+ phymode = MODE_11AC_VHT80_80;
+ break;
+ case NL80211_CHAN_WIDTH_5:
+ case NL80211_CHAN_WIDTH_10:
+ phymode = MODE_UNKNOWN;
+ break;
+ }
+ break;
+ default:
+ break;
+ }
+
+ WARN_ON(phymode == MODE_UNKNOWN);
+ return phymode;
+}
+
+static u8 ath10k_parse_mpdudensity(u8 mpdudensity)
+{
+/*
+ * 802.11n D2.0 defined values for "Minimum MPDU Start Spacing":
+ * 0 for no restriction
+ * 1 for 1/4 us
+ * 2 for 1/2 us
+ * 3 for 1 us
+ * 4 for 2 us
+ * 5 for 4 us
+ * 6 for 8 us
+ * 7 for 16 us
+ */
+ switch (mpdudensity) {
+ case 0:
+ return 0;
+ case 1:
+ case 2:
+ case 3:
+ /* Our lower layer calculations limit our precision to
+ * 1 microsecond
+ */
+ return 1;
+ case 4:
+ return 2;
+ case 5:
+ return 4;
+ case 6:
+ return 8;
+ case 7:
+ return 16;
+ default:
+ return 0;
+ }
+}
+
+int ath10k_mac_vif_chan(struct ieee80211_vif *vif,
+ struct cfg80211_chan_def *def)
+{
+ struct ieee80211_chanctx_conf *conf;
+
+ rcu_read_lock();
+ conf = rcu_dereference(vif->chanctx_conf);
+ if (!conf) {
+ rcu_read_unlock();
+ return -ENOENT;
+ }
+
+ *def = conf->def;
+ rcu_read_unlock();
+
+ return 0;
+}
+
+static void ath10k_mac_num_chanctxs_iter(struct ieee80211_hw *hw,
+ struct ieee80211_chanctx_conf *conf,
+ void *data)
+{
+ int *num = data;
+
+ (*num)++;
+}
+
+static int ath10k_mac_num_chanctxs(struct ath10k *ar)
+{
+ int num = 0;
+
+ ieee80211_iter_chan_contexts_atomic(ar->hw,
+ ath10k_mac_num_chanctxs_iter,
+ &num);
+
+ return num;
+}
+
+static void
+ath10k_mac_get_any_chandef_iter(struct ieee80211_hw *hw,
+ struct ieee80211_chanctx_conf *conf,
+ void *data)
+{
+ struct cfg80211_chan_def **def = data;
+
+ *def = &conf->def;
+}
+
+static int ath10k_peer_create(struct ath10k *ar,
+ struct ieee80211_vif *vif,
+ struct ieee80211_sta *sta,
+ u32 vdev_id,
+ const u8 *addr,
+ enum wmi_peer_type peer_type)
+{
+ struct ath10k_vif *arvif;
+ struct ath10k_peer *peer;
+ int num_peers = 0;
+ int ret;
+
+ lockdep_assert_held(&ar->conf_mutex);
+
+ num_peers = ar->num_peers;
+
+ /* Each vdev consumes a peer entry as well */
+ list_for_each_entry(arvif, &ar->arvifs, list)
+ num_peers++;
+
+ if (num_peers >= ar->max_num_peers)
+ return -ENOBUFS;
+
+ ret = ath10k_wmi_peer_create(ar, vdev_id, addr, peer_type);
+ if (ret) {
+ ath10k_warn(ar, "failed to create wmi peer %pM on vdev %i: %i\n",
+ addr, vdev_id, ret);
+ return ret;
+ }
+
+ ret = ath10k_wait_for_peer_created(ar, vdev_id, addr);
+ if (ret) {
+ ath10k_warn(ar, "failed to wait for created wmi peer %pM on vdev %i: %i\n",
+ addr, vdev_id, ret);
+ return ret;
+ }
+
+ spin_lock_bh(&ar->data_lock);
+
+ peer = ath10k_peer_find(ar, vdev_id, addr);
+ if (!peer) {
+ spin_unlock_bh(&ar->data_lock);
+ ath10k_warn(ar, "failed to find peer %pM on vdev %i after creation\n",
+ addr, vdev_id);
+ ath10k_wmi_peer_delete(ar, vdev_id, addr);
+ return -ENOENT;
+ }
+
+ peer->vif = vif;
+ peer->sta = sta;
+
+ spin_unlock_bh(&ar->data_lock);
+
+ ar->num_peers++;
+
+ return 0;
+}
+
+static int ath10k_mac_set_kickout(struct ath10k_vif *arvif)
+{
+ struct ath10k *ar = arvif->ar;
+ u32 param;
+ int ret;
+
+ param = ar->wmi.pdev_param->sta_kickout_th;
+ ret = ath10k_wmi_pdev_set_param(ar, param,
+ ATH10K_KICKOUT_THRESHOLD);
+ if (ret) {
+ ath10k_warn(ar, "failed to set kickout threshold on vdev %i: %d\n",
+ arvif->vdev_id, ret);
+ return ret;
+ }
+
+ param = ar->wmi.vdev_param->ap_keepalive_min_idle_inactive_time_secs;
+ ret = ath10k_wmi_vdev_set_param(ar, arvif->vdev_id, param,
+ ATH10K_KEEPALIVE_MIN_IDLE);
+ if (ret) {
+ ath10k_warn(ar, "failed to set keepalive minimum idle time on vdev %i: %d\n",
+ arvif->vdev_id, ret);
+ return ret;
+ }
+
+ param = ar->wmi.vdev_param->ap_keepalive_max_idle_inactive_time_secs;
+ ret = ath10k_wmi_vdev_set_param(ar, arvif->vdev_id, param,
+ ATH10K_KEEPALIVE_MAX_IDLE);
+ if (ret) {
+ ath10k_warn(ar, "failed to set keepalive maximum idle time on vdev %i: %d\n",
+ arvif->vdev_id, ret);
+ return ret;
+ }
+
+ param = ar->wmi.vdev_param->ap_keepalive_max_unresponsive_time_secs;
+ ret = ath10k_wmi_vdev_set_param(ar, arvif->vdev_id, param,
+ ATH10K_KEEPALIVE_MAX_UNRESPONSIVE);
+ if (ret) {
+ ath10k_warn(ar, "failed to set keepalive maximum unresponsive time on vdev %i: %d\n",
+ arvif->vdev_id, ret);
+ return ret;
+ }
+
+ return 0;
+}
+
+static int ath10k_mac_set_rts(struct ath10k_vif *arvif, u32 value)
+{
+ struct ath10k *ar = arvif->ar;
+ u32 vdev_param;
+
+ vdev_param = ar->wmi.vdev_param->rts_threshold;
+ return ath10k_wmi_vdev_set_param(ar, arvif->vdev_id, vdev_param, value);
+}
+
+static int ath10k_peer_delete(struct ath10k *ar, u32 vdev_id, const u8 *addr)
+{
+ int ret;
+
+ lockdep_assert_held(&ar->conf_mutex);
+
+ ret = ath10k_wmi_peer_delete(ar, vdev_id, addr);
+ if (ret)
+ return ret;
+
+ ret = ath10k_wait_for_peer_deleted(ar, vdev_id, addr);
+ if (ret)
+ return ret;
+
+ ar->num_peers--;
+
+ return 0;
+}
+
+static void ath10k_peer_cleanup(struct ath10k *ar, u32 vdev_id)
+{
+ struct ath10k_peer *peer, *tmp;
+ int peer_id;
+ int i;
+
+ lockdep_assert_held(&ar->conf_mutex);
+
+ spin_lock_bh(&ar->data_lock);
+ list_for_each_entry_safe(peer, tmp, &ar->peers, list) {
+ if (peer->vdev_id != vdev_id)
+ continue;
+
+ ath10k_warn(ar, "removing stale peer %pM from vdev_id %d\n",
+ peer->addr, vdev_id);
+
+ for_each_set_bit(peer_id, peer->peer_ids,
+ ATH10K_MAX_NUM_PEER_IDS) {
+ ar->peer_map[peer_id] = NULL;
+ }
+
+ /* Double check that peer is properly un-referenced from
+ * the peer_map
+ */
+ for (i = 0; i < ARRAY_SIZE(ar->peer_map); i++) {
+ if (ar->peer_map[i] == peer) {
+ ath10k_warn(ar, "removing stale peer_map entry for %pM (ptr %pK idx %d)\n",
+ peer->addr, peer, i);
+ ar->peer_map[i] = NULL;
+ }
+ }
+
+ list_del(&peer->list);
+ kfree(peer);
+ ar->num_peers--;
+ }
+ spin_unlock_bh(&ar->data_lock);
+}
+
+static void ath10k_peer_cleanup_all(struct ath10k *ar)
+{
+ struct ath10k_peer *peer, *tmp;
+ int i;
+
+ lockdep_assert_held(&ar->conf_mutex);
+
+ spin_lock_bh(&ar->data_lock);
+ list_for_each_entry_safe(peer, tmp, &ar->peers, list) {
+ list_del(&peer->list);
+ kfree(peer);
+ }
+
+ for (i = 0; i < ARRAY_SIZE(ar->peer_map); i++)
+ ar->peer_map[i] = NULL;
+
+ spin_unlock_bh(&ar->data_lock);
+
+ ar->num_peers = 0;
+ ar->num_stations = 0;
+}
+
+static int ath10k_mac_tdls_peer_update(struct ath10k *ar, u32 vdev_id,
+ struct ieee80211_sta *sta,
+ enum wmi_tdls_peer_state state)
+{
+ int ret;
+ struct wmi_tdls_peer_update_cmd_arg arg = {};
+ struct wmi_tdls_peer_capab_arg cap = {};
+ struct wmi_channel_arg chan_arg = {};
+
+ lockdep_assert_held(&ar->conf_mutex);
+
+ arg.vdev_id = vdev_id;
+ arg.peer_state = state;
+ ether_addr_copy(arg.addr, sta->addr);
+
+ cap.peer_max_sp = sta->max_sp;
+ cap.peer_uapsd_queues = sta->uapsd_queues;
+
+ if (state == WMI_TDLS_PEER_STATE_CONNECTED &&
+ !sta->tdls_initiator)
+ cap.is_peer_responder = 1;
+
+ ret = ath10k_wmi_tdls_peer_update(ar, &arg, &cap, &chan_arg);
+ if (ret) {
+ ath10k_warn(ar, "failed to update tdls peer %pM on vdev %i: %i\n",
+ arg.addr, vdev_id, ret);
+ return ret;
+ }
+
+ return 0;
+}
+
+/************************/
+/* Interface management */
+/************************/
+
+void ath10k_mac_vif_beacon_free(struct ath10k_vif *arvif)
+{
+ struct ath10k *ar = arvif->ar;
+
+ lockdep_assert_held(&ar->data_lock);
+
+ if (!arvif->beacon)
+ return;
+
+ if (!arvif->beacon_buf)
+ dma_unmap_single(ar->dev, ATH10K_SKB_CB(arvif->beacon)->paddr,
+ arvif->beacon->len, DMA_TO_DEVICE);
+
+ if (WARN_ON(arvif->beacon_state != ATH10K_BEACON_SCHEDULED &&
+ arvif->beacon_state != ATH10K_BEACON_SENT))
+ return;
+
+ dev_kfree_skb_any(arvif->beacon);
+
+ arvif->beacon = NULL;
+ arvif->beacon_state = ATH10K_BEACON_SCHEDULED;
+}
+
+static void ath10k_mac_vif_beacon_cleanup(struct ath10k_vif *arvif)
+{
+ struct ath10k *ar = arvif->ar;
+
+ lockdep_assert_held(&ar->data_lock);
+
+ ath10k_mac_vif_beacon_free(arvif);
+
+ if (arvif->beacon_buf) {
+ dma_free_coherent(ar->dev, IEEE80211_MAX_FRAME_LEN,
+ arvif->beacon_buf, arvif->beacon_paddr);
+ arvif->beacon_buf = NULL;
+ }
+}
+
+static inline int ath10k_vdev_setup_sync(struct ath10k *ar)
+{
+ unsigned long time_left;
+
+ lockdep_assert_held(&ar->conf_mutex);
+
+ if (test_bit(ATH10K_FLAG_CRASH_FLUSH, &ar->dev_flags))
+ return -ESHUTDOWN;
+
+ time_left = wait_for_completion_timeout(&ar->vdev_setup_done,
+ ATH10K_VDEV_SETUP_TIMEOUT_HZ);
+ if (time_left == 0)
+ return -ETIMEDOUT;
+
+ return ar->last_wmi_vdev_start_status;
+}
+
+static int ath10k_monitor_vdev_start(struct ath10k *ar, int vdev_id)
+{
+ struct cfg80211_chan_def *chandef = NULL;
+ struct ieee80211_channel *channel = NULL;
+ struct wmi_vdev_start_request_arg arg = {};
+ int ret = 0;
+
+ lockdep_assert_held(&ar->conf_mutex);
+
+ ieee80211_iter_chan_contexts_atomic(ar->hw,
+ ath10k_mac_get_any_chandef_iter,
+ &chandef);
+ if (WARN_ON_ONCE(!chandef))
+ return -ENOENT;
+
+ channel = chandef->chan;
+
+ arg.vdev_id = vdev_id;
+ arg.channel.freq = channel->center_freq;
+ arg.channel.band_center_freq1 = chandef->center_freq1;
+ arg.channel.band_center_freq2 = chandef->center_freq2;
+
+ /* TODO: set this up dynamically; what about the case where we
+ * don't have any vifs?
+ */
+ arg.channel.mode = chan_to_phymode(chandef);
+ arg.channel.chan_radar =
+ !!(channel->flags & IEEE80211_CHAN_RADAR);
+
+ arg.channel.min_power = 0;
+ arg.channel.max_power = channel->max_power * 2;
+ arg.channel.max_reg_power = channel->max_reg_power * 2;
+ arg.channel.max_antenna_gain = channel->max_antenna_gain;
+
+ reinit_completion(&ar->vdev_setup_done);
+
+ ret = ath10k_wmi_vdev_start(ar, &arg);
+ if (ret) {
+ ath10k_warn(ar, "failed to request monitor vdev %i start: %d\n",
+ vdev_id, ret);
+ return ret;
+ }
+
+ ret = ath10k_vdev_setup_sync(ar);
+ if (ret) {
+ ath10k_warn(ar, "failed to synchronize setup for monitor vdev %i start: %d\n",
+ vdev_id, ret);
+ return ret;
+ }
+
+ ret = ath10k_wmi_vdev_up(ar, vdev_id, 0, ar->mac_addr);
+ if (ret) {
+ ath10k_warn(ar, "failed to put up monitor vdev %i: %d\n",
+ vdev_id, ret);
+ goto vdev_stop;
+ }
+
+ ar->monitor_vdev_id = vdev_id;
+
+ ath10k_dbg(ar, ATH10K_DBG_MAC, "mac monitor vdev %i started\n",
+ ar->monitor_vdev_id);
+ return 0;
+
+vdev_stop:
+ ret = ath10k_wmi_vdev_stop(ar, ar->monitor_vdev_id);
+ if (ret)
+ ath10k_warn(ar, "failed to stop monitor vdev %i after start failure: %d\n",
+ ar->monitor_vdev_id, ret);
+
+ return ret;
+}
+
+static int ath10k_monitor_vdev_stop(struct ath10k *ar)
+{
+ int ret = 0;
+
+ lockdep_assert_held(&ar->conf_mutex);
+
+ ret = ath10k_wmi_vdev_down(ar, ar->monitor_vdev_id);
+ if (ret)
+ ath10k_warn(ar, "failed to put down monitor vdev %i: %d\n",
+ ar->monitor_vdev_id, ret);
+
+ reinit_completion(&ar->vdev_setup_done);
+
+ ret = ath10k_wmi_vdev_stop(ar, ar->monitor_vdev_id);
+ if (ret)
+ ath10k_warn(ar, "failed to to request monitor vdev %i stop: %d\n",
+ ar->monitor_vdev_id, ret);
+
+ ret = ath10k_vdev_setup_sync(ar);
+ if (ret)
+ ath10k_warn(ar, "failed to synchronize monitor vdev %i stop: %d\n",
+ ar->monitor_vdev_id, ret);
+
+ ath10k_dbg(ar, ATH10K_DBG_MAC, "mac monitor vdev %i stopped\n",
+ ar->monitor_vdev_id);
+ return ret;
+}
+
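+/* Allocate the lowest free vdev id from the 64-bit free_vdev_map and create
+ * a firmware monitor vdev on it. The id is released again in
+ * ath10k_monitor_vdev_delete().
+ */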
+static int ath10k_monitor_vdev_create(struct ath10k *ar)
+{
+ int bit, ret = 0;
+
+ lockdep_assert_held(&ar->conf_mutex);
+
+ if (ar->free_vdev_map == 0) {
+ ath10k_warn(ar, "failed to find free vdev id for monitor vdev\n");
+ return -ENOMEM;
+ }
+
+ bit = __ffs64(ar->free_vdev_map);
+
+ ar->monitor_vdev_id = bit;
+
+ ret = ath10k_wmi_vdev_create(ar, ar->monitor_vdev_id,
+ WMI_VDEV_TYPE_MONITOR,
+ 0, ar->mac_addr);
+ if (ret) {
+ ath10k_warn(ar, "failed to request monitor vdev %i creation: %d\n",
+ ar->monitor_vdev_id, ret);
+ return ret;
+ }
+
+ ar->free_vdev_map &= ~(1LL << ar->monitor_vdev_id);
+ ath10k_dbg(ar, ATH10K_DBG_MAC, "mac monitor vdev %d created\n",
+ ar->monitor_vdev_id);
+
+ return 0;
+}
+
+static int ath10k_monitor_vdev_delete(struct ath10k *ar)
+{
+ int ret = 0;
+
+ lockdep_assert_held(&ar->conf_mutex);
+
+ ret = ath10k_wmi_vdev_delete(ar, ar->monitor_vdev_id);
+ if (ret) {
+ ath10k_warn(ar, "failed to request wmi monitor vdev %i removal: %d\n",
+ ar->monitor_vdev_id, ret);
+ return ret;
+ }
+
+ ar->free_vdev_map |= 1LL << ar->monitor_vdev_id;
+
+ ath10k_dbg(ar, ATH10K_DBG_MAC, "mac monitor vdev %d deleted\n",
+ ar->monitor_vdev_id);
+ return ret;
+}
+
+static int ath10k_monitor_start(struct ath10k *ar)
+{
+ int ret;
+
+ lockdep_assert_held(&ar->conf_mutex);
+
+ ret = ath10k_monitor_vdev_create(ar);
+ if (ret) {
+ ath10k_warn(ar, "failed to create monitor vdev: %d\n", ret);
+ return ret;
+ }
+
+ ret = ath10k_monitor_vdev_start(ar, ar->monitor_vdev_id);
+ if (ret) {
+ ath10k_warn(ar, "failed to start monitor vdev: %d\n", ret);
+ ath10k_monitor_vdev_delete(ar);
+ return ret;
+ }
+
+ ar->monitor_started = true;
+ ath10k_dbg(ar, ATH10K_DBG_MAC, "mac monitor started\n");
+
+ return 0;
+}
+
+static int ath10k_monitor_stop(struct ath10k *ar)
+{
+ int ret;
+
+ lockdep_assert_held(&ar->conf_mutex);
+
+ ret = ath10k_monitor_vdev_stop(ar);
+ if (ret) {
+ ath10k_warn(ar, "failed to stop monitor vdev: %d\n", ret);
+ return ret;
+ }
+
+ ret = ath10k_monitor_vdev_delete(ar);
+ if (ret) {
+ ath10k_warn(ar, "failed to delete monitor vdev: %d\n", ret);
+ return ret;
+ }
+
+ ar->monitor_started = false;
+ ath10k_dbg(ar, ATH10K_DBG_MAC, "mac monitor stopped\n");
+
+ return 0;
+}
+
+static bool ath10k_mac_monitor_vdev_is_needed(struct ath10k *ar)
+{
+ int num_ctx;
+
+ /* At least one chanctx is required to derive a channel to start the
+ * monitor vdev on.
+ */
+ num_ctx = ath10k_mac_num_chanctxs(ar);
+ if (num_ctx == 0)
+ return false;
+
+ /* If there's already an existing special monitor interface then don't
+ * bother creating another monitor vdev.
+ */
+ if (ar->monitor_arvif)
+ return false;
+
+ return ar->monitor ||
+ (!test_bit(ATH10K_FW_FEATURE_ALLOWS_MESH_BCAST,
+ ar->running_fw->fw_file.fw_features) &&
+ (ar->filter_flags & FIF_OTHER_BSS)) ||
+ test_bit(ATH10K_CAC_RUNNING, &ar->dev_flags);
+}
+
+static bool ath10k_mac_monitor_vdev_is_allowed(struct ath10k *ar)
+{
+ int num_ctx;
+
+ num_ctx = ath10k_mac_num_chanctxs(ar);
+
+ /* FIXME: Current interface combinations and cfg80211/mac80211 code
+ * shouldn't allow this but make sure to prevent handling the following
+ * case anyway since multi-channel DFS hasn't been tested at all.
+ */
+ if (test_bit(ATH10K_CAC_RUNNING, &ar->dev_flags) && num_ctx > 1)
+ return false;
+
+ return true;
+}
+
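+/* Reconcile the actual monitor vdev state with what is currently needed
+ * (an explicit monitor interface, FIF_OTHER_BSS filtering without firmware
+ * mesh bcast support, or a running CAC) and with what is allowed.
+ */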
+static int ath10k_monitor_recalc(struct ath10k *ar)
+{
+ bool needed;
+ bool allowed;
+ int ret;
+
+ lockdep_assert_held(&ar->conf_mutex);
+
+ needed = ath10k_mac_monitor_vdev_is_needed(ar);
+ allowed = ath10k_mac_monitor_vdev_is_allowed(ar);
+
+ ath10k_dbg(ar, ATH10K_DBG_MAC,
+ "mac monitor recalc started? %d needed? %d allowed? %d\n",
+ ar->monitor_started, needed, allowed);
+
+ if (WARN_ON(needed && !allowed)) {
+ if (ar->monitor_started) {
+ ath10k_dbg(ar, ATH10K_DBG_MAC, "mac monitor stopping disallowed monitor\n");
+
+ ret = ath10k_monitor_stop(ar);
+ if (ret)
+ ath10k_warn(ar, "failed to stop disallowed monitor: %d\n",
+ ret);
+ /* not serious */
+ }
+
+ return -EPERM;
+ }
+
+ if (needed == ar->monitor_started)
+ return 0;
+
+ if (needed)
+ return ath10k_monitor_start(ar);
+ else
+ return ath10k_monitor_stop(ar);
+}
+
+static bool ath10k_mac_can_set_cts_prot(struct ath10k_vif *arvif)
+{
+ struct ath10k *ar = arvif->ar;
+
+ lockdep_assert_held(&ar->conf_mutex);
+
+ if (!arvif->is_started) {
+ ath10k_dbg(ar, ATH10K_DBG_MAC, "defer cts setup, vdev is not ready yet\n");
+ return false;
+ }
+
+ return true;
+}
+
+static int ath10k_mac_set_cts_prot(struct ath10k_vif *arvif)
+{
+ struct ath10k *ar = arvif->ar;
+ u32 vdev_param;
+
+ lockdep_assert_held(&ar->conf_mutex);
+
+ vdev_param = ar->wmi.vdev_param->protection_mode;
+
+ ath10k_dbg(ar, ATH10K_DBG_MAC, "mac vdev %d cts_protection %d\n",
+ arvif->vdev_id, arvif->use_cts_prot);
+
+ return ath10k_wmi_vdev_set_param(ar, arvif->vdev_id, vdev_param,
+ arvif->use_cts_prot ? 1 : 0);
+}
+
+static int ath10k_recalc_rtscts_prot(struct ath10k_vif *arvif)
+{
+ struct ath10k *ar = arvif->ar;
+ u32 vdev_param, rts_cts = 0;
+
+ lockdep_assert_held(&ar->conf_mutex);
+
+ vdev_param = ar->wmi.vdev_param->enable_rtscts;
+
+ rts_cts |= SM(WMI_RTSCTS_ENABLED, WMI_RTSCTS_SET);
+
+ if (arvif->num_legacy_stations > 0)
+ rts_cts |= SM(WMI_RTSCTS_ACROSS_SW_RETRIES,
+ WMI_RTSCTS_PROFILE);
+ else
+ rts_cts |= SM(WMI_RTSCTS_FOR_SECOND_RATESERIES,
+ WMI_RTSCTS_PROFILE);
+
+ ath10k_dbg(ar, ATH10K_DBG_MAC, "mac vdev %d recalc rts/cts prot %d\n",
+ arvif->vdev_id, rts_cts);
+
+ return ath10k_wmi_vdev_set_param(ar, arvif->vdev_id, vdev_param,
+ rts_cts);
+}
+
+static int ath10k_start_cac(struct ath10k *ar)
+{
+ int ret;
+
+ lockdep_assert_held(&ar->conf_mutex);
+
+ set_bit(ATH10K_CAC_RUNNING, &ar->dev_flags);
+
+ ret = ath10k_monitor_recalc(ar);
+ if (ret) {
+ ath10k_warn(ar, "failed to start monitor (cac): %d\n", ret);
+ clear_bit(ATH10K_CAC_RUNNING, &ar->dev_flags);
+ return ret;
+ }
+
+ ath10k_dbg(ar, ATH10K_DBG_MAC, "mac cac start monitor vdev %d\n",
+ ar->monitor_vdev_id);
+
+ return 0;
+}
+
+static int ath10k_stop_cac(struct ath10k *ar)
+{
+ lockdep_assert_held(&ar->conf_mutex);
+
+ /* CAC is not running - do nothing */
+ if (!test_bit(ATH10K_CAC_RUNNING, &ar->dev_flags))
+ return 0;
+
+ clear_bit(ATH10K_CAC_RUNNING, &ar->dev_flags);
+ ath10k_monitor_stop(ar);
+
+ ath10k_dbg(ar, ATH10K_DBG_MAC, "mac cac finished\n");
+
+ return 0;
+}
+
+static void ath10k_mac_has_radar_iter(struct ieee80211_hw *hw,
+ struct ieee80211_chanctx_conf *conf,
+ void *data)
+{
+ bool *ret = data;
+
+ if (!*ret && conf->radar_enabled)
+ *ret = true;
+}
+
+static bool ath10k_mac_has_radar_enabled(struct ath10k *ar)
+{
+ bool has_radar = false;
+
+ ieee80211_iter_chan_contexts_atomic(ar->hw,
+ ath10k_mac_has_radar_iter,
+ &has_radar);
+
+ return has_radar;
+}
+
+static void ath10k_recalc_radar_detection(struct ath10k *ar)
+{
+ int ret;
+
+ lockdep_assert_held(&ar->conf_mutex);
+
+ ath10k_stop_cac(ar);
+
+ if (!ath10k_mac_has_radar_enabled(ar))
+ return;
+
+ if (ar->num_started_vdevs > 0)
+ return;
+
+ ret = ath10k_start_cac(ar);
+ if (ret) {
+ /*
+ * It is not possible to start CAC on the current channel, so
+ * starting radiation is not allowed. Make this channel
+ * DFS_UNAVAILABLE by indicating that radar was detected.
+ */
+ ath10k_warn(ar, "failed to start CAC: %d\n", ret);
+ ieee80211_radar_detected(ar->hw);
+ }
+}
+
+static int ath10k_vdev_stop(struct ath10k_vif *arvif)
+{
+ struct ath10k *ar = arvif->ar;
+ int ret;
+
+ lockdep_assert_held(&ar->conf_mutex);
+
+ reinit_completion(&ar->vdev_setup_done);
+
+ ret = ath10k_wmi_vdev_stop(ar, arvif->vdev_id);
+ if (ret) {
+ ath10k_warn(ar, "failed to stop WMI vdev %i: %d\n",
+ arvif->vdev_id, ret);
+ return ret;
+ }
+
+ ret = ath10k_vdev_setup_sync(ar);
+ if (ret) {
+ ath10k_warn(ar, "failed to synchronize setup for vdev %i: %d\n",
+ arvif->vdev_id, ret);
+ return ret;
+ }
+
+ WARN_ON(ar->num_started_vdevs == 0);
+
+ if (ar->num_started_vdevs != 0) {
+ ar->num_started_vdevs--;
+ ath10k_recalc_radar_detection(ar);
+ }
+
+ return ret;
+}
+
+static int ath10k_vdev_start_restart(struct ath10k_vif *arvif,
+ const struct cfg80211_chan_def *chandef,
+ bool restart)
+{
+ struct ath10k *ar = arvif->ar;
+ struct wmi_vdev_start_request_arg arg = {};
+ int ret = 0;
+
+ lockdep_assert_held(&ar->conf_mutex);
+
+ reinit_completion(&ar->vdev_setup_done);
+
+ arg.vdev_id = arvif->vdev_id;
+ arg.dtim_period = arvif->dtim_period;
+ arg.bcn_intval = arvif->beacon_interval;
+
+ arg.channel.freq = chandef->chan->center_freq;
+ arg.channel.band_center_freq1 = chandef->center_freq1;
+ arg.channel.band_center_freq2 = chandef->center_freq2;
+ arg.channel.mode = chan_to_phymode(chandef);
+
+ arg.channel.min_power = 0;
+ arg.channel.max_power = chandef->chan->max_power * 2;
+ arg.channel.max_reg_power = chandef->chan->max_reg_power * 2;
+ arg.channel.max_antenna_gain = chandef->chan->max_antenna_gain;
+
+ if (arvif->vdev_type == WMI_VDEV_TYPE_AP) {
+ arg.ssid = arvif->u.ap.ssid;
+ arg.ssid_len = arvif->u.ap.ssid_len;
+ arg.hidden_ssid = arvif->u.ap.hidden_ssid;
+
+ /* For now allow DFS for AP mode */
+ arg.channel.chan_radar =
+ !!(chandef->chan->flags & IEEE80211_CHAN_RADAR);
+ } else if (arvif->vdev_type == WMI_VDEV_TYPE_IBSS) {
+ arg.ssid = arvif->vif->bss_conf.ssid;
+ arg.ssid_len = arvif->vif->bss_conf.ssid_len;
+ }
+
+ ath10k_dbg(ar, ATH10K_DBG_MAC,
+ "mac vdev %d start center_freq %d phymode %s\n",
+ arg.vdev_id, arg.channel.freq,
+ ath10k_wmi_phymode_str(arg.channel.mode));
+
+ if (restart)
+ ret = ath10k_wmi_vdev_restart(ar, &arg);
+ else
+ ret = ath10k_wmi_vdev_start(ar, &arg);
+
+ if (ret) {
+ ath10k_warn(ar, "failed to start WMI vdev %i: %d\n",
+ arg.vdev_id, ret);
+ return ret;
+ }
+
+ ret = ath10k_vdev_setup_sync(ar);
+ if (ret) {
+ ath10k_warn(ar,
+ "failed to synchronize setup for vdev %i restart %d: %d\n",
+ arg.vdev_id, restart, ret);
+ return ret;
+ }
+
+ ar->num_started_vdevs++;
+ ath10k_recalc_radar_detection(ar);
+
+ return ret;
+}
+
+static int ath10k_vdev_start(struct ath10k_vif *arvif,
+ const struct cfg80211_chan_def *def)
+{
+ return ath10k_vdev_start_restart(arvif, def, false);
+}
+
+static int ath10k_vdev_restart(struct ath10k_vif *arvif,
+ const struct cfg80211_chan_def *def)
+{
+ return ath10k_vdev_start_restart(arvif, def, true);
+}
+
+static int ath10k_mac_setup_bcn_p2p_ie(struct ath10k_vif *arvif,
+ struct sk_buff *bcn)
+{
+ struct ath10k *ar = arvif->ar;
+ struct ieee80211_mgmt *mgmt;
+ const u8 *p2p_ie;
+ int ret;
+
+ if (arvif->vif->type != NL80211_IFTYPE_AP || !arvif->vif->p2p)
+ return 0;
+
+ mgmt = (void *)bcn->data;
+ p2p_ie = cfg80211_find_vendor_ie(WLAN_OUI_WFA, WLAN_OUI_TYPE_WFA_P2P,
+ mgmt->u.beacon.variable,
+ bcn->len - (mgmt->u.beacon.variable -
+ bcn->data));
+ if (!p2p_ie)
+ return -ENOENT;
+
+ ret = ath10k_wmi_p2p_go_bcn_ie(ar, arvif->vdev_id, p2p_ie);
+ if (ret) {
+ ath10k_warn(ar, "failed to submit p2p go bcn ie for vdev %i: %d\n",
+ arvif->vdev_id, ret);
+ return ret;
+ }
+
+ return 0;
+}
+
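+/* Strip a single vendor IE out of an skb in place by moving the remaining
+ * IEs over it and trimming the skb tail accordingly.
+ */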
+static int ath10k_mac_remove_vendor_ie(struct sk_buff *skb, unsigned int oui,
+ u8 oui_type, size_t ie_offset)
+{
+ size_t len;
+ const u8 *next;
+ const u8 *end;
+ u8 *ie;
+
+ if (WARN_ON(skb->len < ie_offset))
+ return -EINVAL;
+
+ ie = (u8 *)cfg80211_find_vendor_ie(oui, oui_type,
+ skb->data + ie_offset,
+ skb->len - ie_offset);
+ if (!ie)
+ return -ENOENT;
+
+ len = ie[1] + 2;
+ end = skb->data + skb->len;
+ next = ie + len;
+
+ if (WARN_ON(next > end))
+ return -EINVAL;
+
+ memmove(ie, next, end - next);
+ skb_trim(skb, skb->len - len);
+
+ return 0;
+}
+
+static int ath10k_mac_setup_bcn_tmpl(struct ath10k_vif *arvif)
+{
+ struct ath10k *ar = arvif->ar;
+ struct ieee80211_hw *hw = ar->hw;
+ struct ieee80211_vif *vif = arvif->vif;
+ struct ieee80211_mutable_offsets offs = {};
+ struct sk_buff *bcn;
+ int ret;
+
+ if (!test_bit(WMI_SERVICE_BEACON_OFFLOAD, ar->wmi.svc_map))
+ return 0;
+
+ if (arvif->vdev_type != WMI_VDEV_TYPE_AP &&
+ arvif->vdev_type != WMI_VDEV_TYPE_IBSS)
+ return 0;
+
+ bcn = ieee80211_beacon_get_template(hw, vif, &offs);
+ if (!bcn) {
+ ath10k_warn(ar, "failed to get beacon template from mac80211\n");
+ return -EPERM;
+ }
+
+ ret = ath10k_mac_setup_bcn_p2p_ie(arvif, bcn);
+ if (ret) {
+ ath10k_warn(ar, "failed to setup p2p go bcn ie: %d\n", ret);
+ kfree_skb(bcn);
+ return ret;
+ }
+
+ /* P2P IE is inserted by firmware automatically (as configured above)
+ * so remove it from the base beacon template to avoid duplicate P2P
+ * IEs in beacon frames.
+ */
+ ath10k_mac_remove_vendor_ie(bcn, WLAN_OUI_WFA, WLAN_OUI_TYPE_WFA_P2P,
+ offsetof(struct ieee80211_mgmt,
+ u.beacon.variable));
+
+ ret = ath10k_wmi_bcn_tmpl(ar, arvif->vdev_id, offs.tim_offset, bcn, 0,
+ 0, NULL, 0);
+ kfree_skb(bcn);
+
+ if (ret) {
+ ath10k_warn(ar, "failed to submit beacon template command: %d\n",
+ ret);
+ return ret;
+ }
+
+ return 0;
+}
+
+static int ath10k_mac_setup_prb_tmpl(struct ath10k_vif *arvif)
+{
+ struct ath10k *ar = arvif->ar;
+ struct ieee80211_hw *hw = ar->hw;
+ struct ieee80211_vif *vif = arvif->vif;
+ struct sk_buff *prb;
+ int ret;
+
+ if (!test_bit(WMI_SERVICE_BEACON_OFFLOAD, ar->wmi.svc_map))
+ return 0;
+
+ if (arvif->vdev_type != WMI_VDEV_TYPE_AP)
+ return 0;
+
+ /* For mesh, probe response and beacon share the same template */
+ if (ieee80211_vif_is_mesh(vif))
+ return 0;
+
+ prb = ieee80211_proberesp_get(hw, vif);
+ if (!prb) {
+ ath10k_warn(ar, "failed to get probe resp template from mac80211\n");
+ return -EPERM;
+ }
+
+ ret = ath10k_wmi_prb_tmpl(ar, arvif->vdev_id, prb);
+ kfree_skb(prb);
+
+ if (ret) {
+ ath10k_warn(ar, "failed to submit probe resp template command: %d\n",
+ ret);
+ return ret;
+ }
+
+ return 0;
+}
+
+static int ath10k_mac_vif_fix_hidden_ssid(struct ath10k_vif *arvif)
+{
+ struct ath10k *ar = arvif->ar;
+ struct cfg80211_chan_def def;
+ int ret;
+
+ /* When the vdev is originally started during assign_vif_chanctx() some
+ * information is missing, notably the SSID. Firmware revisions with
+ * beacon offloading require the SSID to be provided during vdev
+ * (re)start to handle hidden SSID properly.
+ *
+ * Vdev restart must be done after the vdev has been both started and
+ * upped. Otherwise some firmware revisions (at least 10.2) fail to
+ * deliver the vdev restart response event, causing timeouts during
+ * vdev syncing in ath10k.
+ *
+ * Note: The vdev down/up and template reinstallation could be skipped
+ * since only wmi-tlv firmware is known to have beacon offload and
+ * wmi-tlv doesn't seem to misbehave like 10.2 wrt vdev restart
+ * response delivery. It's probably more robust to keep it as is.
+ */
+ if (!test_bit(WMI_SERVICE_BEACON_OFFLOAD, ar->wmi.svc_map))
+ return 0;
+
+ if (WARN_ON(!arvif->is_started))
+ return -EINVAL;
+
+ if (WARN_ON(!arvif->is_up))
+ return -EINVAL;
+
+ if (WARN_ON(ath10k_mac_vif_chan(arvif->vif, &def)))
+ return -EINVAL;
+
+ ret = ath10k_wmi_vdev_down(ar, arvif->vdev_id);
+ if (ret) {
+ ath10k_warn(ar, "failed to bring down ap vdev %i: %d\n",
+ arvif->vdev_id, ret);
+ return ret;
+ }
+
+ /* Vdev down resets beacon and probe response templates. Reinstall
+ * them, otherwise firmware will crash upon vdev up.
+ */
+
+ ret = ath10k_mac_setup_bcn_tmpl(arvif);
+ if (ret) {
+ ath10k_warn(ar, "failed to update beacon template: %d\n", ret);
+ return ret;
+ }
+
+ ret = ath10k_mac_setup_prb_tmpl(arvif);
+ if (ret) {
+ ath10k_warn(ar, "failed to update presp template: %d\n", ret);
+ return ret;
+ }
+
+ ret = ath10k_vdev_restart(arvif, &def);
+ if (ret) {
+ ath10k_warn(ar, "failed to restart ap vdev %i: %d\n",
+ arvif->vdev_id, ret);
+ return ret;
+ }
+
+ ret = ath10k_wmi_vdev_up(arvif->ar, arvif->vdev_id, arvif->aid,
+ arvif->bssid);
+ if (ret) {
+ ath10k_warn(ar, "failed to bring up ap vdev %i: %d\n",
+ arvif->vdev_id, ret);
+ return ret;
+ }
+
+ return 0;
+}
+
+static void ath10k_control_beaconing(struct ath10k_vif *arvif,
+ struct ieee80211_bss_conf *info)
+{
+ struct ath10k *ar = arvif->ar;
+ int ret = 0;
+
+ lockdep_assert_held(&arvif->ar->conf_mutex);
+
+ if (!info->enable_beacon) {
+ ret = ath10k_wmi_vdev_down(ar, arvif->vdev_id);
+ if (ret)
+ ath10k_warn(ar, "failed to down vdev_id %i: %d\n",
+ arvif->vdev_id, ret);
+
+ arvif->is_up = false;
+
+ spin_lock_bh(&arvif->ar->data_lock);
+ ath10k_mac_vif_beacon_free(arvif);
+ spin_unlock_bh(&arvif->ar->data_lock);
+
+ return;
+ }
+
+ arvif->tx_seq_no = 0x1000;
+
+ arvif->aid = 0;
+ ether_addr_copy(arvif->bssid, info->bssid);
+
+ ret = ath10k_wmi_vdev_up(arvif->ar, arvif->vdev_id, arvif->aid,
+ arvif->bssid);
+ if (ret) {
+ ath10k_warn(ar, "failed to bring up vdev %d: %i\n",
+ arvif->vdev_id, ret);
+ return;
+ }
+
+ arvif->is_up = true;
+
+ ret = ath10k_mac_vif_fix_hidden_ssid(arvif);
+ if (ret) {
+ ath10k_warn(ar, "failed to fix hidden ssid for vdev %i, expect trouble: %d\n",
+ arvif->vdev_id, ret);
+ return;
+ }
+
+ ath10k_dbg(ar, ATH10K_DBG_MAC, "mac vdev %d up\n", arvif->vdev_id);
+}
+
+static void ath10k_control_ibss(struct ath10k_vif *arvif,
+ struct ieee80211_bss_conf *info,
+ const u8 self_peer[ETH_ALEN])
+{
+ struct ath10k *ar = arvif->ar;
+ u32 vdev_param;
+ int ret = 0;
+
+ lockdep_assert_held(&arvif->ar->conf_mutex);
+
+ if (!info->ibss_joined) {
+ if (is_zero_ether_addr(arvif->bssid))
+ return;
+
+ eth_zero_addr(arvif->bssid);
+
+ return;
+ }
+
+ vdev_param = arvif->ar->wmi.vdev_param->atim_window;
+ ret = ath10k_wmi_vdev_set_param(arvif->ar, arvif->vdev_id, vdev_param,
+ ATH10K_DEFAULT_ATIM);
+ if (ret)
+ ath10k_warn(ar, "failed to set IBSS ATIM for vdev %d: %d\n",
+ arvif->vdev_id, ret);
+}
+
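+/* U-APSD clients must not be woken up by the tx wake threshold, hence
+ * program WMI_STA_PS_TX_WAKE_THRESHOLD_NEVER for them and _ALWAYS
+ * otherwise.
+ */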
+static int ath10k_mac_vif_recalc_ps_wake_threshold(struct ath10k_vif *arvif)
+{
+ struct ath10k *ar = arvif->ar;
+ u32 param;
+ u32 value;
+ int ret;
+
+ lockdep_assert_held(&arvif->ar->conf_mutex);
+
+ if (arvif->u.sta.uapsd)
+ value = WMI_STA_PS_TX_WAKE_THRESHOLD_NEVER;
+ else
+ value = WMI_STA_PS_TX_WAKE_THRESHOLD_ALWAYS;
+
+ param = WMI_STA_PS_PARAM_TX_WAKE_THRESHOLD;
+ ret = ath10k_wmi_set_sta_ps_param(ar, arvif->vdev_id, param, value);
+ if (ret) {
+ ath10k_warn(ar, "failed to submit ps wake threshold %u on vdev %i: %d\n",
+ value, arvif->vdev_id, ret);
+ return ret;
+ }
+
+ return 0;
+}
+
+static int ath10k_mac_vif_recalc_ps_poll_count(struct ath10k_vif *arvif)
+{
+ struct ath10k *ar = arvif->ar;
+ u32 param;
+ u32 value;
+ int ret;
+
+ lockdep_assert_held(&arvif->ar->conf_mutex);
+
+ if (arvif->u.sta.uapsd)
+ value = WMI_STA_PS_PSPOLL_COUNT_UAPSD;
+ else
+ value = WMI_STA_PS_PSPOLL_COUNT_NO_MAX;
+
+ param = WMI_STA_PS_PARAM_PSPOLL_COUNT;
+ ret = ath10k_wmi_set_sta_ps_param(ar, arvif->vdev_id,
+ param, value);
+ if (ret) {
+ ath10k_warn(ar, "failed to submit ps poll count %u on vdev %i: %d\n",
+ value, arvif->vdev_id, ret);
+ return ret;
+ }
+
+ return 0;
+}
+
+static int ath10k_mac_num_vifs_started(struct ath10k *ar)
+{
+ struct ath10k_vif *arvif;
+ int num = 0;
+
+ lockdep_assert_held(&ar->conf_mutex);
+
+ list_for_each_entry(arvif, &ar->arvifs, list)
+ if (arvif->is_started)
+ num++;
+
+ return num;
+}
+
+static int ath10k_mac_vif_setup_ps(struct ath10k_vif *arvif)
+{
+ struct ath10k *ar = arvif->ar;
+ struct ieee80211_vif *vif = arvif->vif;
+ struct ieee80211_conf *conf = &ar->hw->conf;
+ enum wmi_sta_powersave_param param;
+ enum wmi_sta_ps_mode psmode;
+ int ret;
+ int ps_timeout;
+ bool enable_ps;
+
+ lockdep_assert_held(&arvif->ar->conf_mutex);
+
+ if (arvif->vif->type != NL80211_IFTYPE_STATION)
+ return 0;
+
+ enable_ps = arvif->ps;
+
+ if (enable_ps && ath10k_mac_num_vifs_started(ar) > 1 &&
+ !test_bit(ATH10K_FW_FEATURE_MULTI_VIF_PS_SUPPORT,
+ ar->running_fw->fw_file.fw_features)) {
+ ath10k_warn(ar, "refusing to enable ps on vdev %i: not supported by fw\n",
+ arvif->vdev_id);
+ enable_ps = false;
+ }
+
+ if (!arvif->is_started) {
+ /* mac80211 can update vif powersave state while disconnected.
+ * Firmware doesn't behave nicely and consumes more power than
+ * necessary if PS is disabled on a non-started vdev. Hence
+ * force-enable PS for non-running vdevs.
+ */
+ psmode = WMI_STA_PS_MODE_ENABLED;
+ } else if (enable_ps) {
+ psmode = WMI_STA_PS_MODE_ENABLED;
+ param = WMI_STA_PS_PARAM_INACTIVITY_TIME;
+
+ ps_timeout = conf->dynamic_ps_timeout;
+ if (ps_timeout == 0) {
+ /* Firmware doesn't like 0 */
+ ps_timeout = ieee80211_tu_to_usec(
+ vif->bss_conf.beacon_int) / 1000;
+ }
+
+ ret = ath10k_wmi_set_sta_ps_param(ar, arvif->vdev_id, param,
+ ps_timeout);
+ if (ret) {
+ ath10k_warn(ar, "failed to set inactivity time for vdev %d: %i\n",
+ arvif->vdev_id, ret);
+ return ret;
+ }
+ } else {
+ psmode = WMI_STA_PS_MODE_DISABLED;
+ }
+
+ ath10k_dbg(ar, ATH10K_DBG_MAC, "mac vdev %d psmode %s\n",
+ arvif->vdev_id, psmode ? "enable" : "disable");
+
+ ret = ath10k_wmi_set_psmode(ar, arvif->vdev_id, psmode);
+ if (ret) {
+ ath10k_warn(ar, "failed to set PS Mode %d for vdev %d: %d\n",
+ psmode, arvif->vdev_id, ret);
+ return ret;
+ }
+
+ return 0;
+}
+
+static int ath10k_mac_vif_disable_keepalive(struct ath10k_vif *arvif)
+{
+ struct ath10k *ar = arvif->ar;
+ struct wmi_sta_keepalive_arg arg = {};
+ int ret;
+
+ lockdep_assert_held(&arvif->ar->conf_mutex);
+
+ if (arvif->vdev_type != WMI_VDEV_TYPE_STA)
+ return 0;
+
+ if (!test_bit(WMI_SERVICE_STA_KEEP_ALIVE, ar->wmi.svc_map))
+ return 0;
+
+ /* Some firmware revisions have a bug and ignore the `enabled` field.
+ * Instead use the interval to disable the keepalive.
+ */
+ arg.vdev_id = arvif->vdev_id;
+ arg.enabled = 1;
+ arg.method = WMI_STA_KEEPALIVE_METHOD_NULL_FRAME;
+ arg.interval = WMI_STA_KEEPALIVE_INTERVAL_DISABLE;
+
+ ret = ath10k_wmi_sta_keepalive(ar, &arg);
+ if (ret) {
+ ath10k_warn(ar, "failed to submit keepalive on vdev %i: %d\n",
+ arvif->vdev_id, ret);
+ return ret;
+ }
+
+ return 0;
+}
+
+static void ath10k_mac_vif_ap_csa_count_down(struct ath10k_vif *arvif)
+{
+ struct ath10k *ar = arvif->ar;
+ struct ieee80211_vif *vif = arvif->vif;
+ int ret;
+
+ lockdep_assert_held(&arvif->ar->conf_mutex);
+
+ if (WARN_ON(!test_bit(WMI_SERVICE_BEACON_OFFLOAD, ar->wmi.svc_map)))
+ return;
+
+ if (arvif->vdev_type != WMI_VDEV_TYPE_AP)
+ return;
+
+ if (!vif->csa_active)
+ return;
+
+ if (!arvif->is_up)
+ return;
+
+ if (!ieee80211_csa_is_complete(vif)) {
+ ieee80211_csa_update_counter(vif);
+
+ ret = ath10k_mac_setup_bcn_tmpl(arvif);
+ if (ret)
+ ath10k_warn(ar, "failed to update bcn tmpl during csa: %d\n",
+ ret);
+
+ ret = ath10k_mac_setup_prb_tmpl(arvif);
+ if (ret)
+ ath10k_warn(ar, "failed to update prb tmpl during csa: %d\n",
+ ret);
+ } else {
+ ieee80211_csa_finish(vif);
+ }
+}
+
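+/* Deferred CSA countdown handler; the beacon counter update and template
+ * reinstallation must run under conf_mutex, so they are punted to a
+ * workqueue.
+ */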
+static void ath10k_mac_vif_ap_csa_work(struct work_struct *work)
+{
+ struct ath10k_vif *arvif = container_of(work, struct ath10k_vif,
+ ap_csa_work);
+ struct ath10k *ar = arvif->ar;
+
+ mutex_lock(&ar->conf_mutex);
+ ath10k_mac_vif_ap_csa_count_down(arvif);
+ mutex_unlock(&ar->conf_mutex);
+}
+
+static void ath10k_mac_handle_beacon_iter(void *data, u8 *mac,
+ struct ieee80211_vif *vif)
+{
+ struct sk_buff *skb = data;
+ struct ieee80211_mgmt *mgmt = (void *)skb->data;
+ struct ath10k_vif *arvif = (void *)vif->drv_priv;
+
+ if (vif->type != NL80211_IFTYPE_STATION)
+ return;
+
+ if (!ether_addr_equal(mgmt->bssid, vif->bss_conf.bssid))
+ return;
+
+ cancel_delayed_work(&arvif->connection_loss_work);
+}
+
+void ath10k_mac_handle_beacon(struct ath10k *ar, struct sk_buff *skb)
+{
+ ieee80211_iterate_active_interfaces_atomic(ar->hw,
+ IEEE80211_IFACE_ITER_NORMAL,
+ ath10k_mac_handle_beacon_iter,
+ skb);
+}
+
+static void ath10k_mac_handle_beacon_miss_iter(void *data, u8 *mac,
+ struct ieee80211_vif *vif)
+{
+ u32 *vdev_id = data;
+ struct ath10k_vif *arvif = (void *)vif->drv_priv;
+ struct ath10k *ar = arvif->ar;
+ struct ieee80211_hw *hw = ar->hw;
+
+ if (arvif->vdev_id != *vdev_id)
+ return;
+
+ if (!arvif->is_up)
+ return;
+
+ ieee80211_beacon_loss(vif);
+
+ /* Firmware doesn't report beacon loss events repeatedly. If AP probe
+ * (done by mac80211) succeeds but beacons do not resume then it
+ * doesn't make sense to continue operation. Queue connection loss work
+ * which can be cancelled when beacon is received.
+ */
+ ieee80211_queue_delayed_work(hw, &arvif->connection_loss_work,
+ ATH10K_CONNECTION_LOSS_HZ);
+}
+
+void ath10k_mac_handle_beacon_miss(struct ath10k *ar, u32 vdev_id)
+{
+ ieee80211_iterate_active_interfaces_atomic(ar->hw,
+ IEEE80211_IFACE_ITER_NORMAL,
+ ath10k_mac_handle_beacon_miss_iter,
+ &vdev_id);
+}
+
+static void ath10k_mac_vif_sta_connection_loss_work(struct work_struct *work)
+{
+ struct ath10k_vif *arvif = container_of(work, struct ath10k_vif,
+ connection_loss_work.work);
+ struct ieee80211_vif *vif = arvif->vif;
+
+ if (!arvif->is_up)
+ return;
+
+ ieee80211_connection_loss(vif);
+}
+
+/**********************/
+/* Station management */
+/**********************/
+
+static u32 ath10k_peer_assoc_h_listen_intval(struct ath10k *ar,
+ struct ieee80211_vif *vif)
+{
+ /* Some firmware revisions have unstable STA powersave when the listen
+ * interval is set too high (e.g. 5). The symptom is that firmware
+ * doesn't generate NullFunc frames properly even if buffered frames
+ * have been indicated in the Beacon TIM. Firmware would seldom wake up
+ * to pull buffered frames; often pinging the device from the AP would
+ * simply fail.
+ *
+ * As a workaround set it to 1.
+ */
+ if (vif->type == NL80211_IFTYPE_STATION)
+ return 1;
+
+ return ar->hw->conf.listen_interval;
+}
+
+static void ath10k_peer_assoc_h_basic(struct ath10k *ar,
+ struct ieee80211_vif *vif,
+ struct ieee80211_sta *sta,
+ struct wmi_peer_assoc_complete_arg *arg)
+{
+ struct ath10k_vif *arvif = (void *)vif->drv_priv;
+ u32 aid;
+
+ lockdep_assert_held(&ar->conf_mutex);
+
+ if (vif->type == NL80211_IFTYPE_STATION)
+ aid = vif->bss_conf.aid;
+ else
+ aid = sta->aid;
+
+ ether_addr_copy(arg->addr, sta->addr);
+ arg->vdev_id = arvif->vdev_id;
+ arg->peer_aid = aid;
+ arg->peer_flags |= arvif->ar->wmi.peer_flags->auth;
+ arg->peer_listen_intval = ath10k_peer_assoc_h_listen_intval(ar, vif);
+ arg->peer_num_spatial_streams = 1;
+ arg->peer_caps = vif->bss_conf.assoc_capability;
+}
+
+static void ath10k_peer_assoc_h_crypto(struct ath10k *ar,
+ struct ieee80211_vif *vif,
+ struct ieee80211_sta *sta,
+ struct wmi_peer_assoc_complete_arg *arg)
+{
+ struct ieee80211_bss_conf *info = &vif->bss_conf;
+ struct cfg80211_chan_def def;
+ struct cfg80211_bss *bss;
+ const u8 *rsnie = NULL;
+ const u8 *wpaie = NULL;
+
+ lockdep_assert_held(&ar->conf_mutex);
+
+ if (WARN_ON(ath10k_mac_vif_chan(vif, &def)))
+ return;
+
+ bss = cfg80211_get_bss(ar->hw->wiphy, def.chan, info->bssid, NULL, 0,
+ IEEE80211_BSS_TYPE_ANY, IEEE80211_PRIVACY_ANY);
+ if (bss) {
+ const struct cfg80211_bss_ies *ies;
+
+ rcu_read_lock();
+ rsnie = ieee80211_bss_get_ie(bss, WLAN_EID_RSN);
+
+ ies = rcu_dereference(bss->ies);
+
+ wpaie = cfg80211_find_vendor_ie(WLAN_OUI_MICROSOFT,
+ WLAN_OUI_TYPE_MICROSOFT_WPA,
+ ies->data,
+ ies->len);
+ rcu_read_unlock();
+ cfg80211_put_bss(ar->hw->wiphy, bss);
+ }
+
+ /* FIXME: is keying this off the RSN/WPA IE the correct approach? */
+ if (rsnie || wpaie) {
+ ath10k_dbg(ar, ATH10K_DBG_WMI, "%s: rsn ie found\n", __func__);
+ arg->peer_flags |= ar->wmi.peer_flags->need_ptk_4_way;
+ }
+
+ if (wpaie) {
+ ath10k_dbg(ar, ATH10K_DBG_WMI, "%s: wpa ie found\n", __func__);
+ arg->peer_flags |= ar->wmi.peer_flags->need_gtk_2_way;
+ }
+
+ if (sta->mfp &&
+ test_bit(ATH10K_FW_FEATURE_MFP_SUPPORT,
+ ar->running_fw->fw_file.fw_features)) {
+ arg->peer_flags |= ar->wmi.peer_flags->pmf;
+ }
+}
+
+static void ath10k_peer_assoc_h_rates(struct ath10k *ar,
+ struct ieee80211_vif *vif,
+ struct ieee80211_sta *sta,
+ struct wmi_peer_assoc_complete_arg *arg)
+{
+ struct ath10k_vif *arvif = (void *)vif->drv_priv;
+ struct wmi_rate_set_arg *rateset = &arg->peer_legacy_rates;
+ struct cfg80211_chan_def def;
+ const struct ieee80211_supported_band *sband;
+ const struct ieee80211_rate *rates;
+ enum nl80211_band band;
+ u32 ratemask;
+ u8 rate;
+ int i;
+
+ lockdep_assert_held(&ar->conf_mutex);
+
+ if (WARN_ON(ath10k_mac_vif_chan(vif, &def)))
+ return;
+
+ band = def.chan->band;
+ sband = ar->hw->wiphy->bands[band];
+ ratemask = sta->supp_rates[band];
+ ratemask &= arvif->bitrate_mask.control[band].legacy;
+ rates = sband->bitrates;
+
+ rateset->num_rates = 0;
+
+ for (i = 0; i < 32; i++, ratemask >>= 1, rates++) {
+ if (!(ratemask & 1))
+ continue;
+
+ rate = ath10k_mac_bitrate_to_rate(rates->bitrate);
+ rateset->rates[rateset->num_rates] = rate;
+ rateset->num_rates++;
+ }
+}
+
+static bool
+ath10k_peer_assoc_h_ht_masked(const u8 ht_mcs_mask[IEEE80211_HT_MCS_MASK_LEN])
+{
+ int nss;
+
+ for (nss = 0; nss < IEEE80211_HT_MCS_MASK_LEN; nss++)
+ if (ht_mcs_mask[nss])
+ return false;
+
+ return true;
+}
+
+static bool
+ath10k_peer_assoc_h_vht_masked(const u16 vht_mcs_mask[NL80211_VHT_NSS_MAX])
+{
+ int nss;
+
+ for (nss = 0; nss < NL80211_VHT_NSS_MAX; nss++)
+ if (vht_mcs_mask[nss])
+ return false;
+
+ return true;
+}
+
+static void ath10k_peer_assoc_h_ht(struct ath10k *ar,
+ struct ieee80211_vif *vif,
+ struct ieee80211_sta *sta,
+ struct wmi_peer_assoc_complete_arg *arg)
+{
+ const struct ieee80211_sta_ht_cap *ht_cap = &sta->ht_cap;
+ struct ath10k_vif *arvif = (void *)vif->drv_priv;
+ struct cfg80211_chan_def def;
+ enum nl80211_band band;
+ const u8 *ht_mcs_mask;
+ const u16 *vht_mcs_mask;
+ int i, n;
+ u8 max_nss;
+ u32 stbc;
+
+ lockdep_assert_held(&ar->conf_mutex);
+
+ if (WARN_ON(ath10k_mac_vif_chan(vif, &def)))
+ return;
+
+ if (!ht_cap->ht_supported)
+ return;
+
+ band = def.chan->band;
+ ht_mcs_mask = arvif->bitrate_mask.control[band].ht_mcs;
+ vht_mcs_mask = arvif->bitrate_mask.control[band].vht_mcs;
+
+ if (ath10k_peer_assoc_h_ht_masked(ht_mcs_mask) &&
+ ath10k_peer_assoc_h_vht_masked(vht_mcs_mask))
+ return;
+
+ arg->peer_flags |= ar->wmi.peer_flags->ht;
+ arg->peer_max_mpdu = (1 << (IEEE80211_HT_MAX_AMPDU_FACTOR +
+ ht_cap->ampdu_factor)) - 1;
+
+ arg->peer_mpdu_density =
+ ath10k_parse_mpdudensity(ht_cap->ampdu_density);
+
+ arg->peer_ht_caps = ht_cap->cap;
+ arg->peer_rate_caps |= WMI_RC_HT_FLAG;
+
+ if (ht_cap->cap & IEEE80211_HT_CAP_LDPC_CODING)
+ arg->peer_flags |= ar->wmi.peer_flags->ldbc;
+
+ if (sta->bandwidth >= IEEE80211_STA_RX_BW_40) {
+ arg->peer_flags |= ar->wmi.peer_flags->bw40;
+ arg->peer_rate_caps |= WMI_RC_CW40_FLAG;
+ }
+
+ if (arvif->bitrate_mask.control[band].gi != NL80211_TXRATE_FORCE_LGI) {
+ if (ht_cap->cap & IEEE80211_HT_CAP_SGI_20)
+ arg->peer_rate_caps |= WMI_RC_SGI_FLAG;
+
+ if (ht_cap->cap & IEEE80211_HT_CAP_SGI_40)
+ arg->peer_rate_caps |= WMI_RC_SGI_FLAG;
+ }
+
+ if (ht_cap->cap & IEEE80211_HT_CAP_TX_STBC) {
+ arg->peer_rate_caps |= WMI_RC_TX_STBC_FLAG;
+ arg->peer_flags |= ar->wmi.peer_flags->stbc;
+ }
+
+ if (ht_cap->cap & IEEE80211_HT_CAP_RX_STBC) {
+ stbc = ht_cap->cap & IEEE80211_HT_CAP_RX_STBC;
+ stbc = stbc >> IEEE80211_HT_CAP_RX_STBC_SHIFT;
+ stbc = stbc << WMI_RC_RX_STBC_FLAG_S;
+ arg->peer_rate_caps |= stbc;
+ arg->peer_flags |= ar->wmi.peer_flags->stbc;
+ }
+
+ if (ht_cap->mcs.rx_mask[1] && ht_cap->mcs.rx_mask[2])
+ arg->peer_rate_caps |= WMI_RC_TS_FLAG;
+ else if (ht_cap->mcs.rx_mask[1])
+ arg->peer_rate_caps |= WMI_RC_DS_FLAG;
+
+ for (i = 0, n = 0, max_nss = 0; i < IEEE80211_HT_MCS_MASK_LEN * 8; i++)
+ if ((ht_cap->mcs.rx_mask[i / 8] & BIT(i % 8)) &&
+ (ht_mcs_mask[i / 8] & BIT(i % 8))) {
+ max_nss = (i / 8) + 1;
+ arg->peer_ht_rates.rates[n++] = i;
+ }
+
+ /*
+ * This is a workaround for HT-enabled STAs which break the spec
+ * and have no HT capabilities RX mask (no HT RX MCS map).
+ *
+ * As per spec, in section 20.3.5 Modulation and coding scheme (MCS),
+ * MCS 0 through 7 are mandatory in 20MHz with 800 ns GI at all STAs.
+ *
+ * Firmware asserts if such a situation occurs.
+ */
+ if (n == 0) {
+ arg->peer_ht_rates.num_rates = 8;
+ for (i = 0; i < arg->peer_ht_rates.num_rates; i++)
+ arg->peer_ht_rates.rates[i] = i;
+ } else {
+ arg->peer_ht_rates.num_rates = n;
+ arg->peer_num_spatial_streams = min(sta->rx_nss, max_nss);
+ }
+
+ ath10k_dbg(ar, ATH10K_DBG_MAC, "mac ht peer %pM mcs cnt %d nss %d\n",
+ arg->addr,
+ arg->peer_ht_rates.num_rates,
+ arg->peer_num_spatial_streams);
+}
+
+static int ath10k_peer_assoc_qos_ap(struct ath10k *ar,
+ struct ath10k_vif *arvif,
+ struct ieee80211_sta *sta)
+{
+ u32 uapsd = 0;
+ u32 max_sp = 0;
+ int ret = 0;
+
+ lockdep_assert_held(&ar->conf_mutex);
+
+ if (sta->wme && sta->uapsd_queues) {
+ ath10k_dbg(ar, ATH10K_DBG_MAC, "mac uapsd_queues 0x%x max_sp %d\n",
+ sta->uapsd_queues, sta->max_sp);
+
+ if (sta->uapsd_queues & IEEE80211_WMM_IE_STA_QOSINFO_AC_VO)
+ uapsd |= WMI_AP_PS_UAPSD_AC3_DELIVERY_EN |
+ WMI_AP_PS_UAPSD_AC3_TRIGGER_EN;
+ if (sta->uapsd_queues & IEEE80211_WMM_IE_STA_QOSINFO_AC_VI)
+ uapsd |= WMI_AP_PS_UAPSD_AC2_DELIVERY_EN |
+ WMI_AP_PS_UAPSD_AC2_TRIGGER_EN;
+ if (sta->uapsd_queues & IEEE80211_WMM_IE_STA_QOSINFO_AC_BK)
+ uapsd |= WMI_AP_PS_UAPSD_AC1_DELIVERY_EN |
+ WMI_AP_PS_UAPSD_AC1_TRIGGER_EN;
+ if (sta->uapsd_queues & IEEE80211_WMM_IE_STA_QOSINFO_AC_BE)
+ uapsd |= WMI_AP_PS_UAPSD_AC0_DELIVERY_EN |
+ WMI_AP_PS_UAPSD_AC0_TRIGGER_EN;
+
+ if (sta->max_sp < MAX_WMI_AP_PS_PEER_PARAM_MAX_SP)
+ max_sp = sta->max_sp;
+
+ ret = ath10k_wmi_set_ap_ps_param(ar, arvif->vdev_id,
+ sta->addr,
+ WMI_AP_PS_PEER_PARAM_UAPSD,
+ uapsd);
+ if (ret) {
+ ath10k_warn(ar, "failed to set ap ps peer param uapsd for vdev %i: %d\n",
+ arvif->vdev_id, ret);
+ return ret;
+ }
+
+ ret = ath10k_wmi_set_ap_ps_param(ar, arvif->vdev_id,
+ sta->addr,
+ WMI_AP_PS_PEER_PARAM_MAX_SP,
+ max_sp);
+ if (ret) {
+ ath10k_warn(ar, "failed to set ap ps peer param max sp for vdev %i: %d\n",
+ arvif->vdev_id, ret);
+ return ret;
+ }
+
+ /* TODO: set this up based on the STA listen interval and
+ * beacon interval. We don't know sta->listen_interval yet -
+ * a mac80211 patch is required. Use 10 seconds for now.
+ */
+ ret = ath10k_wmi_set_ap_ps_param(ar, arvif->vdev_id, sta->addr,
+ WMI_AP_PS_PEER_PARAM_AGEOUT_TIME,
+ 10);
+ if (ret) {
+ ath10k_warn(ar, "failed to set ap ps peer param ageout time for vdev %i: %d\n",
+ arvif->vdev_id, ret);
+ return ret;
+ }
+ }
+
+ return 0;
+}
+
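+/* Clamp the advertised VHT tx MCS map against the user-configured bitrate
+ * mask: for each NSS pick the highest allowed MCS index and encode it back
+ * into the 2-bit per-stream map format.
+ */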
+static u16
+ath10k_peer_assoc_h_vht_limit(u16 tx_mcs_set,
+ const u16 vht_mcs_limit[NL80211_VHT_NSS_MAX])
+{
+ int idx_limit;
+ int nss;
+ u16 mcs_map;
+ u16 mcs;
+
+ for (nss = 0; nss < NL80211_VHT_NSS_MAX; nss++) {
+ mcs_map = ath10k_mac_get_max_vht_mcs_map(tx_mcs_set, nss) &
+ vht_mcs_limit[nss];
+
+ if (mcs_map)
+ idx_limit = fls(mcs_map) - 1;
+ else
+ idx_limit = -1;
+
+ switch (idx_limit) {
+ case 0: /* fall through */
+ case 1: /* fall through */
+ case 2: /* fall through */
+ case 3: /* fall through */
+ case 4: /* fall through */
+ case 5: /* fall through */
+ case 6: /* fall through */
+ default:
+ /* see ath10k_mac_can_set_bitrate_mask() */
+ WARN_ON(1);
+ /* fall through */
+ case -1:
+ mcs = IEEE80211_VHT_MCS_NOT_SUPPORTED;
+ break;
+ case 7:
+ mcs = IEEE80211_VHT_MCS_SUPPORT_0_7;
+ break;
+ case 8:
+ mcs = IEEE80211_VHT_MCS_SUPPORT_0_8;
+ break;
+ case 9:
+ mcs = IEEE80211_VHT_MCS_SUPPORT_0_9;
+ break;
+ }
+
+ tx_mcs_set &= ~(0x3 << (nss * 2));
+ tx_mcs_set |= mcs << (nss * 2);
+ }
+
+ return tx_mcs_set;
+}
+
+static void ath10k_peer_assoc_h_vht(struct ath10k *ar,
+ struct ieee80211_vif *vif,
+ struct ieee80211_sta *sta,
+ struct wmi_peer_assoc_complete_arg *arg)
+{
+ const struct ieee80211_sta_vht_cap *vht_cap = &sta->vht_cap;
+ struct ath10k_vif *arvif = (void *)vif->drv_priv;
+ struct cfg80211_chan_def def;
+ enum nl80211_band band;
+ const u16 *vht_mcs_mask;
+ u8 ampdu_factor;
+ u8 max_nss, vht_mcs;
+ int i;
+
+ if (WARN_ON(ath10k_mac_vif_chan(vif, &def)))
+ return;
+
+ if (!vht_cap->vht_supported)
+ return;
+
+ band = def.chan->band;
+ vht_mcs_mask = arvif->bitrate_mask.control[band].vht_mcs;
+
+ if (ath10k_peer_assoc_h_vht_masked(vht_mcs_mask))
+ return;
+
+ arg->peer_flags |= ar->wmi.peer_flags->vht;
+
+ if (def.chan->band == NL80211_BAND_2GHZ)
+ arg->peer_flags |= ar->wmi.peer_flags->vht_2g;
+
+ arg->peer_vht_caps = vht_cap->cap;
+
+ ampdu_factor = (vht_cap->cap &
+ IEEE80211_VHT_CAP_MAX_A_MPDU_LENGTH_EXPONENT_MASK) >>
+ IEEE80211_VHT_CAP_MAX_A_MPDU_LENGTH_EXPONENT_SHIFT;
+
+ /* Workaround: Some Netgear/Linksys 11ac APs set Rx A-MPDU factor to
+ * zero in VHT IE. Using it would result in degraded throughput.
+ * arg->peer_max_mpdu at this point contains HT max_mpdu so keep
+ * it if VHT max_mpdu is smaller.
+ */
+ arg->peer_max_mpdu = max(arg->peer_max_mpdu,
+ (1U << (IEEE80211_HT_MAX_AMPDU_FACTOR +
+ ampdu_factor)) - 1);
+
+ if (sta->bandwidth == IEEE80211_STA_RX_BW_80)
+ arg->peer_flags |= ar->wmi.peer_flags->bw80;
+
+ if (sta->bandwidth == IEEE80211_STA_RX_BW_160)
+ arg->peer_flags |= ar->wmi.peer_flags->bw160;
+
+ /* Calculate peer NSS capability from VHT capabilities if STA
+ * supports VHT.
+ */
+ for (i = 0, max_nss = 0, vht_mcs = 0; i < NL80211_VHT_NSS_MAX; i++) {
+ vht_mcs = __le16_to_cpu(vht_cap->vht_mcs.rx_mcs_map) >>
+ (2 * i) & 3;
+
+ if ((vht_mcs != IEEE80211_VHT_MCS_NOT_SUPPORTED) &&
+ vht_mcs_mask[i])
+ max_nss = i + 1;
+ }
+ arg->peer_num_spatial_streams = min(sta->rx_nss, max_nss);
+ arg->peer_vht_rates.rx_max_rate =
+ __le16_to_cpu(vht_cap->vht_mcs.rx_highest);
+ arg->peer_vht_rates.rx_mcs_set =
+ __le16_to_cpu(vht_cap->vht_mcs.rx_mcs_map);
+ arg->peer_vht_rates.tx_max_rate =
+ __le16_to_cpu(vht_cap->vht_mcs.tx_highest);
+ arg->peer_vht_rates.tx_mcs_set = ath10k_peer_assoc_h_vht_limit(
+ __le16_to_cpu(vht_cap->vht_mcs.tx_mcs_map), vht_mcs_mask);
+
+ ath10k_dbg(ar, ATH10K_DBG_MAC, "mac vht peer %pM max_mpdu %d flags 0x%x\n",
+ sta->addr, arg->peer_max_mpdu, arg->peer_flags);
+
+ if (arg->peer_vht_rates.rx_max_rate &&
+ (sta->vht_cap.cap & IEEE80211_VHT_CAP_SUPP_CHAN_WIDTH_MASK)) {
+ switch (arg->peer_vht_rates.rx_max_rate) {
+ case 1560:
+ /* 2x2 at 160 MHz is all it can do. */
+ arg->peer_bw_rxnss_override = 2;
+ break;
+ case 780:
+ /* Can only do 1x1 at 160 MHz (long guard interval) */
+ arg->peer_bw_rxnss_override = 1;
+ break;
+ }
+ }
+}
+
+static void ath10k_peer_assoc_h_qos(struct ath10k *ar,
+ struct ieee80211_vif *vif,
+ struct ieee80211_sta *sta,
+ struct wmi_peer_assoc_complete_arg *arg)
+{
+ struct ath10k_vif *arvif = (void *)vif->drv_priv;
+
+ switch (arvif->vdev_type) {
+ case WMI_VDEV_TYPE_AP:
+ if (sta->wme)
+ arg->peer_flags |= arvif->ar->wmi.peer_flags->qos;
+
+ if (sta->wme && sta->uapsd_queues) {
+ arg->peer_flags |= arvif->ar->wmi.peer_flags->apsd;
+ arg->peer_rate_caps |= WMI_RC_UAPSD_FLAG;
+ }
+ break;
+ case WMI_VDEV_TYPE_STA:
+ if (sta->wme)
+ arg->peer_flags |= arvif->ar->wmi.peer_flags->qos;
+ break;
+ case WMI_VDEV_TYPE_IBSS:
+ if (sta->wme)
+ arg->peer_flags |= arvif->ar->wmi.peer_flags->qos;
+ break;
+ default:
+ break;
+ }
+
+ ath10k_dbg(ar, ATH10K_DBG_MAC, "mac peer %pM qos %d\n",
+ sta->addr, !!(arg->peer_flags &
+ arvif->ar->wmi.peer_flags->qos));
+}
+
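+/* True if the 2 GHz rate set contains anything beyond the CCK rates, i.e.
+ * at least one OFDM rate; this is what distinguishes 11g from 11b peers.
+ */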
+static bool ath10k_mac_sta_has_ofdm_only(struct ieee80211_sta *sta)
+{
+ return sta->supp_rates[NL80211_BAND_2GHZ] >>
+ ATH10K_MAC_FIRST_OFDM_RATE_IDX;
+}
+
+static enum wmi_phy_mode ath10k_mac_get_phymode_vht(struct ath10k *ar,
+ struct ieee80211_sta *sta)
+{
+ if (sta->bandwidth == IEEE80211_STA_RX_BW_160) {
+ switch (sta->vht_cap.cap & IEEE80211_VHT_CAP_SUPP_CHAN_WIDTH_MASK) {
+ case IEEE80211_VHT_CAP_SUPP_CHAN_WIDTH_160MHZ:
+ return MODE_11AC_VHT160;
+ case IEEE80211_VHT_CAP_SUPP_CHAN_WIDTH_160_80PLUS80MHZ:
+ return MODE_11AC_VHT80_80;
+ default:
+ /* not sure if this is a valid case? */
+ return MODE_11AC_VHT160;
+ }
+ }
+
+ if (sta->bandwidth == IEEE80211_STA_RX_BW_80)
+ return MODE_11AC_VHT80;
+
+ if (sta->bandwidth == IEEE80211_STA_RX_BW_40)
+ return MODE_11AC_VHT40;
+
+ if (sta->bandwidth == IEEE80211_STA_RX_BW_20)
+ return MODE_11AC_VHT20;
+
+ return MODE_UNKNOWN;
+}
+
+static void ath10k_peer_assoc_h_phymode(struct ath10k *ar,
+ struct ieee80211_vif *vif,
+ struct ieee80211_sta *sta,
+ struct wmi_peer_assoc_complete_arg *arg)
+{
+ struct ath10k_vif *arvif = (void *)vif->drv_priv;
+ struct cfg80211_chan_def def;
+ enum nl80211_band band;
+ const u8 *ht_mcs_mask;
+ const u16 *vht_mcs_mask;
+ enum wmi_phy_mode phymode = MODE_UNKNOWN;
+
+ if (WARN_ON(ath10k_mac_vif_chan(vif, &def)))
+ return;
+
+ band = def.chan->band;
+ ht_mcs_mask = arvif->bitrate_mask.control[band].ht_mcs;
+ vht_mcs_mask = arvif->bitrate_mask.control[band].vht_mcs;
+
+ switch (band) {
+ case NL80211_BAND_2GHZ:
+ if (sta->vht_cap.vht_supported &&
+ !ath10k_peer_assoc_h_vht_masked(vht_mcs_mask)) {
+ if (sta->bandwidth == IEEE80211_STA_RX_BW_40)
+ phymode = MODE_11AC_VHT40;
+ else
+ phymode = MODE_11AC_VHT20;
+ } else if (sta->ht_cap.ht_supported &&
+ !ath10k_peer_assoc_h_ht_masked(ht_mcs_mask)) {
+ if (sta->bandwidth == IEEE80211_STA_RX_BW_40)
+ phymode = MODE_11NG_HT40;
+ else
+ phymode = MODE_11NG_HT20;
+ } else if (ath10k_mac_sta_has_ofdm_only(sta)) {
+ phymode = MODE_11G;
+ } else {
+ phymode = MODE_11B;
+ }
+
+ break;
+ case NL80211_BAND_5GHZ:
+ /*
+ * Check VHT first.
+ */
+ if (sta->vht_cap.vht_supported &&
+ !ath10k_peer_assoc_h_vht_masked(vht_mcs_mask)) {
+ phymode = ath10k_mac_get_phymode_vht(ar, sta);
+ } else if (sta->ht_cap.ht_supported &&
+ !ath10k_peer_assoc_h_ht_masked(ht_mcs_mask)) {
+ if (sta->bandwidth >= IEEE80211_STA_RX_BW_40)
+ phymode = MODE_11NA_HT40;
+ else
+ phymode = MODE_11NA_HT20;
+ } else {
+ phymode = MODE_11A;
+ }
+
+ break;
+ default:
+ break;
+ }
+
+ ath10k_dbg(ar, ATH10K_DBG_MAC, "mac peer %pM phymode %s\n",
+ sta->addr, ath10k_wmi_phymode_str(phymode));
+
+ arg->peer_phymode = phymode;
+ WARN_ON(phymode == MODE_UNKNOWN);
+}
+
+static int ath10k_peer_assoc_prepare(struct ath10k *ar,
+ struct ieee80211_vif *vif,
+ struct ieee80211_sta *sta,
+ struct wmi_peer_assoc_complete_arg *arg)
+{
+ lockdep_assert_held(&ar->conf_mutex);
+
+ memset(arg, 0, sizeof(*arg));
+
+ ath10k_peer_assoc_h_basic(ar, vif, sta, arg);
+ ath10k_peer_assoc_h_crypto(ar, vif, sta, arg);
+ ath10k_peer_assoc_h_rates(ar, vif, sta, arg);
+ ath10k_peer_assoc_h_ht(ar, vif, sta, arg);
+ ath10k_peer_assoc_h_vht(ar, vif, sta, arg);
+ ath10k_peer_assoc_h_qos(ar, vif, sta, arg);
+ ath10k_peer_assoc_h_phymode(ar, vif, sta, arg);
+
+ return 0;
+}
+
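+/* Map the SM power save field from the HT capability info onto WMI peer
+ * SMPS states.
+ */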
+static const u32 ath10k_smps_map[] = {
+ [WLAN_HT_CAP_SM_PS_STATIC] = WMI_PEER_SMPS_STATIC,
+ [WLAN_HT_CAP_SM_PS_DYNAMIC] = WMI_PEER_SMPS_DYNAMIC,
+ [WLAN_HT_CAP_SM_PS_INVALID] = WMI_PEER_SMPS_PS_NONE,
+ [WLAN_HT_CAP_SM_PS_DISABLED] = WMI_PEER_SMPS_PS_NONE,
+};
+
+static int ath10k_setup_peer_smps(struct ath10k *ar, struct ath10k_vif *arvif,
+ const u8 *addr,
+ const struct ieee80211_sta_ht_cap *ht_cap)
+{
+ int smps;
+
+ if (!ht_cap->ht_supported)
+ return 0;
+
+ smps = ht_cap->cap & IEEE80211_HT_CAP_SM_PS;
+ smps >>= IEEE80211_HT_CAP_SM_PS_SHIFT;
+
+ if (smps >= ARRAY_SIZE(ath10k_smps_map))
+ return -EINVAL;
+
+ return ath10k_wmi_peer_set_param(ar, arvif->vdev_id, addr,
+ WMI_PEER_SMPS_STATE,
+ ath10k_smps_map[smps]);
+}
+
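+/* Recalculate the vdev txbf parameter from our own VHT capabilities and the
+ * (possibly updated) beamforming capabilities of the remote peer. Only
+ * relevant for firmware that configures txbf after association.
+ */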
+static int ath10k_mac_vif_recalc_txbf(struct ath10k *ar,
+ struct ieee80211_vif *vif,
+ struct ieee80211_sta_vht_cap vht_cap)
+{
+ struct ath10k_vif *arvif = (void *)vif->drv_priv;
+ int ret;
+ u32 param;
+ u32 value;
+
+ if (ath10k_wmi_get_txbf_conf_scheme(ar) != WMI_TXBF_CONF_AFTER_ASSOC)
+ return 0;
+
+ if (!(ar->vht_cap_info &
+ (IEEE80211_VHT_CAP_SU_BEAMFORMEE_CAPABLE |
+ IEEE80211_VHT_CAP_MU_BEAMFORMEE_CAPABLE |
+ IEEE80211_VHT_CAP_SU_BEAMFORMER_CAPABLE |
+ IEEE80211_VHT_CAP_MU_BEAMFORMER_CAPABLE)))
+ return 0;
+
+ param = ar->wmi.vdev_param->txbf;
+ value = 0;
+
+ if (WARN_ON(param == WMI_VDEV_PARAM_UNSUPPORTED))
+ return 0;
+
+ /* The following logic is correct: if a remote STA advertises support
+ * for being a beamformer then we should enable ourselves being a
+ * beamformee.
+ */
+
+ if (ar->vht_cap_info &
+ (IEEE80211_VHT_CAP_SU_BEAMFORMEE_CAPABLE |
+ IEEE80211_VHT_CAP_MU_BEAMFORMEE_CAPABLE)) {
+ if (vht_cap.cap & IEEE80211_VHT_CAP_SU_BEAMFORMER_CAPABLE)
+ value |= WMI_VDEV_PARAM_TXBF_SU_TX_BFEE;
+
+ if (vht_cap.cap & IEEE80211_VHT_CAP_MU_BEAMFORMER_CAPABLE)
+ value |= WMI_VDEV_PARAM_TXBF_MU_TX_BFEE;
+ }
+
+ if (ar->vht_cap_info &
+ (IEEE80211_VHT_CAP_SU_BEAMFORMER_CAPABLE |
+ IEEE80211_VHT_CAP_MU_BEAMFORMER_CAPABLE)) {
+ if (vht_cap.cap & IEEE80211_VHT_CAP_SU_BEAMFORMEE_CAPABLE)
+ value |= WMI_VDEV_PARAM_TXBF_SU_TX_BFER;
+
+ if (vht_cap.cap & IEEE80211_VHT_CAP_MU_BEAMFORMEE_CAPABLE)
+ value |= WMI_VDEV_PARAM_TXBF_MU_TX_BFER;
+ }
+
+ if (value & WMI_VDEV_PARAM_TXBF_MU_TX_BFEE)
+ value |= WMI_VDEV_PARAM_TXBF_SU_TX_BFEE;
+
+ if (value & WMI_VDEV_PARAM_TXBF_MU_TX_BFER)
+ value |= WMI_VDEV_PARAM_TXBF_SU_TX_BFER;
+
+ ret = ath10k_wmi_vdev_set_param(ar, arvif->vdev_id, param, value);
+ if (ret) {
+ ath10k_warn(ar, "failed to submit vdev param txbf 0x%x: %d\n",
+ value, ret);
+ return ret;
+ }
+
+ return 0;
+}
+
+/* can be called only in mac80211 callbacks due to `key_count` usage */
+static void ath10k_bss_assoc(struct ieee80211_hw *hw,
+ struct ieee80211_vif *vif,
+ struct ieee80211_bss_conf *bss_conf)
+{
+ struct ath10k *ar = hw->priv;
+ struct ath10k_vif *arvif = (void *)vif->drv_priv;
+ struct ieee80211_sta_ht_cap ht_cap;
+ struct ieee80211_sta_vht_cap vht_cap;
+ struct wmi_peer_assoc_complete_arg peer_arg;
+ struct ieee80211_sta *ap_sta;
+ int ret;
+
+ lockdep_assert_held(&ar->conf_mutex);
+
+ ath10k_dbg(ar, ATH10K_DBG_MAC, "mac vdev %i assoc bssid %pM aid %d\n",
+ arvif->vdev_id, arvif->bssid, arvif->aid);
+
+ rcu_read_lock();
+
+ ap_sta = ieee80211_find_sta(vif, bss_conf->bssid);
+ if (!ap_sta) {
+ ath10k_warn(ar, "failed to find station entry for bss %pM vdev %i\n",
+ bss_conf->bssid, arvif->vdev_id);
+ rcu_read_unlock();
+ return;
+ }
+
+ /* ap_sta must be accessed only within rcu section which must be left
+ * before calling ath10k_setup_peer_smps() which might sleep.
+ */
+ ht_cap = ap_sta->ht_cap;
+ vht_cap = ap_sta->vht_cap;
+
+ ret = ath10k_peer_assoc_prepare(ar, vif, ap_sta, &peer_arg);
+ if (ret) {
+ ath10k_warn(ar, "failed to prepare peer assoc for %pM vdev %i: %d\n",
+ bss_conf->bssid, arvif->vdev_id, ret);
+ rcu_read_unlock();
+ return;
+ }
+
+ rcu_read_unlock();
+
+ ret = ath10k_wmi_peer_assoc(ar, &peer_arg);
+ if (ret) {
+ ath10k_warn(ar, "failed to run peer assoc for %pM vdev %i: %d\n",
+ bss_conf->bssid, arvif->vdev_id, ret);
+ return;
+ }
+
+ ret = ath10k_setup_peer_smps(ar, arvif, bss_conf->bssid, &ht_cap);
+ if (ret) {
+ ath10k_warn(ar, "failed to setup peer SMPS for vdev %i: %d\n",
+ arvif->vdev_id, ret);
+ return;
+ }
+
+ ret = ath10k_mac_vif_recalc_txbf(ar, vif, vht_cap);
+ if (ret) {
+ ath10k_warn(ar, "failed to recalc txbf for vdev %i on bss %pM: %d\n",
+ arvif->vdev_id, bss_conf->bssid, ret);
+ return;
+ }
+
+ ath10k_dbg(ar, ATH10K_DBG_MAC,
+ "mac vdev %d up (associated) bssid %pM aid %d\n",
+ arvif->vdev_id, bss_conf->bssid, bss_conf->aid);
+
+ WARN_ON(arvif->is_up);
+
+ arvif->aid = bss_conf->aid;
+ ether_addr_copy(arvif->bssid, bss_conf->bssid);
+
+ ret = ath10k_wmi_vdev_up(ar, arvif->vdev_id, arvif->aid, arvif->bssid);
+ if (ret) {
+ ath10k_warn(ar, "failed to set vdev %d up: %d\n",
+ arvif->vdev_id, ret);
+ return;
+ }
+
+ arvif->is_up = true;
+
+ /* Workaround: Some firmware revisions (tested with qca6174
+ * WLAN.RM.2.0-00073) have buggy powersave state machine and must be
+ * poked with peer param command.
+ */
+ ret = ath10k_wmi_peer_set_param(ar, arvif->vdev_id, arvif->bssid,
+ WMI_PEER_DUMMY_VAR, 1);
+ if (ret) {
+ ath10k_warn(ar, "failed to poke peer %pM param for ps workaround on vdev %i: %d\n",
+ arvif->bssid, arvif->vdev_id, ret);
+ return;
+ }
+}
+
+static void ath10k_bss_disassoc(struct ieee80211_hw *hw,
+ struct ieee80211_vif *vif)
+{
+ struct ath10k *ar = hw->priv;
+ struct ath10k_vif *arvif = (void *)vif->drv_priv;
+ struct ieee80211_sta_vht_cap vht_cap = {};
+ int ret;
+
+ lockdep_assert_held(&ar->conf_mutex);
+
+ ath10k_dbg(ar, ATH10K_DBG_MAC, "mac vdev %i disassoc bssid %pM\n",
+ arvif->vdev_id, arvif->bssid);
+
+ ret = ath10k_wmi_vdev_down(ar, arvif->vdev_id);
+ if (ret)
+ ath10k_warn(ar, "failed to down vdev %i: %d\n",
+ arvif->vdev_id, ret);
+
+ arvif->def_wep_key_idx = -1;
+
+ ret = ath10k_mac_vif_recalc_txbf(ar, vif, vht_cap);
+ if (ret) {
+ ath10k_warn(ar, "failed to recalc txbf for vdev %i: %d\n",
+ arvif->vdev_id, ret);
+ return;
+ }
+
+ arvif->is_up = false;
+
+ cancel_delayed_work_sync(&arvif->connection_loss_work);
+}
+
+static int ath10k_station_assoc(struct ath10k *ar,
+ struct ieee80211_vif *vif,
+ struct ieee80211_sta *sta,
+ bool reassoc)
+{
+ struct ath10k_vif *arvif = (void *)vif->drv_priv;
+ struct wmi_peer_assoc_complete_arg peer_arg;
+ int ret = 0;
+
+ lockdep_assert_held(&ar->conf_mutex);
+
+ ret = ath10k_peer_assoc_prepare(ar, vif, sta, &peer_arg);
+ if (ret) {
+ ath10k_warn(ar, "failed to prepare WMI peer assoc for %pM vdev %i: %i\n",
+ sta->addr, arvif->vdev_id, ret);
+ return ret;
+ }
+
+ ret = ath10k_wmi_peer_assoc(ar, &peer_arg);
+ if (ret) {
+ ath10k_warn(ar, "failed to run peer assoc for STA %pM vdev %i: %d\n",
+ sta->addr, arvif->vdev_id, ret);
+ return ret;
+ }
+
+ /* Re-assoc is run only to update supported rates for a given station.
+ * It doesn't make much sense to reconfigure the peer completely.
+ */
+ if (!reassoc) {
+ ret = ath10k_setup_peer_smps(ar, arvif, sta->addr,
+ &sta->ht_cap);
+ if (ret) {
+ ath10k_warn(ar, "failed to setup peer SMPS for vdev %d: %d\n",
+ arvif->vdev_id, ret);
+ return ret;
+ }
+
+ ret = ath10k_peer_assoc_qos_ap(ar, arvif, sta);
+ if (ret) {
+ ath10k_warn(ar, "failed to set qos params for STA %pM for vdev %i: %d\n",
+ sta->addr, arvif->vdev_id, ret);
+ return ret;
+ }
+
+ if (!sta->wme) {
+ arvif->num_legacy_stations++;
+ ret = ath10k_recalc_rtscts_prot(arvif);
+ if (ret) {
+ ath10k_warn(ar, "failed to recalculate rts/cts prot for vdev %d: %d\n",
+ arvif->vdev_id, ret);
+ return ret;
+ }
+ }
+
+ /* Plumb cached keys only for static WEP */
+ if ((arvif->def_wep_key_idx != -1) && (!sta->tdls)) {
+ ret = ath10k_install_peer_wep_keys(arvif, sta->addr);
+ if (ret) {
+ ath10k_warn(ar, "failed to install peer wep keys for vdev %i: %d\n",
+ arvif->vdev_id, ret);
+ return ret;
+ }
+ }
+ }
+
+ return ret;
+}
+
+static int ath10k_station_disassoc(struct ath10k *ar,
+ struct ieee80211_vif *vif,
+ struct ieee80211_sta *sta)
+{
+ struct ath10k_vif *arvif = (void *)vif->drv_priv;
+ int ret = 0;
+
+ lockdep_assert_held(&ar->conf_mutex);
+
+ if (!sta->wme) {
+ arvif->num_legacy_stations--;
+ ret = ath10k_recalc_rtscts_prot(arvif);
+ if (ret) {
+ ath10k_warn(ar, "failed to recalculate rts/cts prot for vdev %d: %d\n",
+ arvif->vdev_id, ret);
+ return ret;
+ }
+ }
+
+ ret = ath10k_clear_peer_keys(arvif, sta->addr);
+ if (ret) {
+ ath10k_warn(ar, "failed to clear all peer wep keys for vdev %i: %d\n",
+ arvif->vdev_id, ret);
+ return ret;
+ }
+
+ return ret;
+}
+
+/**************/
+/* Regulatory */
+/**************/
+
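+/* Build a WMI scan channel list from all currently enabled mac80211
+ * channels. Power values are doubled since firmware expects them in
+ * 0.5 dBm units.
+ */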
+static int ath10k_update_channel_list(struct ath10k *ar)
+{
+ struct ieee80211_hw *hw = ar->hw;
+ struct ieee80211_supported_band **bands;
+ enum nl80211_band band;
+ struct ieee80211_channel *channel;
+ struct wmi_scan_chan_list_arg arg = {0};
+ struct wmi_channel_arg *ch;
+ bool passive;
+ int len;
+ int ret;
+ int i;
+
+ lockdep_assert_held(&ar->conf_mutex);
+
+ bands = hw->wiphy->bands;
+ for (band = 0; band < NUM_NL80211_BANDS; band++) {
+ if (!bands[band])
+ continue;
+
+ for (i = 0; i < bands[band]->n_channels; i++) {
+ if (bands[band]->channels[i].flags &
+ IEEE80211_CHAN_DISABLED)
+ continue;
+
+ arg.n_channels++;
+ }
+ }
+
+ len = sizeof(struct wmi_channel_arg) * arg.n_channels;
+ arg.channels = kzalloc(len, GFP_KERNEL);
+ if (!arg.channels)
+ return -ENOMEM;
+
+ ch = arg.channels;
+ for (band = 0; band < NUM_NL80211_BANDS; band++) {
+ if (!bands[band])
+ continue;
+
+ for (i = 0; i < bands[band]->n_channels; i++) {
+ channel = &bands[band]->channels[i];
+
+ if (channel->flags & IEEE80211_CHAN_DISABLED)
+ continue;
+
+ ch->allow_ht = true;
+
+ /* FIXME: when should we really allow VHT? */
+ ch->allow_vht = true;
+
+ ch->allow_ibss =
+ !(channel->flags & IEEE80211_CHAN_NO_IR);
+
+ ch->ht40plus =
+ !(channel->flags & IEEE80211_CHAN_NO_HT40PLUS);
+
+ ch->chan_radar =
+ !!(channel->flags & IEEE80211_CHAN_RADAR);
+
+ passive = channel->flags & IEEE80211_CHAN_NO_IR;
+ ch->passive = passive;
+
+ /* The firmware ignores the "radar" flag of the
+ * channel and actively scans using Probe Requests
+ * on "Radar detection"/DFS channels which are not
+ * marked as "available".
+ */
+ ch->passive |= ch->chan_radar;
+
+ ch->freq = channel->center_freq;
+ ch->band_center_freq1 = channel->center_freq;
+ ch->min_power = 0;
+ ch->max_power = channel->max_power * 2;
+ ch->max_reg_power = channel->max_reg_power * 2;
+ ch->max_antenna_gain = channel->max_antenna_gain;
+ ch->reg_class_id = 0; /* FIXME */
+
+ /* FIXME: why use only legacy modes, why not any
+ * HT/VHT modes? Would that even make any
+ * difference?
+ */
+ if (channel->band == NL80211_BAND_2GHZ)
+ ch->mode = MODE_11G;
+ else
+ ch->mode = MODE_11A;
+
+ if (WARN_ON_ONCE(ch->mode == MODE_UNKNOWN))
+ continue;
+
+ ath10k_dbg(ar, ATH10K_DBG_WMI,
+ "mac channel [%zd/%d] freq %d maxpower %d regpower %d antenna %d mode %d\n",
+ ch - arg.channels, arg.n_channels,
+ ch->freq, ch->max_power, ch->max_reg_power,
+ ch->max_antenna_gain, ch->mode);
+
+ ch++;
+ }
+ }
+
+ ret = ath10k_wmi_scan_chan_list(ar, &arg);
+ kfree(arg.channels);
+
+ return ret;
+}
+
+static enum wmi_dfs_region
+ath10k_mac_get_dfs_region(enum nl80211_dfs_regions dfs_region)
+{
+ switch (dfs_region) {
+ case NL80211_DFS_UNSET:
+ return WMI_UNINIT_DFS_DOMAIN;
+ case NL80211_DFS_FCC:
+ return WMI_FCC_DFS_DOMAIN;
+ case NL80211_DFS_ETSI:
+ return WMI_ETSI_DFS_DOMAIN;
+ case NL80211_DFS_JP:
+ return WMI_MKK4_DFS_DOMAIN;
+ }
+ return WMI_UNINIT_DFS_DOMAIN;
+}
+
+static void ath10k_regd_update(struct ath10k *ar)
+{
+ struct reg_dmn_pair_mapping *regpair;
+ int ret;
+ enum wmi_dfs_region wmi_dfs_reg;
+ enum nl80211_dfs_regions nl_dfs_reg;
+
+ lockdep_assert_held(&ar->conf_mutex);
+
+ ret = ath10k_update_channel_list(ar);
+ if (ret)
+ ath10k_warn(ar, "failed to update channel list: %d\n", ret);
+
+ regpair = ar->ath_common.regulatory.regpair;
+
+ if (IS_ENABLED(CONFIG_ATH10K_DFS_CERTIFIED) && ar->dfs_detector) {
+ nl_dfs_reg = ar->dfs_detector->region;
+ wmi_dfs_reg = ath10k_mac_get_dfs_region(nl_dfs_reg);
+ } else {
+ wmi_dfs_reg = WMI_UNINIT_DFS_DOMAIN;
+ }
+
+ /* Target allows setting up per-band regdomain but ath_common provides
+ * a combined one only
+ */
+ ret = ath10k_wmi_pdev_set_regdomain(ar,
+ regpair->reg_domain,
+ regpair->reg_domain, /* 2ghz */
+ regpair->reg_domain, /* 5ghz */
+ regpair->reg_2ghz_ctl,
+ regpair->reg_5ghz_ctl,
+ wmi_dfs_reg);
+ if (ret)
+ ath10k_warn(ar, "failed to set pdev regdomain: %d\n", ret);
+}
+
+static void ath10k_mac_update_channel_list(struct ath10k *ar,
+ struct ieee80211_supported_band *band)
+{
+ int i;
+
+ if (ar->low_5ghz_chan && ar->high_5ghz_chan) {
+ for (i = 0; i < band->n_channels; i++) {
+ if (band->channels[i].center_freq < ar->low_5ghz_chan ||
+ band->channels[i].center_freq > ar->high_5ghz_chan)
+ band->channels[i].flags |=
+ IEEE80211_CHAN_DISABLED;
+ }
+ }
+}
+
+static void ath10k_reg_notifier(struct wiphy *wiphy,
+ struct regulatory_request *request)
+{
+ struct ieee80211_hw *hw = wiphy_to_ieee80211_hw(wiphy);
+ struct ath10k *ar = hw->priv;
+ bool result;
+
+ ath_reg_notifier_apply(wiphy, request, &ar->ath_common.regulatory);
+
+ if (IS_ENABLED(CONFIG_ATH10K_DFS_CERTIFIED) && ar->dfs_detector) {
+ ath10k_dbg(ar, ATH10K_DBG_REGULATORY, "dfs region 0x%x\n",
+ request->dfs_region);
+ result = ar->dfs_detector->set_dfs_domain(ar->dfs_detector,
+ request->dfs_region);
+ if (!result)
+ ath10k_warn(ar, "DFS region 0x%X not supported, will trigger radar for every pulse\n",
+ request->dfs_region);
+ }
+
+ mutex_lock(&ar->conf_mutex);
+ if (ar->state == ATH10K_STATE_ON)
+ ath10k_regd_update(ar);
+ mutex_unlock(&ar->conf_mutex);
+
+ if (ar->phy_capability & WHAL_WLAN_11A_CAPABILITY)
+ ath10k_mac_update_channel_list(ar,
+ ar->hw->wiphy->bands[NL80211_BAND_5GHZ]);
+}
+
+static void ath10k_stop_radar_confirmation(struct ath10k *ar)
+{
+ spin_lock_bh(&ar->data_lock);
+ ar->radar_conf_state = ATH10K_RADAR_CONFIRMATION_STOPPED;
+ spin_unlock_bh(&ar->data_lock);
+
+ cancel_work_sync(&ar->radar_confirmation_work);
+}
+
+/***************/
+/* TX handlers */
+/***************/
+
+enum ath10k_mac_tx_path {
+ ATH10K_MAC_TX_HTT,
+ ATH10K_MAC_TX_HTT_MGMT,
+ ATH10K_MAC_TX_WMI_MGMT,
+ ATH10K_MAC_TX_UNKNOWN,
+};
+
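+/* Pause all tx queues. Pause reasons are tracked as a bitmask so multiple
+ * independent pause sources can be active simultaneously; queues resume
+ * only once every reason has been cleared again.
+ */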
+void ath10k_mac_tx_lock(struct ath10k *ar, int reason)
+{
+ lockdep_assert_held(&ar->htt.tx_lock);
+
+ WARN_ON(reason >= ATH10K_TX_PAUSE_MAX);
+ ar->tx_paused |= BIT(reason);
+ ieee80211_stop_queues(ar->hw);
+}
+
+static void ath10k_mac_tx_unlock_iter(void *data, u8 *mac,
+ struct ieee80211_vif *vif)
+{
+ struct ath10k *ar = data;
+ struct ath10k_vif *arvif = (void *)vif->drv_priv;
+
+ if (arvif->tx_paused)
+ return;
+
+ ieee80211_wake_queue(ar->hw, arvif->vdev_id);
+}
+
+void ath10k_mac_tx_unlock(struct ath10k *ar, int reason)
+{
+ lockdep_assert_held(&ar->htt.tx_lock);
+
+ WARN_ON(reason >= ATH10K_TX_PAUSE_MAX);
+ ar->tx_paused &= ~BIT(reason);
+
+ if (ar->tx_paused)
+ return;
+
+ ieee80211_iterate_active_interfaces_atomic(ar->hw,
+ IEEE80211_IFACE_ITER_RESUME_ALL,
+ ath10k_mac_tx_unlock_iter,
+ ar);
+
+ ieee80211_wake_queue(ar->hw, ar->hw->offchannel_tx_hw_queue);
+}
+
+void ath10k_mac_vif_tx_lock(struct ath10k_vif *arvif, int reason)
+{
+ struct ath10k *ar = arvif->ar;
+
+ lockdep_assert_held(&ar->htt.tx_lock);
+
+ WARN_ON(reason >= BITS_PER_LONG);
+ arvif->tx_paused |= BIT(reason);
+ ieee80211_stop_queue(ar->hw, arvif->vdev_id);
+}
+
+void ath10k_mac_vif_tx_unlock(struct ath10k_vif *arvif, int reason)
+{
+ struct ath10k *ar = arvif->ar;
+
+ lockdep_assert_held(&ar->htt.tx_lock);
+
+ WARN_ON(reason >= BITS_PER_LONG);
+ arvif->tx_paused &= ~BIT(reason);
+
+ if (ar->tx_paused)
+ return;
+
+ if (arvif->tx_paused)
+ return;
+
+ ieee80211_wake_queue(ar->hw, arvif->vdev_id);
+}
+
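+/* Illustrative example (not driver code): pause reasons are tracked as
+ * individual bits, so a queue woken for one reason stays stopped while
+ * any other reason bit is still set. With two hypothetical reasons R1
+ * and R2:
+ *
+ *   ath10k_mac_vif_tx_lock(arvif, R1);
+ *   ath10k_mac_vif_tx_lock(arvif, R2);
+ *   ath10k_mac_vif_tx_unlock(arvif, R1);
+ *   // queue still stopped: BIT(R2) remains in arvif->tx_paused
+ *   ath10k_mac_vif_tx_unlock(arvif, R2);
+ *   // queue woken now, unless a device-wide ar->tx_paused bit is set
+ *
+ * All of this must run under ar->htt.tx_lock, as asserted above.
+ */
+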
+static void ath10k_mac_vif_handle_tx_pause(struct ath10k_vif *arvif,
+ enum wmi_tlv_tx_pause_id pause_id,
+ enum wmi_tlv_tx_pause_action action)
+{
+ struct ath10k *ar = arvif->ar;
+
+ lockdep_assert_held(&ar->htt.tx_lock);
+
+ switch (action) {
+ case WMI_TLV_TX_PAUSE_ACTION_STOP:
+ ath10k_mac_vif_tx_lock(arvif, pause_id);
+ break;
+ case WMI_TLV_TX_PAUSE_ACTION_WAKE:
+ ath10k_mac_vif_tx_unlock(arvif, pause_id);
+ break;
+ default:
+ ath10k_dbg(ar, ATH10K_DBG_BOOT,
+ "received unknown tx pause action %d on vdev %i, ignoring\n",
+ action, arvif->vdev_id);
+ break;
+ }
+}
+
+struct ath10k_mac_tx_pause {
+ u32 vdev_id;
+ enum wmi_tlv_tx_pause_id pause_id;
+ enum wmi_tlv_tx_pause_action action;
+};
+
+static void ath10k_mac_handle_tx_pause_iter(void *data, u8 *mac,
+ struct ieee80211_vif *vif)
+{
+ struct ath10k_vif *arvif = (void *)vif->drv_priv;
+ struct ath10k_mac_tx_pause *arg = data;
+
+ if (arvif->vdev_id != arg->vdev_id)
+ return;
+
+ ath10k_mac_vif_handle_tx_pause(arvif, arg->pause_id, arg->action);
+}
+
+void ath10k_mac_handle_tx_pause_vdev(struct ath10k *ar, u32 vdev_id,
+ enum wmi_tlv_tx_pause_id pause_id,
+ enum wmi_tlv_tx_pause_action action)
+{
+ struct ath10k_mac_tx_pause arg = {
+ .vdev_id = vdev_id,
+ .pause_id = pause_id,
+ .action = action,
+ };
+
+ spin_lock_bh(&ar->htt.tx_lock);
+ ieee80211_iterate_active_interfaces_atomic(ar->hw,
+ IEEE80211_IFACE_ITER_RESUME_ALL,
+ ath10k_mac_handle_tx_pause_iter,
+ &arg);
+ spin_unlock_bh(&ar->htt.tx_lock);
+}
+
+static enum ath10k_hw_txrx_mode
+ath10k_mac_tx_h_get_txmode(struct ath10k *ar,
+ struct ieee80211_vif *vif,
+ struct ieee80211_sta *sta,
+ struct sk_buff *skb)
+{
+ const struct ieee80211_hdr *hdr = (void *)skb->data;
+ __le16 fc = hdr->frame_control;
+
+ if (!vif || vif->type == NL80211_IFTYPE_MONITOR)
+ return ATH10K_HW_TXRX_RAW;
+
+ if (ieee80211_is_mgmt(fc))
+ return ATH10K_HW_TXRX_MGMT;
+
+ /* Workaround:
+ *
+ * NullFunc frames are mostly used to ping whether a client or AP is
+ * still reachable and responsive. This implies tx status reports must
+ * be accurate - otherwise either mac80211 or userspace (e.g. hostapd)
+ * may conclude that the other end has disappeared and tear down the
+ * BSS connection, or may never disconnect from the BSS/client (which
+ * is what happens in practice with the broken reports).
+ *
+ * Firmware with HTT older than 3.0 delivers incorrect tx status for
+ * NullFunc frames to the driver. However there's an HTT Mgmt Tx
+ * command which seems to deliver correct tx reports for NullFunc
+ * frames. The downside of using it is that it ignores the client
+ * powersave state, so it can end up disconnecting sleeping clients in
+ * AP mode. It should be fine for STA mode though, because APs don't
+ * sleep.
+ */
+ if (ar->htt.target_version_major < 3 &&
+ (ieee80211_is_nullfunc(fc) || ieee80211_is_qos_nullfunc(fc)) &&
+ !test_bit(ATH10K_FW_FEATURE_HAS_WMI_MGMT_TX,
+ ar->running_fw->fw_file.fw_features))
+ return ATH10K_HW_TXRX_MGMT;
+
+ /* Workaround:
+ *
+ * Some wmi-tlv firmwares for qca6174 have broken Tx key selection for
+ * NativeWifi txmode - they select the AP key instead of the peer key.
+ * Key selection seems to work with Ethernet txmode, so use that
+ * instead.
+ *
+ * FIXME: Check if raw mode works with TDLS.
+ */
+ if (ieee80211_is_data_present(fc) && sta && sta->tdls)
+ return ATH10K_HW_TXRX_ETHERNET;
+
+ if (test_bit(ATH10K_FLAG_RAW_MODE, &ar->dev_flags))
+ return ATH10K_HW_TXRX_RAW;
+
+ return ATH10K_HW_TXRX_NATIVE_WIFI;
+}
+
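+/* Summary (for illustration) of the txmode selection above:
+ *
+ *   no vif or monitor vif                  -> RAW
+ *   management frames                      -> MGMT
+ *   (QoS) NullFunc on HTT < 3.0 firmware
+ *   without WMI mgmt tx                    -> MGMT (tx status workaround)
+ *   TDLS data frames                       -> ETHERNET (key workaround)
+ *   rawmode flag set                       -> RAW
+ *   everything else                        -> NATIVE_WIFI
+ */
+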
+static bool ath10k_tx_h_use_hwcrypto(struct ieee80211_vif *vif,
+ struct sk_buff *skb)
+{
+ const struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
+ const struct ieee80211_hdr *hdr = (void *)skb->data;
+ const u32 mask = IEEE80211_TX_INTFL_DONT_ENCRYPT |
+ IEEE80211_TX_CTL_INJECTED;
+
+ if (!ieee80211_has_protected(hdr->frame_control))
+ return false;
+
+ if ((info->flags & mask) == mask)
+ return false;
+
+ if (vif)
+ return !((struct ath10k_vif *)vif->drv_priv)->nohwcrypt;
+
+ return true;
+}
+
+/* HTT Tx uses Native Wifi tx mode which expects 802.11 frames without QoS
+ * Control in the header.
+ */
+static void ath10k_tx_h_nwifi(struct ieee80211_hw *hw, struct sk_buff *skb)
+{
+ struct ieee80211_hdr *hdr = (void *)skb->data;
+ struct ath10k_skb_cb *cb = ATH10K_SKB_CB(skb);
+ u8 *qos_ctl;
+
+ if (!ieee80211_is_data_qos(hdr->frame_control))
+ return;
+
+ qos_ctl = ieee80211_get_qos_ctl(hdr);
+ memmove(skb->data + IEEE80211_QOS_CTL_LEN,
+ skb->data, (void *)qos_ctl - (void *)skb->data);
+ skb_pull(skb, IEEE80211_QOS_CTL_LEN);
+
+ /* Some firmware revisions don't handle sending QoS NullFunc well.
+ * These frames are mainly used for CQM purposes, so it doesn't really
+ * matter whether a QoS NullFunc or a plain NullFunc is sent.
+ */
+ hdr = (void *)skb->data;
+ if (ieee80211_is_qos_nullfunc(hdr->frame_control))
+ cb->flags &= ~ATH10K_SKB_F_QOS;
+
+ hdr->frame_control &= ~__cpu_to_le16(IEEE80211_STYPE_QOS_DATA);
+}
+
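+/* Illustrative layout of what ath10k_tx_h_nwifi() does to a QoS data
+ * frame (widths not to scale):
+ *
+ *   before: | 802.11 QoS hdr | QoS ctl (2) | payload |
+ *   after:  | 802.11 hdr     | payload |
+ *
+ * The QoS Control field is cut out by moving the preceding header
+ * forward by IEEE80211_QOS_CTL_LEN and the frame subtype is downgraded
+ * from QoS Data to Data by clearing IEEE80211_STYPE_QOS_DATA.
+ */
+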
+static void ath10k_tx_h_8023(struct sk_buff *skb)
+{
+ struct ieee80211_hdr *hdr;
+ struct rfc1042_hdr *rfc1042;
+ struct ethhdr *eth;
+ size_t hdrlen;
+ u8 da[ETH_ALEN];
+ u8 sa[ETH_ALEN];
+ __be16 type;
+
+ hdr = (void *)skb->data;
+ hdrlen = ieee80211_hdrlen(hdr->frame_control);
+ rfc1042 = (void *)skb->data + hdrlen;
+
+ ether_addr_copy(da, ieee80211_get_DA(hdr));
+ ether_addr_copy(sa, ieee80211_get_SA(hdr));
+ type = rfc1042->snap_type;
+
+ skb_pull(skb, hdrlen + sizeof(*rfc1042));
+ skb_push(skb, sizeof(*eth));
+
+ eth = (void *)skb->data;
+ ether_addr_copy(eth->h_dest, da);
+ ether_addr_copy(eth->h_source, sa);
+ eth->h_proto = type;
+}
+
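+/* Illustrative layout of the conversion done by ath10k_tx_h_8023():
+ *
+ *   before: | 802.11 hdr (carries DA/SA) | RFC1042 SNAP + type | payload |
+ *   after:  | DA (6) | SA (6) | type (2) | payload |
+ *
+ * i.e. the 802.11 and SNAP headers are replaced by a plain Ethernet
+ * header carrying the same addresses and protocol type.
+ */
+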
+static void ath10k_tx_h_add_p2p_noa_ie(struct ath10k *ar,
+ struct ieee80211_vif *vif,
+ struct sk_buff *skb)
+{
+ struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data;
+ struct ath10k_vif *arvif = (void *)vif->drv_priv;
+
+ /* This is the case only for P2P_GO */
+ if (vif->type != NL80211_IFTYPE_AP || !vif->p2p)
+ return;
+
+ if (unlikely(ieee80211_is_probe_resp(hdr->frame_control))) {
+ spin_lock_bh(&ar->data_lock);
+ if (arvif->u.ap.noa_data)
+ if (!pskb_expand_head(skb, 0, arvif->u.ap.noa_len,
+ GFP_ATOMIC))
+ skb_put_data(skb, arvif->u.ap.noa_data,
+ arvif->u.ap.noa_len);
+ spin_unlock_bh(&ar->data_lock);
+ }
+}
+
+static void ath10k_mac_tx_h_fill_cb(struct ath10k *ar,
+ struct ieee80211_vif *vif,
+ struct ieee80211_txq *txq,
+ struct sk_buff *skb)
+{
+ struct ieee80211_hdr *hdr = (void *)skb->data;
+ struct ath10k_skb_cb *cb = ATH10K_SKB_CB(skb);
+
+ cb->flags = 0;
+ if (!ath10k_tx_h_use_hwcrypto(vif, skb))
+ cb->flags |= ATH10K_SKB_F_NO_HWCRYPT;
+
+ if (ieee80211_is_mgmt(hdr->frame_control))
+ cb->flags |= ATH10K_SKB_F_MGMT;
+
+ if (ieee80211_is_data_qos(hdr->frame_control))
+ cb->flags |= ATH10K_SKB_F_QOS;
+
+ cb->vif = vif;
+ cb->txq = txq;
+}
+
+bool ath10k_mac_tx_frm_has_freq(struct ath10k *ar)
+{
+ /* FIXME: It's not really clear when the behaviour changed. At some
+ * point new firmware stopped requiring creation of peer entries for
+ * offchannel tx (and actually creating them causes issues with wmi-htc
+ * tx credit replenishment and reliability). Assume it's at least 3.4
+ * because that's when the `freq` field was introduced in the TX_FRM
+ * HTT command.
+ */
+ return (ar->htt.target_version_major >= 3 &&
+ ar->htt.target_version_minor >= 4 &&
+ ar->running_fw->fw_file.htt_op_version == ATH10K_FW_HTT_OP_VERSION_TLV);
+}
+
+static int ath10k_mac_tx_wmi_mgmt(struct ath10k *ar, struct sk_buff *skb)
+{
+ struct sk_buff_head *q = &ar->wmi_mgmt_tx_queue;
+
+ if (skb_queue_len_lockless(q) >= ATH10K_MAX_NUM_MGMT_PENDING) {
+ ath10k_warn(ar, "wmi mgmt tx queue is full\n");
+ return -ENOSPC;
+ }
+
+ skb_queue_tail(q, skb);
+ ieee80211_queue_work(ar->hw, &ar->wmi_mgmt_tx_work);
+
+ return 0;
+}
+
+static enum ath10k_mac_tx_path
+ath10k_mac_tx_h_get_txpath(struct ath10k *ar,
+ struct sk_buff *skb,
+ enum ath10k_hw_txrx_mode txmode)
+{
+ switch (txmode) {
+ case ATH10K_HW_TXRX_RAW:
+ case ATH10K_HW_TXRX_NATIVE_WIFI:
+ case ATH10K_HW_TXRX_ETHERNET:
+ return ATH10K_MAC_TX_HTT;
+ case ATH10K_HW_TXRX_MGMT:
+ if (test_bit(ATH10K_FW_FEATURE_HAS_WMI_MGMT_TX,
+ ar->running_fw->fw_file.fw_features) ||
+ test_bit(WMI_SERVICE_MGMT_TX_WMI,
+ ar->wmi.svc_map))
+ return ATH10K_MAC_TX_WMI_MGMT;
+ else if (ar->htt.target_version_major >= 3)
+ return ATH10K_MAC_TX_HTT;
+ else
+ return ATH10K_MAC_TX_HTT_MGMT;
+ }
+
+ return ATH10K_MAC_TX_UNKNOWN;
+}
+
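+/* Summary (for illustration) of the txmode -> txpath mapping above:
+ *
+ *   RAW / NATIVE_WIFI / ETHERNET        -> HTT data path
+ *   MGMT, WMI mgmt tx available         -> WMI management path
+ *   MGMT, HTT >= 3.0                    -> HTT data path
+ *   MGMT otherwise                      -> HTT management path
+ */
+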
+static int ath10k_mac_tx_submit(struct ath10k *ar,
+ enum ath10k_hw_txrx_mode txmode,
+ enum ath10k_mac_tx_path txpath,
+ struct sk_buff *skb)
+{
+ struct ath10k_htt *htt = &ar->htt;
+ int ret = -EINVAL;
+
+ switch (txpath) {
+ case ATH10K_MAC_TX_HTT:
+ ret = ath10k_htt_tx(htt, txmode, skb);
+ break;
+ case ATH10K_MAC_TX_HTT_MGMT:
+ ret = ath10k_htt_mgmt_tx(htt, skb);
+ break;
+ case ATH10K_MAC_TX_WMI_MGMT:
+ ret = ath10k_mac_tx_wmi_mgmt(ar, skb);
+ break;
+ case ATH10K_MAC_TX_UNKNOWN:
+ WARN_ON_ONCE(1);
+ ret = -EINVAL;
+ break;
+ }
+
+ if (ret) {
+ ath10k_warn(ar, "failed to transmit packet, dropping: %d\n",
+ ret);
+ ieee80211_free_txskb(ar->hw, skb);
+ }
+
+ return ret;
+}
+
+/* This function consumes the sk_buff regardless of the return value, as
+ * far as the caller is concerned, so no freeing is necessary afterwards.
+ */
+static int ath10k_mac_tx(struct ath10k *ar,
+ struct ieee80211_vif *vif,
+ enum ath10k_hw_txrx_mode txmode,
+ enum ath10k_mac_tx_path txpath,
+ struct sk_buff *skb, bool noque_offchan)
+{
+ struct ieee80211_hw *hw = ar->hw;
+ struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
+ int ret;
+
+ /* We should disable CCK RATE due to P2P */
+ if (info->flags & IEEE80211_TX_CTL_NO_CCK_RATE)
+ ath10k_dbg(ar, ATH10K_DBG_MAC, "IEEE80211_TX_CTL_NO_CCK_RATE\n");
+
+ switch (txmode) {
+ case ATH10K_HW_TXRX_MGMT:
+ case ATH10K_HW_TXRX_NATIVE_WIFI:
+ ath10k_tx_h_nwifi(hw, skb);
+ ath10k_tx_h_add_p2p_noa_ie(ar, vif, skb);
+ ath10k_tx_h_seq_no(vif, skb);
+ break;
+ case ATH10K_HW_TXRX_ETHERNET:
+ ath10k_tx_h_8023(skb);
+ break;
+ case ATH10K_HW_TXRX_RAW:
+ if (!test_bit(ATH10K_FLAG_RAW_MODE, &ar->dev_flags)) {
+ WARN_ON_ONCE(1);
+ ieee80211_free_txskb(hw, skb);
+ return -ENOTSUPP;
+ }
+ }
+
+ if (!noque_offchan && info->flags & IEEE80211_TX_CTL_TX_OFFCHAN) {
+ if (!ath10k_mac_tx_frm_has_freq(ar)) {
+ ath10k_dbg(ar, ATH10K_DBG_MAC, "mac queued offchannel skb %pK len %d\n",
+ skb, skb->len);
+
+ skb_queue_tail(&ar->offchan_tx_queue, skb);
+ ieee80211_queue_work(hw, &ar->offchan_tx_work);
+ return 0;
+ }
+ }
+
+ ret = ath10k_mac_tx_submit(ar, txmode, txpath, skb);
+ if (ret) {
+ ath10k_warn(ar, "failed to submit frame: %d\n", ret);
+ return ret;
+ }
+
+ return 0;
+}
+
+void ath10k_offchan_tx_purge(struct ath10k *ar)
+{
+ struct sk_buff *skb;
+
+ for (;;) {
+ skb = skb_dequeue(&ar->offchan_tx_queue);
+ if (!skb)
+ break;
+
+ ieee80211_free_txskb(ar->hw, skb);
+ }
+}
+
+void ath10k_offchan_tx_work(struct work_struct *work)
+{
+ struct ath10k *ar = container_of(work, struct ath10k, offchan_tx_work);
+ struct ath10k_peer *peer;
+ struct ath10k_vif *arvif;
+ enum ath10k_hw_txrx_mode txmode;
+ enum ath10k_mac_tx_path txpath;
+ struct ieee80211_hdr *hdr;
+ struct ieee80211_vif *vif;
+ struct ieee80211_sta *sta;
+ struct sk_buff *skb;
+ const u8 *peer_addr;
+ int vdev_id;
+ int ret;
+ unsigned long time_left;
+ bool tmp_peer_created = false;
+
+ /* FW requirement: We must create a peer before FW will send out
+ * an offchannel frame. Otherwise the frame will be stuck and
+ * never transmitted. We delete the peer upon tx completion.
+ * It is unlikely that a peer for offchannel tx will already be
+ * present. However it may exist in some rare cases, so account for
+ * that; otherwise we might remove a legitimate peer and break stuff.
+ */
+
+ for (;;) {
+ skb = skb_dequeue(&ar->offchan_tx_queue);
+ if (!skb)
+ break;
+
+ mutex_lock(&ar->conf_mutex);
+
+ ath10k_dbg(ar, ATH10K_DBG_MAC, "mac offchannel skb %pK len %d\n",
+ skb, skb->len);
+
+ hdr = (struct ieee80211_hdr *)skb->data;
+ peer_addr = ieee80211_get_DA(hdr);
+
+ spin_lock_bh(&ar->data_lock);
+ vdev_id = ar->scan.vdev_id;
+ peer = ath10k_peer_find(ar, vdev_id, peer_addr);
+ spin_unlock_bh(&ar->data_lock);
+
+ if (peer)
+ /* FIXME: should this use ath10k_warn()? */
+ ath10k_dbg(ar, ATH10K_DBG_MAC, "peer %pM on vdev %d already present\n",
+ peer_addr, vdev_id);
+
+ if (!peer) {
+ ret = ath10k_peer_create(ar, NULL, NULL, vdev_id,
+ peer_addr,
+ WMI_PEER_TYPE_DEFAULT);
+ if (ret)
+ ath10k_warn(ar, "failed to create peer %pM on vdev %d: %d\n",
+ peer_addr, vdev_id, ret);
+ tmp_peer_created = (ret == 0);
+ }
+
+ spin_lock_bh(&ar->data_lock);
+ reinit_completion(&ar->offchan_tx_completed);
+ ar->offchan_tx_skb = skb;
+ spin_unlock_bh(&ar->data_lock);
+
+ /* It's safe to access vif and sta - conf_mutex guarantees that
+ * sta_state() and remove_interface() cannot run concurrently
+ * with this offchannel worker.
+ */
+ arvif = ath10k_get_arvif(ar, vdev_id);
+ if (arvif) {
+ vif = arvif->vif;
+ sta = ieee80211_find_sta(vif, peer_addr);
+ } else {
+ vif = NULL;
+ sta = NULL;
+ }
+
+ txmode = ath10k_mac_tx_h_get_txmode(ar, vif, sta, skb);
+ txpath = ath10k_mac_tx_h_get_txpath(ar, skb, txmode);
+
+ ret = ath10k_mac_tx(ar, vif, txmode, txpath, skb, true);
+ if (ret) {
+ ath10k_warn(ar, "failed to transmit offchannel frame: %d\n",
+ ret);
+ /* not serious */
+ }
+
+ time_left =
+ wait_for_completion_timeout(&ar->offchan_tx_completed, 3 * HZ);
+ if (time_left == 0)
+ ath10k_warn(ar, "timed out waiting for offchannel skb %pK, len: %d\n",
+ skb, skb->len);
+
+ if (!peer && tmp_peer_created) {
+ ret = ath10k_peer_delete(ar, vdev_id, peer_addr);
+ if (ret)
+ ath10k_warn(ar, "failed to delete peer %pM on vdev %d: %d\n",
+ peer_addr, vdev_id, ret);
+ }
+
+ mutex_unlock(&ar->conf_mutex);
+ }
+}
+
+void ath10k_mgmt_over_wmi_tx_purge(struct ath10k *ar)
+{
+ struct sk_buff *skb;
+
+ for (;;) {
+ skb = skb_dequeue(&ar->wmi_mgmt_tx_queue);
+ if (!skb)
+ break;
+
+ ieee80211_free_txskb(ar->hw, skb);
+ }
+}
+
+void ath10k_mgmt_over_wmi_tx_work(struct work_struct *work)
+{
+ struct ath10k *ar = container_of(work, struct ath10k, wmi_mgmt_tx_work);
+ struct sk_buff *skb;
+ dma_addr_t paddr;
+ int ret;
+
+ for (;;) {
+ skb = skb_dequeue(&ar->wmi_mgmt_tx_queue);
+ if (!skb)
+ break;
+
+ if (test_bit(ATH10K_FW_FEATURE_MGMT_TX_BY_REF,
+ ar->running_fw->fw_file.fw_features)) {
+ paddr = dma_map_single(ar->dev, skb->data,
+ skb->len, DMA_TO_DEVICE);
+ if (dma_mapping_error(ar->dev, paddr)) {
+ ieee80211_free_txskb(ar->hw, skb);
+ continue;
+ }
+ ret = ath10k_wmi_mgmt_tx_send(ar, skb, paddr);
+ if (ret) {
+ ath10k_warn(ar, "failed to transmit management frame by ref via WMI: %d\n",
+ ret);
+ /* remove this msdu from idr tracking */
+ ath10k_wmi_cleanup_mgmt_tx_send(ar, skb);
+
+ dma_unmap_single(ar->dev, paddr, skb->len,
+ DMA_TO_DEVICE);
+ ieee80211_free_txskb(ar->hw, skb);
+ }
+ } else {
+ ret = ath10k_wmi_mgmt_tx(ar, skb);
+ if (ret) {
+ ath10k_warn(ar, "failed to transmit management frame via WMI: %d\n",
+ ret);
+ ieee80211_free_txskb(ar->hw, skb);
+ }
+ }
+ }
+}
+
+static void ath10k_mac_txq_init(struct ieee80211_txq *txq)
+{
+ struct ath10k_txq *artxq;
+
+ if (!txq)
+ return;
+
+ artxq = (void *)txq->drv_priv;
+ INIT_LIST_HEAD(&artxq->list);
+}
+
+static void ath10k_mac_txq_unref(struct ath10k *ar, struct ieee80211_txq *txq)
+{
+ struct ath10k_txq *artxq;
+ struct ath10k_skb_cb *cb;
+ struct sk_buff *msdu;
+ int msdu_id;
+
+ if (!txq)
+ return;
+
+ artxq = (void *)txq->drv_priv;
+ spin_lock_bh(&ar->txqs_lock);
+ if (!list_empty(&artxq->list))
+ list_del_init(&artxq->list);
+ spin_unlock_bh(&ar->txqs_lock);
+
+ spin_lock_bh(&ar->htt.tx_lock);
+ idr_for_each_entry(&ar->htt.pending_tx, msdu, msdu_id) {
+ cb = ATH10K_SKB_CB(msdu);
+ if (cb->txq == txq)
+ cb->txq = NULL;
+ }
+ spin_unlock_bh(&ar->htt.tx_lock);
+}
+
+struct ieee80211_txq *ath10k_mac_txq_lookup(struct ath10k *ar,
+ u16 peer_id,
+ u8 tid)
+{
+ struct ath10k_peer *peer;
+
+ lockdep_assert_held(&ar->data_lock);
+
+ peer = ar->peer_map[peer_id];
+ if (!peer)
+ return NULL;
+
+ if (peer->removed)
+ return NULL;
+
+ if (peer->sta)
+ return peer->sta->txq[tid];
+ else if (peer->vif)
+ return peer->vif->txq;
+ else
+ return NULL;
+}
+
+static bool ath10k_mac_tx_can_push(struct ieee80211_hw *hw,
+ struct ieee80211_txq *txq)
+{
+ struct ath10k *ar = hw->priv;
+ struct ath10k_txq *artxq = (void *)txq->drv_priv;
+
+ /* No need to get locks */
+
+ if (ar->htt.tx_q_state.mode == HTT_TX_MODE_SWITCH_PUSH)
+ return true;
+
+ if (ar->htt.num_pending_tx < ar->htt.tx_q_state.num_push_allowed)
+ return true;
+
+ if (artxq->num_fw_queued < artxq->num_push_allowed)
+ return true;
+
+ return false;
+}
+
+int ath10k_mac_tx_push_txq(struct ieee80211_hw *hw,
+ struct ieee80211_txq *txq)
+{
+ struct ath10k *ar = hw->priv;
+ struct ath10k_htt *htt = &ar->htt;
+ struct ath10k_txq *artxq = (void *)txq->drv_priv;
+ struct ieee80211_vif *vif = txq->vif;
+ struct ieee80211_sta *sta = txq->sta;
+ enum ath10k_hw_txrx_mode txmode;
+ enum ath10k_mac_tx_path txpath;
+ struct sk_buff *skb;
+ struct ieee80211_hdr *hdr;
+ size_t skb_len;
+ bool is_mgmt, is_presp;
+ int ret;
+
+ spin_lock_bh(&ar->htt.tx_lock);
+ ret = ath10k_htt_tx_inc_pending(htt);
+ spin_unlock_bh(&ar->htt.tx_lock);
+
+ if (ret)
+ return ret;
+
+ skb = ieee80211_tx_dequeue(hw, txq);
+ if (!skb) {
+ spin_lock_bh(&ar->htt.tx_lock);
+ ath10k_htt_tx_dec_pending(htt);
+ spin_unlock_bh(&ar->htt.tx_lock);
+
+ return -ENOENT;
+ }
+
+ ath10k_mac_tx_h_fill_cb(ar, vif, txq, skb);
+
+ skb_len = skb->len;
+ txmode = ath10k_mac_tx_h_get_txmode(ar, vif, sta, skb);
+ txpath = ath10k_mac_tx_h_get_txpath(ar, skb, txmode);
+ is_mgmt = (txpath == ATH10K_MAC_TX_HTT_MGMT);
+
+ if (is_mgmt) {
+ hdr = (struct ieee80211_hdr *)skb->data;
+ is_presp = ieee80211_is_probe_resp(hdr->frame_control);
+
+ spin_lock_bh(&ar->htt.tx_lock);
+ ret = ath10k_htt_tx_mgmt_inc_pending(htt, is_mgmt, is_presp);
+
+ if (ret) {
+ ath10k_htt_tx_dec_pending(htt);
+ spin_unlock_bh(&ar->htt.tx_lock);
+ return ret;
+ }
+ spin_unlock_bh(&ar->htt.tx_lock);
+ }
+
+ ret = ath10k_mac_tx(ar, vif, txmode, txpath, skb, false);
+ if (unlikely(ret)) {
+ ath10k_warn(ar, "failed to push frame: %d\n", ret);
+
+ spin_lock_bh(&ar->htt.tx_lock);
+ ath10k_htt_tx_dec_pending(htt);
+ if (is_mgmt)
+ ath10k_htt_tx_mgmt_dec_pending(htt);
+ spin_unlock_bh(&ar->htt.tx_lock);
+
+ return ret;
+ }
+
+ spin_lock_bh(&ar->htt.tx_lock);
+ artxq->num_fw_queued++;
+ spin_unlock_bh(&ar->htt.tx_lock);
+
+ return skb_len;
+}
+
+void ath10k_mac_tx_push_pending(struct ath10k *ar)
+{
+ struct ieee80211_hw *hw = ar->hw;
+ struct ieee80211_txq *txq;
+ struct ath10k_txq *artxq;
+ struct ath10k_txq *last;
+ int ret;
+ int max;
+
+ if (ar->htt.num_pending_tx >= (ar->htt.max_num_pending_tx / 2))
+ return;
+
+ spin_lock_bh(&ar->txqs_lock);
+ rcu_read_lock();
+
+ last = list_last_entry(&ar->txqs, struct ath10k_txq, list);
+ while (!list_empty(&ar->txqs)) {
+ artxq = list_first_entry(&ar->txqs, struct ath10k_txq, list);
+ txq = container_of((void *)artxq, struct ieee80211_txq,
+ drv_priv);
+
+ /* Prevent an aggressive sta/tid from taking over the tx queue */
+ max = HTC_HOST_MAX_MSG_PER_TX_BUNDLE;
+ ret = 0;
+ while (ath10k_mac_tx_can_push(hw, txq) && max--) {
+ ret = ath10k_mac_tx_push_txq(hw, txq);
+ if (ret < 0)
+ break;
+ }
+
+ list_del_init(&artxq->list);
+ if (ret != -ENOENT)
+ list_add_tail(&artxq->list, &ar->txqs);
+
+ ath10k_htt_tx_txq_update(hw, txq);
+
+ if (artxq == last || (ret < 0 && ret != -ENOENT))
+ break;
+ }
+
+ rcu_read_unlock();
+ spin_unlock_bh(&ar->txqs_lock);
+}
+EXPORT_SYMBOL(ath10k_mac_tx_push_pending);
+
+/************/
+/* Scanning */
+/************/
+
+void __ath10k_scan_finish(struct ath10k *ar)
+{
+ lockdep_assert_held(&ar->data_lock);
+
+ switch (ar->scan.state) {
+ case ATH10K_SCAN_IDLE:
+ break;
+ case ATH10K_SCAN_RUNNING:
+ case ATH10K_SCAN_ABORTING:
+ if (!ar->scan.is_roc) {
+ struct cfg80211_scan_info info = {
+ .aborted = (ar->scan.state ==
+ ATH10K_SCAN_ABORTING),
+ };
+
+ ieee80211_scan_completed(ar->hw, &info);
+ } else if (ar->scan.roc_notify) {
+ ieee80211_remain_on_channel_expired(ar->hw);
+ }
+ /* fall through */
+ case ATH10K_SCAN_STARTING:
+ ar->scan.state = ATH10K_SCAN_IDLE;
+ ar->scan_channel = NULL;
+ ar->scan.roc_freq = 0;
+ ath10k_offchan_tx_purge(ar);
+ cancel_delayed_work(&ar->scan.timeout);
+ complete(&ar->scan.completed);
+ break;
+ }
+}
+
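+/* Scan state transitions handled by __ath10k_scan_finish() (summary for
+ * illustration):
+ *
+ *   IDLE                -> nothing to do
+ *   RUNNING / ABORTING  -> notify mac80211 (scan completed with
+ *                          info.aborted set for ABORTING, or ROC
+ *                          expired), then fall through to:
+ *   STARTING            -> reset state to IDLE, purge offchannel tx,
+ *                          cancel the timeout worker and complete
+ *                          &ar->scan.completed
+ */
+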
+void ath10k_scan_finish(struct ath10k *ar)
+{
+ spin_lock_bh(&ar->data_lock);
+ __ath10k_scan_finish(ar);
+ spin_unlock_bh(&ar->data_lock);
+}
+
+static int ath10k_scan_stop(struct ath10k *ar)
+{
+ struct wmi_stop_scan_arg arg = {
+ .req_id = 1, /* FIXME */
+ .req_type = WMI_SCAN_STOP_ONE,
+ .u.scan_id = ATH10K_SCAN_ID,
+ };
+ int ret;
+
+ lockdep_assert_held(&ar->conf_mutex);
+
+ ret = ath10k_wmi_stop_scan(ar, &arg);
+ if (ret) {
+ ath10k_warn(ar, "failed to stop wmi scan: %d\n", ret);
+ goto out;
+ }
+
+ ret = wait_for_completion_timeout(&ar->scan.completed, 3 * HZ);
+ if (ret == 0) {
+ ath10k_warn(ar, "failed to receive scan abortion completion: timed out\n");
+ ret = -ETIMEDOUT;
+ } else if (ret > 0) {
+ ret = 0;
+ }
+
+out:
+ /* Scan state should be updated upon scan completion but in case
+ * firmware fails to deliver the event (for whatever reason) it is
+ * desired to clean up scan state anyway. Firmware may have just
+ * dropped the scan completion event delivery due to the transport
+ * pipe being overrun with data and/or it can recover on its own
+ * before the next scan request is submitted.
+ */
+ spin_lock_bh(&ar->data_lock);
+ if (ar->scan.state != ATH10K_SCAN_IDLE)
+ __ath10k_scan_finish(ar);
+ spin_unlock_bh(&ar->data_lock);
+
+ return ret;
+}
+
+static void ath10k_scan_abort(struct ath10k *ar)
+{
+ int ret;
+
+ lockdep_assert_held(&ar->conf_mutex);
+
+ spin_lock_bh(&ar->data_lock);
+
+ switch (ar->scan.state) {
+ case ATH10K_SCAN_IDLE:
+ /* This can happen if the timeout worker kicked in and requested
+ * an abort while the scan completion was being processed.
+ */
+ break;
+ case ATH10K_SCAN_STARTING:
+ case ATH10K_SCAN_ABORTING:
+ ath10k_warn(ar, "refusing scan abortion due to invalid scan state: %s (%d)\n",
+ ath10k_scan_state_str(ar->scan.state),
+ ar->scan.state);
+ break;
+ case ATH10K_SCAN_RUNNING:
+ ar->scan.state = ATH10K_SCAN_ABORTING;
+ spin_unlock_bh(&ar->data_lock);
+
+ ret = ath10k_scan_stop(ar);
+ if (ret)
+ ath10k_warn(ar, "failed to abort scan: %d\n", ret);
+
+ spin_lock_bh(&ar->data_lock);
+ break;
+ }
+
+ spin_unlock_bh(&ar->data_lock);
+}
+
+void ath10k_scan_timeout_work(struct work_struct *work)
+{
+ struct ath10k *ar = container_of(work, struct ath10k,
+ scan.timeout.work);
+
+ mutex_lock(&ar->conf_mutex);
+ ath10k_scan_abort(ar);
+ mutex_unlock(&ar->conf_mutex);
+}
+
+static int ath10k_start_scan(struct ath10k *ar,
+ const struct wmi_start_scan_arg *arg)
+{
+ int ret;
+
+ lockdep_assert_held(&ar->conf_mutex);
+
+ ret = ath10k_wmi_start_scan(ar, arg);
+ if (ret)
+ return ret;
+
+ ret = wait_for_completion_timeout(&ar->scan.started, 1 * HZ);
+ if (ret == 0) {
+ ret = ath10k_scan_stop(ar);
+ if (ret)
+ ath10k_warn(ar, "failed to stop scan: %d\n", ret);
+
+ return -ETIMEDOUT;
+ }
+
+ /* If we failed to start the scan, return an error code at
+ * this point. This is probably due to some issue in the
+ * firmware, but there is no need to wedge the driver due to
+ * that...
+ */
+ spin_lock_bh(&ar->data_lock);
+ if (ar->scan.state == ATH10K_SCAN_IDLE) {
+ spin_unlock_bh(&ar->data_lock);
+ return -EINVAL;
+ }
+ spin_unlock_bh(&ar->data_lock);
+
+ return 0;
+}
+
+/**********************/
+/* mac80211 callbacks */
+/**********************/
+
+static void ath10k_mac_op_tx(struct ieee80211_hw *hw,
+ struct ieee80211_tx_control *control,
+ struct sk_buff *skb)
+{
+ struct ath10k *ar = hw->priv;
+ struct ath10k_htt *htt = &ar->htt;
+ struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
+ struct ieee80211_vif *vif = info->control.vif;
+ struct ieee80211_sta *sta = control->sta;
+ struct ieee80211_txq *txq = NULL;
+ struct ieee80211_hdr *hdr = (void *)skb->data;
+ enum ath10k_hw_txrx_mode txmode;
+ enum ath10k_mac_tx_path txpath;
+ bool is_htt;
+ bool is_mgmt;
+ bool is_presp;
+ int ret;
+
+ ath10k_mac_tx_h_fill_cb(ar, vif, txq, skb);
+
+ txmode = ath10k_mac_tx_h_get_txmode(ar, vif, sta, skb);
+ txpath = ath10k_mac_tx_h_get_txpath(ar, skb, txmode);
+ is_htt = (txpath == ATH10K_MAC_TX_HTT ||
+ txpath == ATH10K_MAC_TX_HTT_MGMT);
+ is_mgmt = (txpath == ATH10K_MAC_TX_HTT_MGMT);
+
+ if (is_htt) {
+ spin_lock_bh(&ar->htt.tx_lock);
+ is_presp = ieee80211_is_probe_resp(hdr->frame_control);
+
+ ret = ath10k_htt_tx_inc_pending(htt);
+ if (ret) {
+ ath10k_warn(ar, "failed to increase tx pending count: %d, dropping\n",
+ ret);
+ spin_unlock_bh(&ar->htt.tx_lock);
+ ieee80211_free_txskb(ar->hw, skb);
+ return;
+ }
+
+ ret = ath10k_htt_tx_mgmt_inc_pending(htt, is_mgmt, is_presp);
+ if (ret) {
+ ath10k_dbg(ar, ATH10K_DBG_MAC, "failed to increase tx mgmt pending count: %d, dropping\n",
+ ret);
+ ath10k_htt_tx_dec_pending(htt);
+ spin_unlock_bh(&ar->htt.tx_lock);
+ ieee80211_free_txskb(ar->hw, skb);
+ return;
+ }
+ spin_unlock_bh(&ar->htt.tx_lock);
+ }
+
+ ret = ath10k_mac_tx(ar, vif, txmode, txpath, skb, false);
+ if (ret) {
+ ath10k_warn(ar, "failed to transmit frame: %d\n", ret);
+ if (is_htt) {
+ spin_lock_bh(&ar->htt.tx_lock);
+ ath10k_htt_tx_dec_pending(htt);
+ if (is_mgmt)
+ ath10k_htt_tx_mgmt_dec_pending(htt);
+ spin_unlock_bh(&ar->htt.tx_lock);
+ }
+ return;
+ }
+}
+
+static void ath10k_mac_op_wake_tx_queue(struct ieee80211_hw *hw,
+ struct ieee80211_txq *txq)
+{
+ struct ath10k *ar = hw->priv;
+ struct ath10k_txq *artxq = (void *)txq->drv_priv;
+ struct ieee80211_txq *f_txq;
+ struct ath10k_txq *f_artxq;
+ int ret = 0;
+ int max = HTC_HOST_MAX_MSG_PER_TX_BUNDLE;
+
+ spin_lock_bh(&ar->txqs_lock);
+ if (list_empty(&artxq->list))
+ list_add_tail(&artxq->list, &ar->txqs);
+
+ f_artxq = list_first_entry(&ar->txqs, struct ath10k_txq, list);
+ f_txq = container_of((void *)f_artxq, struct ieee80211_txq, drv_priv);
+ list_del_init(&f_artxq->list);
+
+ while (ath10k_mac_tx_can_push(hw, f_txq) && max--) {
+ ret = ath10k_mac_tx_push_txq(hw, f_txq);
+ if (ret < 0)
+ break;
+ }
+ if (ret != -ENOENT)
+ list_add_tail(&f_artxq->list, &ar->txqs);
+ spin_unlock_bh(&ar->txqs_lock);
+
+ ath10k_htt_tx_txq_update(hw, f_txq);
+ ath10k_htt_tx_txq_update(hw, txq);
+}
+
+/* Must not be called with conf_mutex held as the workers also take it. */
+void ath10k_drain_tx(struct ath10k *ar)
+{
+ /* make sure rcu-protected mac80211 tx path itself is drained */
+ synchronize_net();
+
+ ath10k_offchan_tx_purge(ar);
+ ath10k_mgmt_over_wmi_tx_purge(ar);
+
+ cancel_work_sync(&ar->offchan_tx_work);
+ cancel_work_sync(&ar->wmi_mgmt_tx_work);
+}
+
+void ath10k_halt(struct ath10k *ar)
+{
+ struct ath10k_vif *arvif;
+
+ lockdep_assert_held(&ar->conf_mutex);
+
+ clear_bit(ATH10K_CAC_RUNNING, &ar->dev_flags);
+ ar->filter_flags = 0;
+ ar->monitor = false;
+ ar->monitor_arvif = NULL;
+
+ if (ar->monitor_started)
+ ath10k_monitor_stop(ar);
+
+ ar->monitor_started = false;
+ ar->tx_paused = 0;
+
+ ath10k_scan_finish(ar);
+ ath10k_peer_cleanup_all(ar);
+ ath10k_stop_radar_confirmation(ar);
+ ath10k_core_stop(ar);
+ ath10k_hif_power_down(ar);
+
+ spin_lock_bh(&ar->data_lock);
+ list_for_each_entry(arvif, &ar->arvifs, list)
+ ath10k_mac_vif_beacon_cleanup(arvif);
+ spin_unlock_bh(&ar->data_lock);
+}
+
+static int ath10k_get_antenna(struct ieee80211_hw *hw, u32 *tx_ant, u32 *rx_ant)
+{
+ struct ath10k *ar = hw->priv;
+
+ mutex_lock(&ar->conf_mutex);
+
+ *tx_ant = ar->cfg_tx_chainmask;
+ *rx_ant = ar->cfg_rx_chainmask;
+
+ mutex_unlock(&ar->conf_mutex);
+
+ return 0;
+}
+
+static void ath10k_check_chain_mask(struct ath10k *ar, u32 cm, const char *dbg)
+{
+ /* It is not clear that allowing gaps in the chainmask
+ * is helpful. It probably will not do what the user
+ * is hoping for, so warn in that case.
+ */
+ if (cm == 15 || cm == 7 || cm == 3 || cm == 1 || cm == 0)
+ return;
+
+ ath10k_warn(ar, "mac %s antenna chainmask may be invalid: 0x%x. Suggested values: 15, 7, 3, 1 or 0.\n",
+ dbg, cm);
+}
+
+static int ath10k_mac_get_vht_cap_bf_sts(struct ath10k *ar)
+{
+ int nsts = ar->vht_cap_info;
+
+ nsts &= IEEE80211_VHT_CAP_BEAMFORMEE_STS_MASK;
+ nsts >>= IEEE80211_VHT_CAP_BEAMFORMEE_STS_SHIFT;
+
+ /* If the firmware does not report the number of supported
+ * space-time streams to the host, assume it supports up to 4 BF STS
+ * and return the value encoded in VHT CAP (nsts - 1).
+ */
+ if (nsts == 0)
+ return 3;
+
+ return nsts;
+}
+
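+/* Example (values illustrative): the beamformee STS field is encoded as
+ * "supported STS count minus one", so firmware advertising 4 space-time
+ * streams yields nsts == 3 after the mask/shift above, which is also why
+ * 3 is returned as the fallback when the field is zero.
+ */
+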
+static int ath10k_mac_get_vht_cap_bf_sound_dim(struct ath10k *ar)
+{
+ int sound_dim = ar->vht_cap_info;
+
+ sound_dim &= IEEE80211_VHT_CAP_SOUNDING_DIMENSIONS_MASK;
+ sound_dim >>= IEEE80211_VHT_CAP_SOUNDING_DIMENSIONS_SHIFT;
+
+ /* If the sounding dimension is not advertised by the firmware,
+ * let's use a default value of 1
+ */
+ if (sound_dim == 0)
+ return 1;
+
+ return sound_dim;
+}
+
+static struct ieee80211_sta_vht_cap ath10k_create_vht_cap(struct ath10k *ar)
+{
+ struct ieee80211_sta_vht_cap vht_cap = {0};
+ struct ath10k_hw_params *hw = &ar->hw_params;
+ u16 mcs_map;
+ u32 val;
+ int i;
+
+ vht_cap.vht_supported = 1;
+ vht_cap.cap = ar->vht_cap_info;
+
+ if (ar->vht_cap_info & (IEEE80211_VHT_CAP_SU_BEAMFORMEE_CAPABLE |
+ IEEE80211_VHT_CAP_MU_BEAMFORMEE_CAPABLE)) {
+ val = ath10k_mac_get_vht_cap_bf_sts(ar);
+ val <<= IEEE80211_VHT_CAP_BEAMFORMEE_STS_SHIFT;
+ val &= IEEE80211_VHT_CAP_BEAMFORMEE_STS_MASK;
+
+ vht_cap.cap |= val;
+ }
+
+ if (ar->vht_cap_info & (IEEE80211_VHT_CAP_SU_BEAMFORMER_CAPABLE |
+ IEEE80211_VHT_CAP_MU_BEAMFORMER_CAPABLE)) {
+ val = ath10k_mac_get_vht_cap_bf_sound_dim(ar);
+ val <<= IEEE80211_VHT_CAP_SOUNDING_DIMENSIONS_SHIFT;
+ val &= IEEE80211_VHT_CAP_SOUNDING_DIMENSIONS_MASK;
+
+ vht_cap.cap |= val;
+ }
+
+ /* Currently the firmware seems to be buggy, don't enable 80+80
+ * mode until that's resolved.
+ */
+ if ((ar->vht_cap_info & IEEE80211_VHT_CAP_SHORT_GI_160) &&
+ (ar->vht_cap_info & IEEE80211_VHT_CAP_SUPP_CHAN_WIDTH_MASK) == 0)
+ vht_cap.cap |= IEEE80211_VHT_CAP_SUPP_CHAN_WIDTH_160MHZ;
+
+ mcs_map = 0;
+ for (i = 0; i < 8; i++) {
+ if ((i < ar->num_rf_chains) && (ar->cfg_tx_chainmask & BIT(i)))
+ mcs_map |= IEEE80211_VHT_MCS_SUPPORT_0_9 << (i * 2);
+ else
+ mcs_map |= IEEE80211_VHT_MCS_NOT_SUPPORTED << (i * 2);
+ }
+
+ if (ar->cfg_tx_chainmask <= 1)
+ vht_cap.cap &= ~IEEE80211_VHT_CAP_TXSTBC;
+
+ vht_cap.vht_mcs.rx_mcs_map = cpu_to_le16(mcs_map);
+ vht_cap.vht_mcs.tx_mcs_map = cpu_to_le16(mcs_map);
+
+ /* If we support 160 MHz or 80+80, then the NIC may be able to do
+ * only a restricted NSS for 160 or 80+80 versus what it can do for
+ * 80 MHz. Give user-space a clue if that is the case.
+ */
+ if ((vht_cap.cap & IEEE80211_VHT_CAP_SUPP_CHAN_WIDTH_MASK) &&
+ (hw->vht160_mcs_rx_highest != 0 ||
+ hw->vht160_mcs_tx_highest != 0)) {
+ vht_cap.vht_mcs.rx_highest = cpu_to_le16(hw->vht160_mcs_rx_highest);
+ vht_cap.vht_mcs.tx_highest = cpu_to_le16(hw->vht160_mcs_tx_highest);
+ }
+
+ return vht_cap;
+}
+
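+/* Worked example (illustrative) of the MCS map built above for a 2-chain
+ * device (num_rf_chains = 2, cfg_tx_chainmask = 0x3), two bits per NSS,
+ * LSB first:
+ *
+ *   NSS 1-2: IEEE80211_VHT_MCS_SUPPORT_0_9   (binary 10)
+ *   NSS 3-8: IEEE80211_VHT_MCS_NOT_SUPPORTED (binary 11)
+ *
+ *   mcs_map = 0xfffa (binary 11111111 11111010)
+ */
+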
+static struct ieee80211_sta_ht_cap ath10k_get_ht_cap(struct ath10k *ar)
+{
+ int i;
+ struct ieee80211_sta_ht_cap ht_cap = {0};
+
+ if (!(ar->ht_cap_info & WMI_HT_CAP_ENABLED))
+ return ht_cap;
+
+ ht_cap.ht_supported = 1;
+ ht_cap.ampdu_factor = IEEE80211_HT_MAX_AMPDU_64K;
+ ht_cap.ampdu_density = IEEE80211_HT_MPDU_DENSITY_8;
+ ht_cap.cap |= IEEE80211_HT_CAP_SUP_WIDTH_20_40;
+ ht_cap.cap |= IEEE80211_HT_CAP_DSSSCCK40;
+ ht_cap.cap |=
+ WLAN_HT_CAP_SM_PS_DISABLED << IEEE80211_HT_CAP_SM_PS_SHIFT;
+
+ if (ar->ht_cap_info & WMI_HT_CAP_HT20_SGI)
+ ht_cap.cap |= IEEE80211_HT_CAP_SGI_20;
+
+ if (ar->ht_cap_info & WMI_HT_CAP_HT40_SGI)
+ ht_cap.cap |= IEEE80211_HT_CAP_SGI_40;
+
+ if (ar->ht_cap_info & WMI_HT_CAP_DYNAMIC_SMPS) {
+ u32 smps;
+
+ smps = WLAN_HT_CAP_SM_PS_DYNAMIC;
+ smps <<= IEEE80211_HT_CAP_SM_PS_SHIFT;
+
+ ht_cap.cap |= smps;
+ }
+
+ if (ar->ht_cap_info & WMI_HT_CAP_TX_STBC && (ar->cfg_tx_chainmask > 1))
+ ht_cap.cap |= IEEE80211_HT_CAP_TX_STBC;
+
+ if (ar->ht_cap_info & WMI_HT_CAP_RX_STBC) {
+ u32 stbc;
+
+ stbc = ar->ht_cap_info;
+ stbc &= WMI_HT_CAP_RX_STBC;
+ stbc >>= WMI_HT_CAP_RX_STBC_MASK_SHIFT;
+ stbc <<= IEEE80211_HT_CAP_RX_STBC_SHIFT;
+ stbc &= IEEE80211_HT_CAP_RX_STBC;
+
+ ht_cap.cap |= stbc;
+ }
+
+ if (ar->ht_cap_info & WMI_HT_CAP_LDPC)
+ ht_cap.cap |= IEEE80211_HT_CAP_LDPC_CODING;
+
+ if (ar->ht_cap_info & WMI_HT_CAP_L_SIG_TXOP_PROT)
+ ht_cap.cap |= IEEE80211_HT_CAP_LSIG_TXOP_PROT;
+
+ /* max AMSDU is implicitly taken from vht_cap_info */
+ if (ar->vht_cap_info & WMI_VHT_CAP_MAX_MPDU_LEN_MASK)
+ ht_cap.cap |= IEEE80211_HT_CAP_MAX_AMSDU;
+
+ for (i = 0; i < ar->num_rf_chains; i++) {
+ if (ar->cfg_rx_chainmask & BIT(i))
+ ht_cap.mcs.rx_mask[i] = 0xFF;
+ }
+
+ ht_cap.mcs.tx_params |= IEEE80211_HT_MCS_TX_DEFINED;
+
+ return ht_cap;
+}
+
+static void ath10k_mac_setup_ht_vht_cap(struct ath10k *ar)
+{
+ struct ieee80211_supported_band *band;
+ struct ieee80211_sta_vht_cap vht_cap;
+ struct ieee80211_sta_ht_cap ht_cap;
+
+ ht_cap = ath10k_get_ht_cap(ar);
+ vht_cap = ath10k_create_vht_cap(ar);
+
+ if (ar->phy_capability & WHAL_WLAN_11G_CAPABILITY) {
+ band = &ar->mac.sbands[NL80211_BAND_2GHZ];
+ band->ht_cap = ht_cap;
+ }
+ if (ar->phy_capability & WHAL_WLAN_11A_CAPABILITY) {
+ band = &ar->mac.sbands[NL80211_BAND_5GHZ];
+ band->ht_cap = ht_cap;
+ band->vht_cap = vht_cap;
+ }
+}
+
+static int __ath10k_set_antenna(struct ath10k *ar, u32 tx_ant, u32 rx_ant)
+{
+ int ret;
+
+ lockdep_assert_held(&ar->conf_mutex);
+
+ ath10k_check_chain_mask(ar, tx_ant, "tx");
+ ath10k_check_chain_mask(ar, rx_ant, "rx");
+
+ ar->cfg_tx_chainmask = tx_ant;
+ ar->cfg_rx_chainmask = rx_ant;
+
+ if ((ar->state != ATH10K_STATE_ON) &&
+ (ar->state != ATH10K_STATE_RESTARTED))
+ return 0;
+
+ ret = ath10k_wmi_pdev_set_param(ar, ar->wmi.pdev_param->tx_chain_mask,
+ tx_ant);
+ if (ret) {
+ ath10k_warn(ar, "failed to set tx-chainmask: %d, req 0x%x\n",
+ ret, tx_ant);
+ return ret;
+ }
+
+ ret = ath10k_wmi_pdev_set_param(ar, ar->wmi.pdev_param->rx_chain_mask,
+ rx_ant);
+ if (ret) {
+ ath10k_warn(ar, "failed to set rx-chainmask: %d, req 0x%x\n",
+ ret, rx_ant);
+ return ret;
+ }
+
+ /* Reload HT/VHT capability */
+ ath10k_mac_setup_ht_vht_cap(ar);
+
+ return 0;
+}
+
+static int ath10k_set_antenna(struct ieee80211_hw *hw, u32 tx_ant, u32 rx_ant)
+{
+ struct ath10k *ar = hw->priv;
+ int ret;
+
+ mutex_lock(&ar->conf_mutex);
+ ret = __ath10k_set_antenna(ar, tx_ant, rx_ant);
+ mutex_unlock(&ar->conf_mutex);
+ return ret;
+}
+
+static int ath10k_start(struct ieee80211_hw *hw)
+{
+ struct ath10k *ar = hw->priv;
+ u32 param;
+ int ret = 0;
+
+ /*
+ * This makes sense only when restarting the hw. It is harmless to
+ * call unconditionally though. It is necessary to make sure no
+ * HTT/WMI tx commands are submitted while restarting.
+ */
+ ath10k_drain_tx(ar);
+
+ mutex_lock(&ar->conf_mutex);
+
+ switch (ar->state) {
+ case ATH10K_STATE_OFF:
+ ar->state = ATH10K_STATE_ON;
+ break;
+ case ATH10K_STATE_RESTARTING:
+ ar->state = ATH10K_STATE_RESTARTED;
+ break;
+ case ATH10K_STATE_ON:
+ case ATH10K_STATE_RESTARTED:
+ case ATH10K_STATE_WEDGED:
+ WARN_ON(1);
+ ret = -EINVAL;
+ goto err;
+ case ATH10K_STATE_UTF:
+ ret = -EBUSY;
+ goto err;
+ }
+
+ ret = ath10k_hif_power_up(ar);
+ if (ret) {
+ ath10k_err(ar, "Could not init hif: %d\n", ret);
+ goto err_off;
+ }
+
+ ret = ath10k_core_start(ar, ATH10K_FIRMWARE_MODE_NORMAL,
+ &ar->normal_mode_fw);
+ if (ret) {
+ ath10k_err(ar, "Could not init core: %d\n", ret);
+ goto err_power_down;
+ }
+
+ param = ar->wmi.pdev_param->pmf_qos;
+ ret = ath10k_wmi_pdev_set_param(ar, param, 1);
+ if (ret) {
+ ath10k_warn(ar, "failed to enable PMF QOS: %d\n", ret);
+ goto err_core_stop;
+ }
+
+ param = ar->wmi.pdev_param->dynamic_bw;
+ ret = ath10k_wmi_pdev_set_param(ar, param, 1);
+ if (ret) {
+ ath10k_warn(ar, "failed to enable dynamic BW: %d\n", ret);
+ goto err_core_stop;
+ }
+
+ if (test_bit(WMI_SERVICE_SPOOF_MAC_SUPPORT, ar->wmi.svc_map)) {
+ ret = ath10k_wmi_scan_prob_req_oui(ar, ar->mac_addr);
+ if (ret) {
+ ath10k_err(ar, "failed to set prob req oui: %i\n", ret);
+ goto err_core_stop;
+ }
+ }
+
+ if (test_bit(WMI_SERVICE_ADAPTIVE_OCS, ar->wmi.svc_map)) {
+ ret = ath10k_wmi_adaptive_qcs(ar, true);
+ if (ret) {
+ ath10k_warn(ar, "failed to enable adaptive qcs: %d\n",
+ ret);
+ goto err_core_stop;
+ }
+ }
+
+ if (test_bit(WMI_SERVICE_BURST, ar->wmi.svc_map)) {
+ param = ar->wmi.pdev_param->burst_enable;
+ ret = ath10k_wmi_pdev_set_param(ar, param, 0);
+ if (ret) {
+ ath10k_warn(ar, "failed to disable burst: %d\n", ret);
+ goto err_core_stop;
+ }
+ }
+
+ param = ar->wmi.pdev_param->idle_ps_config;
+ ret = ath10k_wmi_pdev_set_param(ar, param, 1);
+ if (ret && ret != -EOPNOTSUPP) {
+ ath10k_warn(ar, "failed to enable idle_ps_config: %d\n", ret);
+ goto err_core_stop;
+ }
+
+ __ath10k_set_antenna(ar, ar->cfg_tx_chainmask, ar->cfg_rx_chainmask);
+
+ /*
+ * By default the FW sets the ARP frame AC to voice (6). In that case
+ * the ARP exchange does not work properly with a UAPSD-enabled AP. ARP
+ * requests that arrive with access category 0 are processed by the
+ * network stack and sent back with access category 0, but the FW
+ * changes the access category to 6. Setting the ARP frame access
+ * category to best effort (0) solves this problem.
+ */
+
+ param = ar->wmi.pdev_param->arp_ac_override;
+ ret = ath10k_wmi_pdev_set_param(ar, param, 0);
+ if (ret) {
+ ath10k_warn(ar, "failed to set arp ac override parameter: %d\n",
+ ret);
+ goto err_core_stop;
+ }
+
+ if (test_bit(ATH10K_FW_FEATURE_SUPPORTS_ADAPTIVE_CCA,
+ ar->running_fw->fw_file.fw_features)) {
+ ret = ath10k_wmi_pdev_enable_adaptive_cca(ar, 1,
+ WMI_CCA_DETECT_LEVEL_AUTO,
+ WMI_CCA_DETECT_MARGIN_AUTO);
+ if (ret) {
+ ath10k_warn(ar, "failed to enable adaptive cca: %d\n",
+ ret);
+ goto err_core_stop;
+ }
+ }
+
+ param = ar->wmi.pdev_param->ani_enable;
+ ret = ath10k_wmi_pdev_set_param(ar, param, 1);
+ if (ret) {
+ ath10k_warn(ar, "failed to enable ani by default: %d\n",
+ ret);
+ goto err_core_stop;
+ }
+
+ ar->ani_enabled = true;
+
+ if (ath10k_peer_stats_enabled(ar)) {
+ param = ar->wmi.pdev_param->peer_stats_update_period;
+ ret = ath10k_wmi_pdev_set_param(ar, param,
+ PEER_DEFAULT_STATS_UPDATE_PERIOD);
+ if (ret) {
+ ath10k_warn(ar,
+ "failed to set peer stats period : %d\n",
+ ret);
+ goto err_core_stop;
+ }
+ }
+
+ param = ar->wmi.pdev_param->enable_btcoex;
+ if (test_bit(WMI_SERVICE_COEX_GPIO, ar->wmi.svc_map) &&
+ test_bit(ATH10K_FW_FEATURE_BTCOEX_PARAM,
+ ar->running_fw->fw_file.fw_features)) {
+ ret = ath10k_wmi_pdev_set_param(ar, param, 0);
+ if (ret) {
+ ath10k_warn(ar,
+ "failed to set btcoex param: %d\n", ret);
+ goto err_core_stop;
+ }
+ clear_bit(ATH10K_FLAG_BTCOEX, &ar->dev_flags);
+ }
+
+ ar->num_started_vdevs = 0;
+ ath10k_regd_update(ar);
+
+ ath10k_spectral_start(ar);
+ ath10k_thermal_set_throttling(ar);
+
+ ar->radar_conf_state = ATH10K_RADAR_CONFIRMATION_IDLE;
+
+ mutex_unlock(&ar->conf_mutex);
+ return 0;
+
+err_core_stop:
+ ath10k_core_stop(ar);
+
+err_power_down:
+ ath10k_hif_power_down(ar);
+
+err_off:
+ ar->state = ATH10K_STATE_OFF;
+
+err:
+ mutex_unlock(&ar->conf_mutex);
+ return ret;
+}
+
+static void ath10k_stop(struct ieee80211_hw *hw)
+{
+ struct ath10k *ar = hw->priv;
+
+ ath10k_drain_tx(ar);
+
+ mutex_lock(&ar->conf_mutex);
+ if (ar->state != ATH10K_STATE_OFF) {
+ ath10k_halt(ar);
+ ar->state = ATH10K_STATE_OFF;
+ }
+ mutex_unlock(&ar->conf_mutex);
+
+ cancel_work_sync(&ar->set_coverage_class_work);
+ cancel_delayed_work_sync(&ar->scan.timeout);
+ cancel_work_sync(&ar->restart_work);
+}
+
+static int ath10k_config_ps(struct ath10k *ar)
+{
+ struct ath10k_vif *arvif;
+ int ret = 0;
+
+ lockdep_assert_held(&ar->conf_mutex);
+
+ list_for_each_entry(arvif, &ar->arvifs, list) {
+ ret = ath10k_mac_vif_setup_ps(arvif);
+ if (ret) {
+ ath10k_warn(ar, "failed to setup powersave: %d\n", ret);
+ break;
+ }
+ }
+
+ return ret;
+}
+
+static int ath10k_mac_txpower_setup(struct ath10k *ar, int txpower)
+{
+ int ret;
+ u32 param;
+
+ lockdep_assert_held(&ar->conf_mutex);
+
+ ath10k_dbg(ar, ATH10K_DBG_MAC, "mac txpower %d\n", txpower);
+
+ param = ar->wmi.pdev_param->txpower_limit2g;
+ ret = ath10k_wmi_pdev_set_param(ar, param, txpower * 2);
+ if (ret) {
+ ath10k_warn(ar, "failed to set 2g txpower %d: %d\n",
+ txpower, ret);
+ return ret;
+ }
+
+ param = ar->wmi.pdev_param->txpower_limit5g;
+ ret = ath10k_wmi_pdev_set_param(ar, param, txpower * 2);
+ if (ret) {
+ ath10k_warn(ar, "failed to set 5g txpower %d: %d\n",
+ txpower, ret);
+ return ret;
+ }
+
+ return 0;
+}
+
+static int ath10k_mac_txpower_recalc(struct ath10k *ar)
+{
+ struct ath10k_vif *arvif;
+ int ret, txpower = -1;
+
+ lockdep_assert_held(&ar->conf_mutex);
+
+ list_for_each_entry(arvif, &ar->arvifs, list) {
+ if (arvif->txpower <= 0)
+ continue;
+
+ if (txpower == -1)
+ txpower = arvif->txpower;
+ else
+ txpower = min(txpower, arvif->txpower);
+ }
+
+ if (txpower == -1)
+ return 0;
+
+ ret = ath10k_mac_txpower_setup(ar, txpower);
+ if (ret) {
+ ath10k_warn(ar, "failed to setup tx power %d: %d\n",
+ txpower, ret);
+ return ret;
+ }
+
+ return 0;
+}
+
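+/* Example (values illustrative): with two active vifs configured for
+ * 20 dBm and 17 dBm, the recalc above picks min(20, 17) = 17 dBm and
+ * ath10k_mac_txpower_setup() programs 34 into the firmware - the WMI
+ * txpower limit parameters appear to be in 0.5 dBm units, hence the
+ * "txpower * 2" there.
+ */
+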
+static int ath10k_config(struct ieee80211_hw *hw, u32 changed)
+{
+ struct ath10k *ar = hw->priv;
+ struct ieee80211_conf *conf = &hw->conf;
+ int ret = 0;
+
+ mutex_lock(&ar->conf_mutex);
+
+ if (changed & IEEE80211_CONF_CHANGE_PS)
+ ath10k_config_ps(ar);
+
+ if (changed & IEEE80211_CONF_CHANGE_MONITOR) {
+ ar->monitor = conf->flags & IEEE80211_CONF_MONITOR;
+ ret = ath10k_monitor_recalc(ar);
+ if (ret)
+ ath10k_warn(ar, "failed to recalc monitor: %d\n", ret);
+ }
+
+ mutex_unlock(&ar->conf_mutex);
+ return ret;
+}
+
+static u32 get_nss_from_chainmask(u16 chain_mask)
+{
+ if ((chain_mask & 0xf) == 0xf)
+ return 4;
+ else if ((chain_mask & 0x7) == 0x7)
+ return 3;
+ else if ((chain_mask & 0x3) == 0x3)
+ return 2;
+ return 1;
+}
+
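+/* Example mappings (illustrative): a contiguous mask uses all of its
+ * chains, while a gapped mask silently falls back to one spatial
+ * stream, which is why ath10k_check_chain_mask() warns about gaps:
+ *
+ *   get_nss_from_chainmask(0xf) == 4
+ *   get_nss_from_chainmask(0x7) == 3
+ *   get_nss_from_chainmask(0x3) == 2
+ *   get_nss_from_chainmask(0x5) == 1   (chains 0 and 2, gap at 1)
+ */
+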
+static int ath10k_mac_set_txbf_conf(struct ath10k_vif *arvif)
+{
+ u32 value = 0;
+ struct ath10k *ar = arvif->ar;
+ int nsts;
+ int sound_dim;
+
+ if (ath10k_wmi_get_txbf_conf_scheme(ar) != WMI_TXBF_CONF_BEFORE_ASSOC)
+ return 0;
+
+ nsts = ath10k_mac_get_vht_cap_bf_sts(ar);
+ if (ar->vht_cap_info & (IEEE80211_VHT_CAP_SU_BEAMFORMEE_CAPABLE |
+ IEEE80211_VHT_CAP_MU_BEAMFORMEE_CAPABLE))
+ value |= SM(nsts, WMI_TXBF_STS_CAP_OFFSET);
+
+ sound_dim = ath10k_mac_get_vht_cap_bf_sound_dim(ar);
+ if (ar->vht_cap_info & (IEEE80211_VHT_CAP_SU_BEAMFORMER_CAPABLE |
+ IEEE80211_VHT_CAP_MU_BEAMFORMER_CAPABLE))
+ value |= SM(sound_dim, WMI_BF_SOUND_DIM_OFFSET);
+
+ if (!value)
+ return 0;
+
+ if (ar->vht_cap_info & IEEE80211_VHT_CAP_SU_BEAMFORMER_CAPABLE)
+ value |= WMI_VDEV_PARAM_TXBF_SU_TX_BFER;
+
+ if (ar->vht_cap_info & IEEE80211_VHT_CAP_MU_BEAMFORMER_CAPABLE)
+ value |= (WMI_VDEV_PARAM_TXBF_MU_TX_BFER |
+ WMI_VDEV_PARAM_TXBF_SU_TX_BFER);
+
+ if (ar->vht_cap_info & IEEE80211_VHT_CAP_SU_BEAMFORMEE_CAPABLE)
+ value |= WMI_VDEV_PARAM_TXBF_SU_TX_BFEE;
+
+ if (ar->vht_cap_info & IEEE80211_VHT_CAP_MU_BEAMFORMEE_CAPABLE)
+ value |= (WMI_VDEV_PARAM_TXBF_MU_TX_BFEE |
+ WMI_VDEV_PARAM_TXBF_SU_TX_BFEE);
+
+ return ath10k_wmi_vdev_set_param(ar, arvif->vdev_id,
+ ar->wmi.vdev_param->txbf, value);
+}
+
+/*
+ * TODO:
+ * Figure out how to handle WMI_VDEV_SUBTYPE_P2P_DEVICE,
+ * because we will send mgmt frames without CCK. This requirement
+ * for P2P_FIND/GO_NEG should be handled by checking CCK flag
+ * in the TX packet.
+ */
+static int ath10k_add_interface(struct ieee80211_hw *hw,
+ struct ieee80211_vif *vif)
+{
+ struct ath10k *ar = hw->priv;
+ struct ath10k_vif *arvif = (void *)vif->drv_priv;
+ struct ath10k_peer *peer;
+ enum wmi_sta_powersave_param param;
+ int ret = 0;
+ u32 value;
+ int bit;
+ int i;
+ u32 vdev_param;
+
+ vif->driver_flags |= IEEE80211_VIF_SUPPORTS_UAPSD;
+
+ mutex_lock(&ar->conf_mutex);
+
+ memset(arvif, 0, sizeof(*arvif));
+ ath10k_mac_txq_init(vif->txq);
+
+ arvif->ar = ar;
+ arvif->vif = vif;
+
+ INIT_LIST_HEAD(&arvif->list);
+ INIT_WORK(&arvif->ap_csa_work, ath10k_mac_vif_ap_csa_work);
+ INIT_DELAYED_WORK(&arvif->connection_loss_work,
+ ath10k_mac_vif_sta_connection_loss_work);
+
+ for (i = 0; i < ARRAY_SIZE(arvif->bitrate_mask.control); i++) {
+ arvif->bitrate_mask.control[i].legacy = 0xffffffff;
+ memset(arvif->bitrate_mask.control[i].ht_mcs, 0xff,
+ sizeof(arvif->bitrate_mask.control[i].ht_mcs));
+ memset(arvif->bitrate_mask.control[i].vht_mcs, 0xff,
+ sizeof(arvif->bitrate_mask.control[i].vht_mcs));
+ }
+
+ if (ar->num_peers >= ar->max_num_peers) {
+ ath10k_warn(ar, "refusing vdev creation due to insufficient peer entry resources in firmware\n");
+ ret = -ENOBUFS;
+ goto err;
+ }
+
+ if (ar->free_vdev_map == 0) {
+ ath10k_warn(ar, "Free vdev map is empty, no more interfaces allowed.\n");
+ ret = -EBUSY;
+ goto err;
+ }
+ bit = __ffs64(ar->free_vdev_map);
+
+ ath10k_dbg(ar, ATH10K_DBG_MAC, "mac create vdev %i map %llx\n",
+ bit, ar->free_vdev_map);
+
+ arvif->vdev_id = bit;
+ arvif->vdev_subtype =
+ ath10k_wmi_get_vdev_subtype(ar, WMI_VDEV_SUBTYPE_NONE);
+
+ switch (vif->type) {
+ case NL80211_IFTYPE_P2P_DEVICE:
+ arvif->vdev_type = WMI_VDEV_TYPE_STA;
+ arvif->vdev_subtype = ath10k_wmi_get_vdev_subtype
+ (ar, WMI_VDEV_SUBTYPE_P2P_DEVICE);
+ break;
+ case NL80211_IFTYPE_UNSPECIFIED:
+ case NL80211_IFTYPE_STATION:
+ arvif->vdev_type = WMI_VDEV_TYPE_STA;
+ if (vif->p2p)
+ arvif->vdev_subtype = ath10k_wmi_get_vdev_subtype
+ (ar, WMI_VDEV_SUBTYPE_P2P_CLIENT);
+ break;
+ case NL80211_IFTYPE_ADHOC:
+ arvif->vdev_type = WMI_VDEV_TYPE_IBSS;
+ break;
+ case NL80211_IFTYPE_MESH_POINT:
+ if (test_bit(WMI_SERVICE_MESH_11S, ar->wmi.svc_map)) {
+ arvif->vdev_subtype = ath10k_wmi_get_vdev_subtype
+ (ar, WMI_VDEV_SUBTYPE_MESH_11S);
+ } else if (!test_bit(ATH10K_FLAG_RAW_MODE, &ar->dev_flags)) {
+ ret = -EINVAL;
+ ath10k_warn(ar, "must load driver with rawmode=1 to add mesh interfaces\n");
+ goto err;
+ }
+ arvif->vdev_type = WMI_VDEV_TYPE_AP;
+ break;
+ case NL80211_IFTYPE_AP:
+ arvif->vdev_type = WMI_VDEV_TYPE_AP;
+
+ if (vif->p2p)
+ arvif->vdev_subtype = ath10k_wmi_get_vdev_subtype
+ (ar, WMI_VDEV_SUBTYPE_P2P_GO);
+ break;
+ case NL80211_IFTYPE_MONITOR:
+ arvif->vdev_type = WMI_VDEV_TYPE_MONITOR;
+ break;
+ default:
+ WARN_ON(1);
+ break;
+ }
+
+ /* Using vdev_id as the queue number makes it very easy to do per-vif
+ * tx queue locking. This shouldn't wrap due to interface combinations
+ * but do a modulo for correctness' sake and to prevent using the
+ * offchannel tx queue for regular vif tx.
+ */
+ vif->cab_queue = arvif->vdev_id % (IEEE80211_MAX_QUEUES - 1);
+ for (i = 0; i < ARRAY_SIZE(vif->hw_queue); i++)
+ vif->hw_queue[i] = arvif->vdev_id % (IEEE80211_MAX_QUEUES - 1);
+
+ /* Some firmware revisions don't wait for beacon tx completion before
+ * sending another SWBA event. This could lead to hardware using old
+ * (freed) beacon data in some cases, e.g. tx credit starvation
+ * combined with a missed TBTT. This is very rare.
+ *
+ * On non-IOMMU-enabled hosts this could be a possible security issue
+ * because the hw could beacon some random data over the air. On
+ * IOMMU-enabled hosts DMAR faults would occur in most cases and the
+ * target device would crash.
+ *
+ * Since there are no beacon tx completions (implicit or explicit)
+ * propagated to the host, the only workaround is to allocate a
+ * DMA-coherent buffer for the lifetime of the vif and use it for all
+ * beacon tx commands. Worst case for this approach is that some
+ * beacons may become corrupted, e.g. have garbled IEs or an
+ * out-of-date TIM bitmap.
+ */
+ if (vif->type == NL80211_IFTYPE_ADHOC ||
+ vif->type == NL80211_IFTYPE_MESH_POINT ||
+ vif->type == NL80211_IFTYPE_AP) {
+ arvif->beacon_buf = dma_zalloc_coherent(ar->dev,
+ IEEE80211_MAX_FRAME_LEN,
+ &arvif->beacon_paddr,
+ GFP_ATOMIC);
+ if (!arvif->beacon_buf) {
+ ret = -ENOMEM;
+ ath10k_warn(ar, "failed to allocate beacon buffer: %d\n",
+ ret);
+ goto err;
+ }
+ }
+ if (test_bit(ATH10K_FLAG_HW_CRYPTO_DISABLED, &ar->dev_flags))
+ arvif->nohwcrypt = true;
+
+ if (arvif->nohwcrypt &&
+ !test_bit(ATH10K_FLAG_RAW_MODE, &ar->dev_flags)) {
+ ret = -EINVAL;
+ ath10k_warn(ar, "cryptmode module param needed for sw crypto\n");
+ goto err;
+ }
+
+ ath10k_dbg(ar, ATH10K_DBG_MAC, "mac vdev create %d (add interface) type %d subtype %d bcnmode %s\n",
+ arvif->vdev_id, arvif->vdev_type, arvif->vdev_subtype,
+ arvif->beacon_buf ? "single-buf" : "per-skb");
+
+ ret = ath10k_wmi_vdev_create(ar, arvif->vdev_id, arvif->vdev_type,
+ arvif->vdev_subtype, vif->addr);
+ if (ret) {
+ ath10k_warn(ar, "failed to create WMI vdev %i: %d\n",
+ arvif->vdev_id, ret);
+ goto err;
+ }
+
+ ar->free_vdev_map &= ~(1LL << arvif->vdev_id);
+ spin_lock_bh(&ar->data_lock);
+ list_add(&arvif->list, &ar->arvifs);
+ spin_unlock_bh(&ar->data_lock);
+
+ /* It makes no sense to have firmware do keepalives. mac80211 already
+ * takes care of this with idle connection polling.
+ */
+ ret = ath10k_mac_vif_disable_keepalive(arvif);
+ if (ret) {
+ ath10k_warn(ar, "failed to disable keepalive on vdev %i: %d\n",
+ arvif->vdev_id, ret);
+ goto err_vdev_delete;
+ }
+
+ arvif->def_wep_key_idx = -1;
+
+ vdev_param = ar->wmi.vdev_param->tx_encap_type;
+ ret = ath10k_wmi_vdev_set_param(ar, arvif->vdev_id, vdev_param,
+ ATH10K_HW_TXRX_NATIVE_WIFI);
+ /* 10.X firmware does not support this VDEV parameter. Do not warn */
+ if (ret && ret != -EOPNOTSUPP) {
+ ath10k_warn(ar, "failed to set vdev %i TX encapsulation: %d\n",
+ arvif->vdev_id, ret);
+ goto err_vdev_delete;
+ }
+
+ /* Configuring the number of spatial streams for a monitor interface
+ * causes a target assert in qca9888 and qca6174.
+ */
+ if (ar->cfg_tx_chainmask && (vif->type != NL80211_IFTYPE_MONITOR)) {
+ u16 nss = get_nss_from_chainmask(ar->cfg_tx_chainmask);
+
+ vdev_param = ar->wmi.vdev_param->nss;
+ ret = ath10k_wmi_vdev_set_param(ar, arvif->vdev_id, vdev_param,
+ nss);
+ if (ret) {
+ ath10k_warn(ar, "failed to set vdev %i chainmask 0x%x, nss %i: %d\n",
+ arvif->vdev_id, ar->cfg_tx_chainmask, nss,
+ ret);
+ goto err_vdev_delete;
+ }
+ }
+
+ if (arvif->vdev_type == WMI_VDEV_TYPE_AP ||
+ arvif->vdev_type == WMI_VDEV_TYPE_IBSS) {
+ ret = ath10k_peer_create(ar, vif, NULL, arvif->vdev_id,
+ vif->addr, WMI_PEER_TYPE_DEFAULT);
+ if (ret) {
+ ath10k_warn(ar, "failed to create vdev %i peer for AP/IBSS: %d\n",
+ arvif->vdev_id, ret);
+ goto err_vdev_delete;
+ }
+
+ spin_lock_bh(&ar->data_lock);
+
+ peer = ath10k_peer_find(ar, arvif->vdev_id, vif->addr);
+ if (!peer) {
+ ath10k_warn(ar, "failed to lookup peer %pM on vdev %i\n",
+ vif->addr, arvif->vdev_id);
+ spin_unlock_bh(&ar->data_lock);
+ ret = -ENOENT;
+ goto err_peer_delete;
+ }
+
+ arvif->peer_id = find_first_bit(peer->peer_ids,
+ ATH10K_MAX_NUM_PEER_IDS);
+
+ spin_unlock_bh(&ar->data_lock);
+ } else {
+ arvif->peer_id = HTT_INVALID_PEERID;
+ }
+
+ if (arvif->vdev_type == WMI_VDEV_TYPE_AP) {
+ ret = ath10k_mac_set_kickout(arvif);
+ if (ret) {
+ ath10k_warn(ar, "failed to set vdev %i kickout parameters: %d\n",
+ arvif->vdev_id, ret);
+ goto err_peer_delete;
+ }
+ }
+
+ if (arvif->vdev_type == WMI_VDEV_TYPE_STA) {
+ param = WMI_STA_PS_PARAM_RX_WAKE_POLICY;
+ value = WMI_STA_PS_RX_WAKE_POLICY_WAKE;
+ ret = ath10k_wmi_set_sta_ps_param(ar, arvif->vdev_id,
+ param, value);
+ if (ret) {
+ ath10k_warn(ar, "failed to set vdev %i RX wake policy: %d\n",
+ arvif->vdev_id, ret);
+ goto err_peer_delete;
+ }
+
+ ret = ath10k_mac_vif_recalc_ps_wake_threshold(arvif);
+ if (ret) {
+ ath10k_warn(ar, "failed to recalc ps wake threshold on vdev %i: %d\n",
+ arvif->vdev_id, ret);
+ goto err_peer_delete;
+ }
+
+ ret = ath10k_mac_vif_recalc_ps_poll_count(arvif);
+ if (ret) {
+ ath10k_warn(ar, "failed to recalc ps poll count on vdev %i: %d\n",
+ arvif->vdev_id, ret);
+ goto err_peer_delete;
+ }
+ }
+
+ ret = ath10k_mac_set_txbf_conf(arvif);
+ if (ret) {
+ ath10k_warn(ar, "failed to set txbf for vdev %d: %d\n",
+ arvif->vdev_id, ret);
+ goto err_peer_delete;
+ }
+
+ ret = ath10k_mac_set_rts(arvif, ar->hw->wiphy->rts_threshold);
+ if (ret) {
+ ath10k_warn(ar, "failed to set rts threshold for vdev %d: %d\n",
+ arvif->vdev_id, ret);
+ goto err_peer_delete;
+ }
+
+ arvif->txpower = vif->bss_conf.txpower;
+ ret = ath10k_mac_txpower_recalc(ar);
+ if (ret) {
+ ath10k_warn(ar, "failed to recalc tx power: %d\n", ret);
+ goto err_peer_delete;
+ }
+
+ if (vif->type == NL80211_IFTYPE_MONITOR) {
+ ar->monitor_arvif = arvif;
+ ret = ath10k_monitor_recalc(ar);
+ if (ret) {
+ ath10k_warn(ar, "failed to recalc monitor: %d\n", ret);
+ goto err_peer_delete;
+ }
+ }
+
+ spin_lock_bh(&ar->htt.tx_lock);
+ if (!ar->tx_paused)
+ ieee80211_wake_queue(ar->hw, arvif->vdev_id);
+ spin_unlock_bh(&ar->htt.tx_lock);
+
+ mutex_unlock(&ar->conf_mutex);
+ return 0;
+
+err_peer_delete:
+ if (arvif->vdev_type == WMI_VDEV_TYPE_AP ||
+ arvif->vdev_type == WMI_VDEV_TYPE_IBSS)
+ ath10k_wmi_peer_delete(ar, arvif->vdev_id, vif->addr);
+
+err_vdev_delete:
+ ath10k_wmi_vdev_delete(ar, arvif->vdev_id);
+ ar->free_vdev_map |= 1LL << arvif->vdev_id;
+ spin_lock_bh(&ar->data_lock);
+ list_del(&arvif->list);
+ spin_unlock_bh(&ar->data_lock);
+
+err:
+ if (arvif->beacon_buf) {
+ dma_free_coherent(ar->dev, IEEE80211_MAX_FRAME_LEN,
+ arvif->beacon_buf, arvif->beacon_paddr);
+ arvif->beacon_buf = NULL;
+ }
+
+ mutex_unlock(&ar->conf_mutex);
+
+ return ret;
+}
+
+static void ath10k_mac_vif_tx_unlock_all(struct ath10k_vif *arvif)
+{
+ int i;
+
+ for (i = 0; i < BITS_PER_LONG; i++)
+ ath10k_mac_vif_tx_unlock(arvif, i);
+}
+
+static void ath10k_remove_interface(struct ieee80211_hw *hw,
+ struct ieee80211_vif *vif)
+{
+ struct ath10k *ar = hw->priv;
+ struct ath10k_vif *arvif = (void *)vif->drv_priv;
+ struct ath10k_peer *peer;
+ int ret;
+ int i;
+
+ cancel_work_sync(&arvif->ap_csa_work);
+ cancel_delayed_work_sync(&arvif->connection_loss_work);
+
+ mutex_lock(&ar->conf_mutex);
+
+ spin_lock_bh(&ar->data_lock);
+ ath10k_mac_vif_beacon_cleanup(arvif);
+ spin_unlock_bh(&ar->data_lock);
+
+ ret = ath10k_spectral_vif_stop(arvif);
+ if (ret)
+ ath10k_warn(ar, "failed to stop spectral for vdev %i: %d\n",
+ arvif->vdev_id, ret);
+
+ ar->free_vdev_map |= 1LL << arvif->vdev_id;
+ spin_lock_bh(&ar->data_lock);
+ list_del(&arvif->list);
+ spin_unlock_bh(&ar->data_lock);
+
+ if (arvif->vdev_type == WMI_VDEV_TYPE_AP ||
+ arvif->vdev_type == WMI_VDEV_TYPE_IBSS) {
+ ret = ath10k_wmi_peer_delete(arvif->ar, arvif->vdev_id,
+ vif->addr);
+ if (ret)
+ ath10k_warn(ar, "failed to submit AP/IBSS self-peer removal on vdev %i: %d\n",
+ arvif->vdev_id, ret);
+
+ kfree(arvif->u.ap.noa_data);
+ }
+
+ ath10k_dbg(ar, ATH10K_DBG_MAC, "mac vdev %i delete (remove interface)\n",
+ arvif->vdev_id);
+
+ ret = ath10k_wmi_vdev_delete(ar, arvif->vdev_id);
+ if (ret)
+ ath10k_warn(ar, "failed to delete WMI vdev %i: %d\n",
+ arvif->vdev_id, ret);
+
+ /* Some firmware revisions don't notify the host about self-peer
+ * removal until after the associated vdev is deleted.
+ */
+ if (arvif->vdev_type == WMI_VDEV_TYPE_AP ||
+ arvif->vdev_type == WMI_VDEV_TYPE_IBSS) {
+ ret = ath10k_wait_for_peer_deleted(ar, arvif->vdev_id,
+ vif->addr);
+ if (ret)
+ ath10k_warn(ar, "failed to remove AP self-peer on vdev %i: %d\n",
+ arvif->vdev_id, ret);
+
+ spin_lock_bh(&ar->data_lock);
+ ar->num_peers--;
+ spin_unlock_bh(&ar->data_lock);
+ }
+
+ spin_lock_bh(&ar->data_lock);
+ for (i = 0; i < ARRAY_SIZE(ar->peer_map); i++) {
+ peer = ar->peer_map[i];
+ if (!peer)
+ continue;
+
+ if (peer->vif == vif) {
+ ath10k_warn(ar, "found vif peer %pM entry on vdev %i after it was supposedly removed\n",
+ vif->addr, arvif->vdev_id);
+ peer->vif = NULL;
+ }
+ }
+ spin_unlock_bh(&ar->data_lock);
+
+ ath10k_peer_cleanup(ar, arvif->vdev_id);
+ ath10k_mac_txq_unref(ar, vif->txq);
+
+ if (vif->type == NL80211_IFTYPE_MONITOR) {
+ ar->monitor_arvif = NULL;
+ ret = ath10k_monitor_recalc(ar);
+ if (ret)
+ ath10k_warn(ar, "failed to recalc monitor: %d\n", ret);
+ }
+
+ ret = ath10k_mac_txpower_recalc(ar);
+ if (ret)
+ ath10k_warn(ar, "failed to recalc tx power: %d\n", ret);
+
+ spin_lock_bh(&ar->htt.tx_lock);
+ ath10k_mac_vif_tx_unlock_all(arvif);
+ spin_unlock_bh(&ar->htt.tx_lock);
+
+ ath10k_mac_txq_unref(ar, vif->txq);
+
+ mutex_unlock(&ar->conf_mutex);
+}
+
+/*
+ * FIXME: Has to be verified.
+ */
+#define SUPPORTED_FILTERS \
+ (FIF_ALLMULTI | \
+ FIF_CONTROL | \
+ FIF_PSPOLL | \
+ FIF_OTHER_BSS | \
+ FIF_BCN_PRBRESP_PROMISC | \
+ FIF_PROBE_REQ | \
+ FIF_FCSFAIL)
+
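+/* No filter is programmed into the device here; the flags are stored for
+ * the rx path to consult and the monitor vdev state is recalculated since
+ * promiscuous-style flags may require the monitor vdev to run.
+ */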
+static void ath10k_configure_filter(struct ieee80211_hw *hw,
+ unsigned int changed_flags,
+ unsigned int *total_flags,
+ u64 multicast)
+{
+ struct ath10k *ar = hw->priv;
+ int ret;
+
+ mutex_lock(&ar->conf_mutex);
+
+ changed_flags &= SUPPORTED_FILTERS;
+ *total_flags &= SUPPORTED_FILTERS;
+ ar->filter_flags = *total_flags;
+
+ ret = ath10k_monitor_recalc(ar);
+ if (ret)
+ ath10k_warn(ar, "failed to recalc monitor: %d\n", ret);
+
+ mutex_unlock(&ar->conf_mutex);
+}
+
+static void ath10k_bss_info_changed(struct ieee80211_hw *hw,
+ struct ieee80211_vif *vif,
+ struct ieee80211_bss_conf *info,
+ u32 changed)
+{
+ struct ath10k *ar = hw->priv;
+ struct ath10k_vif *arvif = (void *)vif->drv_priv;
+ struct cfg80211_chan_def def;
+ u32 vdev_param, pdev_param, slottime, preamble;
+ u16 bitrate, hw_value;
+ u8 rate;
+ int rateidx, ret = 0;
+ enum nl80211_band band;
+
+ mutex_lock(&ar->conf_mutex);
+
+ if (changed & BSS_CHANGED_IBSS)
+ ath10k_control_ibss(arvif, info, vif->addr);
+
+ if (changed & BSS_CHANGED_BEACON_INT) {
+ arvif->beacon_interval = info->beacon_int;
+ vdev_param = ar->wmi.vdev_param->beacon_interval;
+ ret = ath10k_wmi_vdev_set_param(ar, arvif->vdev_id, vdev_param,
+ arvif->beacon_interval);
+ ath10k_dbg(ar, ATH10K_DBG_MAC,
+ "mac vdev %d beacon_interval %d\n",
+ arvif->vdev_id, arvif->beacon_interval);
+
+ if (ret)
+ ath10k_warn(ar, "failed to set beacon interval for vdev %d: %i\n",
+ arvif->vdev_id, ret);
+ }
+
+ if (changed & BSS_CHANGED_BEACON) {
+ ath10k_dbg(ar, ATH10K_DBG_MAC,
+ "vdev %d set beacon tx mode to staggered\n",
+ arvif->vdev_id);
+
+ pdev_param = ar->wmi.pdev_param->beacon_tx_mode;
+ ret = ath10k_wmi_pdev_set_param(ar, pdev_param,
+ WMI_BEACON_STAGGERED_MODE);
+ if (ret)
+ ath10k_warn(ar, "failed to set beacon mode for vdev %d: %i\n",
+ arvif->vdev_id, ret);
+
+ ret = ath10k_mac_setup_bcn_tmpl(arvif);
+ if (ret)
+ ath10k_warn(ar, "failed to update beacon template: %d\n",
+ ret);
+
+ if (ieee80211_vif_is_mesh(vif)) {
+ /* mesh doesn't use SSID but firmware needs it */
+ strncpy(arvif->u.ap.ssid, "mesh",
+ sizeof(arvif->u.ap.ssid));
+ arvif->u.ap.ssid_len = 4;
+ }
+ }
+
+ if (changed & BSS_CHANGED_AP_PROBE_RESP) {
+ ret = ath10k_mac_setup_prb_tmpl(arvif);
+ if (ret)
+ ath10k_warn(ar, "failed to setup probe resp template on vdev %i: %d\n",
+ arvif->vdev_id, ret);
+ }
+
+ if (changed & (BSS_CHANGED_BEACON_INFO | BSS_CHANGED_BEACON)) {
+ arvif->dtim_period = info->dtim_period;
+
+ ath10k_dbg(ar, ATH10K_DBG_MAC,
+ "mac vdev %d dtim_period %d\n",
+ arvif->vdev_id, arvif->dtim_period);
+
+ vdev_param = ar->wmi.vdev_param->dtim_period;
+ ret = ath10k_wmi_vdev_set_param(ar, arvif->vdev_id, vdev_param,
+ arvif->dtim_period);
+ if (ret)
+ ath10k_warn(ar, "failed to set dtim period for vdev %d: %i\n",
+ arvif->vdev_id, ret);
+ }
+
+ if (changed & BSS_CHANGED_SSID &&
+ vif->type == NL80211_IFTYPE_AP) {
+ arvif->u.ap.ssid_len = info->ssid_len;
+ if (info->ssid_len)
+ memcpy(arvif->u.ap.ssid, info->ssid, info->ssid_len);
+ arvif->u.ap.hidden_ssid = info->hidden_ssid;
+ }
+
+ if (changed & BSS_CHANGED_BSSID && !is_zero_ether_addr(info->bssid))
+ ether_addr_copy(arvif->bssid, info->bssid);
+
+ if (changed & BSS_CHANGED_BEACON_ENABLED)
+ ath10k_control_beaconing(arvif, info);
+
+ if (changed & BSS_CHANGED_ERP_CTS_PROT) {
+ arvif->use_cts_prot = info->use_cts_prot;
+
+ ret = ath10k_recalc_rtscts_prot(arvif);
+ if (ret)
+ ath10k_warn(ar, "failed to recalculate rts/cts prot for vdev %d: %d\n",
+ arvif->vdev_id, ret);
+
+ if (ath10k_mac_can_set_cts_prot(arvif)) {
+ ret = ath10k_mac_set_cts_prot(arvif);
+ if (ret)
+ ath10k_warn(ar, "failed to set cts protection for vdev %d: %d\n",
+ arvif->vdev_id, ret);
+ }
+ }
+
+ if (changed & BSS_CHANGED_ERP_SLOT) {
+ if (info->use_short_slot)
+ if (info->use_short_slot)
+ slottime = WMI_VDEV_SLOT_TIME_SHORT; /* 9us */
+ else
+ slottime = WMI_VDEV_SLOT_TIME_LONG; /* 20us */
+
+ ath10k_dbg(ar, ATH10K_DBG_MAC, "mac vdev %d slot_time %d\n",
+ arvif->vdev_id, slottime);
+
+ vdev_param = ar->wmi.vdev_param->slot_time;
+ ret = ath10k_wmi_vdev_set_param(ar, arvif->vdev_id, vdev_param,
+ slottime);
+ if (ret)
+ ath10k_warn(ar, "failed to set erp slot for vdev %d: %i\n",
+ arvif->vdev_id, ret);
+ }
+
+ if (changed & BSS_CHANGED_ERP_PREAMBLE) {
+ if (info->use_short_preamble)
+ preamble = WMI_VDEV_PREAMBLE_SHORT;
+ else
+ preamble = WMI_VDEV_PREAMBLE_LONG;
+
+ ath10k_dbg(ar, ATH10K_DBG_MAC,
+ "mac vdev %d preamble %d\n",
+ arvif->vdev_id, preamble);
+
+ vdev_param = ar->wmi.vdev_param->preamble;
+ ret = ath10k_wmi_vdev_set_param(ar, arvif->vdev_id, vdev_param,
+ preamble);
+ if (ret)
+ ath10k_warn(ar, "failed to set preamble for vdev %d: %i\n",
+ arvif->vdev_id, ret);
+ }
+
+ if (changed & BSS_CHANGED_ASSOC) {
+ if (info->assoc) {
+ /* Workaround: Make sure monitor vdev is not running
+ * when associating to prevent some firmware revisions
+ * (e.g. 10.1 and 10.2) from crashing.
+ */
+ if (ar->monitor_started)
+ ath10k_monitor_stop(ar);
+ ath10k_bss_assoc(hw, vif, info);
+ ath10k_monitor_recalc(ar);
+ } else {
+ ath10k_bss_disassoc(hw, vif);
+ }
+ }
+
+ if (changed & BSS_CHANGED_TXPOWER) {
+ ath10k_dbg(ar, ATH10K_DBG_MAC, "mac vdev_id %i txpower %d\n",
+ arvif->vdev_id, info->txpower);
+
+ arvif->txpower = info->txpower;
+ ret = ath10k_mac_txpower_recalc(ar);
+ if (ret)
+ ath10k_warn(ar, "failed to recalc tx power: %d\n", ret);
+ }
+
+ if (changed & BSS_CHANGED_PS) {
+ arvif->ps = vif->bss_conf.ps;
+
+ ret = ath10k_config_ps(ar);
+ if (ret)
+ ath10k_warn(ar, "failed to setup ps on vdev %i: %d\n",
+ arvif->vdev_id, ret);
+ }
+
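+ /* mcast_rate[] is a 1-based index into the band's bitrate table (0
+ * means unset). ath10k_wmi_legacy_rates lists the four CCK rates ahead
+ * of the OFDM ones, so on 11a-capable PHYs (whose 5 GHz table carries
+ * no CCK entries) the index is shifted past the CCK slots before the
+ * firmware rate code is derived below.
+ */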
+ if (changed & BSS_CHANGED_MCAST_RATE &&
+ !ath10k_mac_vif_chan(arvif->vif, &def)) {
+ band = def.chan->band;
+ rateidx = vif->bss_conf.mcast_rate[band] - 1;
+
+ if (ar->phy_capability & WHAL_WLAN_11A_CAPABILITY)
+ rateidx += ATH10K_MAC_FIRST_OFDM_RATE_IDX;
+
+ bitrate = ath10k_wmi_legacy_rates[rateidx].bitrate;
+ hw_value = ath10k_wmi_legacy_rates[rateidx].hw_value;
+ if (ath10k_mac_bitrate_is_cck(bitrate))
+ preamble = WMI_RATE_PREAMBLE_CCK;
+ else
+ preamble = WMI_RATE_PREAMBLE_OFDM;
+
+ rate = ATH10K_HW_RATECODE(hw_value, 0, preamble);
+
+ ath10k_dbg(ar, ATH10K_DBG_MAC,
+ "mac vdev %d mcast_rate %x\n",
+ arvif->vdev_id, rate);
+
+ vdev_param = ar->wmi.vdev_param->mcast_data_rate;
+ ret = ath10k_wmi_vdev_set_param(ar, arvif->vdev_id,
+ vdev_param, rate);
+ if (ret)
+ ath10k_warn(ar,
+ "failed to set mcast rate on vdev %i: %d\n",
+ arvif->vdev_id, ret);
+
+ vdev_param = ar->wmi.vdev_param->bcast_data_rate;
+ ret = ath10k_wmi_vdev_set_param(ar, arvif->vdev_id,
+ vdev_param, rate);
+ if (ret)
+ ath10k_warn(ar,
+ "failed to set bcast rate on vdev %i: %d\n",
+ arvif->vdev_id, ret);
+ }
+
+ mutex_unlock(&ar->conf_mutex);
+}
+
+static void ath10k_mac_op_set_coverage_class(struct ieee80211_hw *hw, s16 value)
+{
+ struct ath10k *ar = hw->priv;
+
+ /* This function should never be called if setting the coverage class
+ * is not supported on this hardware.
+ */
+ if (!ar->hw_params.hw_ops->set_coverage_class) {
+ WARN_ON_ONCE(1);
+ return;
+ }
+ ar->hw_params.hw_ops->set_coverage_class(ar, value);
+}
+
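+/* Firmware supports only a limited number of TDLS peers and of vdevs
+ * with TDLS stations. The iterators below count TDLS stations on a given
+ * vif and vifs having at least one TDLS station so those limits can be
+ * enforced before new TDLS peers are created.
+ */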
+struct ath10k_mac_tdls_iter_data {
+ u32 num_tdls_stations;
+ struct ieee80211_vif *curr_vif;
+};
+
+static void ath10k_mac_tdls_vif_stations_count_iter(void *data,
+ struct ieee80211_sta *sta)
+{
+ struct ath10k_mac_tdls_iter_data *iter_data = data;
+ struct ath10k_sta *arsta = (struct ath10k_sta *)sta->drv_priv;
+ struct ieee80211_vif *sta_vif = arsta->arvif->vif;
+
+ if (sta->tdls && sta_vif == iter_data->curr_vif)
+ iter_data->num_tdls_stations++;
+}
+
+static int ath10k_mac_tdls_vif_stations_count(struct ieee80211_hw *hw,
+ struct ieee80211_vif *vif)
+{
+ struct ath10k_mac_tdls_iter_data data = {};
+
+ data.curr_vif = vif;
+
+ ieee80211_iterate_stations_atomic(hw,
+ ath10k_mac_tdls_vif_stations_count_iter,
+ &data);
+ return data.num_tdls_stations;
+}
+
+static void ath10k_mac_tdls_vifs_count_iter(void *data, u8 *mac,
+ struct ieee80211_vif *vif)
+{
+ struct ath10k_vif *arvif = (void *)vif->drv_priv;
+ int *num_tdls_vifs = data;
+
+ if (vif->type != NL80211_IFTYPE_STATION)
+ return;
+
+ if (ath10k_mac_tdls_vif_stations_count(arvif->ar->hw, vif) > 0)
+ (*num_tdls_vifs)++;
+}
+
+static int ath10k_mac_tdls_vifs_count(struct ieee80211_hw *hw)
+{
+ int num_tdls_vifs = 0;
+
+ ieee80211_iterate_active_interfaces_atomic(hw,
+ IEEE80211_IFACE_ITER_NORMAL,
+ ath10k_mac_tdls_vifs_count_iter,
+ &num_tdls_vifs);
+ return num_tdls_vifs;
+}
+
+static int ath10k_hw_scan(struct ieee80211_hw *hw,
+ struct ieee80211_vif *vif,
+ struct ieee80211_scan_request *hw_req)
+{
+ struct ath10k *ar = hw->priv;
+ struct ath10k_vif *arvif = (void *)vif->drv_priv;
+ struct cfg80211_scan_request *req = &hw_req->req;
+ struct wmi_start_scan_arg arg;
+ int ret = 0;
+ int i;
+ u32 scan_timeout;
+
+ mutex_lock(&ar->conf_mutex);
+
+ if (ath10k_mac_tdls_vif_stations_count(hw, vif) > 0) {
+ ret = -EBUSY;
+ goto exit;
+ }
+
+ spin_lock_bh(&ar->data_lock);
+ switch (ar->scan.state) {
+ case ATH10K_SCAN_IDLE:
+ reinit_completion(&ar->scan.started);
+ reinit_completion(&ar->scan.completed);
+ ar->scan.state = ATH10K_SCAN_STARTING;
+ ar->scan.is_roc = false;
+ ar->scan.vdev_id = arvif->vdev_id;
+ ret = 0;
+ break;
+ case ATH10K_SCAN_STARTING:
+ case ATH10K_SCAN_RUNNING:
+ case ATH10K_SCAN_ABORTING:
+ ret = -EBUSY;
+ break;
+ }
+ spin_unlock_bh(&ar->data_lock);
+
+ if (ret)
+ goto exit;
+
+ memset(&arg, 0, sizeof(arg));
+ ath10k_wmi_start_scan_init(ar, &arg);
+ arg.vdev_id = arvif->vdev_id;
+ arg.scan_id = ATH10K_SCAN_ID;
+
+ if (req->ie_len) {
+ arg.ie_len = req->ie_len;
+ memcpy(arg.ie, req->ie, arg.ie_len);
+ }
+
+ if (req->n_ssids) {
+ arg.n_ssids = req->n_ssids;
+ for (i = 0; i < arg.n_ssids; i++) {
+ arg.ssids[i].len = req->ssids[i].ssid_len;
+ arg.ssids[i].ssid = req->ssids[i].ssid;
+ }
+ } else {
+ arg.scan_ctrl_flags |= WMI_SCAN_FLAG_PASSIVE;
+ }
+
+ if (req->flags & NL80211_SCAN_FLAG_RANDOM_ADDR) {
+ arg.scan_ctrl_flags |= WMI_SCAN_ADD_SPOOFED_MAC_IN_PROBE_REQ;
+ ether_addr_copy(arg.mac_addr.addr, req->mac_addr);
+ ether_addr_copy(arg.mac_mask.addr, req->mac_addr_mask);
+ }
+
+ if (req->n_channels) {
+ arg.n_channels = req->n_channels;
+ for (i = 0; i < arg.n_channels; i++)
+ arg.channels[i] = req->channels[i]->center_freq;
+ }
+
+ /* if duration is set, default dwell times will be overwritten */
+ if (req->duration) {
+ arg.dwell_time_active = req->duration;
+ arg.dwell_time_passive = req->duration;
+ arg.burst_duration_ms = req->duration;
+
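+ /* Worst case: rest time between each of the channel visits plus the
+ * requested dwell and WMI event overhead on every channel, capped at
+ * the firmware's own maximum scan time plus the usual margin.
+ */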
+ scan_timeout = min_t(u32, arg.max_rest_time *
+ (arg.n_channels - 1) + (req->duration +
+ ATH10K_SCAN_CHANNEL_SWITCH_WMI_EVT_OVERHEAD) *
+ arg.n_channels, arg.max_scan_time + 200);
+
+ } else {
+ /* Add a 200ms margin to account for event/command processing */
+ scan_timeout = arg.max_scan_time + 200;
+ }
+
+ ret = ath10k_start_scan(ar, &arg);
+ if (ret) {
+ ath10k_warn(ar, "failed to start hw scan: %d\n", ret);
+ spin_lock_bh(&ar->data_lock);
+ ar->scan.state = ATH10K_SCAN_IDLE;
+ spin_unlock_bh(&ar->data_lock);
+ }
+
+ ieee80211_queue_delayed_work(ar->hw, &ar->scan.timeout,
+ msecs_to_jiffies(scan_timeout));
+
+exit:
+ mutex_unlock(&ar->conf_mutex);
+ return ret;
+}
+
+static void ath10k_cancel_hw_scan(struct ieee80211_hw *hw,
+ struct ieee80211_vif *vif)
+{
+ struct ath10k *ar = hw->priv;
+
+ mutex_lock(&ar->conf_mutex);
+ ath10k_scan_abort(ar);
+ mutex_unlock(&ar->conf_mutex);
+
+ cancel_delayed_work_sync(&ar->scan.timeout);
+}
+
+static void ath10k_set_key_h_def_keyidx(struct ath10k *ar,
+ struct ath10k_vif *arvif,
+ enum set_key_cmd cmd,
+ struct ieee80211_key_conf *key)
+{
+ u32 vdev_param = arvif->ar->wmi.vdev_param->def_keyid;
+ int ret;
+
+ /* The 10.1 firmware branch requires the default key index to be set to
+ * the group key index after installing it. Otherwise the FW/HW transmits
+ * corrupted frames with multi-vif APs. This is not required for the main
+ * firmware branch (e.g. 636).
+ *
+ * This is also needed for 636 fw for IBSS-RSN to work more reliably.
+ *
+ * FIXME: It remains unknown if this is required for multi-vif STA
+ * interfaces on 10.1.
+ */
+
+ if (arvif->vdev_type != WMI_VDEV_TYPE_AP &&
+ arvif->vdev_type != WMI_VDEV_TYPE_IBSS)
+ return;
+
+ if (key->cipher == WLAN_CIPHER_SUITE_WEP40)
+ return;
+
+ if (key->cipher == WLAN_CIPHER_SUITE_WEP104)
+ return;
+
+ if (key->flags & IEEE80211_KEY_FLAG_PAIRWISE)
+ return;
+
+ if (cmd != SET_KEY)
+ return;
+
+ ret = ath10k_wmi_vdev_set_param(ar, arvif->vdev_id, vdev_param,
+ key->keyidx);
+ if (ret)
+ ath10k_warn(ar, "failed to set vdev %i group key as default key: %d\n",
+ arvif->vdev_id, ret);
+}
+
+static int ath10k_set_key(struct ieee80211_hw *hw, enum set_key_cmd cmd,
+ struct ieee80211_vif *vif, struct ieee80211_sta *sta,
+ struct ieee80211_key_conf *key)
+{
+ struct ath10k *ar = hw->priv;
+ struct ath10k_vif *arvif = (void *)vif->drv_priv;
+ struct ath10k_peer *peer;
+ const u8 *peer_addr;
+ bool is_wep = key->cipher == WLAN_CIPHER_SUITE_WEP40 ||
+ key->cipher == WLAN_CIPHER_SUITE_WEP104;
+ int ret = 0;
+ int ret2;
+ u32 flags = 0;
+ u32 flags2;
+
+ /* These ciphers have to be handled in software. Returning 1 (rather
+ * than a negative error) tells mac80211 to fall back to software
+ * crypto for this key without treating it as a failure.
+ */
+ if (key->cipher == WLAN_CIPHER_SUITE_AES_CMAC ||
+ key->cipher == WLAN_CIPHER_SUITE_BIP_GMAC_128 ||
+ key->cipher == WLAN_CIPHER_SUITE_BIP_GMAC_256 ||
+ key->cipher == WLAN_CIPHER_SUITE_BIP_CMAC_256)
+ return 1;
+
+ if (arvif->nohwcrypt)
+ return 1;
+
+ if (key->keyidx > WMI_MAX_KEY_INDEX)
+ return -ENOSPC;
+
+ mutex_lock(&ar->conf_mutex);
+
+ if (sta)
+ peer_addr = sta->addr;
+ else if (arvif->vdev_type == WMI_VDEV_TYPE_STA)
+ peer_addr = vif->bss_conf.bssid;
+ else
+ peer_addr = vif->addr;
+
+ key->hw_key_idx = key->keyidx;
+
+ if (is_wep) {
+ if (cmd == SET_KEY)
+ arvif->wep_keys[key->keyidx] = key;
+ else
+ arvif->wep_keys[key->keyidx] = NULL;
+ }
+
+ /* The peer should not disappear midway (unless the FW goes awry) since
+ * we already hold conf_mutex. We just make sure it is there now.
+ */
+ spin_lock_bh(&ar->data_lock);
+ peer = ath10k_peer_find(ar, arvif->vdev_id, peer_addr);
+ spin_unlock_bh(&ar->data_lock);
+
+ if (!peer) {
+ if (cmd == SET_KEY) {
+ ath10k_warn(ar, "failed to install key for non-existent peer %pM\n",
+ peer_addr);
+ ret = -EOPNOTSUPP;
+ goto exit;
+ } else {
+ /* if the peer doesn't exist there is no key to disable anymore */
+ goto exit;
+ }
+ }
+
+ if (key->flags & IEEE80211_KEY_FLAG_PAIRWISE)
+ flags |= WMI_KEY_PAIRWISE;
+ else
+ flags |= WMI_KEY_GROUP;
+
+ if (is_wep) {
+ if (cmd == DISABLE_KEY)
+ ath10k_clear_vdev_key(arvif, key);
+
+ /* When WEP keys are uploaded it's possible that there are
+ * stations associated already (e.g. when merging) without any
+ * keys. Static WEP needs an explicit per-peer key upload.
+ */
+ if (vif->type == NL80211_IFTYPE_ADHOC &&
+ cmd == SET_KEY)
+ ath10k_mac_vif_update_wep_key(arvif, key);
+
+ /* 802.1x never sets the def_wep_key_idx so each set_key() call
+ * changes the default tx key.
+ *
+ * Static WEP sets def_wep_key_idx via .set_default_unicast_key
+ * after the first set_key().
+ */
+ if (cmd == SET_KEY && arvif->def_wep_key_idx == -1)
+ flags |= WMI_KEY_TX_USAGE;
+ }
+
+ ret = ath10k_install_key(arvif, key, cmd, peer_addr, flags);
+ if (ret) {
+ WARN_ON(ret > 0);
+ ath10k_warn(ar, "failed to install key for vdev %i peer %pM: %d\n",
+ arvif->vdev_id, peer_addr, ret);
+ goto exit;
+ }
+
+ /* mac80211 sets static WEP keys as groupwise while firmware requires
+ * them to be installed twice as both pairwise and groupwise.
+ */
+ if (is_wep && !sta && vif->type == NL80211_IFTYPE_STATION) {
+ flags2 = flags;
+ flags2 &= ~WMI_KEY_GROUP;
+ flags2 |= WMI_KEY_PAIRWISE;
+
+ ret = ath10k_install_key(arvif, key, cmd, peer_addr, flags2);
+ if (ret) {
+ WARN_ON(ret > 0);
+ ath10k_warn(ar, "failed to install (ucast) key for vdev %i peer %pM: %d\n",
+ arvif->vdev_id, peer_addr, ret);
+ ret2 = ath10k_install_key(arvif, key, DISABLE_KEY,
+ peer_addr, flags);
+ if (ret2) {
+ WARN_ON(ret2 > 0);
+ ath10k_warn(ar, "failed to disable (mcast) key for vdev %i peer %pM: %d\n",
+ arvif->vdev_id, peer_addr, ret2);
+ }
+ goto exit;
+ }
+ }
+
+ ath10k_set_key_h_def_keyidx(ar, arvif, cmd, key);
+
+ spin_lock_bh(&ar->data_lock);
+ peer = ath10k_peer_find(ar, arvif->vdev_id, peer_addr);
+ if (peer && cmd == SET_KEY)
+ peer->keys[key->keyidx] = key;
+ else if (peer && cmd == DISABLE_KEY)
+ peer->keys[key->keyidx] = NULL;
+ else if (peer == NULL)
+ /* impossible unless FW goes crazy */
+ ath10k_warn(ar, "Peer %pM disappeared!\n", peer_addr);
+ spin_unlock_bh(&ar->data_lock);
+
+ if (sta && sta->tdls)
+ ath10k_wmi_peer_set_param(ar, arvif->vdev_id, sta->addr,
+ WMI_PEER_AUTHORIZE, 1);
+
+exit:
+ mutex_unlock(&ar->conf_mutex);
+ return ret;
+}
+
+static void ath10k_set_default_unicast_key(struct ieee80211_hw *hw,
+ struct ieee80211_vif *vif,
+ int keyidx)
+{
+ struct ath10k *ar = hw->priv;
+ struct ath10k_vif *arvif = (void *)vif->drv_priv;
+ int ret;
+
+ mutex_lock(&arvif->ar->conf_mutex);
+
+ if (arvif->ar->state != ATH10K_STATE_ON)
+ goto unlock;
+
+ ath10k_dbg(ar, ATH10K_DBG_MAC, "mac vdev %d set keyidx %d\n",
+ arvif->vdev_id, keyidx);
+
+ ret = ath10k_wmi_vdev_set_param(arvif->ar,
+ arvif->vdev_id,
+ arvif->ar->wmi.vdev_param->def_keyid,
+ keyidx);
+
+ if (ret) {
+ ath10k_warn(ar, "failed to update wep key index for vdev %d: %d\n",
+ arvif->vdev_id,
+ ret);
+ goto unlock;
+ }
+
+ arvif->def_wep_key_idx = keyidx;
+
+unlock:
+ mutex_unlock(&arvif->ar->conf_mutex);
+}
+
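+/* Rate control updates arrive via ath10k_sta_rc_update() in atomic
+ * context while the WMI peer parameter updates may sleep, so the actual
+ * work is deferred to this worker.
+ */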
+static void ath10k_sta_rc_update_wk(struct work_struct *wk)
+{
+ struct ath10k *ar;
+ struct ath10k_vif *arvif;
+ struct ath10k_sta *arsta;
+ struct ieee80211_sta *sta;
+ struct cfg80211_chan_def def;
+ enum nl80211_band band;
+ const u8 *ht_mcs_mask;
+ const u16 *vht_mcs_mask;
+ u32 changed, bw, nss, smps;
+ int err;
+
+ arsta = container_of(wk, struct ath10k_sta, update_wk);
+ sta = container_of((void *)arsta, struct ieee80211_sta, drv_priv);
+ arvif = arsta->arvif;
+ ar = arvif->ar;
+
+ if (WARN_ON(ath10k_mac_vif_chan(arvif->vif, &def)))
+ return;
+
+ band = def.chan->band;
+ ht_mcs_mask = arvif->bitrate_mask.control[band].ht_mcs;
+ vht_mcs_mask = arvif->bitrate_mask.control[band].vht_mcs;
+
+ spin_lock_bh(&ar->data_lock);
+
+ changed = arsta->changed;
+ arsta->changed = 0;
+
+ bw = arsta->bw;
+ nss = arsta->nss;
+ smps = arsta->smps;
+
+ spin_unlock_bh(&ar->data_lock);
+
+ mutex_lock(&ar->conf_mutex);
+
+ nss = max_t(u32, 1, nss);
+ nss = min(nss, max(ath10k_mac_max_ht_nss(ht_mcs_mask),
+ ath10k_mac_max_vht_nss(vht_mcs_mask)));
+
+ if (changed & IEEE80211_RC_BW_CHANGED) {
+ enum wmi_phy_mode mode;
+
+ mode = chan_to_phymode(&def);
+ ath10k_dbg(ar, ATH10K_DBG_MAC, "mac update sta %pM peer bw %d phymode %d\n",
+ sta->addr, bw, mode);
+
+ err = ath10k_wmi_peer_set_param(ar, arvif->vdev_id, sta->addr,
+ WMI_PEER_PHYMODE, mode);
+ if (err) {
+ ath10k_warn(ar, "failed to update STA %pM peer phymode %d: %d\n",
+ sta->addr, mode, err);
+ goto exit;
+ }
+
+ err = ath10k_wmi_peer_set_param(ar, arvif->vdev_id, sta->addr,
+ WMI_PEER_CHAN_WIDTH, bw);
+ if (err)
+ ath10k_warn(ar, "failed to update STA %pM peer bw %d: %d\n",
+ sta->addr, bw, err);
+ }
+
+ if (changed & IEEE80211_RC_NSS_CHANGED) {
+ ath10k_dbg(ar, ATH10K_DBG_MAC, "mac update sta %pM nss %d\n",
+ sta->addr, nss);
+
+ err = ath10k_wmi_peer_set_param(ar, arvif->vdev_id, sta->addr,
+ WMI_PEER_NSS, nss);
+ if (err)
+ ath10k_warn(ar, "failed to update STA %pM nss %d: %d\n",
+ sta->addr, nss, err);
+ }
+
+ if (changed & IEEE80211_RC_SMPS_CHANGED) {
+ ath10k_dbg(ar, ATH10K_DBG_MAC, "mac update sta %pM smps %d\n",
+ sta->addr, smps);
+
+ err = ath10k_wmi_peer_set_param(ar, arvif->vdev_id, sta->addr,
+ WMI_PEER_SMPS_STATE, smps);
+ if (err)
+ ath10k_warn(ar, "failed to update STA %pM smps %d: %d\n",
+ sta->addr, smps, err);
+ }
+
+ if (changed & IEEE80211_RC_SUPP_RATES_CHANGED) {
+ ath10k_dbg(ar, ATH10K_DBG_MAC, "mac update sta %pM supp rates\n",
+ sta->addr);
+
+ err = ath10k_station_assoc(ar, arvif->vif, sta, true);
+ if (err)
+ ath10k_warn(ar, "failed to reassociate station: %pM\n",
+ sta->addr);
+ }
+
+exit:
+ mutex_unlock(&ar->conf_mutex);
+}
+
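+/* Firmware enforces a global limit on the number of connected stations
+ * shared by all vdevs. The peer a non-TDLS station vdev associates with
+ * (i.e. its AP) is not counted against that limit, hence the early
+ * return below.
+ */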
+static int ath10k_mac_inc_num_stations(struct ath10k_vif *arvif,
+ struct ieee80211_sta *sta)
+{
+ struct ath10k *ar = arvif->ar;
+
+ lockdep_assert_held(&ar->conf_mutex);
+
+ if (arvif->vdev_type == WMI_VDEV_TYPE_STA && !sta->tdls)
+ return 0;
+
+ if (ar->num_stations >= ar->max_num_stations)
+ return -ENOBUFS;
+
+ ar->num_stations++;
+
+ return 0;
+}
+
+static void ath10k_mac_dec_num_stations(struct ath10k_vif *arvif,
+ struct ieee80211_sta *sta)
+{
+ struct ath10k *ar = arvif->ar;
+
+ lockdep_assert_held(&ar->conf_mutex);
+
+ if (arvif->vdev_type == WMI_VDEV_TYPE_STA && !sta->tdls)
+ return;
+
+ ar->num_stations--;
+}
+
+static int ath10k_sta_state(struct ieee80211_hw *hw,
+ struct ieee80211_vif *vif,
+ struct ieee80211_sta *sta,
+ enum ieee80211_sta_state old_state,
+ enum ieee80211_sta_state new_state)
+{
+ struct ath10k *ar = hw->priv;
+ struct ath10k_vif *arvif = (void *)vif->drv_priv;
+ struct ath10k_sta *arsta = (struct ath10k_sta *)sta->drv_priv;
+ struct ath10k_peer *peer;
+ int ret = 0;
+ int i;
+
+ if (old_state == IEEE80211_STA_NOTEXIST &&
+ new_state == IEEE80211_STA_NONE) {
+ memset(arsta, 0, sizeof(*arsta));
+ arsta->arvif = arvif;
+ INIT_WORK(&arsta->update_wk, ath10k_sta_rc_update_wk);
+
+ for (i = 0; i < ARRAY_SIZE(sta->txq); i++)
+ ath10k_mac_txq_init(sta->txq[i]);
+ }
+
+ /* cancel must be done outside the mutex to avoid deadlock */
+ if ((old_state == IEEE80211_STA_NONE &&
+ new_state == IEEE80211_STA_NOTEXIST))
+ cancel_work_sync(&arsta->update_wk);
+
+ mutex_lock(&ar->conf_mutex);
+
+ if (old_state == IEEE80211_STA_NOTEXIST &&
+ new_state == IEEE80211_STA_NONE) {
+ /*
+ * New station addition.
+ */
+ enum wmi_peer_type peer_type = WMI_PEER_TYPE_DEFAULT;
+ u32 num_tdls_stations;
+ u32 num_tdls_vifs;
+
+ ath10k_dbg(ar, ATH10K_DBG_MAC,
+ "mac vdev %d peer create %pM (new sta) sta %d / %d peer %d / %d\n",
+ arvif->vdev_id, sta->addr,
+ ar->num_stations + 1, ar->max_num_stations,
+ ar->num_peers + 1, ar->max_num_peers);
+
+ num_tdls_stations = ath10k_mac_tdls_vif_stations_count(hw, vif);
+ num_tdls_vifs = ath10k_mac_tdls_vifs_count(hw);
+
+ if (sta->tdls) {
+ if (num_tdls_stations >= ar->max_num_tdls_vdevs) {
+ ath10k_warn(ar, "vdev %i exceeded maximum number of tdls vdevs %i\n",
+ arvif->vdev_id,
+ ar->max_num_tdls_vdevs);
+ ret = -ELNRNG;
+ goto exit;
+ }
+ peer_type = WMI_PEER_TYPE_TDLS;
+ }
+
+ ret = ath10k_mac_inc_num_stations(arvif, sta);
+ if (ret) {
+ ath10k_warn(ar, "refusing to associate station: too many connected already (%d)\n",
+ ar->max_num_stations);
+ goto exit;
+ }
+
+ ret = ath10k_peer_create(ar, vif, sta, arvif->vdev_id,
+ sta->addr, peer_type);
+ if (ret) {
+ ath10k_warn(ar, "failed to add peer %pM for vdev %d when adding a new sta: %i\n",
+ sta->addr, arvif->vdev_id, ret);
+ ath10k_mac_dec_num_stations(arvif, sta);
+ goto exit;
+ }
+
+ spin_lock_bh(&ar->data_lock);
+
+ peer = ath10k_peer_find(ar, arvif->vdev_id, sta->addr);
+ if (!peer) {
+ ath10k_warn(ar, "failed to lookup peer %pM on vdev %i\n",
+ vif->addr, arvif->vdev_id);
+ spin_unlock_bh(&ar->data_lock);
+ ath10k_peer_delete(ar, arvif->vdev_id, sta->addr);
+ ath10k_mac_dec_num_stations(arvif, sta);
+ ret = -ENOENT;
+ goto exit;
+ }
+
+ arsta->peer_id = find_first_bit(peer->peer_ids,
+ ATH10K_MAX_NUM_PEER_IDS);
+
+ spin_unlock_bh(&ar->data_lock);
+
+ if (!sta->tdls)
+ goto exit;
+
+ ret = ath10k_wmi_update_fw_tdls_state(ar, arvif->vdev_id,
+ WMI_TDLS_ENABLE_ACTIVE);
+ if (ret) {
+ ath10k_warn(ar, "failed to update fw tdls state on vdev %i: %i\n",
+ arvif->vdev_id, ret);
+ ath10k_peer_delete(ar, arvif->vdev_id,
+ sta->addr);
+ ath10k_mac_dec_num_stations(arvif, sta);
+ goto exit;
+ }
+
+ ret = ath10k_mac_tdls_peer_update(ar, arvif->vdev_id, sta,
+ WMI_TDLS_PEER_STATE_PEERING);
+ if (ret) {
+ ath10k_warn(ar,
+ "failed to update tdls peer %pM for vdev %d when adding a new sta: %i\n",
+ sta->addr, arvif->vdev_id, ret);
+ ath10k_peer_delete(ar, arvif->vdev_id, sta->addr);
+ ath10k_mac_dec_num_stations(arvif, sta);
+
+ if (num_tdls_stations != 0)
+ goto exit;
+ ath10k_wmi_update_fw_tdls_state(ar, arvif->vdev_id,
+ WMI_TDLS_DISABLE);
+ }
+ } else if ((old_state == IEEE80211_STA_NONE &&
+ new_state == IEEE80211_STA_NOTEXIST)) {
+ /*
+ * Existing station deletion.
+ */
+ ath10k_dbg(ar, ATH10K_DBG_MAC,
+ "mac vdev %d peer delete %pM sta %pK (sta gone)\n",
+ arvif->vdev_id, sta->addr, sta);
+
+ if (sta->tdls) {
+ ret = ath10k_mac_tdls_peer_update(ar, arvif->vdev_id,
+ sta,
+ WMI_TDLS_PEER_STATE_TEARDOWN);
+ if (ret)
+ ath10k_warn(ar, "failed to update tdls peer state for %pM state %d: %i\n",
+ sta->addr,
+ WMI_TDLS_PEER_STATE_TEARDOWN, ret);
+ }
+
+ ret = ath10k_peer_delete(ar, arvif->vdev_id, sta->addr);
+ if (ret)
+ ath10k_warn(ar, "failed to delete peer %pM for vdev %d: %i\n",
+ sta->addr, arvif->vdev_id, ret);
+
+ ath10k_mac_dec_num_stations(arvif, sta);
+
+ spin_lock_bh(&ar->data_lock);
+ for (i = 0; i < ARRAY_SIZE(ar->peer_map); i++) {
+ peer = ar->peer_map[i];
+ if (!peer)
+ continue;
+
+ if (peer->sta == sta) {
+ ath10k_warn(ar, "found sta peer %pM (ptr %pK id %d) entry on vdev %i after it was supposedly removed\n",
+ sta->addr, peer, i, arvif->vdev_id);
+ peer->sta = NULL;
+
+ /* Clean up the peer object as well since we
+ * must have failed to do this above.
+ */
+ list_del(&peer->list);
+ ar->peer_map[i] = NULL;
+ kfree(peer);
+ ar->num_peers--;
+ }
+ }
+ spin_unlock_bh(&ar->data_lock);
+
+ for (i = 0; i < ARRAY_SIZE(sta->txq); i++)
+ ath10k_mac_txq_unref(ar, sta->txq[i]);
+
+ if (!sta->tdls)
+ goto exit;
+
+ if (ath10k_mac_tdls_vif_stations_count(hw, vif))
+ goto exit;
+
+ /* This was the last tdls peer in current vif */
+ ret = ath10k_wmi_update_fw_tdls_state(ar, arvif->vdev_id,
+ WMI_TDLS_DISABLE);
+ if (ret) {
+ ath10k_warn(ar, "failed to update fw tdls state on vdev %i: %i\n",
+ arvif->vdev_id, ret);
+ }
+ } else if (old_state == IEEE80211_STA_AUTH &&
+ new_state == IEEE80211_STA_ASSOC &&
+ (vif->type == NL80211_IFTYPE_AP ||
+ vif->type == NL80211_IFTYPE_MESH_POINT ||
+ vif->type == NL80211_IFTYPE_ADHOC)) {
+ /*
+ * New association.
+ */
+ ath10k_dbg(ar, ATH10K_DBG_MAC, "mac sta %pM associated\n",
+ sta->addr);
+
+ ret = ath10k_station_assoc(ar, vif, sta, false);
+ if (ret)
+ ath10k_warn(ar, "failed to associate station %pM for vdev %i: %i\n",
+ sta->addr, arvif->vdev_id, ret);
+ } else if (old_state == IEEE80211_STA_ASSOC &&
+ new_state == IEEE80211_STA_AUTHORIZED &&
+ sta->tdls) {
+ /*
+ * Tdls station authorized.
+ */
+ ath10k_dbg(ar, ATH10K_DBG_MAC, "mac tdls sta %pM authorized\n",
+ sta->addr);
+
+ ret = ath10k_station_assoc(ar, vif, sta, false);
+ if (ret) {
+ ath10k_warn(ar, "failed to associate tdls station %pM for vdev %i: %i\n",
+ sta->addr, arvif->vdev_id, ret);
+ goto exit;
+ }
+
+ ret = ath10k_mac_tdls_peer_update(ar, arvif->vdev_id, sta,
+ WMI_TDLS_PEER_STATE_CONNECTED);
+ if (ret)
+ ath10k_warn(ar, "failed to update tdls peer %pM for vdev %i: %i\n",
+ sta->addr, arvif->vdev_id, ret);
+ } else if (old_state == IEEE80211_STA_ASSOC &&
+ new_state == IEEE80211_STA_AUTH &&
+ (vif->type == NL80211_IFTYPE_AP ||
+ vif->type == NL80211_IFTYPE_MESH_POINT ||
+ vif->type == NL80211_IFTYPE_ADHOC)) {
+ /*
+ * Disassociation.
+ */
+ ath10k_dbg(ar, ATH10K_DBG_MAC, "mac sta %pM disassociated\n",
+ sta->addr);
+
+ ret = ath10k_station_disassoc(ar, vif, sta);
+ if (ret)
+ ath10k_warn(ar, "failed to disassociate station: %pM vdev %i: %i\n",
+ sta->addr, arvif->vdev_id, ret);
+ }
+exit:
+ mutex_unlock(&ar->conf_mutex);
+ return ret;
+}
+
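+/* Note: the WMI access category numbering (acc) runs opposite to
+ * mac80211's enum, i.e. VO maps to AC 3 and BK to AC 0; prio carries the
+ * firmware user priority associated with the AC.
+ */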
+static int ath10k_conf_tx_uapsd(struct ath10k *ar, struct ieee80211_vif *vif,
+ u16 ac, bool enable)
+{
+ struct ath10k_vif *arvif = (void *)vif->drv_priv;
+ struct wmi_sta_uapsd_auto_trig_arg arg = {};
+ u32 prio = 0, acc = 0;
+ u32 value = 0;
+ int ret = 0;
+
+ lockdep_assert_held(&ar->conf_mutex);
+
+ if (arvif->vdev_type != WMI_VDEV_TYPE_STA)
+ return 0;
+
+ switch (ac) {
+ case IEEE80211_AC_VO:
+ value = WMI_STA_PS_UAPSD_AC3_DELIVERY_EN |
+ WMI_STA_PS_UAPSD_AC3_TRIGGER_EN;
+ prio = 7;
+ acc = 3;
+ break;
+ case IEEE80211_AC_VI:
+ value = WMI_STA_PS_UAPSD_AC2_DELIVERY_EN |
+ WMI_STA_PS_UAPSD_AC2_TRIGGER_EN;
+ prio = 5;
+ acc = 2;
+ break;
+ case IEEE80211_AC_BE:
+ value = WMI_STA_PS_UAPSD_AC1_DELIVERY_EN |
+ WMI_STA_PS_UAPSD_AC1_TRIGGER_EN;
+ prio = 2;
+ acc = 1;
+ break;
+ case IEEE80211_AC_BK:
+ value = WMI_STA_PS_UAPSD_AC0_DELIVERY_EN |
+ WMI_STA_PS_UAPSD_AC0_TRIGGER_EN;
+ prio = 0;
+ acc = 0;
+ break;
+ }
+
+ if (enable)
+ arvif->u.sta.uapsd |= value;
+ else
+ arvif->u.sta.uapsd &= ~value;
+
+ ret = ath10k_wmi_set_sta_ps_param(ar, arvif->vdev_id,
+ WMI_STA_PS_PARAM_UAPSD,
+ arvif->u.sta.uapsd);
+ if (ret) {
+ ath10k_warn(ar, "failed to set uapsd params: %d\n", ret);
+ goto exit;
+ }
+
+ if (arvif->u.sta.uapsd)
+ value = WMI_STA_PS_RX_WAKE_POLICY_POLL_UAPSD;
+ else
+ value = WMI_STA_PS_RX_WAKE_POLICY_WAKE;
+
+ ret = ath10k_wmi_set_sta_ps_param(ar, arvif->vdev_id,
+ WMI_STA_PS_PARAM_RX_WAKE_POLICY,
+ value);
+ if (ret)
+ ath10k_warn(ar, "failed to set rx wake param: %d\n", ret);
+
+ ret = ath10k_mac_vif_recalc_ps_wake_threshold(arvif);
+ if (ret) {
+ ath10k_warn(ar, "failed to recalc ps wake threshold on vdev %i: %d\n",
+ arvif->vdev_id, ret);
+ return ret;
+ }
+
+ ret = ath10k_mac_vif_recalc_ps_poll_count(arvif);
+ if (ret) {
+ ath10k_warn(ar, "failed to recalc ps poll count on vdev %i: %d\n",
+ arvif->vdev_id, ret);
+ return ret;
+ }
+
+ if (test_bit(WMI_SERVICE_STA_UAPSD_BASIC_AUTO_TRIG, ar->wmi.svc_map) ||
+ test_bit(WMI_SERVICE_STA_UAPSD_VAR_AUTO_TRIG, ar->wmi.svc_map)) {
+ /* Only userspace can make an educated decision on when to send a
+ * trigger frame. The following effectively disables U-APSD
+ * autotriggering in firmware (which is enabled by default provided
+ * the autotrigger service is available).
+ */
+
+ arg.wmm_ac = acc;
+ arg.user_priority = prio;
+ arg.service_interval = 0;
+ arg.suspend_interval = WMI_STA_UAPSD_MAX_INTERVAL_MSEC;
+ arg.delay_interval = WMI_STA_UAPSD_MAX_INTERVAL_MSEC;
+
+ ret = ath10k_wmi_vdev_sta_uapsd(ar, arvif->vdev_id,
+ arvif->bssid, &arg, 1);
+ if (ret) {
+ ath10k_warn(ar, "failed to set uapsd auto trigger %d\n",
+ ret);
+ return ret;
+ }
+ }
+
+exit:
+ return ret;
+}
+
+static int ath10k_conf_tx(struct ieee80211_hw *hw,
+ struct ieee80211_vif *vif, u16 ac,
+ const struct ieee80211_tx_queue_params *params)
+{
+ struct ath10k *ar = hw->priv;
+ struct ath10k_vif *arvif = (void *)vif->drv_priv;
+ struct wmi_wmm_params_arg *p = NULL;
+ int ret;
+
+ mutex_lock(&ar->conf_mutex);
+
+ switch (ac) {
+ case IEEE80211_AC_VO:
+ p = &arvif->wmm_params.ac_vo;
+ break;
+ case IEEE80211_AC_VI:
+ p = &arvif->wmm_params.ac_vi;
+ break;
+ case IEEE80211_AC_BE:
+ p = &arvif->wmm_params.ac_be;
+ break;
+ case IEEE80211_AC_BK:
+ p = &arvif->wmm_params.ac_bk;
+ break;
+ }
+
+ if (WARN_ON(!p)) {
+ ret = -EINVAL;
+ goto exit;
+ }
+
+ p->cwmin = params->cw_min;
+ p->cwmax = params->cw_max;
+ p->aifs = params->aifs;
+
+ /*
+ * The channel time duration programmed in the HW is in absolute
+ * microseconds, while mac80211 gives the txop in units of
+ * 32 microseconds.
+ */
+ p->txop = params->txop * 32;
+
+ if (ar->wmi.ops->gen_vdev_wmm_conf) {
+ ret = ath10k_wmi_vdev_wmm_conf(ar, arvif->vdev_id,
+ &arvif->wmm_params);
+ if (ret) {
+ ath10k_warn(ar, "failed to set vdev wmm params on vdev %i: %d\n",
+ arvif->vdev_id, ret);
+ goto exit;
+ }
+ } else {
+ /* This won't work well with multi-interface cases but it's
+ * better than nothing.
+ */
+ ret = ath10k_wmi_pdev_set_wmm_params(ar, &arvif->wmm_params);
+ if (ret) {
+ ath10k_warn(ar, "failed to set wmm params: %d\n", ret);
+ goto exit;
+ }
+ }
+
+ ret = ath10k_conf_tx_uapsd(ar, vif, ac, params->uapsd);
+ if (ret)
+ ath10k_warn(ar, "failed to set sta uapsd: %d\n", ret);
+
+exit:
+ mutex_unlock(&ar->conf_mutex);
+ return ret;
+}
+
+#define ATH10K_ROC_TIMEOUT_HZ (2 * HZ)
+
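+/* Remain-on-channel is implemented on top of the firmware scan engine:
+ * a single-channel passive scan whose dwell times are made long enough
+ * to cover any permitted remain-on-channel duration.
+ */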
+static int ath10k_remain_on_channel(struct ieee80211_hw *hw,
+ struct ieee80211_vif *vif,
+ struct ieee80211_channel *chan,
+ int duration,
+ enum ieee80211_roc_type type)
+{
+ struct ath10k *ar = hw->priv;
+ struct ath10k_vif *arvif = (void *)vif->drv_priv;
+ struct wmi_start_scan_arg arg;
+ int ret = 0;
+ u32 scan_time_msec;
+
+ mutex_lock(&ar->conf_mutex);
+
+ if (ath10k_mac_tdls_vif_stations_count(hw, vif) > 0) {
+ ret = -EBUSY;
+ goto exit;
+ }
+
+ spin_lock_bh(&ar->data_lock);
+ switch (ar->scan.state) {
+ case ATH10K_SCAN_IDLE:
+ reinit_completion(&ar->scan.started);
+ reinit_completion(&ar->scan.completed);
+ reinit_completion(&ar->scan.on_channel);
+ ar->scan.state = ATH10K_SCAN_STARTING;
+ ar->scan.is_roc = true;
+ ar->scan.vdev_id = arvif->vdev_id;
+ ar->scan.roc_freq = chan->center_freq;
+ ar->scan.roc_notify = true;
+ ret = 0;
+ break;
+ case ATH10K_SCAN_STARTING:
+ case ATH10K_SCAN_RUNNING:
+ case ATH10K_SCAN_ABORTING:
+ ret = -EBUSY;
+ break;
+ }
+ spin_unlock_bh(&ar->data_lock);
+
+ if (ret)
+ goto exit;
+
+ scan_time_msec = ar->hw->wiphy->max_remain_on_channel_duration * 2;
+
+ memset(&arg, 0, sizeof(arg));
+ ath10k_wmi_start_scan_init(ar, &arg);
+ arg.vdev_id = arvif->vdev_id;
+ arg.scan_id = ATH10K_SCAN_ID;
+ arg.n_channels = 1;
+ arg.channels[0] = chan->center_freq;
+ arg.dwell_time_active = scan_time_msec;
+ arg.dwell_time_passive = scan_time_msec;
+ arg.max_scan_time = scan_time_msec;
+ arg.scan_ctrl_flags |= WMI_SCAN_FLAG_PASSIVE;
+ arg.scan_ctrl_flags |= WMI_SCAN_FILTER_PROBE_REQ;
+ arg.burst_duration_ms = duration;
+
+ ret = ath10k_start_scan(ar, &arg);
+ if (ret) {
+ ath10k_warn(ar, "failed to start roc scan: %d\n", ret);
+ spin_lock_bh(&ar->data_lock);
+ ar->scan.state = ATH10K_SCAN_IDLE;
+ spin_unlock_bh(&ar->data_lock);
+ goto exit;
+ }
+
+ ret = wait_for_completion_timeout(&ar->scan.on_channel, 3 * HZ);
+ if (ret == 0) {
+ ath10k_warn(ar, "failed to switch to channel for roc scan\n");
+
+ ret = ath10k_scan_stop(ar);
+ if (ret)
+ ath10k_warn(ar, "failed to stop scan: %d\n", ret);
+
+ ret = -ETIMEDOUT;
+ goto exit;
+ }
+
+ ieee80211_queue_delayed_work(ar->hw, &ar->scan.timeout,
+ msecs_to_jiffies(duration));
+
+ ret = 0;
+exit:
+ mutex_unlock(&ar->conf_mutex);
+ return ret;
+}
+
+static int ath10k_cancel_remain_on_channel(struct ieee80211_hw *hw)
+{
+ struct ath10k *ar = hw->priv;
+
+ mutex_lock(&ar->conf_mutex);
+
+ spin_lock_bh(&ar->data_lock);
+ ar->scan.roc_notify = false;
+ spin_unlock_bh(&ar->data_lock);
+
+ ath10k_scan_abort(ar);
+
+ mutex_unlock(&ar->conf_mutex);
+
+ cancel_delayed_work_sync(&ar->scan.timeout);
+
+ return 0;
+}
+
+/*
+ * Both RTS and Fragmentation threshold are interface-specific
+ * in ath10k, but device-specific in mac80211.
+ */
+
+static int ath10k_set_rts_threshold(struct ieee80211_hw *hw, u32 value)
+{
+ struct ath10k *ar = hw->priv;
+ struct ath10k_vif *arvif;
+ int ret = 0;
+
+ mutex_lock(&ar->conf_mutex);
+ list_for_each_entry(arvif, &ar->arvifs, list) {
+ ath10k_dbg(ar, ATH10K_DBG_MAC, "mac vdev %d rts threshold %d\n",
+ arvif->vdev_id, value);
+
+ ret = ath10k_mac_set_rts(arvif, value);
+ if (ret) {
+ ath10k_warn(ar, "failed to set rts threshold for vdev %d: %d\n",
+ arvif->vdev_id, ret);
+ break;
+ }
+ }
+ mutex_unlock(&ar->conf_mutex);
+
+ return ret;
+}
+
+static int ath10k_mac_op_set_frag_threshold(struct ieee80211_hw *hw, u32 value)
+{
+ /* Even though there's a WMI enum for the fragmentation threshold, no
+ * known firmware actually implements it. Moreover it is not possible to
+ * delegate frame fragmentation to mac80211 because firmware clears the
+ * "more fragments" bit in frame control, making it impossible for remote
+ * devices to reassemble frames.
+ *
+ * Hence implement a dummy callback just to say fragmentation isn't
+ * supported. This effectively prevents mac80211 from doing frame
+ * fragmentation in software.
+ */
+ return -EOPNOTSUPP;
+}
+
+static void ath10k_flush(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
+ u32 queues, bool drop)
+{
+ struct ath10k *ar = hw->priv;
+ bool skip;
+ long time_left;
+
+ /* mac80211 doesn't care whether we really transmit queued frames or
+ * not; we'll collect those frames either way if we stop/delete vdevs.
+ */
+ if (drop)
+ return;
+
+ mutex_lock(&ar->conf_mutex);
+
+ if (ar->state == ATH10K_STATE_WEDGED)
+ goto skip;
+
+ time_left = wait_event_timeout(ar->htt.empty_tx_wq, ({
+ bool empty;
+
+ spin_lock_bh(&ar->htt.tx_lock);
+ empty = (ar->htt.num_pending_tx == 0);
+ spin_unlock_bh(&ar->htt.tx_lock);
+
+ skip = (ar->state == ATH10K_STATE_WEDGED) ||
+ test_bit(ATH10K_FLAG_CRASH_FLUSH,
+ &ar->dev_flags);
+
+ (empty || skip);
+ }), ATH10K_FLUSH_TIMEOUT_HZ);
+
+ if (time_left == 0 || skip)
+ ath10k_warn(ar, "failed to flush transmit queue (skip %i ar-state %i): %ld\n",
+ skip, ar->state, time_left);
+
+skip:
+ mutex_unlock(&ar->conf_mutex);
+}
+
+/* TODO: Implement this function properly.
+ * For now it is needed to reply to Probe Requests in IBSS mode.
+ * Probably we need this information from FW.
+ */
+static int ath10k_tx_last_beacon(struct ieee80211_hw *hw)
+{
+ return 1;
+}
+
+static void ath10k_reconfig_complete(struct ieee80211_hw *hw,
+ enum ieee80211_reconfig_type reconfig_type)
+{
+ struct ath10k *ar = hw->priv;
+
+ if (reconfig_type != IEEE80211_RECONFIG_TYPE_RESTART)
+ return;
+
+ mutex_lock(&ar->conf_mutex);
+
+ /* If the device failed to restart it will be in a different state,
+ * e.g. ATH10K_STATE_WEDGED.
+ */
+ if (ar->state == ATH10K_STATE_RESTARTED) {
+ ath10k_info(ar, "device successfully recovered\n");
+ ar->state = ATH10K_STATE_ON;
+ ieee80211_wake_queues(ar->hw);
+ }
+
+ mutex_unlock(&ar->conf_mutex);
+}
+
+static void
+ath10k_mac_update_bss_chan_survey(struct ath10k *ar,
+ struct ieee80211_channel *channel)
+{
+ int ret;
+ enum wmi_bss_survey_req_type type = WMI_BSS_SURVEY_REQ_TYPE_READ;
+
+ lockdep_assert_held(&ar->conf_mutex);
+
+ if (!test_bit(WMI_SERVICE_BSS_CHANNEL_INFO_64, ar->wmi.svc_map) ||
+ (ar->rx_channel != channel))
+ return;
+
+ if (ar->scan.state != ATH10K_SCAN_IDLE) {
+ ath10k_dbg(ar, ATH10K_DBG_MAC, "ignoring bss chan info request while scanning...\n");
+ return;
+ }
+
+ reinit_completion(&ar->bss_survey_done);
+
+ ret = ath10k_wmi_pdev_bss_chan_info_request(ar, type);
+ if (ret) {
+ ath10k_warn(ar, "failed to send pdev bss chan info request\n");
+ return;
+ }
+
+ ret = wait_for_completion_timeout(&ar->bss_survey_done, 3 * HZ);
+ if (!ret) {
+ ath10k_warn(ar, "bss channel survey timed out\n");
+ return;
+ }
+}
+
+static int ath10k_get_survey(struct ieee80211_hw *hw, int idx,
+ struct survey_info *survey)
+{
+ struct ath10k *ar = hw->priv;
+ struct ieee80211_supported_band *sband;
+ struct survey_info *ar_survey = &ar->survey[idx];
+ int ret = 0;
+
+ mutex_lock(&ar->conf_mutex);
+
+ sband = hw->wiphy->bands[NL80211_BAND_2GHZ];
+ if (sband && idx >= sband->n_channels) {
+ idx -= sband->n_channels;
+ sband = NULL;
+ }
+
+ if (!sband)
+ sband = hw->wiphy->bands[NL80211_BAND_5GHZ];
+
+ if (!sband || idx >= sband->n_channels) {
+ ret = -ENOENT;
+ goto exit;
+ }
+
+ ath10k_mac_update_bss_chan_survey(ar, &sband->channels[idx]);
+
+ spin_lock_bh(&ar->data_lock);
+ memcpy(survey, ar_survey, sizeof(*survey));
+ spin_unlock_bh(&ar->data_lock);
+
+ survey->channel = &sband->channels[idx];
+
+ if (ar->rx_channel == survey->channel)
+ survey->filled |= SURVEY_INFO_IN_USE;
+
+exit:
+ mutex_unlock(&ar->conf_mutex);
+ return ret;
+}
+
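+/* A fixed rate can be programmed only when the requested bitrate mask
+ * selects exactly one rate across the legacy, HT and VHT tables
+ * combined.
+ */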
+static bool
+ath10k_mac_bitrate_mask_has_single_rate(struct ath10k *ar,
+ enum nl80211_band band,
+ const struct cfg80211_bitrate_mask *mask)
+{
+ int num_rates = 0;
+ int i;
+
+ num_rates += hweight32(mask->control[band].legacy);
+
+ for (i = 0; i < ARRAY_SIZE(mask->control[band].ht_mcs); i++)
+ num_rates += hweight8(mask->control[band].ht_mcs[i]);
+
+ for (i = 0; i < ARRAY_SIZE(mask->control[band].vht_mcs); i++)
+ num_rates += hweight16(mask->control[band].vht_mcs[i]);
+
+ return num_rates == 1;
+}
+
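+/* Detect masks that fix only the number of spatial streams: no legacy
+ * rates, each HT/VHT MCS row either empty or fully enabled, HT and VHT
+ * agreeing on the same set of streams, and that set being contiguous
+ * from NSS 1 upwards (the BIT(fls()) - 1 check).
+ */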
+static bool
+ath10k_mac_bitrate_mask_get_single_nss(struct ath10k *ar,
+ enum nl80211_band band,
+ const struct cfg80211_bitrate_mask *mask,
+ int *nss)
+{
+ struct ieee80211_supported_band *sband = &ar->mac.sbands[band];
+ u16 vht_mcs_map = le16_to_cpu(sband->vht_cap.vht_mcs.tx_mcs_map);
+ u8 ht_nss_mask = 0;
+ u8 vht_nss_mask = 0;
+ int i;
+
+ if (mask->control[band].legacy)
+ return false;
+
+ for (i = 0; i < ARRAY_SIZE(mask->control[band].ht_mcs); i++) {
+ if (mask->control[band].ht_mcs[i] == 0)
+ continue;
+ else if (mask->control[band].ht_mcs[i] ==
+ sband->ht_cap.mcs.rx_mask[i])
+ ht_nss_mask |= BIT(i);
+ else
+ return false;
+ }
+
+ for (i = 0; i < ARRAY_SIZE(mask->control[band].vht_mcs); i++) {
+ if (mask->control[band].vht_mcs[i] == 0)
+ continue;
+ else if (mask->control[band].vht_mcs[i] ==
+ ath10k_mac_get_max_vht_mcs_map(vht_mcs_map, i))
+ vht_nss_mask |= BIT(i);
+ else
+ return false;
+ }
+
+ if (ht_nss_mask != vht_nss_mask)
+ return false;
+
+ if (ht_nss_mask == 0)
+ return false;
+
+ if (BIT(fls(ht_nss_mask)) - 1 != ht_nss_mask)
+ return false;
+
+ *nss = fls(ht_nss_mask);
+
+ return true;
+}
+
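+/* Reduce a single-rate mask to the rate code format consumed by the
+ * fixed_rate vdev parameter, mirroring ATH10K_HW_RATECODE():
+ * preamble << 6 | (nss - 1) << 4 | hw_rate.
+ */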
+static int
+ath10k_mac_bitrate_mask_get_single_rate(struct ath10k *ar,
+ enum nl80211_band band,
+ const struct cfg80211_bitrate_mask *mask,
+ u8 *rate, u8 *nss)
+{
+ int rate_idx;
+ int i;
+ u16 bitrate;
+ u8 preamble;
+ u8 hw_rate;
+
+ if (hweight32(mask->control[band].legacy) == 1) {
+ rate_idx = ffs(mask->control[band].legacy) - 1;
+
+ if (ar->phy_capability & WHAL_WLAN_11A_CAPABILITY)
+ rate_idx += ATH10K_MAC_FIRST_OFDM_RATE_IDX;
+
+ hw_rate = ath10k_wmi_legacy_rates[rate_idx].hw_value;
+ bitrate = ath10k_wmi_legacy_rates[rate_idx].bitrate;
+
+ if (ath10k_mac_bitrate_is_cck(bitrate))
+ preamble = WMI_RATE_PREAMBLE_CCK;
+ else
+ preamble = WMI_RATE_PREAMBLE_OFDM;
+
+ *nss = 1;
+ *rate = preamble << 6 |
+ (*nss - 1) << 4 |
+ hw_rate << 0;
+
+ return 0;
+ }
+
+ for (i = 0; i < ARRAY_SIZE(mask->control[band].ht_mcs); i++) {
+ if (hweight8(mask->control[band].ht_mcs[i]) == 1) {
+ *nss = i + 1;
+ *rate = WMI_RATE_PREAMBLE_HT << 6 |
+ (*nss - 1) << 4 |
+ (ffs(mask->control[band].ht_mcs[i]) - 1);
+
+ return 0;
+ }
+ }
+
+ for (i = 0; i < ARRAY_SIZE(mask->control[band].vht_mcs); i++) {
+ if (hweight16(mask->control[band].vht_mcs[i]) == 1) {
+ *nss = i + 1;
+ *rate = WMI_RATE_PREAMBLE_VHT << 6 |
+ (*nss - 1) << 4 |
+ (ffs(mask->control[band].vht_mcs[i]) - 1);
+
+ return 0;
+ }
+ }
+
+ return -EINVAL;
+}
+
+static int ath10k_mac_set_fixed_rate_params(struct ath10k_vif *arvif,
+ u8 rate, u8 nss, u8 sgi, u8 ldpc)
+{
+ struct ath10k *ar = arvif->ar;
+ u32 vdev_param;
+ int ret;
+
+ lockdep_assert_held(&ar->conf_mutex);
+
+ ath10k_dbg(ar, ATH10K_DBG_MAC, "mac set fixed rate params vdev %i rate 0x%02hhx nss %hhu sgi %hhu\n",
+ arvif->vdev_id, rate, nss, sgi);
+
+ vdev_param = ar->wmi.vdev_param->fixed_rate;
+ ret = ath10k_wmi_vdev_set_param(ar, arvif->vdev_id, vdev_param, rate);
+ if (ret) {
+ ath10k_warn(ar, "failed to set fixed rate param 0x%02x: %d\n",
+ rate, ret);
+ return ret;
+ }
+
+ vdev_param = ar->wmi.vdev_param->nss;
+ ret = ath10k_wmi_vdev_set_param(ar, arvif->vdev_id, vdev_param, nss);
+ if (ret) {
+ ath10k_warn(ar, "failed to set nss param %d: %d\n", nss, ret);
+ return ret;
+ }
+
+ vdev_param = ar->wmi.vdev_param->sgi;
+ ret = ath10k_wmi_vdev_set_param(ar, arvif->vdev_id, vdev_param, sgi);
+ if (ret) {
+ ath10k_warn(ar, "failed to set sgi param %d: %d\n", sgi, ret);
+ return ret;
+ }
+
+ vdev_param = ar->wmi.vdev_param->ldpc;
+ ret = ath10k_wmi_vdev_set_param(ar, arvif->vdev_id, vdev_param, ldpc);
+ if (ret) {
+ ath10k_warn(ar, "failed to set ldpc param %d: %d\n", ldpc, ret);
+ return ret;
+ }
+
+ return 0;
+}
+
+static bool
+ath10k_mac_can_set_bitrate_mask(struct ath10k *ar,
+ enum nl80211_band band,
+ const struct cfg80211_bitrate_mask *mask)
+{
+ int i;
+ u16 vht_mcs;
+
+ /* Due to firmware limitation in WMI_PEER_ASSOC_CMDID it is impossible
+ * to express all VHT MCS rate masks. Effectively only the following
+ * ranges can be used: none, 0-7, 0-8 and 0-9.
+ */
+ for (i = 0; i < NL80211_VHT_NSS_MAX; i++) {
+ vht_mcs = mask->control[band].vht_mcs[i];
+
+ switch (vht_mcs) {
+ case 0:
+ case BIT(8) - 1:
+ case BIT(9) - 1:
+ case BIT(10) - 1:
+ break;
+ default:
+ ath10k_warn(ar, "refusing bitrate mask with missing 0-7 VHT MCS rates\n");
+ return false;
+ }
+ }
+
+ return true;
+}
+
+static void ath10k_mac_set_bitrate_mask_iter(void *data,
+ struct ieee80211_sta *sta)
+{
+ struct ath10k_vif *arvif = data;
+ struct ath10k_sta *arsta = (struct ath10k_sta *)sta->drv_priv;
+ struct ath10k *ar = arvif->ar;
+
+ if (arsta->arvif != arvif)
+ return;
+
+ spin_lock_bh(&ar->data_lock);
+ arsta->changed |= IEEE80211_RC_SUPP_RATES_CHANGED;
+ spin_unlock_bh(&ar->data_lock);
+
+ ieee80211_queue_work(ar->hw, &arsta->update_wk);
+}
+
+static int ath10k_mac_op_set_bitrate_mask(struct ieee80211_hw *hw,
+ struct ieee80211_vif *vif,
+ const struct cfg80211_bitrate_mask *mask)
+{
+ struct ath10k_vif *arvif = (void *)vif->drv_priv;
+ struct cfg80211_chan_def def;
+ struct ath10k *ar = arvif->ar;
+ enum nl80211_band band;
+ const u8 *ht_mcs_mask;
+ const u16 *vht_mcs_mask;
+ u8 rate;
+ u8 nss;
+ u8 sgi;
+ u8 ldpc;
+ int single_nss;
+ int ret;
+
+ if (ath10k_mac_vif_chan(vif, &def))
+ return -EPERM;
+
+ band = def.chan->band;
+ ht_mcs_mask = mask->control[band].ht_mcs;
+ vht_mcs_mask = mask->control[band].vht_mcs;
+ ldpc = !!(ar->ht_cap_info & WMI_HT_CAP_LDPC);
+
+ sgi = mask->control[band].gi;
+ if (sgi == NL80211_TXRATE_FORCE_LGI)
+ return -EINVAL;
+
+ if (ath10k_mac_bitrate_mask_has_single_rate(ar, band, mask)) {
+ ret = ath10k_mac_bitrate_mask_get_single_rate(ar, band, mask,
+ &rate, &nss);
+ if (ret) {
+ ath10k_warn(ar, "failed to get single rate for vdev %i: %d\n",
+ arvif->vdev_id, ret);
+ return ret;
+ }
+ } else if (ath10k_mac_bitrate_mask_get_single_nss(ar, band, mask,
+ &single_nss)) {
+ rate = WMI_FIXED_RATE_NONE;
+ nss = single_nss;
+ } else {
+ rate = WMI_FIXED_RATE_NONE;
+ nss = min(ar->num_rf_chains,
+ max(ath10k_mac_max_ht_nss(ht_mcs_mask),
+ ath10k_mac_max_vht_nss(vht_mcs_mask)));
+
+ if (!ath10k_mac_can_set_bitrate_mask(ar, band, mask))
+ return -EINVAL;
+
+ mutex_lock(&ar->conf_mutex);
+
+ arvif->bitrate_mask = *mask;
+ ieee80211_iterate_stations_atomic(ar->hw,
+ ath10k_mac_set_bitrate_mask_iter,
+ arvif);
+
+ mutex_unlock(&ar->conf_mutex);
+ }
+
+ mutex_lock(&ar->conf_mutex);
+
+ ret = ath10k_mac_set_fixed_rate_params(arvif, rate, nss, sgi, ldpc);
+ if (ret) {
+ ath10k_warn(ar, "failed to set fixed rate params on vdev %i: %d\n",
+ arvif->vdev_id, ret);
+ goto exit;
+ }
+
+exit:
+ mutex_unlock(&ar->conf_mutex);
+
+ return ret;
+}
+
+static void ath10k_sta_rc_update(struct ieee80211_hw *hw,
+ struct ieee80211_vif *vif,
+ struct ieee80211_sta *sta,
+ u32 changed)
+{
+ struct ath10k *ar = hw->priv;
+ struct ath10k_sta *arsta = (struct ath10k_sta *)sta->drv_priv;
+ struct ath10k_vif *arvif = (void *)vif->drv_priv;
+ struct ath10k_peer *peer;
+ u32 bw, smps;
+
+ spin_lock_bh(&ar->data_lock);
+
+ peer = ath10k_peer_find(ar, arvif->vdev_id, sta->addr);
+ if (!peer) {
+ spin_unlock_bh(&ar->data_lock);
+ ath10k_warn(ar, "mac sta rc update failed to find peer %pM on vdev %i\n",
+ sta->addr, arvif->vdev_id);
+ return;
+ }
+
+ ath10k_dbg(ar, ATH10K_DBG_MAC,
+ "mac sta rc update for %pM changed %08x bw %d nss %d smps %d\n",
+ sta->addr, changed, sta->bandwidth, sta->rx_nss,
+ sta->smps_mode);
+
+ if (changed & IEEE80211_RC_BW_CHANGED) {
+ bw = WMI_PEER_CHWIDTH_20MHZ;
+
+ switch (sta->bandwidth) {
+ case IEEE80211_STA_RX_BW_20:
+ bw = WMI_PEER_CHWIDTH_20MHZ;
+ break;
+ case IEEE80211_STA_RX_BW_40:
+ bw = WMI_PEER_CHWIDTH_40MHZ;
+ break;
+ case IEEE80211_STA_RX_BW_80:
+ bw = WMI_PEER_CHWIDTH_80MHZ;
+ break;
+ case IEEE80211_STA_RX_BW_160:
+ bw = WMI_PEER_CHWIDTH_160MHZ;
+ break;
+ default:
+ ath10k_warn(ar, "Invalid bandwidth %d in rc update for %pM\n",
+ sta->bandwidth, sta->addr);
+ bw = WMI_PEER_CHWIDTH_20MHZ;
+ break;
+ }
+
+ arsta->bw = bw;
+ }
+
+ if (changed & IEEE80211_RC_NSS_CHANGED)
+ arsta->nss = sta->rx_nss;
+
+ if (changed & IEEE80211_RC_SMPS_CHANGED) {
+ smps = WMI_PEER_SMPS_PS_NONE;
+
+ switch (sta->smps_mode) {
+ case IEEE80211_SMPS_AUTOMATIC:
+ case IEEE80211_SMPS_OFF:
+ smps = WMI_PEER_SMPS_PS_NONE;
+ break;
+ case IEEE80211_SMPS_STATIC:
+ smps = WMI_PEER_SMPS_STATIC;
+ break;
+ case IEEE80211_SMPS_DYNAMIC:
+ smps = WMI_PEER_SMPS_DYNAMIC;
+ break;
+ case IEEE80211_SMPS_NUM_MODES:
+ ath10k_warn(ar, "Invalid smps %d in sta rc update for %pM\n",
+ sta->smps_mode, sta->addr);
+ smps = WMI_PEER_SMPS_PS_NONE;
+ break;
+ }
+
+ arsta->smps = smps;
+ }
+
+ arsta->changed |= changed;
+
+ spin_unlock_bh(&ar->data_lock);
+
+ ieee80211_queue_work(hw, &arsta->update_wk);
+}
+
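+/* Firmware exposes TSF adjustment only as separate increment and
+ * decrement vdev parameters, so the signed mac80211 offset is split
+ * into a direction and a magnitude.
+ */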
+static void ath10k_offset_tsf(struct ieee80211_hw *hw,
+ struct ieee80211_vif *vif, s64 tsf_offset)
+{
+ struct ath10k *ar = hw->priv;
+ struct ath10k_vif *arvif = (void *)vif->drv_priv;
+ u32 offset, vdev_param;
+ int ret;
+
+ if (tsf_offset < 0) {
+ vdev_param = ar->wmi.vdev_param->dec_tsf;
+ offset = -tsf_offset;
+ } else {
+ vdev_param = ar->wmi.vdev_param->inc_tsf;
+ offset = tsf_offset;
+ }
+
+ ret = ath10k_wmi_vdev_set_param(ar, arvif->vdev_id,
+ vdev_param, offset);
+
+ if (ret && ret != -EOPNOTSUPP)
+ ath10k_warn(ar, "failed to set tsf offset %d cmd %d: %d\n",
+ offset, vdev_param, ret);
+}
+
+static int ath10k_ampdu_action(struct ieee80211_hw *hw,
+ struct ieee80211_vif *vif,
+ struct ieee80211_ampdu_params *params)
+{
+ struct ath10k *ar = hw->priv;
+ struct ath10k_vif *arvif = (void *)vif->drv_priv;
+ struct ieee80211_sta *sta = params->sta;
+ enum ieee80211_ampdu_mlme_action action = params->action;
+ u16 tid = params->tid;
+
+ ath10k_dbg(ar, ATH10K_DBG_MAC, "mac ampdu vdev_id %i sta %pM tid %hu action %d\n",
+ arvif->vdev_id, sta->addr, tid, action);
+
+ switch (action) {
+ case IEEE80211_AMPDU_RX_START:
+ case IEEE80211_AMPDU_RX_STOP:
+ /* HTT AddBa/DelBa events trigger mac80211 Rx BA session
+ * creation/removal. Do we need to verify this?
+ */
+ return 0;
+ case IEEE80211_AMPDU_TX_START:
+ case IEEE80211_AMPDU_TX_STOP_CONT:
+ case IEEE80211_AMPDU_TX_STOP_FLUSH:
+ case IEEE80211_AMPDU_TX_STOP_FLUSH_CONT:
+ case IEEE80211_AMPDU_TX_OPERATIONAL:
+ /* Firmware offloads Tx aggregation entirely so deny mac80211
+ * Tx aggregation requests.
+ */
+ return -EOPNOTSUPP;
+ }
+
+ return -EINVAL;
+}
+
+static void
+ath10k_mac_update_rx_channel(struct ath10k *ar,
+ struct ieee80211_chanctx_conf *ctx,
+ struct ieee80211_vif_chanctx_switch *vifs,
+ int n_vifs)
+{
+ struct cfg80211_chan_def *def = NULL;
+
+ /* Both locks are required because ar->rx_channel is modified. This
+ * allows readers to hold either lock.
+ */
+ lockdep_assert_held(&ar->conf_mutex);
+ lockdep_assert_held(&ar->data_lock);
+
+ WARN_ON(ctx && vifs);
+ WARN_ON(vifs && !n_vifs);
+
+ /* FIXME: Sort of an optimization and a workaround. Peers and vifs are
+ * on a linked list now. Doing a lookup peer -> vif -> chanctx for each
+ * ppdu on Rx may reduce performance on low-end systems. It should be
+ * possible to build tables/hashmaps to speed the lookup up (though be
+ * wary of CPU data cache line sizes), but to keep the initial
+ * implementation simple and less intrusive fall back to the slow lookup
+ * only for multi-channel cases. Single-channel cases continue to use
+ * the old channel derivation and thus performance should not be
+ * affected much.
+ */
+ rcu_read_lock();
+ if (!ctx && ath10k_mac_num_chanctxs(ar) == 1) {
+ ieee80211_iter_chan_contexts_atomic(ar->hw,
+ ath10k_mac_get_any_chandef_iter,
+ &def);
+
+ if (vifs)
+ def = &vifs[0].new_ctx->def;
+
+ ar->rx_channel = def->chan;
+ } else if ((ctx && ath10k_mac_num_chanctxs(ar) == 0) ||
+ (ctx && (ar->state == ATH10K_STATE_RESTARTED))) {
+ /* During a driver restart due to a firmware assert, mac80211 already
+ * has a valid channel context for the radio, so the channel context
+ * iteration returns num_chanctx > 0. Fix up rx_channel while the
+ * restart is in progress.
+ */
+ ar->rx_channel = ctx->def.chan;
+ } else {
+ ar->rx_channel = NULL;
+ }
+ rcu_read_unlock();
+}
+
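+/* Multi-vif channel switch sequence: bring every affected vdev down,
+ * update the cached rx channel, refresh the beacon and probe response
+ * templates, then restart the vdevs on the new channel definition and
+ * bring them back up.
+ */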
+static void
+ath10k_mac_update_vif_chan(struct ath10k *ar,
+ struct ieee80211_vif_chanctx_switch *vifs,
+ int n_vifs)
+{
+ struct ath10k_vif *arvif;
+ int ret;
+ int i;
+
+ lockdep_assert_held(&ar->conf_mutex);
+
+ /* First stop monitor interface. Some FW versions crash if there's a
+ * lone monitor interface.
+ */
+ if (ar->monitor_started)
+ ath10k_monitor_stop(ar);
+
+ for (i = 0; i < n_vifs; i++) {
+ arvif = (void *)vifs[i].vif->drv_priv;
+
+ ath10k_dbg(ar, ATH10K_DBG_MAC,
+ "mac chanctx switch vdev_id %i freq %hu->%hu width %d->%d\n",
+ arvif->vdev_id,
+ vifs[i].old_ctx->def.chan->center_freq,
+ vifs[i].new_ctx->def.chan->center_freq,
+ vifs[i].old_ctx->def.width,
+ vifs[i].new_ctx->def.width);
+
+ if (WARN_ON(!arvif->is_started))
+ continue;
+
+ if (WARN_ON(!arvif->is_up))
+ continue;
+
+ ret = ath10k_wmi_vdev_down(ar, arvif->vdev_id);
+ if (ret) {
+ ath10k_warn(ar, "failed to down vdev %d: %d\n",
+ arvif->vdev_id, ret);
+ continue;
+ }
+ }
+
+ /* All relevant vdevs are downed and associated channel resources
+ * should be available for the channel switch now.
+ */
+
+ spin_lock_bh(&ar->data_lock);
+ ath10k_mac_update_rx_channel(ar, NULL, vifs, n_vifs);
+ spin_unlock_bh(&ar->data_lock);
+
+ for (i = 0; i < n_vifs; i++) {
+ arvif = (void *)vifs[i].vif->drv_priv;
+
+ if (WARN_ON(!arvif->is_started))
+ continue;
+
+ if (WARN_ON(!arvif->is_up))
+ continue;
+
+ ret = ath10k_mac_setup_bcn_tmpl(arvif);
+ if (ret)
+ ath10k_warn(ar, "failed to update bcn tmpl during csa: %d\n",
+ ret);
+
+ ret = ath10k_mac_setup_prb_tmpl(arvif);
+ if (ret)
+ ath10k_warn(ar, "failed to update prb tmpl during csa: %d\n",
+ ret);
+
+ ret = ath10k_vdev_restart(arvif, &vifs[i].new_ctx->def);
+ if (ret) {
+ ath10k_warn(ar, "failed to restart vdev %d: %d\n",
+ arvif->vdev_id, ret);
+ continue;
+ }
+
+ ret = ath10k_wmi_vdev_up(arvif->ar, arvif->vdev_id, arvif->aid,
+ arvif->bssid);
+ if (ret) {
+ ath10k_warn(ar, "failed to bring vdev up %d: %d\n",
+ arvif->vdev_id, ret);
+ continue;
+ }
+ }
+
+ ath10k_monitor_recalc(ar);
+}
+
+static int
+ath10k_mac_op_add_chanctx(struct ieee80211_hw *hw,
+ struct ieee80211_chanctx_conf *ctx)
+{
+ struct ath10k *ar = hw->priv;
+
+ ath10k_dbg(ar, ATH10K_DBG_MAC,
+ "mac chanctx add freq %hu width %d ptr %pK\n",
+ ctx->def.chan->center_freq, ctx->def.width, ctx);
+
+ mutex_lock(&ar->conf_mutex);
+
+ spin_lock_bh(&ar->data_lock);
+ ath10k_mac_update_rx_channel(ar, ctx, NULL, 0);
+ spin_unlock_bh(&ar->data_lock);
+
+ ath10k_recalc_radar_detection(ar);
+ ath10k_monitor_recalc(ar);
+
+ mutex_unlock(&ar->conf_mutex);
+
+ return 0;
+}
+
+static void
+ath10k_mac_op_remove_chanctx(struct ieee80211_hw *hw,
+ struct ieee80211_chanctx_conf *ctx)
+{
+ struct ath10k *ar = hw->priv;
+
+ ath10k_dbg(ar, ATH10K_DBG_MAC,
+ "mac chanctx remove freq %hu width %d ptr %pK\n",
+ ctx->def.chan->center_freq, ctx->def.width, ctx);
+
+ mutex_lock(&ar->conf_mutex);
+
+ spin_lock_bh(&ar->data_lock);
+ ath10k_mac_update_rx_channel(ar, NULL, NULL, 0);
+ spin_unlock_bh(&ar->data_lock);
+
+ ath10k_recalc_radar_detection(ar);
+ ath10k_monitor_recalc(ar);
+
+ mutex_unlock(&ar->conf_mutex);
+}
+
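+/* Interfaces can only be iterated atomically, so a width change is
+ * handled in two passes: first count the vifs bound to the changing
+ * context, then fill a pre-allocated switch array to hand over to
+ * ath10k_mac_update_vif_chan().
+ */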
+struct ath10k_mac_change_chanctx_arg {
+ struct ieee80211_chanctx_conf *ctx;
+ struct ieee80211_vif_chanctx_switch *vifs;
+ int n_vifs;
+ int next_vif;
+};
+
+static void
+ath10k_mac_change_chanctx_cnt_iter(void *data, u8 *mac,
+ struct ieee80211_vif *vif)
+{
+ struct ath10k_mac_change_chanctx_arg *arg = data;
+
+ if (rcu_access_pointer(vif->chanctx_conf) != arg->ctx)
+ return;
+
+ arg->n_vifs++;
+}
+
+static void
+ath10k_mac_change_chanctx_fill_iter(void *data, u8 *mac,
+ struct ieee80211_vif *vif)
+{
+ struct ath10k_mac_change_chanctx_arg *arg = data;
+ struct ieee80211_chanctx_conf *ctx;
+
+ ctx = rcu_access_pointer(vif->chanctx_conf);
+ if (ctx != arg->ctx)
+ return;
+
+ if (WARN_ON(arg->next_vif == arg->n_vifs))
+ return;
+
+ arg->vifs[arg->next_vif].vif = vif;
+ arg->vifs[arg->next_vif].old_ctx = ctx;
+ arg->vifs[arg->next_vif].new_ctx = ctx;
+ arg->next_vif++;
+}
+
+static void
+ath10k_mac_op_change_chanctx(struct ieee80211_hw *hw,
+ struct ieee80211_chanctx_conf *ctx,
+ u32 changed)
+{
+ struct ath10k *ar = hw->priv;
+ struct ath10k_mac_change_chanctx_arg arg = { .ctx = ctx };
+
+ mutex_lock(&ar->conf_mutex);
+
+ ath10k_dbg(ar, ATH10K_DBG_MAC,
+ "mac chanctx change freq %hu width %d ptr %pK changed %x\n",
+ ctx->def.chan->center_freq, ctx->def.width, ctx, changed);
+
+ /* This shouldn't really happen because channel switching should use
+ * switch_vif_chanctx().
+ */
+ if (WARN_ON(changed & IEEE80211_CHANCTX_CHANGE_CHANNEL))
+ goto unlock;
+
+ if (changed & IEEE80211_CHANCTX_CHANGE_WIDTH) {
+ ieee80211_iterate_active_interfaces_atomic(
+ hw,
+ IEEE80211_IFACE_ITER_NORMAL,
+ ath10k_mac_change_chanctx_cnt_iter,
+ &arg);
+ if (arg.n_vifs == 0)
+ goto radar;
+
+ arg.vifs = kcalloc(arg.n_vifs, sizeof(arg.vifs[0]),
+ GFP_KERNEL);
+ if (!arg.vifs)
+ goto radar;
+
+ ieee80211_iterate_active_interfaces_atomic(
+ hw,
+ IEEE80211_IFACE_ITER_NORMAL,
+ ath10k_mac_change_chanctx_fill_iter,
+ &arg);
+ ath10k_mac_update_vif_chan(ar, arg.vifs, arg.n_vifs);
+ kfree(arg.vifs);
+ }
+
+radar:
+ ath10k_recalc_radar_detection(ar);
+
+ /* FIXME: How to configure Rx chains properly? */
+
+ /* No other actions are actually necessary. Firmware maintains channel
+ * definitions per vdev internally and there's no host-side channel
+ * context abstraction to configure, e.g. channel width.
+ */
+
+unlock:
+ mutex_unlock(&ar->conf_mutex);
+}
+
+static int
+ath10k_mac_op_assign_vif_chanctx(struct ieee80211_hw *hw,
+ struct ieee80211_vif *vif,
+ struct ieee80211_chanctx_conf *ctx)
+{
+ struct ath10k *ar = hw->priv;
+ struct ath10k_vif *arvif = (void *)vif->drv_priv;
+ int ret;
+
+ mutex_lock(&ar->conf_mutex);
+
+ ath10k_dbg(ar, ATH10K_DBG_MAC,
+ "mac chanctx assign ptr %pK vdev_id %i\n",
+ ctx, arvif->vdev_id);
+
+ if (WARN_ON(arvif->is_started)) {
+ mutex_unlock(&ar->conf_mutex);
+ return -EBUSY;
+ }
+
+ ret = ath10k_vdev_start(arvif, &ctx->def);
+ if (ret) {
+ ath10k_warn(ar, "failed to start vdev %i addr %pM on freq %d: %d\n",
+ arvif->vdev_id, vif->addr,
+ ctx->def.chan->center_freq, ret);
+ goto err;
+ }
+
+ arvif->is_started = true;
+
+ ret = ath10k_mac_vif_setup_ps(arvif);
+ if (ret) {
+ ath10k_warn(ar, "failed to update vdev %i ps: %d\n",
+ arvif->vdev_id, ret);
+ goto err_stop;
+ }
+
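+	/* A monitor vdev has no peer to associate with, so bring it up
+	 * immediately with aid 0 and the vif's own MAC address.
+	 */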
+ if (vif->type == NL80211_IFTYPE_MONITOR) {
+ ret = ath10k_wmi_vdev_up(ar, arvif->vdev_id, 0, vif->addr);
+ if (ret) {
+ ath10k_warn(ar, "failed to up monitor vdev %i: %d\n",
+ arvif->vdev_id, ret);
+ goto err_stop;
+ }
+
+ arvif->is_up = true;
+ }
+
+ if (ath10k_mac_can_set_cts_prot(arvif)) {
+ ret = ath10k_mac_set_cts_prot(arvif);
+ if (ret)
+ ath10k_warn(ar, "failed to set cts protection for vdev %d: %d\n",
+ arvif->vdev_id, ret);
+ }
+
+ if (ath10k_peer_stats_enabled(ar)) {
+ ar->pktlog_filter |= ATH10K_PKTLOG_PEER_STATS;
+ ret = ath10k_wmi_pdev_pktlog_enable(ar,
+ ar->pktlog_filter);
+ if (ret) {
+ ath10k_warn(ar, "failed to enable pktlog %d\n", ret);
+ goto err_stop;
+ }
+ }
+
+ mutex_unlock(&ar->conf_mutex);
+ return 0;
+
+err_stop:
+ ath10k_vdev_stop(arvif);
+ arvif->is_started = false;
+ ath10k_mac_vif_setup_ps(arvif);
+
+err:
+ mutex_unlock(&ar->conf_mutex);
+ return ret;
+}
+
+static void
+ath10k_mac_op_unassign_vif_chanctx(struct ieee80211_hw *hw,
+ struct ieee80211_vif *vif,
+ struct ieee80211_chanctx_conf *ctx)
+{
+ struct ath10k *ar = hw->priv;
+ struct ath10k_vif *arvif = (void *)vif->drv_priv;
+ int ret;
+
+ mutex_lock(&ar->conf_mutex);
+
+ ath10k_dbg(ar, ATH10K_DBG_MAC,
+ "mac chanctx unassign ptr %pK vdev_id %i\n",
+ ctx, arvif->vdev_id);
+
+ WARN_ON(!arvif->is_started);
+
+ if (vif->type == NL80211_IFTYPE_MONITOR) {
+ WARN_ON(!arvif->is_up);
+
+ ret = ath10k_wmi_vdev_down(ar, arvif->vdev_id);
+ if (ret)
+ ath10k_warn(ar, "failed to down monitor vdev %i: %d\n",
+ arvif->vdev_id, ret);
+
+ arvif->is_up = false;
+ }
+
+ ret = ath10k_vdev_stop(arvif);
+ if (ret)
+ ath10k_warn(ar, "failed to stop vdev %i: %d\n",
+ arvif->vdev_id, ret);
+
+ arvif->is_started = false;
+
+ mutex_unlock(&ar->conf_mutex);
+}
+
+static int
+ath10k_mac_op_switch_vif_chanctx(struct ieee80211_hw *hw,
+ struct ieee80211_vif_chanctx_switch *vifs,
+ int n_vifs,
+ enum ieee80211_chanctx_switch_mode mode)
+{
+ struct ath10k *ar = hw->priv;
+
+ mutex_lock(&ar->conf_mutex);
+
+ ath10k_dbg(ar, ATH10K_DBG_MAC,
+ "mac chanctx switch n_vifs %d mode %d\n",
+ n_vifs, mode);
+ ath10k_mac_update_vif_chan(ar, vifs, n_vifs);
+
+ mutex_unlock(&ar->conf_mutex);
+ return 0;
+}
+
+static void ath10k_mac_op_sta_pre_rcu_remove(struct ieee80211_hw *hw,
+ struct ieee80211_vif *vif,
+ struct ieee80211_sta *sta)
+{
+ struct ath10k *ar;
+ struct ath10k_peer *peer;
+
+ ar = hw->priv;
+
+ list_for_each_entry(peer, &ar->peers, list)
+ if (peer->sta == sta)
+ peer->removed = true;
+}
+
+static void ath10k_sta_statistics(struct ieee80211_hw *hw,
+ struct ieee80211_vif *vif,
+ struct ieee80211_sta *sta,
+ struct station_info *sinfo)
+{
+ struct ath10k_sta *arsta = (struct ath10k_sta *)sta->drv_priv;
+ struct ath10k *ar = arsta->arvif->ar;
+
+ if (!ath10k_peer_stats_enabled(ar))
+ return;
+
+ sinfo->rx_duration = arsta->rx_duration;
+ sinfo->filled |= BIT_ULL(NL80211_STA_INFO_RX_DURATION);
+
+ if (!arsta->txrate.legacy && !arsta->txrate.nss)
+ return;
+
+ if (arsta->txrate.legacy) {
+ sinfo->txrate.legacy = arsta->txrate.legacy;
+ } else {
+ sinfo->txrate.mcs = arsta->txrate.mcs;
+ sinfo->txrate.nss = arsta->txrate.nss;
+ sinfo->txrate.bw = arsta->txrate.bw;
+ }
+ sinfo->txrate.flags = arsta->txrate.flags;
+ sinfo->filled |= BIT_ULL(NL80211_STA_INFO_TX_BITRATE);
+}
+
+static const struct ieee80211_ops ath10k_ops = {
+ .tx = ath10k_mac_op_tx,
+ .wake_tx_queue = ath10k_mac_op_wake_tx_queue,
+ .start = ath10k_start,
+ .stop = ath10k_stop,
+ .config = ath10k_config,
+ .add_interface = ath10k_add_interface,
+ .remove_interface = ath10k_remove_interface,
+ .configure_filter = ath10k_configure_filter,
+ .bss_info_changed = ath10k_bss_info_changed,
+ .set_coverage_class = ath10k_mac_op_set_coverage_class,
+ .hw_scan = ath10k_hw_scan,
+ .cancel_hw_scan = ath10k_cancel_hw_scan,
+ .set_key = ath10k_set_key,
+ .set_default_unicast_key = ath10k_set_default_unicast_key,
+ .sta_state = ath10k_sta_state,
+ .conf_tx = ath10k_conf_tx,
+ .remain_on_channel = ath10k_remain_on_channel,
+ .cancel_remain_on_channel = ath10k_cancel_remain_on_channel,
+ .set_rts_threshold = ath10k_set_rts_threshold,
+ .set_frag_threshold = ath10k_mac_op_set_frag_threshold,
+ .flush = ath10k_flush,
+ .tx_last_beacon = ath10k_tx_last_beacon,
+ .set_antenna = ath10k_set_antenna,
+ .get_antenna = ath10k_get_antenna,
+ .reconfig_complete = ath10k_reconfig_complete,
+ .get_survey = ath10k_get_survey,
+ .set_bitrate_mask = ath10k_mac_op_set_bitrate_mask,
+ .sta_rc_update = ath10k_sta_rc_update,
+ .offset_tsf = ath10k_offset_tsf,
+ .ampdu_action = ath10k_ampdu_action,
+ .get_et_sset_count = ath10k_debug_get_et_sset_count,
+ .get_et_stats = ath10k_debug_get_et_stats,
+ .get_et_strings = ath10k_debug_get_et_strings,
+ .add_chanctx = ath10k_mac_op_add_chanctx,
+ .remove_chanctx = ath10k_mac_op_remove_chanctx,
+ .change_chanctx = ath10k_mac_op_change_chanctx,
+ .assign_vif_chanctx = ath10k_mac_op_assign_vif_chanctx,
+ .unassign_vif_chanctx = ath10k_mac_op_unassign_vif_chanctx,
+ .switch_vif_chanctx = ath10k_mac_op_switch_vif_chanctx,
+ .sta_pre_rcu_remove = ath10k_mac_op_sta_pre_rcu_remove,
+ .sta_statistics = ath10k_sta_statistics,
+
+ CFG80211_TESTMODE_CMD(ath10k_tm_cmd)
+
+#ifdef CONFIG_PM
+ .suspend = ath10k_wow_op_suspend,
+ .resume = ath10k_wow_op_resume,
+ .set_wakeup = ath10k_wow_op_set_wakeup,
+#endif
+#ifdef CONFIG_MAC80211_DEBUGFS
+ .sta_add_debugfs = ath10k_sta_add_debugfs,
+#endif
+};
+
+#define CHAN2G(_channel, _freq, _flags) { \
+ .band = NL80211_BAND_2GHZ, \
+ .hw_value = (_channel), \
+ .center_freq = (_freq), \
+ .flags = (_flags), \
+ .max_antenna_gain = 0, \
+ .max_power = 30, \
+}
+
+#define CHAN5G(_channel, _freq, _flags) { \
+ .band = NL80211_BAND_5GHZ, \
+ .hw_value = (_channel), \
+ .center_freq = (_freq), \
+ .flags = (_flags), \
+ .max_antenna_gain = 0, \
+ .max_power = 30, \
+}
+
+static const struct ieee80211_channel ath10k_2ghz_channels[] = {
+ CHAN2G(1, 2412, 0),
+ CHAN2G(2, 2417, 0),
+ CHAN2G(3, 2422, 0),
+ CHAN2G(4, 2427, 0),
+ CHAN2G(5, 2432, 0),
+ CHAN2G(6, 2437, 0),
+ CHAN2G(7, 2442, 0),
+ CHAN2G(8, 2447, 0),
+ CHAN2G(9, 2452, 0),
+ CHAN2G(10, 2457, 0),
+ CHAN2G(11, 2462, 0),
+ CHAN2G(12, 2467, 0),
+ CHAN2G(13, 2472, 0),
+ CHAN2G(14, 2484, 0),
+};
+
+static const struct ieee80211_channel ath10k_5ghz_channels[] = {
+ CHAN5G(36, 5180, 0),
+ CHAN5G(40, 5200, 0),
+ CHAN5G(44, 5220, 0),
+ CHAN5G(48, 5240, 0),
+ CHAN5G(52, 5260, 0),
+ CHAN5G(56, 5280, 0),
+ CHAN5G(60, 5300, 0),
+ CHAN5G(64, 5320, 0),
+ CHAN5G(100, 5500, 0),
+ CHAN5G(104, 5520, 0),
+ CHAN5G(108, 5540, 0),
+ CHAN5G(112, 5560, 0),
+ CHAN5G(116, 5580, 0),
+ CHAN5G(120, 5600, 0),
+ CHAN5G(124, 5620, 0),
+ CHAN5G(128, 5640, 0),
+ CHAN5G(132, 5660, 0),
+ CHAN5G(136, 5680, 0),
+ CHAN5G(140, 5700, 0),
+ CHAN5G(144, 5720, 0),
+ CHAN5G(149, 5745, 0),
+ CHAN5G(153, 5765, 0),
+ CHAN5G(157, 5785, 0),
+ CHAN5G(161, 5805, 0),
+ CHAN5G(165, 5825, 0),
+ CHAN5G(169, 5845, 0),
+ CHAN5G(173, 5865, 0),
+	/* If you add more, you may need to change ATH10K_MAX_5G_CHAN and
+	 * you will definitely need to change ATH10K_NUM_CHANS in core.h.
+	 */
+};
+
+struct ath10k *ath10k_mac_create(size_t priv_size)
+{
+ struct ieee80211_hw *hw;
+ struct ieee80211_ops *ops;
+ struct ath10k *ar;
+
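+	/* The ops table is duplicated per device so that callbacks a given
+	 * chip does not support (e.g. set_coverage_class, cleared in
+	 * ath10k_mac_register()) can be disabled without affecting other
+	 * devices.
+	 */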
+ ops = kmemdup(&ath10k_ops, sizeof(ath10k_ops), GFP_KERNEL);
+ if (!ops)
+ return NULL;
+
+ hw = ieee80211_alloc_hw(sizeof(struct ath10k) + priv_size, ops);
+ if (!hw) {
+ kfree(ops);
+ return NULL;
+ }
+
+ ar = hw->priv;
+ ar->hw = hw;
+ ar->ops = ops;
+
+ return ar;
+}
+
+void ath10k_mac_destroy(struct ath10k *ar)
+{
+ struct ieee80211_ops *ops = ar->ops;
+
+ ieee80211_free_hw(ar->hw);
+ kfree(ops);
+}
+
+static const struct ieee80211_iface_limit ath10k_if_limits[] = {
+ {
+ .max = 8,
+ .types = BIT(NL80211_IFTYPE_STATION)
+ | BIT(NL80211_IFTYPE_P2P_CLIENT)
+ },
+ {
+ .max = 3,
+ .types = BIT(NL80211_IFTYPE_P2P_GO)
+ },
+ {
+ .max = 1,
+ .types = BIT(NL80211_IFTYPE_P2P_DEVICE)
+ },
+ {
+ .max = 7,
+ .types = BIT(NL80211_IFTYPE_AP)
+#ifdef CONFIG_MAC80211_MESH
+ | BIT(NL80211_IFTYPE_MESH_POINT)
+#endif
+ },
+};
+
+static const struct ieee80211_iface_limit ath10k_10x_if_limits[] = {
+ {
+ .max = 8,
+ .types = BIT(NL80211_IFTYPE_AP)
+#ifdef CONFIG_MAC80211_MESH
+ | BIT(NL80211_IFTYPE_MESH_POINT)
+#endif
+ },
+ {
+ .max = 1,
+ .types = BIT(NL80211_IFTYPE_STATION)
+ },
+};
+
+static const struct ieee80211_iface_combination ath10k_if_comb[] = {
+ {
+ .limits = ath10k_if_limits,
+ .n_limits = ARRAY_SIZE(ath10k_if_limits),
+ .max_interfaces = 8,
+ .num_different_channels = 1,
+ .beacon_int_infra_match = true,
+ },
+};
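+
+/* Reading the combination above: at most 8 interfaces in total on a
+ * single channel, within the per-type limits (e.g. 7 APs plus 1
+ * station), with matching beacon intervals enforced for infrastructure
+ * modes.
+ */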
+
+static const struct ieee80211_iface_combination ath10k_10x_if_comb[] = {
+ {
+ .limits = ath10k_10x_if_limits,
+ .n_limits = ARRAY_SIZE(ath10k_10x_if_limits),
+ .max_interfaces = 8,
+ .num_different_channels = 1,
+ .beacon_int_infra_match = true,
+ .beacon_int_min_gcd = 1,
+#ifdef CONFIG_ATH10K_DFS_CERTIFIED
+ .radar_detect_widths = BIT(NL80211_CHAN_WIDTH_20_NOHT) |
+ BIT(NL80211_CHAN_WIDTH_20) |
+ BIT(NL80211_CHAN_WIDTH_40) |
+ BIT(NL80211_CHAN_WIDTH_80),
+#endif
+ },
+};
+
+static const struct ieee80211_iface_limit ath10k_tlv_if_limit[] = {
+ {
+ .max = 2,
+ .types = BIT(NL80211_IFTYPE_STATION),
+ },
+ {
+ .max = 2,
+ .types = BIT(NL80211_IFTYPE_AP) |
+#ifdef CONFIG_MAC80211_MESH
+ BIT(NL80211_IFTYPE_MESH_POINT) |
+#endif
+ BIT(NL80211_IFTYPE_P2P_CLIENT) |
+ BIT(NL80211_IFTYPE_P2P_GO),
+ },
+ {
+ .max = 1,
+ .types = BIT(NL80211_IFTYPE_P2P_DEVICE),
+ },
+};
+
+static const struct ieee80211_iface_limit ath10k_tlv_qcs_if_limit[] = {
+ {
+ .max = 2,
+ .types = BIT(NL80211_IFTYPE_STATION),
+ },
+ {
+ .max = 2,
+ .types = BIT(NL80211_IFTYPE_P2P_CLIENT),
+ },
+ {
+ .max = 1,
+ .types = BIT(NL80211_IFTYPE_AP) |
+#ifdef CONFIG_MAC80211_MESH
+ BIT(NL80211_IFTYPE_MESH_POINT) |
+#endif
+ BIT(NL80211_IFTYPE_P2P_GO),
+ },
+ {
+ .max = 1,
+ .types = BIT(NL80211_IFTYPE_P2P_DEVICE),
+ },
+};
+
+static const struct ieee80211_iface_limit ath10k_tlv_if_limit_ibss[] = {
+ {
+ .max = 1,
+ .types = BIT(NL80211_IFTYPE_STATION),
+ },
+ {
+ .max = 1,
+ .types = BIT(NL80211_IFTYPE_ADHOC),
+ },
+};
+
+/* FIXME: This is not thoroughly tested. These combinations may over- or
+ * underestimate hw/fw capabilities.
+ */
+static struct ieee80211_iface_combination ath10k_tlv_if_comb[] = {
+ {
+ .limits = ath10k_tlv_if_limit,
+ .num_different_channels = 1,
+ .max_interfaces = 4,
+ .n_limits = ARRAY_SIZE(ath10k_tlv_if_limit),
+ },
+ {
+ .limits = ath10k_tlv_if_limit_ibss,
+ .num_different_channels = 1,
+ .max_interfaces = 2,
+ .n_limits = ARRAY_SIZE(ath10k_tlv_if_limit_ibss),
+ },
+};
+
+static struct ieee80211_iface_combination ath10k_tlv_qcs_if_comb[] = {
+ {
+ .limits = ath10k_tlv_if_limit,
+ .num_different_channels = 1,
+ .max_interfaces = 4,
+ .n_limits = ARRAY_SIZE(ath10k_tlv_if_limit),
+ },
+ {
+ .limits = ath10k_tlv_qcs_if_limit,
+ .num_different_channels = 2,
+ .max_interfaces = 4,
+ .n_limits = ARRAY_SIZE(ath10k_tlv_qcs_if_limit),
+ },
+ {
+ .limits = ath10k_tlv_if_limit_ibss,
+ .num_different_channels = 1,
+ .max_interfaces = 2,
+ .n_limits = ARRAY_SIZE(ath10k_tlv_if_limit_ibss),
+ },
+};
+
+static const struct ieee80211_iface_limit ath10k_10_4_if_limits[] = {
+ {
+ .max = 1,
+ .types = BIT(NL80211_IFTYPE_STATION),
+ },
+ {
+ .max = 16,
+ .types = BIT(NL80211_IFTYPE_AP)
+#ifdef CONFIG_MAC80211_MESH
+ | BIT(NL80211_IFTYPE_MESH_POINT)
+#endif
+ },
+};
+
+static const struct ieee80211_iface_combination ath10k_10_4_if_comb[] = {
+ {
+ .limits = ath10k_10_4_if_limits,
+ .n_limits = ARRAY_SIZE(ath10k_10_4_if_limits),
+ .max_interfaces = 16,
+ .num_different_channels = 1,
+ .beacon_int_infra_match = true,
+ .beacon_int_min_gcd = 1,
+#ifdef CONFIG_ATH10K_DFS_CERTIFIED
+ .radar_detect_widths = BIT(NL80211_CHAN_WIDTH_20_NOHT) |
+ BIT(NL80211_CHAN_WIDTH_20) |
+ BIT(NL80211_CHAN_WIDTH_40) |
+ BIT(NL80211_CHAN_WIDTH_80),
+#endif
+ },
+};
+
+static void ath10k_get_arvif_iter(void *data, u8 *mac,
+ struct ieee80211_vif *vif)
+{
+ struct ath10k_vif_iter *arvif_iter = data;
+ struct ath10k_vif *arvif = (void *)vif->drv_priv;
+
+ if (arvif->vdev_id == arvif_iter->vdev_id)
+ arvif_iter->arvif = arvif;
+}
+
+struct ath10k_vif *ath10k_get_arvif(struct ath10k *ar, u32 vdev_id)
+{
+ struct ath10k_vif_iter arvif_iter;
+ u32 flags;
+
+ memset(&arvif_iter, 0, sizeof(struct ath10k_vif_iter));
+ arvif_iter.vdev_id = vdev_id;
+
+ flags = IEEE80211_IFACE_ITER_RESUME_ALL;
+ ieee80211_iterate_active_interfaces_atomic(ar->hw,
+ flags,
+ ath10k_get_arvif_iter,
+ &arvif_iter);
+ if (!arvif_iter.arvif) {
+ ath10k_warn(ar, "No VIF found for vdev %d\n", vdev_id);
+ return NULL;
+ }
+
+ return arvif_iter.arvif;
+}
+
+#define WRD_METHOD "WRDD"
+#define WRDD_WIFI (0x07)
+
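+/* The WRDD method returns an ACPI package of the form
+ * { 0, { domain_type, value }, ... }; the entry whose domain_type is
+ * WRDD_WIFI (0x07) carries the two-letter country code packed into an
+ * integer, one character per byte.
+ */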
+static u32 ath10k_mac_wrdd_get_mcc(struct ath10k *ar, union acpi_object *wrdd)
+{
+ union acpi_object *mcc_pkg;
+ union acpi_object *domain_type;
+ union acpi_object *mcc_value;
+ u32 i;
+
+ if (wrdd->type != ACPI_TYPE_PACKAGE ||
+ wrdd->package.count < 2 ||
+ wrdd->package.elements[0].type != ACPI_TYPE_INTEGER ||
+ wrdd->package.elements[0].integer.value != 0) {
+ ath10k_warn(ar, "ignoring malformed/unsupported wrdd structure\n");
+ return 0;
+ }
+
+ for (i = 1; i < wrdd->package.count; ++i) {
+ mcc_pkg = &wrdd->package.elements[i];
+
+ if (mcc_pkg->type != ACPI_TYPE_PACKAGE)
+ continue;
+ if (mcc_pkg->package.count < 2)
+ continue;
+ if (mcc_pkg->package.elements[0].type != ACPI_TYPE_INTEGER ||
+ mcc_pkg->package.elements[1].type != ACPI_TYPE_INTEGER)
+ continue;
+
+ domain_type = &mcc_pkg->package.elements[0];
+ if (domain_type->integer.value != WRDD_WIFI)
+ continue;
+
+ mcc_value = &mcc_pkg->package.elements[1];
+ return mcc_value->integer.value;
+ }
+ return 0;
+}
+
+static int ath10k_mac_get_wrdd_regulatory(struct ath10k *ar, u16 *rd)
+{
+ struct pci_dev __maybe_unused *pdev = to_pci_dev(ar->dev);
+ acpi_handle root_handle;
+ acpi_handle handle;
+ struct acpi_buffer wrdd = {ACPI_ALLOCATE_BUFFER, NULL};
+ acpi_status status;
+ u32 alpha2_code;
+ char alpha2[3];
+
+ root_handle = ACPI_HANDLE(&pdev->dev);
+ if (!root_handle)
+ return -EOPNOTSUPP;
+
+ status = acpi_get_handle(root_handle, (acpi_string)WRD_METHOD, &handle);
+ if (ACPI_FAILURE(status)) {
+ ath10k_dbg(ar, ATH10K_DBG_BOOT,
+ "failed to get wrd method %d\n", status);
+ return -EIO;
+ }
+
+ status = acpi_evaluate_object(handle, NULL, NULL, &wrdd);
+ if (ACPI_FAILURE(status)) {
+ ath10k_dbg(ar, ATH10K_DBG_BOOT,
+ "failed to call wrdc %d\n", status);
+ return -EIO;
+ }
+
+ alpha2_code = ath10k_mac_wrdd_get_mcc(ar, wrdd.pointer);
+ kfree(wrdd.pointer);
+ if (!alpha2_code)
+ return -EIO;
+
+ alpha2[0] = (alpha2_code >> 8) & 0xff;
+ alpha2[1] = (alpha2_code >> 0) & 0xff;
+ alpha2[2] = '\0';
+
+ ath10k_dbg(ar, ATH10K_DBG_BOOT,
+ "regulatory hint from WRDD (alpha2-code): %s\n", alpha2);
+
+ *rd = ath_regd_find_country_by_name(alpha2);
+ if (*rd == 0xffff)
+ return -EIO;
+
+ *rd |= COUNTRY_ERD_FLAG;
+ return 0;
+}
+
+static int ath10k_mac_init_rd(struct ath10k *ar)
+{
+ int ret;
+ u16 rd;
+
+ ret = ath10k_mac_get_wrdd_regulatory(ar, &rd);
+ if (ret) {
+ ath10k_dbg(ar, ATH10K_DBG_BOOT,
+ "fallback to eeprom programmed regulatory settings\n");
+ rd = ar->hw_eeprom_rd;
+ }
+
+ ar->ath_common.regulatory.current_rd = rd;
+ return 0;
+}
+
+int ath10k_mac_register(struct ath10k *ar)
+{
+ static const u32 cipher_suites[] = {
+ WLAN_CIPHER_SUITE_WEP40,
+ WLAN_CIPHER_SUITE_WEP104,
+ WLAN_CIPHER_SUITE_TKIP,
+ WLAN_CIPHER_SUITE_CCMP,
+
+ /* Do not add hardware supported ciphers before this line.
+ * Allow software encryption for all chips. Don't forget to
+ * update n_cipher_suites below.
+ */
+ WLAN_CIPHER_SUITE_AES_CMAC,
+ WLAN_CIPHER_SUITE_BIP_CMAC_256,
+ WLAN_CIPHER_SUITE_BIP_GMAC_128,
+ WLAN_CIPHER_SUITE_BIP_GMAC_256,
+
+		/* Only QCA99x0 and QCA4019 variants support GCMP-128, GCMP-256
+ * and CCMP-256 in hardware.
+ */
+ WLAN_CIPHER_SUITE_GCMP,
+ WLAN_CIPHER_SUITE_GCMP_256,
+ WLAN_CIPHER_SUITE_CCMP_256,
+ };
+ struct ieee80211_supported_band *band;
+ void *channels;
+ int ret;
+
+ SET_IEEE80211_PERM_ADDR(ar->hw, ar->mac_addr);
+
+ SET_IEEE80211_DEV(ar->hw, ar->dev);
+
+ BUILD_BUG_ON((ARRAY_SIZE(ath10k_2ghz_channels) +
+ ARRAY_SIZE(ath10k_5ghz_channels)) !=
+ ATH10K_NUM_CHANS);
+
+ if (ar->phy_capability & WHAL_WLAN_11G_CAPABILITY) {
+ channels = kmemdup(ath10k_2ghz_channels,
+ sizeof(ath10k_2ghz_channels),
+ GFP_KERNEL);
+ if (!channels) {
+ ret = -ENOMEM;
+ goto err_free;
+ }
+
+ band = &ar->mac.sbands[NL80211_BAND_2GHZ];
+ band->n_channels = ARRAY_SIZE(ath10k_2ghz_channels);
+ band->channels = channels;
+
+ if (ar->hw_params.cck_rate_map_rev2) {
+ band->n_bitrates = ath10k_g_rates_rev2_size;
+ band->bitrates = ath10k_g_rates_rev2;
+ } else {
+ band->n_bitrates = ath10k_g_rates_size;
+ band->bitrates = ath10k_g_rates;
+ }
+
+ ar->hw->wiphy->bands[NL80211_BAND_2GHZ] = band;
+ }
+
+ if (ar->phy_capability & WHAL_WLAN_11A_CAPABILITY) {
+ channels = kmemdup(ath10k_5ghz_channels,
+ sizeof(ath10k_5ghz_channels),
+ GFP_KERNEL);
+ if (!channels) {
+ ret = -ENOMEM;
+ goto err_free;
+ }
+
+ band = &ar->mac.sbands[NL80211_BAND_5GHZ];
+ band->n_channels = ARRAY_SIZE(ath10k_5ghz_channels);
+ band->channels = channels;
+ band->n_bitrates = ath10k_a_rates_size;
+ band->bitrates = ath10k_a_rates;
+ ar->hw->wiphy->bands[NL80211_BAND_5GHZ] = band;
+ }
+
+ wiphy_read_of_freq_limits(ar->hw->wiphy);
+ ath10k_mac_setup_ht_vht_cap(ar);
+
+ ar->hw->wiphy->interface_modes =
+ BIT(NL80211_IFTYPE_STATION) |
+ BIT(NL80211_IFTYPE_AP) |
+ BIT(NL80211_IFTYPE_MESH_POINT);
+
+ ar->hw->wiphy->available_antennas_rx = ar->cfg_rx_chainmask;
+ ar->hw->wiphy->available_antennas_tx = ar->cfg_tx_chainmask;
+
+ if (!test_bit(ATH10K_FW_FEATURE_NO_P2P, ar->normal_mode_fw.fw_file.fw_features))
+ ar->hw->wiphy->interface_modes |=
+ BIT(NL80211_IFTYPE_P2P_DEVICE) |
+ BIT(NL80211_IFTYPE_P2P_CLIENT) |
+ BIT(NL80211_IFTYPE_P2P_GO);
+
+ ieee80211_hw_set(ar->hw, SIGNAL_DBM);
+
+ if (!test_bit(ATH10K_FW_FEATURE_NO_PS,
+ ar->running_fw->fw_file.fw_features)) {
+ ieee80211_hw_set(ar->hw, SUPPORTS_PS);
+ ieee80211_hw_set(ar->hw, SUPPORTS_DYNAMIC_PS);
+ }
+
+ ieee80211_hw_set(ar->hw, MFP_CAPABLE);
+ ieee80211_hw_set(ar->hw, REPORTS_TX_ACK_STATUS);
+ ieee80211_hw_set(ar->hw, HAS_RATE_CONTROL);
+ ieee80211_hw_set(ar->hw, AP_LINK_PS);
+ ieee80211_hw_set(ar->hw, SPECTRUM_MGMT);
+ ieee80211_hw_set(ar->hw, SUPPORT_FAST_XMIT);
+ ieee80211_hw_set(ar->hw, CONNECTION_MONITOR);
+ ieee80211_hw_set(ar->hw, SUPPORTS_PER_STA_GTK);
+ ieee80211_hw_set(ar->hw, WANT_MONITOR_VIF);
+ ieee80211_hw_set(ar->hw, CHANCTX_STA_CSA);
+ ieee80211_hw_set(ar->hw, QUEUE_CONTROL);
+ ieee80211_hw_set(ar->hw, SUPPORTS_TX_FRAG);
+ ieee80211_hw_set(ar->hw, REPORTS_LOW_ACK);
+
+ if (!test_bit(ATH10K_FLAG_RAW_MODE, &ar->dev_flags))
+ ieee80211_hw_set(ar->hw, SW_CRYPTO_CONTROL);
+
+ ar->hw->wiphy->features |= NL80211_FEATURE_STATIC_SMPS;
+ ar->hw->wiphy->flags |= WIPHY_FLAG_IBSS_RSN;
+
+ if (ar->ht_cap_info & WMI_HT_CAP_DYNAMIC_SMPS)
+ ar->hw->wiphy->features |= NL80211_FEATURE_DYNAMIC_SMPS;
+
+ if (ar->ht_cap_info & WMI_HT_CAP_ENABLED) {
+ ieee80211_hw_set(ar->hw, AMPDU_AGGREGATION);
+ ieee80211_hw_set(ar->hw, TX_AMPDU_SETUP_IN_HW);
+ }
+
+ ar->hw->wiphy->max_scan_ssids = WLAN_SCAN_PARAMS_MAX_SSID;
+ ar->hw->wiphy->max_scan_ie_len = WLAN_SCAN_PARAMS_MAX_IE_LEN;
+
+ ar->hw->vif_data_size = sizeof(struct ath10k_vif);
+ ar->hw->sta_data_size = sizeof(struct ath10k_sta);
+ ar->hw->txq_data_size = sizeof(struct ath10k_txq);
+
+ ar->hw->max_listen_interval = ATH10K_MAX_HW_LISTEN_INTERVAL;
+
+ if (test_bit(WMI_SERVICE_BEACON_OFFLOAD, ar->wmi.svc_map)) {
+ ar->hw->wiphy->flags |= WIPHY_FLAG_AP_PROBE_RESP_OFFLOAD;
+
+		/* Firmware delivers WPS/P2P Probe Request frames to the driver
+		 * so that userspace (e.g. wpa_supplicant/hostapd) can generate
+		 * correct Probe Responses. This is more of a hack than a proper
+		 * capability advertisement.
+		 */
+ ar->hw->wiphy->probe_resp_offload |=
+ NL80211_PROBE_RESP_OFFLOAD_SUPPORT_WPS |
+ NL80211_PROBE_RESP_OFFLOAD_SUPPORT_WPS2 |
+ NL80211_PROBE_RESP_OFFLOAD_SUPPORT_P2P;
+ }
+
+ if (test_bit(WMI_SERVICE_TDLS, ar->wmi.svc_map) ||
+ test_bit(WMI_SERVICE_TDLS_EXPLICIT_MODE_ONLY, ar->wmi.svc_map)) {
+ ar->hw->wiphy->flags |= WIPHY_FLAG_SUPPORTS_TDLS;
+ if (test_bit(WMI_SERVICE_TDLS_WIDER_BANDWIDTH, ar->wmi.svc_map))
+ ieee80211_hw_set(ar->hw, TDLS_WIDER_BW);
+ }
+
+ if (test_bit(WMI_SERVICE_TDLS_UAPSD_BUFFER_STA, ar->wmi.svc_map))
+ ieee80211_hw_set(ar->hw, SUPPORTS_TDLS_BUFFER_STA);
+
+ ar->hw->wiphy->flags |= WIPHY_FLAG_HAS_REMAIN_ON_CHANNEL;
+ ar->hw->wiphy->flags |= WIPHY_FLAG_HAS_CHANNEL_SWITCH;
+ ar->hw->wiphy->max_remain_on_channel_duration = 5000;
+
+ ar->hw->wiphy->flags |= WIPHY_FLAG_AP_UAPSD;
+ ar->hw->wiphy->features |= NL80211_FEATURE_AP_MODE_CHAN_WIDTH_CHANGE |
+ NL80211_FEATURE_AP_SCAN;
+
+ ar->hw->wiphy->max_ap_assoc_sta = ar->max_num_stations;
+
+ ret = ath10k_wow_init(ar);
+ if (ret) {
+ ath10k_warn(ar, "failed to init wow: %d\n", ret);
+ goto err_free;
+ }
+
+ wiphy_ext_feature_set(ar->hw->wiphy, NL80211_EXT_FEATURE_VHT_IBSS);
+ wiphy_ext_feature_set(ar->hw->wiphy,
+ NL80211_EXT_FEATURE_SET_SCAN_DWELL);
+
+	/* On LL (low latency) hardware the queues are managed entirely by
+	 * the firmware, so we only advertise to mac80211 that we can do
+	 * the queues thing.
+	 */
+ ar->hw->queues = IEEE80211_MAX_QUEUES;
+
+ /* vdev_ids are used as hw queue numbers. Make sure offchan tx queue is
+ * something that vdev_ids can't reach so that we don't stop the queue
+ * accidentally.
+ */
+ ar->hw->offchannel_tx_hw_queue = IEEE80211_MAX_QUEUES - 1;
+
+ switch (ar->running_fw->fw_file.wmi_op_version) {
+ case ATH10K_FW_WMI_OP_VERSION_MAIN:
+ ar->hw->wiphy->iface_combinations = ath10k_if_comb;
+ ar->hw->wiphy->n_iface_combinations =
+ ARRAY_SIZE(ath10k_if_comb);
+ ar->hw->wiphy->interface_modes |= BIT(NL80211_IFTYPE_ADHOC);
+ break;
+ case ATH10K_FW_WMI_OP_VERSION_TLV:
+ if (test_bit(WMI_SERVICE_ADAPTIVE_OCS, ar->wmi.svc_map)) {
+ ar->hw->wiphy->iface_combinations =
+ ath10k_tlv_qcs_if_comb;
+ ar->hw->wiphy->n_iface_combinations =
+ ARRAY_SIZE(ath10k_tlv_qcs_if_comb);
+ } else {
+ ar->hw->wiphy->iface_combinations = ath10k_tlv_if_comb;
+ ar->hw->wiphy->n_iface_combinations =
+ ARRAY_SIZE(ath10k_tlv_if_comb);
+ }
+ ar->hw->wiphy->interface_modes |= BIT(NL80211_IFTYPE_ADHOC);
+ break;
+ case ATH10K_FW_WMI_OP_VERSION_10_1:
+ case ATH10K_FW_WMI_OP_VERSION_10_2:
+ case ATH10K_FW_WMI_OP_VERSION_10_2_4:
+ ar->hw->wiphy->iface_combinations = ath10k_10x_if_comb;
+ ar->hw->wiphy->n_iface_combinations =
+ ARRAY_SIZE(ath10k_10x_if_comb);
+ break;
+ case ATH10K_FW_WMI_OP_VERSION_10_4:
+ ar->hw->wiphy->iface_combinations = ath10k_10_4_if_comb;
+ ar->hw->wiphy->n_iface_combinations =
+ ARRAY_SIZE(ath10k_10_4_if_comb);
+ break;
+ case ATH10K_FW_WMI_OP_VERSION_UNSET:
+ case ATH10K_FW_WMI_OP_VERSION_MAX:
+ WARN_ON(1);
+ ret = -EINVAL;
+ goto err_free;
+ }
+
+ if (!test_bit(ATH10K_FLAG_RAW_MODE, &ar->dev_flags))
+ ar->hw->netdev_features = NETIF_F_HW_CSUM;
+
+ if (IS_ENABLED(CONFIG_ATH10K_DFS_CERTIFIED)) {
+ /* Init ath dfs pattern detector */
+ ar->ath_common.debug_mask = ATH_DBG_DFS;
+ ar->dfs_detector = dfs_pattern_detector_init(&ar->ath_common,
+ NL80211_DFS_UNSET);
+
+ if (!ar->dfs_detector)
+ ath10k_warn(ar, "failed to initialise DFS pattern detector\n");
+ }
+
+ ret = ath10k_mac_init_rd(ar);
+ if (ret) {
+ ath10k_err(ar, "failed to derive regdom: %d\n", ret);
+ goto err_dfs_detector_exit;
+ }
+
+ /* Disable set_coverage_class for chipsets that do not support it. */
+ if (!ar->hw_params.hw_ops->set_coverage_class)
+ ar->ops->set_coverage_class = NULL;
+
+ ret = ath_regd_init(&ar->ath_common.regulatory, ar->hw->wiphy,
+ ath10k_reg_notifier);
+ if (ret) {
+ ath10k_err(ar, "failed to initialise regulatory: %i\n", ret);
+ goto err_dfs_detector_exit;
+ }
+
+ if (test_bit(WMI_SERVICE_SPOOF_MAC_SUPPORT, ar->wmi.svc_map)) {
+ ar->hw->wiphy->features |=
+ NL80211_FEATURE_SCAN_RANDOM_MAC_ADDR;
+ }
+
+ ar->hw->wiphy->cipher_suites = cipher_suites;
+
+ /* QCA988x and QCA6174 family chips do not support CCMP-256, GCMP-128
+	 * and GCMP-256 ciphers in hardware. Fetch the number of supported
+	 * ciphers from the chip-specific hw_param table.
+ */
+ if (!ar->hw_params.n_cipher_suites ||
+ ar->hw_params.n_cipher_suites > ARRAY_SIZE(cipher_suites)) {
+ ath10k_err(ar, "invalid hw_params.n_cipher_suites %d\n",
+ ar->hw_params.n_cipher_suites);
+ ar->hw_params.n_cipher_suites = 8;
+ }
+ ar->hw->wiphy->n_cipher_suites = ar->hw_params.n_cipher_suites;
+
+ wiphy_ext_feature_set(ar->hw->wiphy, NL80211_EXT_FEATURE_CQM_RSSI_LIST);
+
+ ret = ieee80211_register_hw(ar->hw);
+ if (ret) {
+ ath10k_err(ar, "failed to register ieee80211: %d\n", ret);
+ goto err_dfs_detector_exit;
+ }
+
+ if (!ath_is_world_regd(&ar->ath_common.regulatory)) {
+ ret = regulatory_hint(ar->hw->wiphy,
+ ar->ath_common.regulatory.alpha2);
+ if (ret)
+ goto err_unregister;
+ }
+
+ return 0;
+
+err_unregister:
+ ieee80211_unregister_hw(ar->hw);
+
+err_dfs_detector_exit:
+ if (IS_ENABLED(CONFIG_ATH10K_DFS_CERTIFIED) && ar->dfs_detector)
+ ar->dfs_detector->exit(ar->dfs_detector);
+
+err_free:
+ kfree(ar->mac.sbands[NL80211_BAND_2GHZ].channels);
+ kfree(ar->mac.sbands[NL80211_BAND_5GHZ].channels);
+
+ SET_IEEE80211_DEV(ar->hw, NULL);
+ return ret;
+}
+
+void ath10k_mac_unregister(struct ath10k *ar)
+{
+ ieee80211_unregister_hw(ar->hw);
+
+ if (IS_ENABLED(CONFIG_ATH10K_DFS_CERTIFIED) && ar->dfs_detector)
+ ar->dfs_detector->exit(ar->dfs_detector);
+
+ kfree(ar->mac.sbands[NL80211_BAND_2GHZ].channels);
+ kfree(ar->mac.sbands[NL80211_BAND_5GHZ].channels);
+
+ SET_IEEE80211_DEV(ar->hw, NULL);
+}
diff --git a/drivers/net/wireless/ath/ath10k/mac.h b/drivers/net/wireless/ath/ath10k/mac.h
new file mode 100644
index 000000000..81f8d6c0a
--- /dev/null
+++ b/drivers/net/wireless/ath/ath10k/mac.h
@@ -0,0 +1,104 @@
+/*
+ * Copyright (c) 2005-2011 Atheros Communications Inc.
+ * Copyright (c) 2011-2017 Qualcomm Atheros, Inc.
+ *
+ * Permission to use, copy, modify, and/or distribute this software for any
+ * purpose with or without fee is hereby granted, provided that the above
+ * copyright notice and this permission notice appear in all copies.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
+ * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
+ * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+ * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+ * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
+ * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+ */
+
+#ifndef _MAC_H_
+#define _MAC_H_
+
+#include <net/mac80211.h>
+#include "core.h"
+
+#define WEP_KEYID_SHIFT 6
+
+enum wmi_tlv_tx_pause_id;
+enum wmi_tlv_tx_pause_action;
+
+struct ath10k_generic_iter {
+ struct ath10k *ar;
+ int ret;
+};
+
+struct rfc1042_hdr {
+ u8 llc_dsap;
+ u8 llc_ssap;
+ u8 llc_ctrl;
+ u8 snap_oui[3];
+ __be16 snap_type;
+} __packed;
+
+struct ath10k *ath10k_mac_create(size_t priv_size);
+void ath10k_mac_destroy(struct ath10k *ar);
+int ath10k_mac_register(struct ath10k *ar);
+void ath10k_mac_unregister(struct ath10k *ar);
+struct ath10k_vif *ath10k_get_arvif(struct ath10k *ar, u32 vdev_id);
+void __ath10k_scan_finish(struct ath10k *ar);
+void ath10k_scan_finish(struct ath10k *ar);
+void ath10k_scan_timeout_work(struct work_struct *work);
+void ath10k_offchan_tx_purge(struct ath10k *ar);
+void ath10k_offchan_tx_work(struct work_struct *work);
+void ath10k_mgmt_over_wmi_tx_purge(struct ath10k *ar);
+void ath10k_mgmt_over_wmi_tx_work(struct work_struct *work);
+void ath10k_halt(struct ath10k *ar);
+void ath10k_mac_vif_beacon_free(struct ath10k_vif *arvif);
+void ath10k_drain_tx(struct ath10k *ar);
+bool ath10k_mac_is_peer_wep_key_set(struct ath10k *ar, const u8 *addr,
+ u8 keyidx);
+int ath10k_mac_vif_chan(struct ieee80211_vif *vif,
+ struct cfg80211_chan_def *def);
+
+void ath10k_mac_handle_beacon(struct ath10k *ar, struct sk_buff *skb);
+void ath10k_mac_handle_beacon_miss(struct ath10k *ar, u32 vdev_id);
+void ath10k_mac_handle_tx_pause_vdev(struct ath10k *ar, u32 vdev_id,
+ enum wmi_tlv_tx_pause_id pause_id,
+ enum wmi_tlv_tx_pause_action action);
+
+u8 ath10k_mac_hw_rate_to_idx(const struct ieee80211_supported_band *sband,
+ u8 hw_rate, bool cck);
+u8 ath10k_mac_bitrate_to_idx(const struct ieee80211_supported_band *sband,
+ u32 bitrate);
+
+void ath10k_mac_tx_lock(struct ath10k *ar, int reason);
+void ath10k_mac_tx_unlock(struct ath10k *ar, int reason);
+void ath10k_mac_vif_tx_lock(struct ath10k_vif *arvif, int reason);
+void ath10k_mac_vif_tx_unlock(struct ath10k_vif *arvif, int reason);
+bool ath10k_mac_tx_frm_has_freq(struct ath10k *ar);
+void ath10k_mac_tx_push_pending(struct ath10k *ar);
+int ath10k_mac_tx_push_txq(struct ieee80211_hw *hw,
+ struct ieee80211_txq *txq);
+struct ieee80211_txq *ath10k_mac_txq_lookup(struct ath10k *ar,
+ u16 peer_id,
+ u8 tid);
+int ath10k_mac_ext_resource_config(struct ath10k *ar, u32 val);
+
+static inline void ath10k_tx_h_seq_no(struct ieee80211_vif *vif,
+ struct sk_buff *skb)
+{
+ struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
+ struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data;
+ struct ath10k_vif *arvif = (void *)vif->drv_priv;
+
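+	/* The sequence number lives in the upper 12 bits of seq_ctrl
+	 * (IEEE80211_SCTL_SEQ), hence the 0x10 step. It is bumped only on
+	 * the first fragment so all fragments of an MSDU share a number,
+	 * while the low 4 fragment bits of the header are preserved.
+	 */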
+ if (info->flags & IEEE80211_TX_CTL_ASSIGN_SEQ) {
+ if (arvif->tx_seq_no == 0)
+ arvif->tx_seq_no = 0x1000;
+
+ if (info->flags & IEEE80211_TX_CTL_FIRST_FRAGMENT)
+ arvif->tx_seq_no += 0x10;
+ hdr->seq_ctrl &= cpu_to_le16(IEEE80211_SCTL_FRAG);
+ hdr->seq_ctrl |= cpu_to_le16(arvif->tx_seq_no);
+ }
+}
+
+#endif /* _MAC_H_ */
diff --git a/drivers/net/wireless/ath/ath10k/p2p.c b/drivers/net/wireless/ath/ath10k/p2p.c
new file mode 100644
index 000000000..7e621ee19
--- /dev/null
+++ b/drivers/net/wireless/ath/ath10k/p2p.c
@@ -0,0 +1,156 @@
+/*
+ * Copyright (c) 2015 Qualcomm Atheros, Inc.
+ *
+ * Permission to use, copy, modify, and/or distribute this software for any
+ * purpose with or without fee is hereby granted, provided that the above
+ * copyright notice and this permission notice appear in all copies.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
+ * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
+ * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+ * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+ * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
+ * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+ */
+
+#include "core.h"
+#include "wmi.h"
+#include "mac.h"
+#include "p2p.h"
+
+static void ath10k_p2p_noa_ie_fill(u8 *data, size_t len,
+ const struct wmi_p2p_noa_info *noa)
+{
+ struct ieee80211_p2p_noa_attr *noa_attr;
+ u8 ctwindow_oppps = noa->ctwindow_oppps;
+ u8 ctwindow = ctwindow_oppps >> WMI_P2P_OPPPS_CTWINDOW_OFFSET;
+ bool oppps = !!(ctwindow_oppps & WMI_P2P_OPPPS_ENABLE_BIT);
+ __le16 *noa_attr_len;
+ u16 attr_len;
+ u8 noa_descriptors = noa->num_descriptors;
+ int i;
+
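+	/* IE layout: EID(1) + len(1) + WFA OUI(3) + OUI type(1), followed
+	 * by a Notice of Absence attribute: ID(1) + len(2) + index(1) +
+	 * ctwindow/oppps(1) + NoA descriptors. This mirrors
+	 * ath10k_p2p_noa_ie_len_compute() below.
+	 */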
+ /* P2P IE */
+ data[0] = WLAN_EID_VENDOR_SPECIFIC;
+ data[1] = len - 2;
+ data[2] = (WLAN_OUI_WFA >> 16) & 0xff;
+ data[3] = (WLAN_OUI_WFA >> 8) & 0xff;
+ data[4] = (WLAN_OUI_WFA >> 0) & 0xff;
+ data[5] = WLAN_OUI_TYPE_WFA_P2P;
+
+ /* NOA ATTR */
+ data[6] = IEEE80211_P2P_ATTR_ABSENCE_NOTICE;
+ noa_attr_len = (__le16 *)&data[7]; /* 2 bytes */
+ noa_attr = (struct ieee80211_p2p_noa_attr *)&data[9];
+
+ noa_attr->index = noa->index;
+ noa_attr->oppps_ctwindow = ctwindow;
+ if (oppps)
+ noa_attr->oppps_ctwindow |= IEEE80211_P2P_OPPPS_ENABLE_BIT;
+
+ for (i = 0; i < noa_descriptors; i++) {
+ noa_attr->desc[i].count =
+ __le32_to_cpu(noa->descriptors[i].type_count);
+ noa_attr->desc[i].duration = noa->descriptors[i].duration;
+ noa_attr->desc[i].interval = noa->descriptors[i].interval;
+ noa_attr->desc[i].start_time = noa->descriptors[i].start_time;
+ }
+
+ attr_len = 2; /* index + oppps_ctwindow */
+ attr_len += noa_descriptors * sizeof(struct ieee80211_p2p_noa_desc);
+ *noa_attr_len = __cpu_to_le16(attr_len);
+}
+
+static size_t ath10k_p2p_noa_ie_len_compute(const struct wmi_p2p_noa_info *noa)
+{
+ size_t len = 0;
+
+ if (!noa->num_descriptors &&
+ !(noa->ctwindow_oppps & WMI_P2P_OPPPS_ENABLE_BIT))
+ return 0;
+
+ len += 1 + 1 + 4; /* EID + len + OUI */
+ len += 1 + 2; /* noa attr + attr len */
+ len += 1 + 1; /* index + oppps_ctwindow */
+ len += noa->num_descriptors * sizeof(struct ieee80211_p2p_noa_desc);
+
+ return len;
+}
+
+static void ath10k_p2p_noa_ie_assign(struct ath10k_vif *arvif, void *ie,
+ size_t len)
+{
+ struct ath10k *ar = arvif->ar;
+
+ lockdep_assert_held(&ar->data_lock);
+
+ kfree(arvif->u.ap.noa_data);
+
+ arvif->u.ap.noa_data = ie;
+ arvif->u.ap.noa_len = len;
+}
+
+static void __ath10k_p2p_noa_update(struct ath10k_vif *arvif,
+ const struct wmi_p2p_noa_info *noa)
+{
+ struct ath10k *ar = arvif->ar;
+ void *ie;
+ size_t len;
+
+ lockdep_assert_held(&ar->data_lock);
+
+ ath10k_p2p_noa_ie_assign(arvif, NULL, 0);
+
+ len = ath10k_p2p_noa_ie_len_compute(noa);
+ if (!len)
+ return;
+
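+	/* Callers hold ar->data_lock, hence the atomic allocation. */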
+ ie = kmalloc(len, GFP_ATOMIC);
+ if (!ie)
+ return;
+
+ ath10k_p2p_noa_ie_fill(ie, len, noa);
+ ath10k_p2p_noa_ie_assign(arvif, ie, len);
+}
+
+void ath10k_p2p_noa_update(struct ath10k_vif *arvif,
+ const struct wmi_p2p_noa_info *noa)
+{
+ struct ath10k *ar = arvif->ar;
+
+ spin_lock_bh(&ar->data_lock);
+ __ath10k_p2p_noa_update(arvif, noa);
+ spin_unlock_bh(&ar->data_lock);
+}
+
+struct ath10k_p2p_noa_arg {
+ u32 vdev_id;
+ const struct wmi_p2p_noa_info *noa;
+};
+
+static void ath10k_p2p_noa_update_vdev_iter(void *data, u8 *mac,
+ struct ieee80211_vif *vif)
+{
+ struct ath10k_vif *arvif = (void *)vif->drv_priv;
+ struct ath10k_p2p_noa_arg *arg = data;
+
+ if (arvif->vdev_id != arg->vdev_id)
+ return;
+
+ ath10k_p2p_noa_update(arvif, arg->noa);
+}
+
+void ath10k_p2p_noa_update_by_vdev_id(struct ath10k *ar, u32 vdev_id,
+ const struct wmi_p2p_noa_info *noa)
+{
+ struct ath10k_p2p_noa_arg arg = {
+ .vdev_id = vdev_id,
+ .noa = noa,
+ };
+
+ ieee80211_iterate_active_interfaces_atomic(ar->hw,
+ IEEE80211_IFACE_ITER_NORMAL,
+ ath10k_p2p_noa_update_vdev_iter,
+ &arg);
+}
diff --git a/drivers/net/wireless/ath/ath10k/p2p.h b/drivers/net/wireless/ath/ath10k/p2p.h
new file mode 100644
index 000000000..7be616e2e
--- /dev/null
+++ b/drivers/net/wireless/ath/ath10k/p2p.h
@@ -0,0 +1,28 @@
+/*
+ * Copyright (c) 2015 Qualcomm Atheros, Inc.
+ *
+ * Permission to use, copy, modify, and/or distribute this software for any
+ * purpose with or without fee is hereby granted, provided that the above
+ * copyright notice and this permission notice appear in all copies.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
+ * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
+ * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+ * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+ * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
+ * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+ */
+
+#ifndef _P2P_H
+#define _P2P_H
+
+struct ath10k_vif;
+struct wmi_p2p_noa_info;
+
+void ath10k_p2p_noa_update(struct ath10k_vif *arvif,
+ const struct wmi_p2p_noa_info *noa);
+void ath10k_p2p_noa_update_by_vdev_id(struct ath10k *ar, u32 vdev_id,
+ const struct wmi_p2p_noa_info *noa);
+
+#endif
diff --git a/drivers/net/wireless/ath/ath10k/pci.c b/drivers/net/wireless/ath/ath10k/pci.c
new file mode 100644
index 000000000..caece8339
--- /dev/null
+++ b/drivers/net/wireless/ath/ath10k/pci.c
@@ -0,0 +1,3798 @@
+/*
+ * Copyright (c) 2005-2011 Atheros Communications Inc.
+ * Copyright (c) 2011-2017 Qualcomm Atheros, Inc.
+ *
+ * Permission to use, copy, modify, and/or distribute this software for any
+ * purpose with or without fee is hereby granted, provided that the above
+ * copyright notice and this permission notice appear in all copies.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
+ * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
+ * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+ * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+ * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
+ * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+ */
+
+#include <linux/pci.h>
+#include <linux/module.h>
+#include <linux/interrupt.h>
+#include <linux/spinlock.h>
+#include <linux/bitops.h>
+
+#include "core.h"
+#include "debug.h"
+#include "coredump.h"
+
+#include "targaddrs.h"
+#include "bmi.h"
+
+#include "hif.h"
+#include "htc.h"
+
+#include "ce.h"
+#include "pci.h"
+
+enum ath10k_pci_reset_mode {
+ ATH10K_PCI_RESET_AUTO = 0,
+ ATH10K_PCI_RESET_WARM_ONLY = 1,
+};
+
+static unsigned int ath10k_pci_irq_mode = ATH10K_PCI_IRQ_AUTO;
+static unsigned int ath10k_pci_reset_mode = ATH10K_PCI_RESET_AUTO;
+
+module_param_named(irq_mode, ath10k_pci_irq_mode, uint, 0644);
+MODULE_PARM_DESC(irq_mode, "0: auto, 1: legacy, 2: msi (default: 0)");
+
+module_param_named(reset_mode, ath10k_pci_reset_mode, uint, 0644);
+MODULE_PARM_DESC(reset_mode, "0: auto, 1: warm only (default: 0)");
+
+/* how long to wait for the target to initialise, in ms */
+#define ATH10K_PCI_TARGET_WAIT 3000
+#define ATH10K_PCI_NUM_WARM_RESET_ATTEMPTS 3
+
+/* Maximum number of bytes that can be handled atomically by
+ * diag read and write.
+ */
+#define ATH10K_DIAG_TRANSFER_LIMIT 0x5000
+
+#define QCA99X0_PCIE_BAR0_START_REG 0x81030
+#define QCA99X0_CPU_MEM_ADDR_REG 0x4d00c
+#define QCA99X0_CPU_MEM_DATA_REG 0x4d010
+
+static const struct pci_device_id ath10k_pci_id_table[] = {
+ /* PCI-E QCA988X V2 (Ubiquiti branded) */
+ { PCI_VDEVICE(UBIQUITI, QCA988X_2_0_DEVICE_ID_UBNT) },
+
+ { PCI_VDEVICE(ATHEROS, QCA988X_2_0_DEVICE_ID) }, /* PCI-E QCA988X V2 */
+ { PCI_VDEVICE(ATHEROS, QCA6164_2_1_DEVICE_ID) }, /* PCI-E QCA6164 V2.1 */
+ { PCI_VDEVICE(ATHEROS, QCA6174_2_1_DEVICE_ID) }, /* PCI-E QCA6174 V2.1 */
+ { PCI_VDEVICE(ATHEROS, QCA99X0_2_0_DEVICE_ID) }, /* PCI-E QCA99X0 V2 */
+ { PCI_VDEVICE(ATHEROS, QCA9888_2_0_DEVICE_ID) }, /* PCI-E QCA9888 V2 */
+ { PCI_VDEVICE(ATHEROS, QCA9984_1_0_DEVICE_ID) }, /* PCI-E QCA9984 V1 */
+ { PCI_VDEVICE(ATHEROS, QCA9377_1_0_DEVICE_ID) }, /* PCI-E QCA9377 V1 */
+ { PCI_VDEVICE(ATHEROS, QCA9887_1_0_DEVICE_ID) }, /* PCI-E QCA9887 */
+ {0}
+};
+
+static const struct ath10k_pci_supp_chip ath10k_pci_supp_chips[] = {
+ /* QCA988X pre 2.0 chips are not supported because they need some nasty
+ * hacks. ath10k doesn't have them and these devices crash horribly
+ * because of that.
+ */
+ { QCA988X_2_0_DEVICE_ID_UBNT, QCA988X_HW_2_0_CHIP_ID_REV },
+ { QCA988X_2_0_DEVICE_ID, QCA988X_HW_2_0_CHIP_ID_REV },
+
+ { QCA6164_2_1_DEVICE_ID, QCA6174_HW_2_1_CHIP_ID_REV },
+ { QCA6164_2_1_DEVICE_ID, QCA6174_HW_2_2_CHIP_ID_REV },
+ { QCA6164_2_1_DEVICE_ID, QCA6174_HW_3_0_CHIP_ID_REV },
+ { QCA6164_2_1_DEVICE_ID, QCA6174_HW_3_1_CHIP_ID_REV },
+ { QCA6164_2_1_DEVICE_ID, QCA6174_HW_3_2_CHIP_ID_REV },
+
+ { QCA6174_2_1_DEVICE_ID, QCA6174_HW_2_1_CHIP_ID_REV },
+ { QCA6174_2_1_DEVICE_ID, QCA6174_HW_2_2_CHIP_ID_REV },
+ { QCA6174_2_1_DEVICE_ID, QCA6174_HW_3_0_CHIP_ID_REV },
+ { QCA6174_2_1_DEVICE_ID, QCA6174_HW_3_1_CHIP_ID_REV },
+ { QCA6174_2_1_DEVICE_ID, QCA6174_HW_3_2_CHIP_ID_REV },
+
+ { QCA99X0_2_0_DEVICE_ID, QCA99X0_HW_2_0_CHIP_ID_REV },
+
+ { QCA9984_1_0_DEVICE_ID, QCA9984_HW_1_0_CHIP_ID_REV },
+
+ { QCA9888_2_0_DEVICE_ID, QCA9888_HW_2_0_CHIP_ID_REV },
+
+ { QCA9377_1_0_DEVICE_ID, QCA9377_HW_1_0_CHIP_ID_REV },
+ { QCA9377_1_0_DEVICE_ID, QCA9377_HW_1_1_CHIP_ID_REV },
+
+ { QCA9887_1_0_DEVICE_ID, QCA9887_HW_1_0_CHIP_ID_REV },
+};
+
+static void ath10k_pci_buffer_cleanup(struct ath10k *ar);
+static int ath10k_pci_cold_reset(struct ath10k *ar);
+static int ath10k_pci_safe_chip_reset(struct ath10k *ar);
+static int ath10k_pci_init_irq(struct ath10k *ar);
+static int ath10k_pci_deinit_irq(struct ath10k *ar);
+static int ath10k_pci_request_irq(struct ath10k *ar);
+static void ath10k_pci_free_irq(struct ath10k *ar);
+static int ath10k_pci_bmi_wait(struct ath10k *ar,
+ struct ath10k_ce_pipe *tx_pipe,
+ struct ath10k_ce_pipe *rx_pipe,
+ struct bmi_xfer *xfer);
+static int ath10k_pci_qca99x0_chip_reset(struct ath10k *ar);
+static void ath10k_pci_htc_tx_cb(struct ath10k_ce_pipe *ce_state);
+static void ath10k_pci_htc_rx_cb(struct ath10k_ce_pipe *ce_state);
+static void ath10k_pci_htt_tx_cb(struct ath10k_ce_pipe *ce_state);
+static void ath10k_pci_htt_rx_cb(struct ath10k_ce_pipe *ce_state);
+static void ath10k_pci_htt_htc_rx_cb(struct ath10k_ce_pipe *ce_state);
+static void ath10k_pci_pktlog_rx_cb(struct ath10k_ce_pipe *ce_state);
+
+static struct ce_attr host_ce_config_wlan[] = {
+ /* CE0: host->target HTC control and raw streams */
+ {
+ .flags = CE_ATTR_FLAGS,
+ .src_nentries = 16,
+ .src_sz_max = 256,
+ .dest_nentries = 0,
+ .send_cb = ath10k_pci_htc_tx_cb,
+ },
+
+ /* CE1: target->host HTT + HTC control */
+ {
+ .flags = CE_ATTR_FLAGS,
+ .src_nentries = 0,
+ .src_sz_max = 2048,
+ .dest_nentries = 512,
+ .recv_cb = ath10k_pci_htt_htc_rx_cb,
+ },
+
+ /* CE2: target->host WMI */
+ {
+ .flags = CE_ATTR_FLAGS,
+ .src_nentries = 0,
+ .src_sz_max = 2048,
+ .dest_nentries = 128,
+ .recv_cb = ath10k_pci_htc_rx_cb,
+ },
+
+ /* CE3: host->target WMI */
+ {
+ .flags = CE_ATTR_FLAGS,
+ .src_nentries = 32,
+ .src_sz_max = 2048,
+ .dest_nentries = 0,
+ .send_cb = ath10k_pci_htc_tx_cb,
+ },
+
+ /* CE4: host->target HTT */
+ {
+ .flags = CE_ATTR_FLAGS | CE_ATTR_DIS_INTR,
+ .src_nentries = CE_HTT_H2T_MSG_SRC_NENTRIES,
+ .src_sz_max = 256,
+ .dest_nentries = 0,
+ .send_cb = ath10k_pci_htt_tx_cb,
+ },
+
+ /* CE5: target->host HTT (HIF->HTT) */
+ {
+ .flags = CE_ATTR_FLAGS,
+ .src_nentries = 0,
+ .src_sz_max = 512,
+ .dest_nentries = 512,
+ .recv_cb = ath10k_pci_htt_rx_cb,
+ },
+
+ /* CE6: target autonomous hif_memcpy */
+ {
+ .flags = CE_ATTR_FLAGS,
+ .src_nentries = 0,
+ .src_sz_max = 0,
+ .dest_nentries = 0,
+ },
+
+ /* CE7: ce_diag, the Diagnostic Window */
+ {
+ .flags = CE_ATTR_FLAGS,
+ .src_nentries = 2,
+ .src_sz_max = DIAG_TRANSFER_LIMIT,
+ .dest_nentries = 2,
+ },
+
+ /* CE8: target->host pktlog */
+ {
+ .flags = CE_ATTR_FLAGS,
+ .src_nentries = 0,
+ .src_sz_max = 2048,
+ .dest_nentries = 128,
+ .recv_cb = ath10k_pci_pktlog_rx_cb,
+ },
+
+	/* CE9: target autonomous qcache memcpy */
+ {
+ .flags = CE_ATTR_FLAGS,
+ .src_nentries = 0,
+ .src_sz_max = 0,
+ .dest_nentries = 0,
+ },
+
+ /* CE10: target autonomous hif memcpy */
+ {
+ .flags = CE_ATTR_FLAGS,
+ .src_nentries = 0,
+ .src_sz_max = 0,
+ .dest_nentries = 0,
+ },
+
+ /* CE11: target autonomous hif memcpy */
+ {
+ .flags = CE_ATTR_FLAGS,
+ .src_nentries = 0,
+ .src_sz_max = 0,
+ .dest_nentries = 0,
+ },
+};
+
+/* Target firmware's Copy Engine configuration. */
+static struct ce_pipe_config target_ce_config_wlan[] = {
+ /* CE0: host->target HTC control and raw streams */
+ {
+ .pipenum = __cpu_to_le32(0),
+ .pipedir = __cpu_to_le32(PIPEDIR_OUT),
+ .nentries = __cpu_to_le32(32),
+ .nbytes_max = __cpu_to_le32(256),
+ .flags = __cpu_to_le32(CE_ATTR_FLAGS),
+ .reserved = __cpu_to_le32(0),
+ },
+
+ /* CE1: target->host HTT + HTC control */
+ {
+ .pipenum = __cpu_to_le32(1),
+ .pipedir = __cpu_to_le32(PIPEDIR_IN),
+ .nentries = __cpu_to_le32(32),
+ .nbytes_max = __cpu_to_le32(2048),
+ .flags = __cpu_to_le32(CE_ATTR_FLAGS),
+ .reserved = __cpu_to_le32(0),
+ },
+
+ /* CE2: target->host WMI */
+ {
+ .pipenum = __cpu_to_le32(2),
+ .pipedir = __cpu_to_le32(PIPEDIR_IN),
+ .nentries = __cpu_to_le32(64),
+ .nbytes_max = __cpu_to_le32(2048),
+ .flags = __cpu_to_le32(CE_ATTR_FLAGS),
+ .reserved = __cpu_to_le32(0),
+ },
+
+ /* CE3: host->target WMI */
+ {
+ .pipenum = __cpu_to_le32(3),
+ .pipedir = __cpu_to_le32(PIPEDIR_OUT),
+ .nentries = __cpu_to_le32(32),
+ .nbytes_max = __cpu_to_le32(2048),
+ .flags = __cpu_to_le32(CE_ATTR_FLAGS),
+ .reserved = __cpu_to_le32(0),
+ },
+
+ /* CE4: host->target HTT */
+ {
+ .pipenum = __cpu_to_le32(4),
+ .pipedir = __cpu_to_le32(PIPEDIR_OUT),
+ .nentries = __cpu_to_le32(256),
+ .nbytes_max = __cpu_to_le32(256),
+ .flags = __cpu_to_le32(CE_ATTR_FLAGS),
+ .reserved = __cpu_to_le32(0),
+ },
+
+ /* NB: 50% of src nentries, since tx has 2 frags */
+
+ /* CE5: target->host HTT (HIF->HTT) */
+ {
+ .pipenum = __cpu_to_le32(5),
+ .pipedir = __cpu_to_le32(PIPEDIR_IN),
+ .nentries = __cpu_to_le32(32),
+ .nbytes_max = __cpu_to_le32(512),
+ .flags = __cpu_to_le32(CE_ATTR_FLAGS),
+ .reserved = __cpu_to_le32(0),
+ },
+
+ /* CE6: Reserved for target autonomous hif_memcpy */
+ {
+ .pipenum = __cpu_to_le32(6),
+ .pipedir = __cpu_to_le32(PIPEDIR_INOUT),
+ .nentries = __cpu_to_le32(32),
+ .nbytes_max = __cpu_to_le32(4096),
+ .flags = __cpu_to_le32(CE_ATTR_FLAGS),
+ .reserved = __cpu_to_le32(0),
+ },
+
+ /* CE7 used only by Host */
+ {
+ .pipenum = __cpu_to_le32(7),
+ .pipedir = __cpu_to_le32(PIPEDIR_INOUT),
+ .nentries = __cpu_to_le32(0),
+ .nbytes_max = __cpu_to_le32(0),
+ .flags = __cpu_to_le32(0),
+ .reserved = __cpu_to_le32(0),
+ },
+
+	/* CE8: target->host pktlog */
+ {
+ .pipenum = __cpu_to_le32(8),
+ .pipedir = __cpu_to_le32(PIPEDIR_IN),
+ .nentries = __cpu_to_le32(64),
+ .nbytes_max = __cpu_to_le32(2048),
+ .flags = __cpu_to_le32(CE_ATTR_FLAGS | CE_ATTR_DIS_INTR),
+ .reserved = __cpu_to_le32(0),
+ },
+
+	/* CE9: target autonomous qcache memcpy */
+ {
+ .pipenum = __cpu_to_le32(9),
+ .pipedir = __cpu_to_le32(PIPEDIR_INOUT),
+ .nentries = __cpu_to_le32(32),
+ .nbytes_max = __cpu_to_le32(2048),
+ .flags = __cpu_to_le32(CE_ATTR_FLAGS | CE_ATTR_DIS_INTR),
+ .reserved = __cpu_to_le32(0),
+ },
+
+	/* It is not necessary to send the target a wlan configuration for
+	 * CE10 & CE11 as these CEs are not actively used by the target.
+ */
+};
+
+/*
+ * Map from service/endpoint to Copy Engine.
+ * This table is derived from the CE_PCI TABLE, above.
+ * It is passed to the Target at startup for use by firmware.
+ */
+static struct service_to_pipe target_service_to_ce_map_wlan[] = {
+ {
+ __cpu_to_le32(ATH10K_HTC_SVC_ID_WMI_DATA_VO),
+ __cpu_to_le32(PIPEDIR_OUT), /* out = UL = host -> target */
+ __cpu_to_le32(3),
+ },
+ {
+ __cpu_to_le32(ATH10K_HTC_SVC_ID_WMI_DATA_VO),
+ __cpu_to_le32(PIPEDIR_IN), /* in = DL = target -> host */
+ __cpu_to_le32(2),
+ },
+ {
+ __cpu_to_le32(ATH10K_HTC_SVC_ID_WMI_DATA_BK),
+ __cpu_to_le32(PIPEDIR_OUT), /* out = UL = host -> target */
+ __cpu_to_le32(3),
+ },
+ {
+ __cpu_to_le32(ATH10K_HTC_SVC_ID_WMI_DATA_BK),
+ __cpu_to_le32(PIPEDIR_IN), /* in = DL = target -> host */
+ __cpu_to_le32(2),
+ },
+ {
+ __cpu_to_le32(ATH10K_HTC_SVC_ID_WMI_DATA_BE),
+ __cpu_to_le32(PIPEDIR_OUT), /* out = UL = host -> target */
+ __cpu_to_le32(3),
+ },
+ {
+ __cpu_to_le32(ATH10K_HTC_SVC_ID_WMI_DATA_BE),
+ __cpu_to_le32(PIPEDIR_IN), /* in = DL = target -> host */
+ __cpu_to_le32(2),
+ },
+ {
+ __cpu_to_le32(ATH10K_HTC_SVC_ID_WMI_DATA_VI),
+ __cpu_to_le32(PIPEDIR_OUT), /* out = UL = host -> target */
+ __cpu_to_le32(3),
+ },
+ {
+ __cpu_to_le32(ATH10K_HTC_SVC_ID_WMI_DATA_VI),
+ __cpu_to_le32(PIPEDIR_IN), /* in = DL = target -> host */
+ __cpu_to_le32(2),
+ },
+ {
+ __cpu_to_le32(ATH10K_HTC_SVC_ID_WMI_CONTROL),
+ __cpu_to_le32(PIPEDIR_OUT), /* out = UL = host -> target */
+ __cpu_to_le32(3),
+ },
+ {
+ __cpu_to_le32(ATH10K_HTC_SVC_ID_WMI_CONTROL),
+ __cpu_to_le32(PIPEDIR_IN), /* in = DL = target -> host */
+ __cpu_to_le32(2),
+ },
+ {
+ __cpu_to_le32(ATH10K_HTC_SVC_ID_RSVD_CTRL),
+ __cpu_to_le32(PIPEDIR_OUT), /* out = UL = host -> target */
+ __cpu_to_le32(0),
+ },
+ {
+ __cpu_to_le32(ATH10K_HTC_SVC_ID_RSVD_CTRL),
+ __cpu_to_le32(PIPEDIR_IN), /* in = DL = target -> host */
+ __cpu_to_le32(1),
+ },
+ { /* not used */
+ __cpu_to_le32(ATH10K_HTC_SVC_ID_TEST_RAW_STREAMS),
+ __cpu_to_le32(PIPEDIR_OUT), /* out = UL = host -> target */
+ __cpu_to_le32(0),
+ },
+ { /* not used */
+ __cpu_to_le32(ATH10K_HTC_SVC_ID_TEST_RAW_STREAMS),
+ __cpu_to_le32(PIPEDIR_IN), /* in = DL = target -> host */
+ __cpu_to_le32(1),
+ },
+ {
+ __cpu_to_le32(ATH10K_HTC_SVC_ID_HTT_DATA_MSG),
+ __cpu_to_le32(PIPEDIR_OUT), /* out = UL = host -> target */
+ __cpu_to_le32(4),
+ },
+ {
+ __cpu_to_le32(ATH10K_HTC_SVC_ID_HTT_DATA_MSG),
+ __cpu_to_le32(PIPEDIR_IN), /* in = DL = target -> host */
+ __cpu_to_le32(5),
+ },
+
+ /* (Additions here) */
+
+ { /* must be last */
+ __cpu_to_le32(0),
+ __cpu_to_le32(0),
+ __cpu_to_le32(0),
+ },
+};
+
+static bool ath10k_pci_is_awake(struct ath10k *ar)
+{
+ struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
+ u32 val = ioread32(ar_pci->mem + PCIE_LOCAL_BASE_ADDRESS +
+ RTC_STATE_ADDRESS);
+
+ return RTC_STATE_V_GET(val) == RTC_STATE_V_ON;
+}
+
+static void __ath10k_pci_wake(struct ath10k *ar)
+{
+ struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
+
+ lockdep_assert_held(&ar_pci->ps_lock);
+
+ ath10k_dbg(ar, ATH10K_DBG_PCI_PS, "pci ps wake reg refcount %lu awake %d\n",
+ ar_pci->ps_wake_refcount, ar_pci->ps_awake);
+
+ iowrite32(PCIE_SOC_WAKE_V_MASK,
+ ar_pci->mem + PCIE_LOCAL_BASE_ADDRESS +
+ PCIE_SOC_WAKE_ADDRESS);
+}
+
+static void __ath10k_pci_sleep(struct ath10k *ar)
+{
+ struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
+
+ lockdep_assert_held(&ar_pci->ps_lock);
+
+ ath10k_dbg(ar, ATH10K_DBG_PCI_PS, "pci ps sleep reg refcount %lu awake %d\n",
+ ar_pci->ps_wake_refcount, ar_pci->ps_awake);
+
+ iowrite32(PCIE_SOC_WAKE_RESET,
+ ar_pci->mem + PCIE_LOCAL_BASE_ADDRESS +
+ PCIE_SOC_WAKE_ADDRESS);
+ ar_pci->ps_awake = false;
+}
+
+static int ath10k_pci_wake_wait(struct ath10k *ar)
+{
+ int tot_delay = 0;
+ int curr_delay = 5;
+
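+	/* Poll the RTC state, backing off from 5 us to 50 us per step,
+	 * until the target reports awake or PCIE_WAKE_TIMEOUT elapses.
+	 */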
+ while (tot_delay < PCIE_WAKE_TIMEOUT) {
+ if (ath10k_pci_is_awake(ar)) {
+ if (tot_delay > PCIE_WAKE_LATE_US)
+ ath10k_warn(ar, "device wakeup took %d ms which is unusually long, otherwise it works normally.\n",
+ tot_delay / 1000);
+ return 0;
+ }
+
+ udelay(curr_delay);
+ tot_delay += curr_delay;
+
+ if (curr_delay < 50)
+ curr_delay += 5;
+ }
+
+ return -ETIMEDOUT;
+}
+
+static int ath10k_pci_force_wake(struct ath10k *ar)
+{
+ struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
+ unsigned long flags;
+ int ret = 0;
+
+ if (ar_pci->pci_ps)
+ return ret;
+
+ spin_lock_irqsave(&ar_pci->ps_lock, flags);
+
+ if (!ar_pci->ps_awake) {
+ iowrite32(PCIE_SOC_WAKE_V_MASK,
+ ar_pci->mem + PCIE_LOCAL_BASE_ADDRESS +
+ PCIE_SOC_WAKE_ADDRESS);
+
+ ret = ath10k_pci_wake_wait(ar);
+ if (ret == 0)
+ ar_pci->ps_awake = true;
+ }
+
+ spin_unlock_irqrestore(&ar_pci->ps_lock, flags);
+
+ return ret;
+}
+
+static void ath10k_pci_force_sleep(struct ath10k *ar)
+{
+ struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
+ unsigned long flags;
+
+ spin_lock_irqsave(&ar_pci->ps_lock, flags);
+
+ iowrite32(PCIE_SOC_WAKE_RESET,
+ ar_pci->mem + PCIE_LOCAL_BASE_ADDRESS +
+ PCIE_SOC_WAKE_ADDRESS);
+ ar_pci->ps_awake = false;
+
+ spin_unlock_irqrestore(&ar_pci->ps_lock, flags);
+}
+
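+/* MMIO access requires the SoC to be awake. ath10k_pci_wake() and
+ * ath10k_pci_sleep() keep a wake refcount; when it drops to zero a
+ * grace period timer (ath10k_pci_ps_timer) is armed and only the timer
+ * actually puts the device back to sleep.
+ */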
+static int ath10k_pci_wake(struct ath10k *ar)
+{
+ struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
+ unsigned long flags;
+ int ret = 0;
+
+ if (ar_pci->pci_ps == 0)
+ return ret;
+
+ spin_lock_irqsave(&ar_pci->ps_lock, flags);
+
+ ath10k_dbg(ar, ATH10K_DBG_PCI_PS, "pci ps wake refcount %lu awake %d\n",
+ ar_pci->ps_wake_refcount, ar_pci->ps_awake);
+
+	/* This function can be called very frequently. To avoid excessive
+	 * CPU stalls from MMIO reads, cache the device's power state in a
+	 * variable instead of polling the hardware each time.
+	 */
+ if (!ar_pci->ps_awake) {
+ __ath10k_pci_wake(ar);
+
+ ret = ath10k_pci_wake_wait(ar);
+ if (ret == 0)
+ ar_pci->ps_awake = true;
+ }
+
+ if (ret == 0) {
+ ar_pci->ps_wake_refcount++;
+ WARN_ON(ar_pci->ps_wake_refcount == 0);
+ }
+
+ spin_unlock_irqrestore(&ar_pci->ps_lock, flags);
+
+ return ret;
+}
+
+static void ath10k_pci_sleep(struct ath10k *ar)
+{
+ struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
+ unsigned long flags;
+
+ if (ar_pci->pci_ps == 0)
+ return;
+
+ spin_lock_irqsave(&ar_pci->ps_lock, flags);
+
+ ath10k_dbg(ar, ATH10K_DBG_PCI_PS, "pci ps sleep refcount %lu awake %d\n",
+ ar_pci->ps_wake_refcount, ar_pci->ps_awake);
+
+ if (WARN_ON(ar_pci->ps_wake_refcount == 0))
+ goto skip;
+
+ ar_pci->ps_wake_refcount--;
+
+ mod_timer(&ar_pci->ps_timer, jiffies +
+ msecs_to_jiffies(ATH10K_PCI_SLEEP_GRACE_PERIOD_MSEC));
+
+skip:
+ spin_unlock_irqrestore(&ar_pci->ps_lock, flags);
+}
+
+static void ath10k_pci_ps_timer(struct timer_list *t)
+{
+ struct ath10k_pci *ar_pci = from_timer(ar_pci, t, ps_timer);
+ struct ath10k *ar = ar_pci->ar;
+ unsigned long flags;
+
+ spin_lock_irqsave(&ar_pci->ps_lock, flags);
+
+ ath10k_dbg(ar, ATH10K_DBG_PCI_PS, "pci ps timer refcount %lu awake %d\n",
+ ar_pci->ps_wake_refcount, ar_pci->ps_awake);
+
+ if (ar_pci->ps_wake_refcount > 0)
+ goto skip;
+
+ __ath10k_pci_sleep(ar);
+
+skip:
+ spin_unlock_irqrestore(&ar_pci->ps_lock, flags);
+}
+
+static void ath10k_pci_sleep_sync(struct ath10k *ar)
+{
+ struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
+ unsigned long flags;
+
+ if (ar_pci->pci_ps == 0) {
+ ath10k_pci_force_sleep(ar);
+ return;
+ }
+
+ del_timer_sync(&ar_pci->ps_timer);
+
+ spin_lock_irqsave(&ar_pci->ps_lock, flags);
+ WARN_ON(ar_pci->ps_wake_refcount > 0);
+ __ath10k_pci_sleep(ar);
+ spin_unlock_irqrestore(&ar_pci->ps_lock, flags);
+}
+
+static void ath10k_bus_pci_write32(struct ath10k *ar, u32 offset, u32 value)
+{
+ struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
+ int ret;
+
+ if (unlikely(offset + sizeof(value) > ar_pci->mem_len)) {
+ ath10k_warn(ar, "refusing to write mmio out of bounds at 0x%08x - 0x%08zx (max 0x%08zx)\n",
+ offset, offset + sizeof(value), ar_pci->mem_len);
+ return;
+ }
+
+ ret = ath10k_pci_wake(ar);
+ if (ret) {
+ ath10k_warn(ar, "failed to wake target for write32 of 0x%08x at 0x%08x: %d\n",
+ value, offset, ret);
+ return;
+ }
+
+ iowrite32(value, ar_pci->mem + offset);
+ ath10k_pci_sleep(ar);
+}
+
+static u32 ath10k_bus_pci_read32(struct ath10k *ar, u32 offset)
+{
+ struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
+ u32 val;
+ int ret;
+
+ if (unlikely(offset + sizeof(val) > ar_pci->mem_len)) {
+ ath10k_warn(ar, "refusing to read mmio out of bounds at 0x%08x - 0x%08zx (max 0x%08zx)\n",
+ offset, offset + sizeof(val), ar_pci->mem_len);
+ return 0;
+ }
+
+ ret = ath10k_pci_wake(ar);
+ if (ret) {
+ ath10k_warn(ar, "failed to wake target for read32 at 0x%08x: %d\n",
+ offset, ret);
+ return 0xffffffff;
+ }
+
+ val = ioread32(ar_pci->mem + offset);
+ ath10k_pci_sleep(ar);
+
+ return val;
+}
+
+inline void ath10k_pci_write32(struct ath10k *ar, u32 offset, u32 value)
+{
+ struct ath10k_ce *ce = ath10k_ce_priv(ar);
+
+ ce->bus_ops->write32(ar, offset, value);
+}
+
+inline u32 ath10k_pci_read32(struct ath10k *ar, u32 offset)
+{
+ struct ath10k_ce *ce = ath10k_ce_priv(ar);
+
+ return ce->bus_ops->read32(ar, offset);
+}
+
+u32 ath10k_pci_soc_read32(struct ath10k *ar, u32 addr)
+{
+ return ath10k_pci_read32(ar, RTC_SOC_BASE_ADDRESS + addr);
+}
+
+void ath10k_pci_soc_write32(struct ath10k *ar, u32 addr, u32 val)
+{
+ ath10k_pci_write32(ar, RTC_SOC_BASE_ADDRESS + addr, val);
+}
+
+u32 ath10k_pci_reg_read32(struct ath10k *ar, u32 addr)
+{
+ return ath10k_pci_read32(ar, PCIE_LOCAL_BASE_ADDRESS + addr);
+}
+
+void ath10k_pci_reg_write32(struct ath10k *ar, u32 addr, u32 val)
+{
+ ath10k_pci_write32(ar, PCIE_LOCAL_BASE_ADDRESS + addr, val);
+}
+
+bool ath10k_pci_irq_pending(struct ath10k *ar)
+{
+ u32 cause;
+
+ /* Check if the shared legacy irq is for us */
+ cause = ath10k_pci_read32(ar, SOC_CORE_BASE_ADDRESS +
+ PCIE_INTR_CAUSE_ADDRESS);
+ if (cause & (PCIE_INTR_FIRMWARE_MASK | PCIE_INTR_CE_MASK_ALL))
+ return true;
+
+ return false;
+}
+
+void ath10k_pci_disable_and_clear_legacy_irq(struct ath10k *ar)
+{
+ /* IMPORTANT: INTR_CLR register has to be set after
+ * INTR_ENABLE is set to 0, otherwise interrupt can not be
+ * really cleared.
+ */
+ ath10k_pci_write32(ar, SOC_CORE_BASE_ADDRESS + PCIE_INTR_ENABLE_ADDRESS,
+ 0);
+ ath10k_pci_write32(ar, SOC_CORE_BASE_ADDRESS + PCIE_INTR_CLR_ADDRESS,
+ PCIE_INTR_FIRMWARE_MASK | PCIE_INTR_CE_MASK_ALL);
+
+ /* IMPORTANT: this extra read transaction is required to
+ * flush the posted write buffer.
+ */
+ (void)ath10k_pci_read32(ar, SOC_CORE_BASE_ADDRESS +
+ PCIE_INTR_ENABLE_ADDRESS);
+}
+
+void ath10k_pci_enable_legacy_irq(struct ath10k *ar)
+{
+ ath10k_pci_write32(ar, SOC_CORE_BASE_ADDRESS +
+ PCIE_INTR_ENABLE_ADDRESS,
+ PCIE_INTR_FIRMWARE_MASK | PCIE_INTR_CE_MASK_ALL);
+
+ /* IMPORTANT: this extra read transaction is required to
+ * flush the posted write buffer.
+ */
+ (void)ath10k_pci_read32(ar, SOC_CORE_BASE_ADDRESS +
+ PCIE_INTR_ENABLE_ADDRESS);
+}
+
+static inline const char *ath10k_pci_get_irq_method(struct ath10k *ar)
+{
+ struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
+
+ if (ar_pci->oper_irq_mode == ATH10K_PCI_IRQ_MSI)
+ return "msi";
+
+ return "legacy";
+}
+
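+/* RX buffer replenishment: buffers are allocated and DMA-mapped one at a
+ * time and handed to the copy engine under ce_lock; if posting fails for
+ * any reason other than a full ring, the rx_post_retry timer retries the
+ * refill later.
+ */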
+static int __ath10k_pci_rx_post_buf(struct ath10k_pci_pipe *pipe)
+{
+ struct ath10k *ar = pipe->hif_ce_state;
+ struct ath10k_ce *ce = ath10k_ce_priv(ar);
+ struct ath10k_ce_pipe *ce_pipe = pipe->ce_hdl;
+ struct sk_buff *skb;
+ dma_addr_t paddr;
+ int ret;
+
+ skb = dev_alloc_skb(pipe->buf_sz);
+ if (!skb)
+ return -ENOMEM;
+
+ WARN_ONCE((unsigned long)skb->data & 3, "unaligned skb");
+
+ paddr = dma_map_single(ar->dev, skb->data,
+ skb->len + skb_tailroom(skb),
+ DMA_FROM_DEVICE);
+ if (unlikely(dma_mapping_error(ar->dev, paddr))) {
+ ath10k_warn(ar, "failed to dma map pci rx buf\n");
+ dev_kfree_skb_any(skb);
+ return -EIO;
+ }
+
+ ATH10K_SKB_RXCB(skb)->paddr = paddr;
+
+ spin_lock_bh(&ce->ce_lock);
+ ret = ce_pipe->ops->ce_rx_post_buf(ce_pipe, skb, paddr);
+ spin_unlock_bh(&ce->ce_lock);
+ if (ret) {
+ dma_unmap_single(ar->dev, paddr, skb->len + skb_tailroom(skb),
+ DMA_FROM_DEVICE);
+ dev_kfree_skb_any(skb);
+ return ret;
+ }
+
+ return 0;
+}
+
+static void ath10k_pci_rx_post_pipe(struct ath10k_pci_pipe *pipe)
+{
+ struct ath10k *ar = pipe->hif_ce_state;
+ struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
+ struct ath10k_ce *ce = ath10k_ce_priv(ar);
+ struct ath10k_ce_pipe *ce_pipe = pipe->ce_hdl;
+ int ret, num;
+
+ if (pipe->buf_sz == 0)
+ return;
+
+ if (!ce_pipe->dest_ring)
+ return;
+
+ spin_lock_bh(&ce->ce_lock);
+ num = __ath10k_ce_rx_num_free_bufs(ce_pipe);
+ spin_unlock_bh(&ce->ce_lock);
+
+ while (num >= 0) {
+ ret = __ath10k_pci_rx_post_buf(pipe);
+ if (ret) {
+ if (ret == -ENOSPC)
+ break;
+ ath10k_warn(ar, "failed to post pci rx buf: %d\n", ret);
+ mod_timer(&ar_pci->rx_post_retry, jiffies +
+ ATH10K_PCI_RX_POST_RETRY_MS);
+ break;
+ }
+ num--;
+ }
+}
+
+void ath10k_pci_rx_post(struct ath10k *ar)
+{
+ struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
+ int i;
+
+ for (i = 0; i < CE_COUNT; i++)
+ ath10k_pci_rx_post_pipe(&ar_pci->pipe_info[i]);
+}
+
+void ath10k_pci_rx_replenish_retry(struct timer_list *t)
+{
+ struct ath10k_pci *ar_pci = from_timer(ar_pci, t, rx_post_retry);
+ struct ath10k *ar = ar_pci->ar;
+
+ ath10k_pci_rx_post(ar);
+}
+
+static u32 ath10k_pci_qca988x_targ_cpu_to_ce_addr(struct ath10k *ar, u32 addr)
+{
+ u32 val = 0, region = addr & 0xfffff;
+
+ val = (ath10k_pci_read32(ar, SOC_CORE_BASE_ADDRESS + CORE_CTRL_ADDRESS)
+ & 0x7ff) << 21;
+ val |= 0x100000 | region;
+ return val;
+}
+
+static u32 ath10k_pci_qca99x0_targ_cpu_to_ce_addr(struct ath10k *ar, u32 addr)
+{
+ u32 val = 0, region = addr & 0xfffff;
+
+ val = ath10k_pci_read32(ar, PCIE_BAR_REG_ADDRESS);
+ val |= 0x100000 | region;
+ return val;
+}
+
+static u32 ath10k_pci_targ_cpu_to_ce_addr(struct ath10k *ar, u32 addr)
+{
+ struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
+
+ if (WARN_ON_ONCE(!ar_pci->targ_cpu_to_ce_addr))
+ return -ENOTSUPP;
+
+ return ar_pci->targ_cpu_to_ce_addr(ar, addr);
+}
+
+/*
+ * Diagnostic read/write access is provided for startup/config/debug usage.
+ * Caller must guarantee proper alignment, where applicable, and that there
+ * is only a single user at any moment.
+ */
+static int ath10k_pci_diag_read_mem(struct ath10k *ar, u32 address, void *data,
+ int nbytes)
+{
+ struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
+ struct ath10k_ce *ce = ath10k_ce_priv(ar);
+ int ret = 0;
+ u32 *buf;
+ unsigned int completed_nbytes, alloc_nbytes, remaining_bytes;
+ struct ath10k_ce_pipe *ce_diag;
+ /* Host buffer address in CE space */
+ u32 ce_data;
+ dma_addr_t ce_data_base = 0;
+ void *data_buf = NULL;
+ int i;
+
+ spin_lock_bh(&ce->ce_lock);
+
+ ce_diag = ar_pci->ce_diag;
+
+ /*
+ * Allocate a temporary bounce buffer to hold caller's data
+ * to be DMA'ed from Target. This guarantees
+ * 1) 4-byte alignment
+ * 2) Buffer in DMA-able space
+ */
+ alloc_nbytes = min_t(unsigned int, nbytes, DIAG_TRANSFER_LIMIT);
+
+ data_buf = (unsigned char *)dma_zalloc_coherent(ar->dev,
+ alloc_nbytes,
+ &ce_data_base,
+ GFP_ATOMIC);
+
+ if (!data_buf) {
+ ret = -ENOMEM;
+ goto done;
+ }
+
+ remaining_bytes = nbytes;
+ ce_data = ce_data_base;
+ while (remaining_bytes) {
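+ /* Per-chunk flow: post the bounce buffer as the CE destination,
+ * ask the CE to send at most DIAG_TRANSFER_LIMIT bytes from the
+ * converted target address, then poll the send and receive
+ * completion rings with a 1 ms backoff until done.
+ */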
+ nbytes = min_t(unsigned int, remaining_bytes,
+ DIAG_TRANSFER_LIMIT);
+
+ ret = ce_diag->ops->ce_rx_post_buf(ce_diag, &ce_data, ce_data);
+ if (ret != 0)
+ goto done;
+
+ /* Request CE to send from Target(!) address to Host buffer */
+ /*
+ * The address supplied by the caller is in the
+ * Target CPU virtual address space.
+ *
+ * In order to use this address with the diagnostic CE,
+ * convert it from Target CPU virtual address space
+ * to CE address space
+ */
+ address = ath10k_pci_targ_cpu_to_ce_addr(ar, address);
+
+ ret = ath10k_ce_send_nolock(ce_diag, NULL, (u32)address, nbytes, 0,
+ 0);
+ if (ret)
+ goto done;
+
+ i = 0;
+ while (ath10k_ce_completed_send_next_nolock(ce_diag,
+ NULL) != 0) {
+ mdelay(1);
+ if (i++ > DIAG_ACCESS_CE_TIMEOUT_MS) {
+ ret = -EBUSY;
+ goto done;
+ }
+ }
+
+ i = 0;
+ while (ath10k_ce_completed_recv_next_nolock(ce_diag,
+ (void **)&buf,
+ &completed_nbytes)
+ != 0) {
+ mdelay(1);
+
+ if (i++ > DIAG_ACCESS_CE_TIMEOUT_MS) {
+ ret = -EBUSY;
+ goto done;
+ }
+ }
+
+ if (nbytes != completed_nbytes) {
+ ret = -EIO;
+ goto done;
+ }
+
+ if (*buf != ce_data) {
+ ret = -EIO;
+ goto done;
+ }
+
+ remaining_bytes -= nbytes;
+ memcpy(data, data_buf, nbytes);
+
+ address += nbytes;
+ data += nbytes;
+ }
+
+done:
+
+ if (data_buf)
+ dma_free_coherent(ar->dev, alloc_nbytes, data_buf,
+ ce_data_base);
+
+ spin_unlock_bh(&ce->ce_lock);
+
+ return ret;
+}
+
+static int ath10k_pci_diag_read32(struct ath10k *ar, u32 address, u32 *value)
+{
+ __le32 val = 0;
+ int ret;
+
+ ret = ath10k_pci_diag_read_mem(ar, address, &val, sizeof(val));
+ *value = __le32_to_cpu(val);
+
+ return ret;
+}
+
+static int __ath10k_pci_diag_read_hi(struct ath10k *ar, void *dest,
+ u32 src, u32 len)
+{
+ u32 host_addr, addr;
+ int ret;
+
+ host_addr = host_interest_item_address(src);
+
+ ret = ath10k_pci_diag_read32(ar, host_addr, &addr);
+ if (ret != 0) {
+ ath10k_warn(ar, "failed to get memcpy hi address for firmware address %d: %d\n",
+ src, ret);
+ return ret;
+ }
+
+ ret = ath10k_pci_diag_read_mem(ar, addr, dest, len);
+ if (ret != 0) {
+ ath10k_warn(ar, "failed to memcpy firmware memory from %d (%d B): %d\n",
+ addr, len, ret);
+ return ret;
+ }
+
+ return 0;
+}
+
+#define ath10k_pci_diag_read_hi(ar, dest, src, len) \
+ __ath10k_pci_diag_read_hi(ar, dest, HI_ITEM(src), len)
+
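+/* Diagnostic writes mirror diagnostic reads: the caller's data is staged
+ * chunk by chunk in a DMA-coherent bounce buffer, the (converted) target
+ * address is posted as the CE destination, and the CE copies at most
+ * DIAG_TRANSFER_LIMIT bytes per iteration from the bounce buffer into
+ * target memory.
+ */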
+int ath10k_pci_diag_write_mem(struct ath10k *ar, u32 address,
+ const void *data, int nbytes)
+{
+ struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
+ struct ath10k_ce *ce = ath10k_ce_priv(ar);
+ int ret = 0;
+ u32 *buf;
+ unsigned int completed_nbytes, alloc_nbytes, remaining_bytes;
+ struct ath10k_ce_pipe *ce_diag;
+ void *data_buf = NULL;
+ dma_addr_t ce_data_base = 0;
+ int i;
+
+ spin_lock_bh(&ce->ce_lock);
+
+ ce_diag = ar_pci->ce_diag;
+
+ /*
+ * Allocate a temporary bounce buffer to hold caller's data
+ * to be DMA'ed to Target. This guarantees
+ * 1) 4-byte alignment
+ * 2) Buffer in DMA-able space
+ */
+ alloc_nbytes = min_t(unsigned int, nbytes, DIAG_TRANSFER_LIMIT);
+
+ data_buf = (unsigned char *)dma_alloc_coherent(ar->dev,
+ alloc_nbytes,
+ &ce_data_base,
+ GFP_ATOMIC);
+ if (!data_buf) {
+ ret = -ENOMEM;
+ goto done;
+ }
+
+ /*
+ * The address supplied by the caller is in the
+ * Target CPU virtual address space.
+ *
+ * In order to use this address with the diagnostic CE,
+ * convert it from
+ * Target CPU virtual address space
+ * to
+ * CE address space
+ */
+ address = ath10k_pci_targ_cpu_to_ce_addr(ar, address);
+
+ remaining_bytes = nbytes;
+ while (remaining_bytes) {
+ /* FIXME: check cast */
+ nbytes = min_t(int, remaining_bytes, DIAG_TRANSFER_LIMIT);
+
+ /* Copy caller's data to allocated DMA buf */
+ memcpy(data_buf, data, nbytes);
+
+ /* Set up to receive directly into Target(!) address */
+ ret = ce_diag->ops->ce_rx_post_buf(ce_diag, &address, address);
+ if (ret != 0)
+ goto done;
+
+ /*
+ * Request CE to send caller-supplied data that
+ * was copied to bounce buffer to Target(!) address.
+ */
+ ret = ath10k_ce_send_nolock(ce_diag, NULL, ce_data_base,
+ nbytes, 0, 0);
+ if (ret != 0)
+ goto done;
+
+ i = 0;
+ while (ath10k_ce_completed_send_next_nolock(ce_diag,
+ NULL) != 0) {
+ mdelay(1);
+
+ if (i++ > DIAG_ACCESS_CE_TIMEOUT_MS) {
+ ret = -EBUSY;
+ goto done;
+ }
+ }
+
+ i = 0;
+ while (ath10k_ce_completed_recv_next_nolock(ce_diag,
+ (void **)&buf,
+ &completed_nbytes)
+ != 0) {
+ mdelay(1);
+
+ if (i++ > DIAG_ACCESS_CE_TIMEOUT_MS) {
+ ret = -EBUSY;
+ goto done;
+ }
+ }
+
+ if (nbytes != completed_nbytes) {
+ ret = -EIO;
+ goto done;
+ }
+
+ if (*buf != address) {
+ ret = -EIO;
+ goto done;
+ }
+
+ remaining_bytes -= nbytes;
+ address += nbytes;
+ data += nbytes;
+ }
+
+done:
+ if (data_buf) {
+ dma_free_coherent(ar->dev, alloc_nbytes, data_buf,
+ ce_data_base);
+ }
+
+ if (ret != 0)
+ ath10k_warn(ar, "failed to write diag value at 0x%x: %d\n",
+ address, ret);
+
+ spin_unlock_bh(&ce->ce_lock);
+
+ return ret;
+}
+
+static int ath10k_pci_diag_write32(struct ath10k *ar, u32 address, u32 value)
+{
+ __le32 val = __cpu_to_le32(value);
+
+ return ath10k_pci_diag_write_mem(ar, address, &val, sizeof(val));
+}
+
+/* Called by lower (CE) layer when a send to Target completes. */
+static void ath10k_pci_htc_tx_cb(struct ath10k_ce_pipe *ce_state)
+{
+ struct ath10k *ar = ce_state->ar;
+ struct sk_buff_head list;
+ struct sk_buff *skb;
+
+ __skb_queue_head_init(&list);
+ while (ath10k_ce_completed_send_next(ce_state, (void **)&skb) == 0) {
+ /* no need to call tx completion for NULL pointers */
+ if (!skb)
+ continue;
+
+ __skb_queue_tail(&list, skb);
+ }
+
+ while ((skb = __skb_dequeue(&list)))
+ ath10k_htc_tx_completion_handler(ar, skb);
+}
+
+static void ath10k_pci_process_rx_cb(struct ath10k_ce_pipe *ce_state,
+ void (*callback)(struct ath10k *ar,
+ struct sk_buff *skb))
+{
+ struct ath10k *ar = ce_state->ar;
+ struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
+ struct ath10k_pci_pipe *pipe_info = &ar_pci->pipe_info[ce_state->id];
+ struct sk_buff *skb;
+ struct sk_buff_head list;
+ void *transfer_context;
+ unsigned int nbytes, max_nbytes;
+
+ __skb_queue_head_init(&list);
+ while (ath10k_ce_completed_recv_next(ce_state, &transfer_context,
+ &nbytes) == 0) {
+ skb = transfer_context;
+ max_nbytes = skb->len + skb_tailroom(skb);
+ dma_unmap_single(ar->dev, ATH10K_SKB_RXCB(skb)->paddr,
+ max_nbytes, DMA_FROM_DEVICE);
+
+ if (unlikely(max_nbytes < nbytes)) {
+ ath10k_warn(ar, "rxed more than expected (nbytes %d, max %d)",
+ nbytes, max_nbytes);
+ dev_kfree_skb_any(skb);
+ continue;
+ }
+
+ skb_put(skb, nbytes);
+ __skb_queue_tail(&list, skb);
+ }
+
+ while ((skb = __skb_dequeue(&list))) {
+ ath10k_dbg(ar, ATH10K_DBG_PCI, "pci rx ce pipe %d len %d\n",
+ ce_state->id, skb->len);
+ ath10k_dbg_dump(ar, ATH10K_DBG_PCI_DUMP, NULL, "pci rx: ",
+ skb->data, skb->len);
+
+ callback(ar, skb);
+ }
+
+ ath10k_pci_rx_post_pipe(pipe_info);
+}
+
+static void ath10k_pci_process_htt_rx_cb(struct ath10k_ce_pipe *ce_state,
+ void (*callback)(struct ath10k *ar,
+ struct sk_buff *skb))
+{
+ struct ath10k *ar = ce_state->ar;
+ struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
+ struct ath10k_pci_pipe *pipe_info = &ar_pci->pipe_info[ce_state->id];
+ struct ath10k_ce_pipe *ce_pipe = pipe_info->ce_hdl;
+ struct sk_buff *skb;
+ struct sk_buff_head list;
+ void *transfer_context;
+ unsigned int nbytes, max_nbytes, nentries;
+ int orig_len;
+
+ /* No need to acquire ce_lock for CE5, since this is the only place CE5
+ * is processed other than init and deinit. Before releasing CE5
+ * buffers, interrupts are disabled. Thus CE5 access is serialized.
+ */
+ __skb_queue_head_init(&list);
+ while (ath10k_ce_completed_recv_next_nolock(ce_state, &transfer_context,
+ &nbytes) == 0) {
+ skb = transfer_context;
+ max_nbytes = skb->len + skb_tailroom(skb);
+
+ if (unlikely(max_nbytes < nbytes)) {
+ ath10k_warn(ar, "rxed more than expected (nbytes %d, max %d)",
+ nbytes, max_nbytes);
+ continue;
+ }
+
+ dma_sync_single_for_cpu(ar->dev, ATH10K_SKB_RXCB(skb)->paddr,
+ max_nbytes, DMA_FROM_DEVICE);
+ skb_put(skb, nbytes);
+ __skb_queue_tail(&list, skb);
+ }
+
+ nentries = skb_queue_len(&list);
+ while ((skb = __skb_dequeue(&list))) {
+ ath10k_dbg(ar, ATH10K_DBG_PCI, "pci rx ce pipe %d len %d\n",
+ ce_state->id, skb->len);
+ ath10k_dbg_dump(ar, ATH10K_DBG_PCI_DUMP, NULL, "pci rx: ",
+ skb->data, skb->len);
+
+ orig_len = skb->len;
+ callback(ar, skb);
+ skb_push(skb, orig_len - skb->len);
+ skb_reset_tail_pointer(skb);
+ skb_trim(skb, 0);
+
+ /* let the device own the buffer again */
+ dma_sync_single_for_device(ar->dev, ATH10K_SKB_RXCB(skb)->paddr,
+ skb->len + skb_tailroom(skb),
+ DMA_FROM_DEVICE);
+ }
+ ath10k_ce_rx_update_write_idx(ce_pipe, nentries);
+}
+
+/* Called by lower (CE) layer when data is received from the Target. */
+static void ath10k_pci_htc_rx_cb(struct ath10k_ce_pipe *ce_state)
+{
+ ath10k_pci_process_rx_cb(ce_state, ath10k_htc_rx_completion_handler);
+}
+
+static void ath10k_pci_htt_htc_rx_cb(struct ath10k_ce_pipe *ce_state)
+{
+ /* CE4 polling needs to be done whenever the CE pipe which transports
+ * HTT Rx (target->host) is processed.
+ */
+ ath10k_ce_per_engine_service(ce_state->ar, 4);
+
+ ath10k_pci_process_rx_cb(ce_state, ath10k_htc_rx_completion_handler);
+}
+
+/* Called by lower (CE) layer when data is received from the Target.
+ * Only 10.4 firmware uses separate CE to transfer pktlog data.
+ */
+static void ath10k_pci_pktlog_rx_cb(struct ath10k_ce_pipe *ce_state)
+{
+ ath10k_pci_process_rx_cb(ce_state,
+ ath10k_htt_rx_pktlog_completion_handler);
+}
+
+/* Called by lower (CE) layer when a send to HTT Target completes. */
+static void ath10k_pci_htt_tx_cb(struct ath10k_ce_pipe *ce_state)
+{
+ struct ath10k *ar = ce_state->ar;
+ struct sk_buff *skb;
+
+ while (ath10k_ce_completed_send_next(ce_state, (void **)&skb) == 0) {
+ /* no need to call tx completion for NULL pointers */
+ if (!skb)
+ continue;
+
+ dma_unmap_single(ar->dev, ATH10K_SKB_CB(skb)->paddr,
+ skb->len, DMA_TO_DEVICE);
+ ath10k_htt_hif_tx_complete(ar, skb);
+ }
+}
+
+static void ath10k_pci_htt_rx_deliver(struct ath10k *ar, struct sk_buff *skb)
+{
+ skb_pull(skb, sizeof(struct ath10k_htc_hdr));
+ ath10k_htt_t2h_msg_handler(ar, skb);
+}
+
+/* Called by lower (CE) layer when HTT data is received from the Target. */
+static void ath10k_pci_htt_rx_cb(struct ath10k_ce_pipe *ce_state)
+{
+ /* CE4 polling needs to be done whenever the CE pipe which transports
+ * HTT Rx (target->host) is processed.
+ */
+ ath10k_ce_per_engine_service(ce_state->ar, 4);
+
+ ath10k_pci_process_htt_rx_cb(ce_state, ath10k_pci_htt_rx_deliver);
+}
+
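+/* Scatter-gather send: all items except the last are enqueued with
+ * CE_SEND_FLAG_GATHER set; the final item is sent without the flag,
+ * terminating the gather sequence. On any failure the already-enqueued
+ * entries are reverted before the error is returned.
+ */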
+int ath10k_pci_hif_tx_sg(struct ath10k *ar, u8 pipe_id,
+ struct ath10k_hif_sg_item *items, int n_items)
+{
+ struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
+ struct ath10k_ce *ce = ath10k_ce_priv(ar);
+ struct ath10k_pci_pipe *pci_pipe = &ar_pci->pipe_info[pipe_id];
+ struct ath10k_ce_pipe *ce_pipe = pci_pipe->ce_hdl;
+ struct ath10k_ce_ring *src_ring = ce_pipe->src_ring;
+ unsigned int nentries_mask;
+ unsigned int sw_index;
+ unsigned int write_index;
+ int err, i = 0;
+
+ spin_lock_bh(&ce->ce_lock);
+
+ nentries_mask = src_ring->nentries_mask;
+ sw_index = src_ring->sw_index;
+ write_index = src_ring->write_index;
+
+ if (unlikely(CE_RING_DELTA(nentries_mask,
+ write_index, sw_index - 1) < n_items)) {
+ err = -ENOBUFS;
+ goto err;
+ }
+
+ for (i = 0; i < n_items - 1; i++) {
+ ath10k_dbg(ar, ATH10K_DBG_PCI,
+ "pci tx item %d paddr %pad len %d n_items %d\n",
+ i, &items[i].paddr, items[i].len, n_items);
+ ath10k_dbg_dump(ar, ATH10K_DBG_PCI_DUMP, NULL, "pci tx data: ",
+ items[i].vaddr, items[i].len);
+
+ err = ath10k_ce_send_nolock(ce_pipe,
+ items[i].transfer_context,
+ items[i].paddr,
+ items[i].len,
+ items[i].transfer_id,
+ CE_SEND_FLAG_GATHER);
+ if (err)
+ goto err;
+ }
+
+ /* `i` is equal to `n_items - 1` after the for loop */
+
+ ath10k_dbg(ar, ATH10K_DBG_PCI,
+ "pci tx item %d paddr %pad len %d n_items %d\n",
+ i, &items[i].paddr, items[i].len, n_items);
+ ath10k_dbg_dump(ar, ATH10K_DBG_PCI_DUMP, NULL, "pci tx data: ",
+ items[i].vaddr, items[i].len);
+
+ err = ath10k_ce_send_nolock(ce_pipe,
+ items[i].transfer_context,
+ items[i].paddr,
+ items[i].len,
+ items[i].transfer_id,
+ 0);
+ if (err)
+ goto err;
+
+ spin_unlock_bh(&ce->ce_lock);
+ return 0;
+
+err:
+ for (; i > 0; i--)
+ __ath10k_ce_send_revert(ce_pipe);
+
+ spin_unlock_bh(&ce->ce_lock);
+ return err;
+}
+
+int ath10k_pci_hif_diag_read(struct ath10k *ar, u32 address, void *buf,
+ size_t buf_len)
+{
+ return ath10k_pci_diag_read_mem(ar, address, buf, buf_len);
+}
+
+u16 ath10k_pci_hif_get_free_queue_number(struct ath10k *ar, u8 pipe)
+{
+ struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
+
+ ath10k_dbg(ar, ATH10K_DBG_PCI, "pci hif get free queue number\n");
+
+ return ath10k_ce_num_free_src_entries(ar_pci->pipe_info[pipe].ce_hdl);
+}
+
+static void ath10k_pci_dump_registers(struct ath10k *ar,
+ struct ath10k_fw_crash_data *crash_data)
+{
+ __le32 reg_dump_values[REG_DUMP_COUNT_QCA988X] = {};
+ int i, ret;
+
+ lockdep_assert_held(&ar->data_lock);
+
+ ret = ath10k_pci_diag_read_hi(ar, &reg_dump_values[0],
+ hi_failure_state,
+ REG_DUMP_COUNT_QCA988X * sizeof(__le32));
+ if (ret) {
+ ath10k_err(ar, "failed to read firmware dump area: %d\n", ret);
+ return;
+ }
+
+ BUILD_BUG_ON(REG_DUMP_COUNT_QCA988X % 4);
+
+ ath10k_err(ar, "firmware register dump:\n");
+ for (i = 0; i < REG_DUMP_COUNT_QCA988X; i += 4)
+ ath10k_err(ar, "[%02d]: 0x%08X 0x%08X 0x%08X 0x%08X\n",
+ i,
+ __le32_to_cpu(reg_dump_values[i]),
+ __le32_to_cpu(reg_dump_values[i + 1]),
+ __le32_to_cpu(reg_dump_values[i + 2]),
+ __le32_to_cpu(reg_dump_values[i + 3]));
+
+ if (!crash_data)
+ return;
+
+ for (i = 0; i < REG_DUMP_COUNT_QCA988X; i++)
+ crash_data->registers[i] = reg_dump_values[i];
+}
+
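+/* Walks a region's section table, copying each [start, end) section via
+ * the diagnostic interface and filling the gaps before and between
+ * sections with ATH10K_MAGIC_NOT_COPIED bytes so that offsets within the
+ * dump match the region layout. Returns the number of bytes written to
+ * buf.
+ */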
+static int ath10k_pci_dump_memory_section(struct ath10k *ar,
+ const struct ath10k_mem_region *mem_region,
+ u8 *buf, size_t buf_len)
+{
+ const struct ath10k_mem_section *cur_section, *next_section;
+ unsigned int count, section_size, skip_size;
+ int ret, i, j;
+
+ if (!mem_region || !buf)
+ return 0;
+
+ cur_section = &mem_region->section_table.sections[0];
+
+ if (mem_region->start > cur_section->start) {
+ ath10k_warn(ar, "incorrect memdump region 0x%x with section start address 0x%x.\n",
+ mem_region->start, cur_section->start);
+ return 0;
+ }
+
+ skip_size = cur_section->start - mem_region->start;
+
+ /* fill the gap between the first register section and register
+ * start address
+ */
+ for (i = 0; i < skip_size; i++) {
+ *buf = ATH10K_MAGIC_NOT_COPIED;
+ buf++;
+ }
+
+ count = 0;
+
+ for (i = 0; cur_section != NULL; i++) {
+ section_size = cur_section->end - cur_section->start;
+
+ if (section_size <= 0) {
+ ath10k_warn(ar, "incorrect ramdump format with start address 0x%x and stop address 0x%x\n",
+ cur_section->start,
+ cur_section->end);
+ break;
+ }
+
+ if ((i + 1) == mem_region->section_table.size) {
+ /* last section */
+ next_section = NULL;
+ skip_size = 0;
+ } else {
+ next_section = cur_section + 1;
+
+ if (cur_section->end > next_section->start) {
+ ath10k_warn(ar, "next ramdump section 0x%x is smaller than current end address 0x%x\n",
+ next_section->start,
+ cur_section->end);
+ break;
+ }
+
+ skip_size = next_section->start - cur_section->end;
+ }
+
+ if (buf_len < (skip_size + section_size)) {
+ ath10k_warn(ar, "ramdump buffer is too small: %zu\n", buf_len);
+ break;
+ }
+
+ buf_len -= skip_size + section_size;
+
+ /* read section to dest memory */
+ ret = ath10k_pci_diag_read_mem(ar, cur_section->start,
+ buf, section_size);
+ if (ret) {
+ ath10k_warn(ar, "failed to read ramdump from section 0x%x: %d\n",
+ cur_section->start, ret);
+ break;
+ }
+
+ buf += section_size;
+ count += section_size;
+
+ /* fill in the gap between this section and the next */
+ for (j = 0; j < skip_size; j++) {
+ *buf = ATH10K_MAGIC_NOT_COPIED;
+ buf++;
+ }
+
+ count += skip_size;
+
+ if (!next_section)
+ /* this was the last section */
+ break;
+
+ cur_section = next_section;
+ }
+
+ return count;
+}
+
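+/* Writes the firmware RAM config register and reads it back to verify
+ * that the new value took effect.
+ */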
+static int ath10k_pci_set_ram_config(struct ath10k *ar, u32 config)
+{
+ u32 val;
+
+ ath10k_pci_write32(ar, SOC_CORE_BASE_ADDRESS +
+ FW_RAM_CONFIG_ADDRESS, config);
+
+ val = ath10k_pci_read32(ar, SOC_CORE_BASE_ADDRESS +
+ FW_RAM_CONFIG_ADDRESS);
+ if (val != config) {
+ ath10k_warn(ar, "failed to set RAM config from 0x%x to 0x%x\n",
+ val, config);
+ return -EIO;
+ }
+
+ return 0;
+}
+
+/* Returns < 0 if an error happened, otherwise the length. */
+static int ath10k_pci_dump_memory_sram(struct ath10k *ar,
+ const struct ath10k_mem_region *region,
+ u8 *buf)
+{
+ struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
+ u32 base_addr, i;
+
+ base_addr = ioread32(ar_pci->mem + QCA99X0_PCIE_BAR0_START_REG);
+ base_addr += region->start;
+
+ for (i = 0; i < region->len; i += 4) {
+ iowrite32(base_addr + i, ar_pci->mem + QCA99X0_CPU_MEM_ADDR_REG);
+ *(u32 *)(buf + i) = ioread32(ar_pci->mem + QCA99X0_CPU_MEM_DATA_REG);
+ }
+
+ return region->len;
+}
+
+/* Returns < 0 if an error happened, otherwise the length. */
+static int ath10k_pci_dump_memory_reg(struct ath10k *ar,
+ const struct ath10k_mem_region *region,
+ u8 *buf)
+{
+ struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
+ u32 i;
+ int ret;
+
+ mutex_lock(&ar->conf_mutex);
+ if (ar->state != ATH10K_STATE_ON) {
+ ath10k_warn(ar, "Skipping pci_dump_memory_reg invalid state\n");
+ ret = -EIO;
+ goto done;
+ }
+
+ for (i = 0; i < region->len; i += 4)
+ *(u32 *)(buf + i) = ioread32(ar_pci->mem + region->start + i);
+
+ ret = region->len;
+done:
+ mutex_unlock(&ar->conf_mutex);
+ return ret;
+}
+
+/* Returns < 0 if an error happened, otherwise the length. */
+static int ath10k_pci_dump_memory_generic(struct ath10k *ar,
+ const struct ath10k_mem_region *current_region,
+ u8 *buf)
+{
+ int ret;
+
+ if (current_region->section_table.size > 0)
+ /* Copy each section individually. */
+ return ath10k_pci_dump_memory_section(ar,
+ current_region,
+ buf,
+ current_region->len);
+
+ /* No individual memory sections defined, so we can
+ * copy the entire memory region.
+ */
+ ret = ath10k_pci_diag_read_mem(ar,
+ current_region->start,
+ buf,
+ current_region->len);
+ if (ret) {
+ ath10k_warn(ar, "failed to copy ramdump region %s: %d\n",
+ current_region->name, ret);
+ return ret;
+ }
+
+ return current_region->len;
+}
+
+static void ath10k_pci_dump_memory(struct ath10k *ar,
+ struct ath10k_fw_crash_data *crash_data)
+{
+ const struct ath10k_hw_mem_layout *mem_layout;
+ const struct ath10k_mem_region *current_region;
+ struct ath10k_dump_ram_data_hdr *hdr;
+ u32 count, shift;
+ size_t buf_len;
+ int ret, i;
+ u8 *buf;
+
+ lockdep_assert_held(&ar->data_lock);
+
+ if (!crash_data)
+ return;
+
+ mem_layout = ath10k_coredump_get_mem_layout(ar);
+ if (!mem_layout)
+ return;
+
+ current_region = &mem_layout->region_table.regions[0];
+
+ buf = crash_data->ramdump_buf;
+ buf_len = crash_data->ramdump_buf_len;
+
+ memset(buf, 0, buf_len);
+
+ for (i = 0; i < mem_layout->region_table.size; i++) {
+ count = 0;
+
+ if (current_region->len > buf_len) {
+ ath10k_warn(ar, "memory region %s size %d is larger that remaining ramdump buffer size %zu\n",
+ current_region->name,
+ current_region->len,
+ buf_len);
+ break;
+ }
+
+ /* To get an IRAM dump, the host driver needs to switch the target
+ * RAM config from DRAM to IRAM.
+ */
+ if (current_region->type == ATH10K_MEM_REGION_TYPE_IRAM1 ||
+ current_region->type == ATH10K_MEM_REGION_TYPE_IRAM2) {
+ shift = current_region->start >> 20;
+
+ ret = ath10k_pci_set_ram_config(ar, shift);
+ if (ret) {
+ ath10k_warn(ar, "failed to switch ram config to IRAM for section %s: %d\n",
+ current_region->name, ret);
+ break;
+ }
+ }
+
+ /* Reserve space for the header. */
+ hdr = (void *)buf;
+ buf += sizeof(*hdr);
+ buf_len -= sizeof(*hdr);
+
+ switch (current_region->type) {
+ case ATH10K_MEM_REGION_TYPE_IOSRAM:
+ count = ath10k_pci_dump_memory_sram(ar, current_region, buf);
+ break;
+ case ATH10K_MEM_REGION_TYPE_IOREG:
+ ret = ath10k_pci_dump_memory_reg(ar, current_region, buf);
+ if (ret < 0)
+ break;
+
+ count = ret;
+ break;
+ default:
+ ret = ath10k_pci_dump_memory_generic(ar, current_region, buf);
+ if (ret < 0)
+ break;
+
+ count = ret;
+ break;
+ }
+
+ hdr->region_type = cpu_to_le32(current_region->type);
+ hdr->start = cpu_to_le32(current_region->start);
+ hdr->length = cpu_to_le32(count);
+
+ if (count == 0)
+ /* Note: the header remains, just with zero length. */
+ break;
+
+ buf += count;
+ buf_len -= count;
+
+ current_region++;
+ }
+}
+
+static void ath10k_pci_fw_crashed_dump(struct ath10k *ar)
+{
+ struct ath10k_fw_crash_data *crash_data;
+ char guid[UUID_STRING_LEN + 1];
+
+ spin_lock_bh(&ar->data_lock);
+
+ ar->stats.fw_crash_counter++;
+
+ crash_data = ath10k_coredump_new(ar);
+
+ if (crash_data)
+ scnprintf(guid, sizeof(guid), "%pUl", &crash_data->guid);
+ else
+ scnprintf(guid, sizeof(guid), "n/a");
+
+ ath10k_err(ar, "firmware crashed! (guid %s)\n", guid);
+ ath10k_print_driver_info(ar);
+ ath10k_pci_dump_registers(ar, crash_data);
+ ath10k_ce_dump_registers(ar, crash_data);
+ ath10k_pci_dump_memory(ar, crash_data);
+
+ spin_unlock_bh(&ar->data_lock);
+
+ queue_work(ar->workqueue, &ar->restart_work);
+}
+
+void ath10k_pci_hif_send_complete_check(struct ath10k *ar, u8 pipe,
+ int force)
+{
+ ath10k_dbg(ar, ATH10K_DBG_PCI, "pci hif send complete check\n");
+
+ if (!force) {
+ int resources;
+ /*
+ * Decide whether to actually poll for completions, or just
+ * wait for a later chance.
+ * If there seem to be plenty of resources left, then just wait
+ * since checking involves reading a CE register, which is a
+ * relatively expensive operation.
+ */
+ resources = ath10k_pci_hif_get_free_queue_number(ar, pipe);
+
+ /*
+ * If at least 50% of the total resources are still available,
+ * don't bother checking again yet.
+ */
+ if (resources > (host_ce_config_wlan[pipe].src_nentries >> 1))
+ return;
+ }
+ ath10k_ce_per_engine_service(ar, pipe);
+}
+
+static void ath10k_pci_rx_retry_sync(struct ath10k *ar)
+{
+ struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
+
+ del_timer_sync(&ar_pci->rx_post_retry);
+}
+
+int ath10k_pci_hif_map_service_to_pipe(struct ath10k *ar, u16 service_id,
+ u8 *ul_pipe, u8 *dl_pipe)
+{
+ const struct service_to_pipe *entry;
+ bool ul_set = false, dl_set = false;
+ int i;
+
+ ath10k_dbg(ar, ATH10K_DBG_PCI, "pci hif map service\n");
+
+ for (i = 0; i < ARRAY_SIZE(target_service_to_ce_map_wlan); i++) {
+ entry = &target_service_to_ce_map_wlan[i];
+
+ if (__le32_to_cpu(entry->service_id) != service_id)
+ continue;
+
+ switch (__le32_to_cpu(entry->pipedir)) {
+ case PIPEDIR_NONE:
+ break;
+ case PIPEDIR_IN:
+ WARN_ON(dl_set);
+ *dl_pipe = __le32_to_cpu(entry->pipenum);
+ dl_set = true;
+ break;
+ case PIPEDIR_OUT:
+ WARN_ON(ul_set);
+ *ul_pipe = __le32_to_cpu(entry->pipenum);
+ ul_set = true;
+ break;
+ case PIPEDIR_INOUT:
+ WARN_ON(dl_set);
+ WARN_ON(ul_set);
+ *dl_pipe = __le32_to_cpu(entry->pipenum);
+ *ul_pipe = __le32_to_cpu(entry->pipenum);
+ dl_set = true;
+ ul_set = true;
+ break;
+ }
+ }
+
+ if (WARN_ON(!ul_set || !dl_set))
+ return -ENOENT;
+
+ return 0;
+}
+
+void ath10k_pci_hif_get_default_pipe(struct ath10k *ar,
+ u8 *ul_pipe, u8 *dl_pipe)
+{
+ ath10k_dbg(ar, ATH10K_DBG_PCI, "pci hif get default pipe\n");
+
+ (void)ath10k_pci_hif_map_service_to_pipe(ar,
+ ATH10K_HTC_SVC_ID_RSVD_CTRL,
+ ul_pipe, dl_pipe);
+}
+
+void ath10k_pci_irq_msi_fw_mask(struct ath10k *ar)
+{
+ u32 val;
+
+ switch (ar->hw_rev) {
+ case ATH10K_HW_QCA988X:
+ case ATH10K_HW_QCA9887:
+ case ATH10K_HW_QCA6174:
+ case ATH10K_HW_QCA9377:
+ val = ath10k_pci_read32(ar, SOC_CORE_BASE_ADDRESS +
+ CORE_CTRL_ADDRESS);
+ val &= ~CORE_CTRL_PCIE_REG_31_MASK;
+ ath10k_pci_write32(ar, SOC_CORE_BASE_ADDRESS +
+ CORE_CTRL_ADDRESS, val);
+ break;
+ case ATH10K_HW_QCA99X0:
+ case ATH10K_HW_QCA9984:
+ case ATH10K_HW_QCA9888:
+ case ATH10K_HW_QCA4019:
+ /* TODO: Find appropriate register configuration for QCA99X0
+ * to mask irq/MSI.
+ */
+ break;
+ case ATH10K_HW_WCN3990:
+ break;
+ }
+}
+
+static void ath10k_pci_irq_msi_fw_unmask(struct ath10k *ar)
+{
+ u32 val;
+
+ switch (ar->hw_rev) {
+ case ATH10K_HW_QCA988X:
+ case ATH10K_HW_QCA9887:
+ case ATH10K_HW_QCA6174:
+ case ATH10K_HW_QCA9377:
+ val = ath10k_pci_read32(ar, SOC_CORE_BASE_ADDRESS +
+ CORE_CTRL_ADDRESS);
+ val |= CORE_CTRL_PCIE_REG_31_MASK;
+ ath10k_pci_write32(ar, SOC_CORE_BASE_ADDRESS +
+ CORE_CTRL_ADDRESS, val);
+ break;
+ case ATH10K_HW_QCA99X0:
+ case ATH10K_HW_QCA9984:
+ case ATH10K_HW_QCA9888:
+ case ATH10K_HW_QCA4019:
+ /* TODO: Find appropriate register configuration for QCA99X0
+ * to unmask irq/MSI.
+ */
+ break;
+ case ATH10K_HW_WCN3990:
+ break;
+ }
+}
+
+static void ath10k_pci_irq_disable(struct ath10k *ar)
+{
+ ath10k_ce_disable_interrupts(ar);
+ ath10k_pci_disable_and_clear_legacy_irq(ar);
+ ath10k_pci_irq_msi_fw_mask(ar);
+}
+
+static void ath10k_pci_irq_sync(struct ath10k *ar)
+{
+ struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
+
+ synchronize_irq(ar_pci->pdev->irq);
+}
+
+static void ath10k_pci_irq_enable(struct ath10k *ar)
+{
+ ath10k_ce_enable_interrupts(ar);
+ ath10k_pci_enable_legacy_irq(ar);
+ ath10k_pci_irq_msi_fw_unmask(ar);
+}
+
+static int ath10k_pci_hif_start(struct ath10k *ar)
+{
+ struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
+
+ ath10k_dbg(ar, ATH10K_DBG_BOOT, "boot hif start\n");
+
+ napi_enable(&ar->napi);
+
+ ath10k_pci_irq_enable(ar);
+ ath10k_pci_rx_post(ar);
+
+ pcie_capability_write_word(ar_pci->pdev, PCI_EXP_LNKCTL,
+ ar_pci->link_ctl);
+
+ return 0;
+}
+
+static void ath10k_pci_rx_pipe_cleanup(struct ath10k_pci_pipe *pci_pipe)
+{
+ struct ath10k *ar;
+ struct ath10k_ce_pipe *ce_pipe;
+ struct ath10k_ce_ring *ce_ring;
+ struct sk_buff *skb;
+ int i;
+
+ ar = pci_pipe->hif_ce_state;
+ ce_pipe = pci_pipe->ce_hdl;
+ ce_ring = ce_pipe->dest_ring;
+
+ if (!ce_ring)
+ return;
+
+ if (!pci_pipe->buf_sz)
+ return;
+
+ for (i = 0; i < ce_ring->nentries; i++) {
+ skb = ce_ring->per_transfer_context[i];
+ if (!skb)
+ continue;
+
+ ce_ring->per_transfer_context[i] = NULL;
+
+ dma_unmap_single(ar->dev, ATH10K_SKB_RXCB(skb)->paddr,
+ skb->len + skb_tailroom(skb),
+ DMA_FROM_DEVICE);
+ dev_kfree_skb_any(skb);
+ }
+}
+
+static void ath10k_pci_tx_pipe_cleanup(struct ath10k_pci_pipe *pci_pipe)
+{
+ struct ath10k *ar;
+ struct ath10k_ce_pipe *ce_pipe;
+ struct ath10k_ce_ring *ce_ring;
+ struct sk_buff *skb;
+ int i;
+
+ ar = pci_pipe->hif_ce_state;
+ ce_pipe = pci_pipe->ce_hdl;
+ ce_ring = ce_pipe->src_ring;
+
+ if (!ce_ring)
+ return;
+
+ if (!pci_pipe->buf_sz)
+ return;
+
+ for (i = 0; i < ce_ring->nentries; i++) {
+ skb = ce_ring->per_transfer_context[i];
+ if (!skb)
+ continue;
+
+ ce_ring->per_transfer_context[i] = NULL;
+
+ ath10k_htc_tx_completion_handler(ar, skb);
+ }
+}
+
+/*
+ * Clean up residual buffers for device shutdown:
+ *  - buffers that were enqueued for receive
+ *  - buffers that were to be sent
+ *
+ * Note: Buffers that had completed but which were not yet processed are
+ * on a completion queue. They are handled when the completion thread
+ * shuts down.
+ */
+static void ath10k_pci_buffer_cleanup(struct ath10k *ar)
+{
+ struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
+ int pipe_num;
+
+ for (pipe_num = 0; pipe_num < CE_COUNT; pipe_num++) {
+ struct ath10k_pci_pipe *pipe_info;
+
+ pipe_info = &ar_pci->pipe_info[pipe_num];
+ ath10k_pci_rx_pipe_cleanup(pipe_info);
+ ath10k_pci_tx_pipe_cleanup(pipe_info);
+ }
+}
+
+void ath10k_pci_ce_deinit(struct ath10k *ar)
+{
+ int i;
+
+ for (i = 0; i < CE_COUNT; i++)
+ ath10k_ce_deinit_pipe(ar, i);
+}
+
+void ath10k_pci_flush(struct ath10k *ar)
+{
+ ath10k_pci_rx_retry_sync(ar);
+ ath10k_pci_buffer_cleanup(ar);
+}
+
+static void ath10k_pci_hif_stop(struct ath10k *ar)
+{
+ struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
+ unsigned long flags;
+
+ ath10k_dbg(ar, ATH10K_DBG_BOOT, "boot hif stop\n");
+
+ ath10k_pci_irq_disable(ar);
+ ath10k_pci_irq_sync(ar);
+ napi_synchronize(&ar->napi);
+ napi_disable(&ar->napi);
+
+ /* Most likely the device has an HTT Rx ring configured. The only way to
+ * prevent the device from accessing (and possibly corrupting) host
+ * memory is to reset the chip now.
+ *
+ * There's also no known way of masking MSI interrupts on the device.
+ * For ranged MSI the CE-related interrupts can be masked. However,
+ * regardless of how many MSI interrupts are assigned, the first one
+ * is always used for firmware indications (crashes) and cannot be
+ * masked. To prevent the device from asserting the interrupt, reset it
+ * before proceeding with cleanup.
+ */
+ ath10k_pci_safe_chip_reset(ar);
+
+ ath10k_pci_flush(ar);
+
+ spin_lock_irqsave(&ar_pci->ps_lock, flags);
+ WARN_ON(ar_pci->ps_wake_refcount > 0);
+ spin_unlock_irqrestore(&ar_pci->ps_lock, flags);
+}
+
+int ath10k_pci_hif_exchange_bmi_msg(struct ath10k *ar,
+ void *req, u32 req_len,
+ void *resp, u32 *resp_len)
+{
+ struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
+ struct ath10k_pci_pipe *pci_tx = &ar_pci->pipe_info[BMI_CE_NUM_TO_TARG];
+ struct ath10k_pci_pipe *pci_rx = &ar_pci->pipe_info[BMI_CE_NUM_TO_HOST];
+ struct ath10k_ce_pipe *ce_tx = pci_tx->ce_hdl;
+ struct ath10k_ce_pipe *ce_rx = pci_rx->ce_hdl;
+ dma_addr_t req_paddr = 0;
+ dma_addr_t resp_paddr = 0;
+ struct bmi_xfer xfer = {};
+ void *treq, *tresp = NULL;
+ int ret = 0;
+
+ might_sleep();
+
+ if (resp && !resp_len)
+ return -EINVAL;
+
+ if (resp && resp_len && *resp_len == 0)
+ return -EINVAL;
+
+ treq = kmemdup(req, req_len, GFP_KERNEL);
+ if (!treq)
+ return -ENOMEM;
+
+ req_paddr = dma_map_single(ar->dev, treq, req_len, DMA_TO_DEVICE);
+ ret = dma_mapping_error(ar->dev, req_paddr);
+ if (ret) {
+ ret = -EIO;
+ goto err_dma;
+ }
+
+ if (resp && resp_len) {
+ tresp = kzalloc(*resp_len, GFP_KERNEL);
+ if (!tresp) {
+ ret = -ENOMEM;
+ goto err_req;
+ }
+
+ resp_paddr = dma_map_single(ar->dev, tresp, *resp_len,
+ DMA_FROM_DEVICE);
+ ret = dma_mapping_error(ar->dev, resp_paddr);
+ if (ret) {
+ ret = -EIO;
+ goto err_req;
+ }
+
+ xfer.wait_for_resp = true;
+ xfer.resp_len = 0;
+
+ ath10k_ce_rx_post_buf(ce_rx, &xfer, resp_paddr);
+ }
+
+ ret = ath10k_ce_send(ce_tx, &xfer, req_paddr, req_len, -1, 0);
+ if (ret)
+ goto err_resp;
+
+ ret = ath10k_pci_bmi_wait(ar, ce_tx, ce_rx, &xfer);
+ if (ret) {
+ dma_addr_t unused_buffer;
+ unsigned int unused_nbytes;
+ unsigned int unused_id;
+
+ ath10k_ce_cancel_send_next(ce_tx, NULL, &unused_buffer,
+ &unused_nbytes, &unused_id);
+ } else {
+ /* a zero return from the wait means we did not time out */
+ ret = 0;
+ }
+
+err_resp:
+ if (resp) {
+ dma_addr_t unused_buffer;
+
+ ath10k_ce_revoke_recv_next(ce_rx, NULL, &unused_buffer);
+ dma_unmap_single(ar->dev, resp_paddr,
+ *resp_len, DMA_FROM_DEVICE);
+ }
+err_req:
+ dma_unmap_single(ar->dev, req_paddr, req_len, DMA_TO_DEVICE);
+
+ if (ret == 0 && resp_len) {
+ *resp_len = min(*resp_len, xfer.resp_len);
+ memcpy(resp, tresp, xfer.resp_len);
+ }
+err_dma:
+ kfree(treq);
+ kfree(tresp);
+
+ return ret;
+}
+
+static void ath10k_pci_bmi_send_done(struct ath10k_ce_pipe *ce_state)
+{
+ struct bmi_xfer *xfer;
+
+ if (ath10k_ce_completed_send_next(ce_state, (void **)&xfer))
+ return;
+
+ xfer->tx_done = true;
+}
+
+static void ath10k_pci_bmi_recv_data(struct ath10k_ce_pipe *ce_state)
+{
+ struct ath10k *ar = ce_state->ar;
+ struct bmi_xfer *xfer;
+ unsigned int nbytes;
+
+ if (ath10k_ce_completed_recv_next(ce_state, (void **)&xfer,
+ &nbytes))
+ return;
+
+ if (WARN_ON_ONCE(!xfer))
+ return;
+
+ if (!xfer->wait_for_resp) {
+ ath10k_warn(ar, "unexpected: BMI data received; ignoring\n");
+ return;
+ }
+
+ xfer->resp_len = nbytes;
+ xfer->rx_done = true;
+}
+
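+/* Polls the BMI TX and RX pipes until the request send has completed and,
+ * when a response is expected, the response has arrived; gives up with
+ * -ETIMEDOUT after BMI_COMMUNICATION_TIMEOUT_HZ jiffies.
+ */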
+static int ath10k_pci_bmi_wait(struct ath10k *ar,
+ struct ath10k_ce_pipe *tx_pipe,
+ struct ath10k_ce_pipe *rx_pipe,
+ struct bmi_xfer *xfer)
+{
+ unsigned long timeout = jiffies + BMI_COMMUNICATION_TIMEOUT_HZ;
+ unsigned long started = jiffies;
+ unsigned long dur;
+ int ret;
+
+ while (time_before_eq(jiffies, timeout)) {
+ ath10k_pci_bmi_send_done(tx_pipe);
+ ath10k_pci_bmi_recv_data(rx_pipe);
+
+ if (xfer->tx_done && (xfer->rx_done == xfer->wait_for_resp)) {
+ ret = 0;
+ goto out;
+ }
+
+ schedule();
+ }
+
+ ret = -ETIMEDOUT;
+
+out:
+ dur = jiffies - started;
+ if (dur > HZ)
+ ath10k_dbg(ar, ATH10K_DBG_BMI,
+ "bmi cmd took %lu jiffies hz %d ret %d\n",
+ dur, HZ, ret);
+ return ret;
+}
+
+/*
+ * Send an interrupt to the device to wake up the Target CPU
+ * so it has an opportunity to notice any changed state.
+ */
+static int ath10k_pci_wake_target_cpu(struct ath10k *ar)
+{
+ u32 addr, val;
+
+ addr = SOC_CORE_BASE_ADDRESS + CORE_CTRL_ADDRESS;
+ val = ath10k_pci_read32(ar, addr);
+ val |= CORE_CTRL_CPU_INTR_MASK;
+ ath10k_pci_write32(ar, addr, val);
+
+ return 0;
+}
+
+static int ath10k_pci_get_num_banks(struct ath10k *ar)
+{
+ struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
+
+ switch (ar_pci->pdev->device) {
+ case QCA988X_2_0_DEVICE_ID_UBNT:
+ case QCA988X_2_0_DEVICE_ID:
+ case QCA99X0_2_0_DEVICE_ID:
+ case QCA9888_2_0_DEVICE_ID:
+ case QCA9984_1_0_DEVICE_ID:
+ case QCA9887_1_0_DEVICE_ID:
+ return 1;
+ case QCA6164_2_1_DEVICE_ID:
+ case QCA6174_2_1_DEVICE_ID:
+ switch (MS(ar->chip_id, SOC_CHIP_ID_REV)) {
+ case QCA6174_HW_1_0_CHIP_ID_REV:
+ case QCA6174_HW_1_1_CHIP_ID_REV:
+ case QCA6174_HW_2_1_CHIP_ID_REV:
+ case QCA6174_HW_2_2_CHIP_ID_REV:
+ return 3;
+ case QCA6174_HW_1_3_CHIP_ID_REV:
+ return 2;
+ case QCA6174_HW_3_0_CHIP_ID_REV:
+ case QCA6174_HW_3_1_CHIP_ID_REV:
+ case QCA6174_HW_3_2_CHIP_ID_REV:
+ return 9;
+ }
+ break;
+ case QCA9377_1_0_DEVICE_ID:
+ return 9;
+ }
+
+ ath10k_warn(ar, "unknown number of banks, assuming 1\n");
+ return 1;
+}
+
+static int ath10k_bus_get_num_banks(struct ath10k *ar)
+{
+ struct ath10k_ce *ce = ath10k_ce_priv(ar);
+
+ return ce->bus_ops->get_num_banks(ar);
+}
+
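+/* Downloads the host's view of the CE setup into target memory via the
+ * diagnostic window: the target-side CE pipe configuration and the
+ * service-to-pipe map are written, PCIe L1 is disabled in the target's
+ * config flags, the early-allocation IRAM banks are programmed, and
+ * finally HI_OPTION_EARLY_CFG_DONE tells the target to proceed with
+ * initialization.
+ */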
+int ath10k_pci_init_config(struct ath10k *ar)
+{
+ u32 interconnect_targ_addr;
+ u32 pcie_state_targ_addr = 0;
+ u32 pipe_cfg_targ_addr = 0;
+ u32 svc_to_pipe_map = 0;
+ u32 pcie_config_flags = 0;
+ u32 ealloc_value;
+ u32 ealloc_targ_addr;
+ u32 flag2_value;
+ u32 flag2_targ_addr;
+ int ret = 0;
+
+ /* Download to Target the CE Config and the service-to-CE map */
+ interconnect_targ_addr =
+ host_interest_item_address(HI_ITEM(hi_interconnect_state));
+
+ /* Supply Target-side CE configuration */
+ ret = ath10k_pci_diag_read32(ar, interconnect_targ_addr,
+ &pcie_state_targ_addr);
+ if (ret != 0) {
+ ath10k_err(ar, "Failed to get pcie state addr: %d\n", ret);
+ return ret;
+ }
+
+ if (pcie_state_targ_addr == 0) {
+ ret = -EIO;
+ ath10k_err(ar, "Invalid pcie state addr\n");
+ return ret;
+ }
+
+ ret = ath10k_pci_diag_read32(ar, (pcie_state_targ_addr +
+ offsetof(struct pcie_state,
+ pipe_cfg_addr)),
+ &pipe_cfg_targ_addr);
+ if (ret != 0) {
+ ath10k_err(ar, "Failed to get pipe cfg addr: %d\n", ret);
+ return ret;
+ }
+
+ if (pipe_cfg_targ_addr == 0) {
+ ret = -EIO;
+ ath10k_err(ar, "Invalid pipe cfg addr\n");
+ return ret;
+ }
+
+ ret = ath10k_pci_diag_write_mem(ar, pipe_cfg_targ_addr,
+ target_ce_config_wlan,
+ sizeof(struct ce_pipe_config) *
+ NUM_TARGET_CE_CONFIG_WLAN);
+
+ if (ret != 0) {
+ ath10k_err(ar, "Failed to write pipe cfg: %d\n", ret);
+ return ret;
+ }
+
+ ret = ath10k_pci_diag_read32(ar, (pcie_state_targ_addr +
+ offsetof(struct pcie_state,
+ svc_to_pipe_map)),
+ &svc_to_pipe_map);
+ if (ret != 0) {
+ ath10k_err(ar, "Failed to get svc/pipe map: %d\n", ret);
+ return ret;
+ }
+
+ if (svc_to_pipe_map == 0) {
+ ret = -EIO;
+ ath10k_err(ar, "Invalid svc_to_pipe map\n");
+ return ret;
+ }
+
+ ret = ath10k_pci_diag_write_mem(ar, svc_to_pipe_map,
+ target_service_to_ce_map_wlan,
+ sizeof(target_service_to_ce_map_wlan));
+ if (ret != 0) {
+ ath10k_err(ar, "Failed to write svc/pipe map: %d\n", ret);
+ return ret;
+ }
+
+ ret = ath10k_pci_diag_read32(ar, (pcie_state_targ_addr +
+ offsetof(struct pcie_state,
+ config_flags)),
+ &pcie_config_flags);
+ if (ret != 0) {
+ ath10k_err(ar, "Failed to get pcie config_flags: %d\n", ret);
+ return ret;
+ }
+
+ pcie_config_flags &= ~PCIE_CONFIG_FLAG_ENABLE_L1;
+
+ ret = ath10k_pci_diag_write32(ar, (pcie_state_targ_addr +
+ offsetof(struct pcie_state,
+ config_flags)),
+ pcie_config_flags);
+ if (ret != 0) {
+ ath10k_err(ar, "Failed to write pcie config_flags: %d\n", ret);
+ return ret;
+ }
+
+ /* configure early allocation */
+ ealloc_targ_addr = host_interest_item_address(HI_ITEM(hi_early_alloc));
+
+ ret = ath10k_pci_diag_read32(ar, ealloc_targ_addr, &ealloc_value);
+ if (ret != 0) {
+ ath10k_err(ar, "Failed to get early alloc val: %d\n", ret);
+ return ret;
+ }
+
+ /* first bank is switched to IRAM */
+ ealloc_value |= ((HI_EARLY_ALLOC_MAGIC << HI_EARLY_ALLOC_MAGIC_SHIFT) &
+ HI_EARLY_ALLOC_MAGIC_MASK);
+ ealloc_value |= ((ath10k_bus_get_num_banks(ar) <<
+ HI_EARLY_ALLOC_IRAM_BANKS_SHIFT) &
+ HI_EARLY_ALLOC_IRAM_BANKS_MASK);
+
+ ret = ath10k_pci_diag_write32(ar, ealloc_targ_addr, ealloc_value);
+ if (ret != 0) {
+ ath10k_err(ar, "Failed to set early alloc val: %d\n", ret);
+ return ret;
+ }
+
+ /* Tell Target to proceed with initialization */
+ flag2_targ_addr = host_interest_item_address(HI_ITEM(hi_option_flag2));
+
+ ret = ath10k_pci_diag_read32(ar, flag2_targ_addr, &flag2_value);
+ if (ret != 0) {
+ ath10k_err(ar, "Failed to get option val: %d\n", ret);
+ return ret;
+ }
+
+ flag2_value |= HI_OPTION_EARLY_CFG_DONE;
+
+ ret = ath10k_pci_diag_write32(ar, flag2_targ_addr, flag2_value);
+ if (ret != 0) {
+ ath10k_err(ar, "Failed to set option val: %d\n", ret);
+ return ret;
+ }
+
+ return 0;
+}
+
+static void ath10k_pci_override_ce_config(struct ath10k *ar)
+{
+ struct ce_attr *attr;
+ struct ce_pipe_config *config;
+
+ /* For QCA6174 we're overriding the Copy Engine 5 configuration,
+ * since it is currently used for another feature.
+ */
+
+ /* Override Host's Copy Engine 5 configuration */
+ attr = &host_ce_config_wlan[5];
+ attr->src_sz_max = 0;
+ attr->dest_nentries = 0;
+
+ /* Override Target firmware's Copy Engine configuration */
+ config = &target_ce_config_wlan[5];
+ config->pipedir = __cpu_to_le32(PIPEDIR_OUT);
+ config->nbytes_max = __cpu_to_le32(2048);
+
+ /* Map from service/endpoint to Copy Engine */
+ target_service_to_ce_map_wlan[15].pipenum = __cpu_to_le32(1);
+}
+
+int ath10k_pci_alloc_pipes(struct ath10k *ar)
+{
+ struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
+ struct ath10k_pci_pipe *pipe;
+ struct ath10k_ce *ce = ath10k_ce_priv(ar);
+ int i, ret;
+
+ for (i = 0; i < CE_COUNT; i++) {
+ pipe = &ar_pci->pipe_info[i];
+ pipe->ce_hdl = &ce->ce_states[i];
+ pipe->pipe_num = i;
+ pipe->hif_ce_state = ar;
+
+ ret = ath10k_ce_alloc_pipe(ar, i, &host_ce_config_wlan[i]);
+ if (ret) {
+ ath10k_err(ar, "failed to allocate copy engine pipe %d: %d\n",
+ i, ret);
+ return ret;
+ }
+
+ /* Last CE is Diagnostic Window */
+ if (i == CE_DIAG_PIPE) {
+ ar_pci->ce_diag = pipe->ce_hdl;
+ continue;
+ }
+
+ pipe->buf_sz = (size_t)(host_ce_config_wlan[i].src_sz_max);
+ }
+
+ return 0;
+}
+
+void ath10k_pci_free_pipes(struct ath10k *ar)
+{
+ int i;
+
+ for (i = 0; i < CE_COUNT; i++)
+ ath10k_ce_free_pipe(ar, i);
+}
+
+int ath10k_pci_init_pipes(struct ath10k *ar)
+{
+ int i, ret;
+
+ for (i = 0; i < CE_COUNT; i++) {
+ ret = ath10k_ce_init_pipe(ar, i, &host_ce_config_wlan[i]);
+ if (ret) {
+ ath10k_err(ar, "failed to initialize copy engine pipe %d: %d\n",
+ i, ret);
+ return ret;
+ }
+ }
+
+ return 0;
+}
+
+static bool ath10k_pci_has_fw_crashed(struct ath10k *ar)
+{
+ return ath10k_pci_read32(ar, FW_INDICATOR_ADDRESS) &
+ FW_IND_EVENT_PENDING;
+}
+
+static void ath10k_pci_fw_crashed_clear(struct ath10k *ar)
+{
+ u32 val;
+
+ val = ath10k_pci_read32(ar, FW_INDICATOR_ADDRESS);
+ val &= ~FW_IND_EVENT_PENDING;
+ ath10k_pci_write32(ar, FW_INDICATOR_ADDRESS, val);
+}
+
+static bool ath10k_pci_has_device_gone(struct ath10k *ar)
+{
+ u32 val;
+
+ val = ath10k_pci_read32(ar, FW_INDICATOR_ADDRESS);
+ return (val == 0xffffffff);
+}
+
+/* this function effectively clears target memory controller assert line */
+static void ath10k_pci_warm_reset_si0(struct ath10k *ar)
+{
+ u32 val;
+
+ val = ath10k_pci_soc_read32(ar, SOC_RESET_CONTROL_ADDRESS);
+ ath10k_pci_soc_write32(ar, SOC_RESET_CONTROL_ADDRESS,
+ val | SOC_RESET_CONTROL_SI0_RST_MASK);
+ val = ath10k_pci_soc_read32(ar, SOC_RESET_CONTROL_ADDRESS);
+
+ msleep(10);
+
+ val = ath10k_pci_soc_read32(ar, SOC_RESET_CONTROL_ADDRESS);
+ ath10k_pci_soc_write32(ar, SOC_RESET_CONTROL_ADDRESS,
+ val & ~SOC_RESET_CONTROL_SI0_RST_MASK);
+ val = ath10k_pci_soc_read32(ar, SOC_RESET_CONTROL_ADDRESS);
+
+ msleep(10);
+}
+
+static void ath10k_pci_warm_reset_cpu(struct ath10k *ar)
+{
+ u32 val;
+
+ ath10k_pci_write32(ar, FW_INDICATOR_ADDRESS, 0);
+
+ val = ath10k_pci_read32(ar, RTC_SOC_BASE_ADDRESS +
+ SOC_RESET_CONTROL_ADDRESS);
+ ath10k_pci_write32(ar, RTC_SOC_BASE_ADDRESS + SOC_RESET_CONTROL_ADDRESS,
+ val | SOC_RESET_CONTROL_CPU_WARM_RST_MASK);
+}
+
+static void ath10k_pci_warm_reset_ce(struct ath10k *ar)
+{
+ u32 val;
+
+ val = ath10k_pci_read32(ar, RTC_SOC_BASE_ADDRESS +
+ SOC_RESET_CONTROL_ADDRESS);
+
+ ath10k_pci_write32(ar, RTC_SOC_BASE_ADDRESS + SOC_RESET_CONTROL_ADDRESS,
+ val | SOC_RESET_CONTROL_CE_RST_MASK);
+ msleep(10);
+ ath10k_pci_write32(ar, RTC_SOC_BASE_ADDRESS + SOC_RESET_CONTROL_ADDRESS,
+ val & ~SOC_RESET_CONTROL_CE_RST_MASK);
+}
+
+static void ath10k_pci_warm_reset_clear_lf(struct ath10k *ar)
+{
+ u32 val;
+
+ val = ath10k_pci_read32(ar, RTC_SOC_BASE_ADDRESS +
+ SOC_LF_TIMER_CONTROL0_ADDRESS);
+ ath10k_pci_write32(ar, RTC_SOC_BASE_ADDRESS +
+ SOC_LF_TIMER_CONTROL0_ADDRESS,
+ val & ~SOC_LF_TIMER_CONTROL0_ENABLE_MASK);
+}
+
+static int ath10k_pci_warm_reset(struct ath10k *ar)
+{
+ int ret;
+
+ ath10k_dbg(ar, ATH10K_DBG_BOOT, "boot warm reset\n");
+
+ spin_lock_bh(&ar->data_lock);
+ ar->stats.fw_warm_reset_counter++;
+ spin_unlock_bh(&ar->data_lock);
+
+ ath10k_pci_irq_disable(ar);
+
+ /* Make sure the target CPU is not doing anything dangerous: e.g. if it
+ * were to access the copy engine while the host performs a copy engine
+ * reset, the device could confuse the PCIe controller to the point of
+ * bringing the host system to a complete stop (i.e. a hang).
+ */
+ ath10k_pci_warm_reset_si0(ar);
+ ath10k_pci_warm_reset_cpu(ar);
+ ath10k_pci_init_pipes(ar);
+ ath10k_pci_wait_for_target_init(ar);
+
+ ath10k_pci_warm_reset_clear_lf(ar);
+ ath10k_pci_warm_reset_ce(ar);
+ ath10k_pci_warm_reset_cpu(ar);
+ ath10k_pci_init_pipes(ar);
+
+ ret = ath10k_pci_wait_for_target_init(ar);
+ if (ret) {
+ ath10k_warn(ar, "failed to wait for target init: %d\n", ret);
+ return ret;
+ }
+
+ ath10k_dbg(ar, ATH10K_DBG_BOOT, "boot warm reset complete\n");
+
+ return 0;
+}
+
+static int ath10k_pci_qca99x0_soft_chip_reset(struct ath10k *ar)
+{
+ ath10k_pci_irq_disable(ar);
+ return ath10k_pci_qca99x0_chip_reset(ar);
+}
+
+static int ath10k_pci_safe_chip_reset(struct ath10k *ar)
+{
+ struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
+
+ if (!ar_pci->pci_soft_reset)
+ return -ENOTSUPP;
+
+ return ar_pci->pci_soft_reset(ar);
+}
+
+static int ath10k_pci_qca988x_chip_reset(struct ath10k *ar)
+{
+ int i, ret;
+ u32 val;
+
+ ath10k_dbg(ar, ATH10K_DBG_BOOT, "boot 988x chip reset\n");
+
+ /* Some hardware revisions (e.g. CUS223v2) have issues with cold reset.
+ * Warm reset is therefore preferred: it is safer, but it may not be able
+ * to recover the device from all possible failure scenarios.
+ *
+ * Warm reset doesn't always work on the first try, so attempt it a few
+ * times before giving up.
+ */
+ for (i = 0; i < ATH10K_PCI_NUM_WARM_RESET_ATTEMPTS; i++) {
+ ret = ath10k_pci_warm_reset(ar);
+ if (ret) {
+ ath10k_warn(ar, "failed to warm reset attempt %d of %d: %d\n",
+ i + 1, ATH10K_PCI_NUM_WARM_RESET_ATTEMPTS,
+ ret);
+ continue;
+ }
+
+ /* FIXME: Sometimes copy engine doesn't recover after warm
+ * reset. In most cases this needs cold reset. In some of these
+ * cases the device is in such a state that a cold reset may
+ * lock up the host.
+ *
+ * Reading any host interest register via copy engine is
+ * sufficient to verify if device is capable of booting
+ * firmware blob.
+ */
+ ret = ath10k_pci_init_pipes(ar);
+ if (ret) {
+ ath10k_warn(ar, "failed to init copy engine: %d\n",
+ ret);
+ continue;
+ }
+
+ ret = ath10k_pci_diag_read32(ar, QCA988X_HOST_INTEREST_ADDRESS,
+ &val);
+ if (ret) {
+ ath10k_warn(ar, "failed to poke copy engine: %d\n",
+ ret);
+ continue;
+ }
+
+ ath10k_dbg(ar, ATH10K_DBG_BOOT, "boot chip reset complete (warm)\n");
+ return 0;
+ }
+
+ if (ath10k_pci_reset_mode == ATH10K_PCI_RESET_WARM_ONLY) {
+ ath10k_warn(ar, "refusing cold reset as requested\n");
+ return -EPERM;
+ }
+
+ ret = ath10k_pci_cold_reset(ar);
+ if (ret) {
+ ath10k_warn(ar, "failed to cold reset: %d\n", ret);
+ return ret;
+ }
+
+ ret = ath10k_pci_wait_for_target_init(ar);
+ if (ret) {
+ ath10k_warn(ar, "failed to wait for target after cold reset: %d\n",
+ ret);
+ return ret;
+ }
+
+ ath10k_dbg(ar, ATH10K_DBG_BOOT, "boot qca988x chip reset complete (cold)\n");
+
+ return 0;
+}
+
+static int ath10k_pci_qca6174_chip_reset(struct ath10k *ar)
+{
+ int ret;
+
+ ath10k_dbg(ar, ATH10K_DBG_BOOT, "boot qca6174 chip reset\n");
+
+ /* FIXME: QCA6174 requires cold + warm reset to work. */
+
+ ret = ath10k_pci_cold_reset(ar);
+ if (ret) {
+ ath10k_warn(ar, "failed to cold reset: %d\n", ret);
+ return ret;
+ }
+
+ ret = ath10k_pci_wait_for_target_init(ar);
+ if (ret) {
+ ath10k_warn(ar, "failed to wait for target after cold reset: %d\n",
+ ret);
+ return ret;
+ }
+
+ ret = ath10k_pci_warm_reset(ar);
+ if (ret) {
+ ath10k_warn(ar, "failed to warm reset: %d\n", ret);
+ return ret;
+ }
+
+ ath10k_dbg(ar, ATH10K_DBG_BOOT, "boot qca6174 chip reset complete (cold)\n");
+
+ return 0;
+}
+
+static int ath10k_pci_qca99x0_chip_reset(struct ath10k *ar)
+{
+ int ret;
+
+ ath10k_dbg(ar, ATH10K_DBG_BOOT, "boot qca99x0 chip reset\n");
+
+ ret = ath10k_pci_cold_reset(ar);
+ if (ret) {
+ ath10k_warn(ar, "failed to cold reset: %d\n", ret);
+ return ret;
+ }
+
+ ret = ath10k_pci_wait_for_target_init(ar);
+ if (ret) {
+ ath10k_warn(ar, "failed to wait for target after cold reset: %d\n",
+ ret);
+ return ret;
+ }
+
+ ath10k_dbg(ar, ATH10K_DBG_BOOT, "boot qca99x0 chip reset complete (cold)\n");
+
+ return 0;
+}
+
+static int ath10k_pci_chip_reset(struct ath10k *ar)
+{
+ struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
+
+ if (WARN_ON(!ar_pci->pci_hard_reset))
+ return -ENOTSUPP;
+
+ return ar_pci->pci_hard_reset(ar);
+}
+
+static int ath10k_pci_hif_power_up(struct ath10k *ar)
+{
+ struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
+ int ret;
+
+ ath10k_dbg(ar, ATH10K_DBG_BOOT, "boot hif power up\n");
+
+ pcie_capability_read_word(ar_pci->pdev, PCI_EXP_LNKCTL,
+ &ar_pci->link_ctl);
+ pcie_capability_write_word(ar_pci->pdev, PCI_EXP_LNKCTL,
+ ar_pci->link_ctl & ~PCI_EXP_LNKCTL_ASPMC);
+
+ /*
+ * Bring the target up cleanly.
+ *
+ * The target may be in an undefined state with an AUX-powered Target
+ * and a Host in WoW mode. If the Host crashes, loses power, or is
+ * restarted (without unloading the driver) then the Target is left
+ * (aux) powered and running. On a subsequent driver load, the Target
+ * is in an unexpected state. We try to catch that here in order to
+ * reset the Target and retry the probe.
+ */
+ ret = ath10k_pci_chip_reset(ar);
+ if (ret) {
+ if (ath10k_pci_has_fw_crashed(ar)) {
+ ath10k_warn(ar, "firmware crashed during chip reset\n");
+ ath10k_pci_fw_crashed_clear(ar);
+ ath10k_pci_fw_crashed_dump(ar);
+ }
+
+ ath10k_err(ar, "failed to reset chip: %d\n", ret);
+ goto err_sleep;
+ }
+
+ ret = ath10k_pci_init_pipes(ar);
+ if (ret) {
+ ath10k_err(ar, "failed to initialize CE: %d\n", ret);
+ goto err_sleep;
+ }
+
+ ret = ath10k_pci_init_config(ar);
+ if (ret) {
+ ath10k_err(ar, "failed to setup init config: %d\n", ret);
+ goto err_ce;
+ }
+
+ ret = ath10k_pci_wake_target_cpu(ar);
+ if (ret) {
+ ath10k_err(ar, "could not wake up target CPU: %d\n", ret);
+ goto err_ce;
+ }
+
+ return 0;
+
+err_ce:
+ ath10k_pci_ce_deinit(ar);
+
+err_sleep:
+ return ret;
+}
+
+void ath10k_pci_hif_power_down(struct ath10k *ar)
+{
+ ath10k_dbg(ar, ATH10K_DBG_BOOT, "boot hif power down\n");
+
+ /* Currently hif_power_up performs effectively a reset and hif_stop
+ * resets the chip as well so there's no point in resetting here.
+ */
+}
+
+static int ath10k_pci_hif_suspend(struct ath10k *ar)
+{
+ /* Nothing to do; the important stuff is in the driver suspend. */
+ return 0;
+}
+
+static int ath10k_pci_suspend(struct ath10k *ar)
+{
+ /* The grace timer can still be counting down and ar_pci->ps_awake can
+ * still be true. It is known that the device may be asleep after
+ * resuming regardless of the SoC powersave state before suspending.
+ * Hence make sure the device is asleep before proceeding.
+ */
+ ath10k_pci_sleep_sync(ar);
+
+ return 0;
+}
+
+static int ath10k_pci_hif_resume(struct ath10k *ar)
+{
+ /* Nothing to do; the important stuff is in the driver resume. */
+ return 0;
+}
+
+static int ath10k_pci_resume(struct ath10k *ar)
+{
+ struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
+ struct pci_dev *pdev = ar_pci->pdev;
+ u32 val;
+ int ret = 0;
+
+ ret = ath10k_pci_force_wake(ar);
+ if (ret) {
+ ath10k_err(ar, "failed to wake up target: %d\n", ret);
+ return ret;
+ }
+
+ /* Suspend/Resume resets the PCI configuration space, so we have to
+ * re-disable the RETRY_TIMEOUT register (0x41) to keep PCI Tx retries
+ * from interfering with C3 CPU state. pci_restore_state() won't help
+ * here since it only restores the first 64 bytes of the PCI config header.
+ */
+ pci_read_config_dword(pdev, 0x40, &val);
+ if ((val & 0x0000ff00) != 0)
+ pci_write_config_dword(pdev, 0x40, val & 0xffff00ff);
+
+ return ret;
+}
+
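+/* Calibration data is considered valid when the XOR of all 16-bit
+ * little-endian words equals 0xffff, i.e. the stored checksum word is the
+ * complement of the XOR of the payload words.
+ */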
+static bool ath10k_pci_validate_cal(void *data, size_t size)
+{
+ __le16 *cal_words = data;
+ u16 checksum = 0;
+ size_t i;
+
+ if (size % 2 != 0)
+ return false;
+
+ for (i = 0; i < size / 2; i++)
+ checksum ^= le16_to_cpu(cal_words[i]);
+
+ return checksum == 0xffff;
+}
+
+static void ath10k_pci_enable_eeprom(struct ath10k *ar)
+{
+ /* Enable SI clock */
+ ath10k_pci_soc_write32(ar, CLOCK_CONTROL_OFFSET, 0x0);
+
+ /* Configure GPIOs for I2C operation */
+ ath10k_pci_write32(ar,
+ GPIO_BASE_ADDRESS + GPIO_PIN0_OFFSET +
+ 4 * QCA9887_1_0_I2C_SDA_GPIO_PIN,
+ SM(QCA9887_1_0_I2C_SDA_PIN_CONFIG,
+ GPIO_PIN0_CONFIG) |
+ SM(1, GPIO_PIN0_PAD_PULL));
+
+ ath10k_pci_write32(ar,
+ GPIO_BASE_ADDRESS + GPIO_PIN0_OFFSET +
+ 4 * QCA9887_1_0_SI_CLK_GPIO_PIN,
+ SM(QCA9887_1_0_SI_CLK_PIN_CONFIG, GPIO_PIN0_CONFIG) |
+ SM(1, GPIO_PIN0_PAD_PULL));
+
+ ath10k_pci_write32(ar,
+ GPIO_BASE_ADDRESS +
+ QCA9887_1_0_GPIO_ENABLE_W1TS_LOW_ADDRESS,
+ 1u << QCA9887_1_0_SI_CLK_GPIO_PIN);
+
+ /* In the Swift ASIC the EEPROM clock will be (110 MHz / 512) = 214 kHz */
+ ath10k_pci_write32(ar,
+ SI_BASE_ADDRESS + SI_CONFIG_OFFSET,
+ SM(1, SI_CONFIG_ERR_INT) |
+ SM(1, SI_CONFIG_BIDIR_OD_DATA) |
+ SM(1, SI_CONFIG_I2C) |
+ SM(1, SI_CONFIG_POS_SAMPLE) |
+ SM(1, SI_CONFIG_INACTIVE_DATA) |
+ SM(1, SI_CONFIG_INACTIVE_CLK) |
+ SM(8, SI_CONFIG_DIVIDER));
+}
+
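+/* Reads a single EEPROM byte through the SI (serial interface) I2C
+ * engine: program the address into TX_DATA0, start the transfer via the
+ * CS register, poll for SI_CS_DONE_INT for up to ~1 second, then extract
+ * the byte from RX_DATA0.
+ */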
+static int ath10k_pci_read_eeprom(struct ath10k *ar, u16 addr, u8 *out)
+{
+ u32 reg;
+ int wait_limit;
+
+ /* set the device select byte and select the read operation */
+ reg = QCA9887_EEPROM_SELECT_READ |
+ SM(addr, QCA9887_EEPROM_ADDR_LO) |
+ SM(addr >> 8, QCA9887_EEPROM_ADDR_HI);
+ ath10k_pci_write32(ar, SI_BASE_ADDRESS + SI_TX_DATA0_OFFSET, reg);
+
+ /* write transmit data, transfer length, and START bit */
+ ath10k_pci_write32(ar, SI_BASE_ADDRESS + SI_CS_OFFSET,
+ SM(1, SI_CS_START) | SM(1, SI_CS_RX_CNT) |
+ SM(4, SI_CS_TX_CNT));
+
+ /* wait max 1 sec */
+ wait_limit = 100000;
+
+ /* wait for SI_CS_DONE_INT */
+ do {
+ reg = ath10k_pci_read32(ar, SI_BASE_ADDRESS + SI_CS_OFFSET);
+ if (MS(reg, SI_CS_DONE_INT))
+ break;
+
+ wait_limit--;
+ udelay(10);
+ } while (wait_limit > 0);
+
+ if (!MS(reg, SI_CS_DONE_INT)) {
+ ath10k_err(ar, "timeout while reading device EEPROM at %04x\n",
+ addr);
+ return -ETIMEDOUT;
+ }
+
+ /* clear SI_CS_DONE_INT */
+ ath10k_pci_write32(ar, SI_BASE_ADDRESS + SI_CS_OFFSET, reg);
+
+ if (MS(reg, SI_CS_DONE_ERR)) {
+ ath10k_err(ar, "failed to read device EEPROM at %04x\n", addr);
+ return -EIO;
+ }
+
+ /* extract receive data */
+ reg = ath10k_pci_read32(ar, SI_BASE_ADDRESS + SI_RX_DATA0_OFFSET);
+ *out = reg;
+
+ return 0;
+}
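+
+/* Usage sketch (hypothetical, not driver code): the byte reader above polls
+ * SI_CS for up to ~1 s. Reading a 16-bit little-endian word would chain two
+ * byte reads:
+ *
+ *	u8 lo, hi;
+ *	u16 word;
+ *
+ *	if (!ath10k_pci_read_eeprom(ar, addr, &lo) &&
+ *	    !ath10k_pci_read_eeprom(ar, addr + 1, &hi))
+ *		word = lo | (hi << 8);
+ */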
+
+static int ath10k_pci_hif_fetch_cal_eeprom(struct ath10k *ar, void **data,
+ size_t *data_len)
+{
+ u8 *caldata = NULL;
+ size_t calsize, i;
+ int ret;
+
+ if (!QCA_REV_9887(ar))
+ return -EOPNOTSUPP;
+
+ calsize = ar->hw_params.cal_data_len;
+ caldata = kmalloc(calsize, GFP_KERNEL);
+ if (!caldata)
+ return -ENOMEM;
+
+ ath10k_pci_enable_eeprom(ar);
+
+ for (i = 0; i < calsize; i++) {
+ ret = ath10k_pci_read_eeprom(ar, i, &caldata[i]);
+ if (ret)
+ goto err_free;
+ }
+
+ if (!ath10k_pci_validate_cal(caldata, calsize))
+ goto err_free;
+
+ *data = caldata;
+ *data_len = calsize;
+
+ return 0;
+
+err_free:
+ kfree(caldata);
+
+ return -EINVAL;
+}
+
+static const struct ath10k_hif_ops ath10k_pci_hif_ops = {
+ .tx_sg = ath10k_pci_hif_tx_sg,
+ .diag_read = ath10k_pci_hif_diag_read,
+ .diag_write = ath10k_pci_diag_write_mem,
+ .exchange_bmi_msg = ath10k_pci_hif_exchange_bmi_msg,
+ .start = ath10k_pci_hif_start,
+ .stop = ath10k_pci_hif_stop,
+ .map_service_to_pipe = ath10k_pci_hif_map_service_to_pipe,
+ .get_default_pipe = ath10k_pci_hif_get_default_pipe,
+ .send_complete_check = ath10k_pci_hif_send_complete_check,
+ .get_free_queue_number = ath10k_pci_hif_get_free_queue_number,
+ .power_up = ath10k_pci_hif_power_up,
+ .power_down = ath10k_pci_hif_power_down,
+ .read32 = ath10k_pci_read32,
+ .write32 = ath10k_pci_write32,
+ .suspend = ath10k_pci_hif_suspend,
+ .resume = ath10k_pci_hif_resume,
+ .fetch_cal_eeprom = ath10k_pci_hif_fetch_cal_eeprom,
+};
+
+/*
+ * Top-level interrupt handler for all PCI interrupts from a Target.
+ * When a block of MSI interrupts is allocated, this top-level handler
+ * is not used; instead, we directly call the correct sub-handler.
+ */
+static irqreturn_t ath10k_pci_interrupt_handler(int irq, void *arg)
+{
+ struct ath10k *ar = arg;
+ struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
+ int ret;
+
+ if (ath10k_pci_has_device_gone(ar))
+ return IRQ_NONE;
+
+ ret = ath10k_pci_force_wake(ar);
+ if (ret) {
+ ath10k_warn(ar, "failed to wake device up on irq: %d\n", ret);
+ return IRQ_NONE;
+ }
+
+ if ((ar_pci->oper_irq_mode == ATH10K_PCI_IRQ_LEGACY) &&
+ !ath10k_pci_irq_pending(ar))
+ return IRQ_NONE;
+
+ ath10k_pci_disable_and_clear_legacy_irq(ar);
+ ath10k_pci_irq_msi_fw_mask(ar);
+ napi_schedule(&ar->napi);
+
+ return IRQ_HANDLED;
+}
+
+static int ath10k_pci_napi_poll(struct napi_struct *ctx, int budget)
+{
+ struct ath10k *ar = container_of(ctx, struct ath10k, napi);
+ int done = 0;
+
+ if (ath10k_pci_has_fw_crashed(ar)) {
+ ath10k_pci_fw_crashed_clear(ar);
+ ath10k_pci_fw_crashed_dump(ar);
+ napi_complete(ctx);
+ return done;
+ }
+
+ ath10k_ce_per_engine_service_any(ar);
+
+ done = ath10k_htt_txrx_compl_task(ar, budget);
+
+ if (done < budget) {
+ napi_complete_done(ctx, done);
+ /* In case of MSI, it is possible that interrupts are received
+ * while the NAPI poll is in progress. So pending interrupts that
+ * arrive after all copy engine pipes have been processed by the
+ * NAPI poll would otherwise never be handled again. This was
+ * causing the boot sequence to fail to complete on x86
+ * platforms. So before enabling interrupts it is safer to check
+ * for pending interrupts and service them immediately.
+ */
+ if (ath10k_ce_interrupt_summary(ar)) {
+ napi_reschedule(ctx);
+ goto out;
+ }
+ ath10k_pci_enable_legacy_irq(ar);
+ ath10k_pci_irq_msi_fw_unmask(ar);
+ }
+
+out:
+ return done;
+}
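+
+/* NAPI contract recap: returning a value equal to 'budget' keeps the poll
+ * scheduled with device interrupts left masked, while completing via
+ * napi_complete_done() and re-enabling interrupts, as above, hands
+ * servicing back to ath10k_pci_interrupt_handler() until the next IRQ
+ * fires.
+ */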
+
+static int ath10k_pci_request_irq_msi(struct ath10k *ar)
+{
+ struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
+ int ret;
+
+ ret = request_irq(ar_pci->pdev->irq,
+ ath10k_pci_interrupt_handler,
+ IRQF_SHARED, "ath10k_pci", ar);
+ if (ret) {
+ ath10k_warn(ar, "failed to request MSI irq %d: %d\n",
+ ar_pci->pdev->irq, ret);
+ return ret;
+ }
+
+ return 0;
+}
+
+static int ath10k_pci_request_irq_legacy(struct ath10k *ar)
+{
+ struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
+ int ret;
+
+ ret = request_irq(ar_pci->pdev->irq,
+ ath10k_pci_interrupt_handler,
+ IRQF_SHARED, "ath10k_pci", ar);
+ if (ret) {
+ ath10k_warn(ar, "failed to request legacy irq %d: %d\n",
+ ar_pci->pdev->irq, ret);
+ return ret;
+ }
+
+ return 0;
+}
+
+static int ath10k_pci_request_irq(struct ath10k *ar)
+{
+ struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
+
+ switch (ar_pci->oper_irq_mode) {
+ case ATH10K_PCI_IRQ_LEGACY:
+ return ath10k_pci_request_irq_legacy(ar);
+ case ATH10K_PCI_IRQ_MSI:
+ return ath10k_pci_request_irq_msi(ar);
+ default:
+ return -EINVAL;
+ }
+}
+
+static void ath10k_pci_free_irq(struct ath10k *ar)
+{
+ struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
+
+ free_irq(ar_pci->pdev->irq, ar);
+}
+
+void ath10k_pci_init_napi(struct ath10k *ar)
+{
+ netif_napi_add(&ar->napi_dev, &ar->napi, ath10k_pci_napi_poll,
+ ATH10K_NAPI_BUDGET);
+}
+
+static int ath10k_pci_init_irq(struct ath10k *ar)
+{
+ struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
+ int ret;
+
+ ath10k_pci_init_napi(ar);
+
+ if (ath10k_pci_irq_mode != ATH10K_PCI_IRQ_AUTO)
+ ath10k_info(ar, "limiting irq mode to: %d\n",
+ ath10k_pci_irq_mode);
+
+ /* Try MSI */
+ if (ath10k_pci_irq_mode != ATH10K_PCI_IRQ_LEGACY) {
+ ar_pci->oper_irq_mode = ATH10K_PCI_IRQ_MSI;
+ ret = pci_enable_msi(ar_pci->pdev);
+ if (ret == 0)
+ return 0;
+
+ /* fall-through */
+ }
+
+ /* Try legacy irq
+ *
+ * A potential race occurs here: the CORE_BASE write depends on
+ * the target correctly decoding the AXI address, but the host
+ * won't know when the target writes BAR to CORE_CTRL. This
+ * write might get lost if the target has NOT written BAR. For
+ * now, fix the race by repeating the write in the
+ * synchronization check below.
+ */
+ ar_pci->oper_irq_mode = ATH10K_PCI_IRQ_LEGACY;
+
+ ath10k_pci_write32(ar, SOC_CORE_BASE_ADDRESS + PCIE_INTR_ENABLE_ADDRESS,
+ PCIE_INTR_FIRMWARE_MASK | PCIE_INTR_CE_MASK_ALL);
+
+ return 0;
+}
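+
+/* For debugging interrupt problems the mode can be pinned from userspace,
+ * assuming the irq_mode module parameter declared earlier in this file:
+ *
+ *	modprobe ath10k_pci irq_mode=1	(0: auto, 1: legacy, 2: MSI)
+ */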
+
+static void ath10k_pci_deinit_irq_legacy(struct ath10k *ar)
+{
+ ath10k_pci_write32(ar, SOC_CORE_BASE_ADDRESS + PCIE_INTR_ENABLE_ADDRESS,
+ 0);
+}
+
+static int ath10k_pci_deinit_irq(struct ath10k *ar)
+{
+ struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
+
+ switch (ar_pci->oper_irq_mode) {
+ case ATH10K_PCI_IRQ_LEGACY:
+ ath10k_pci_deinit_irq_legacy(ar);
+ break;
+ default:
+ pci_disable_msi(ar_pci->pdev);
+ break;
+ }
+
+ return 0;
+}
+
+int ath10k_pci_wait_for_target_init(struct ath10k *ar)
+{
+ struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
+ unsigned long timeout;
+ u32 val;
+
+ ath10k_dbg(ar, ATH10K_DBG_BOOT, "boot waiting target to initialise\n");
+
+ timeout = jiffies + msecs_to_jiffies(ATH10K_PCI_TARGET_WAIT);
+
+ do {
+ val = ath10k_pci_read32(ar, FW_INDICATOR_ADDRESS);
+
+ ath10k_dbg(ar, ATH10K_DBG_BOOT, "boot target indicator %x\n",
+ val);
+
+ /* target should never return this */
+ if (val == 0xffffffff)
+ continue;
+
+ /* the device has crashed so don't bother trying anymore */
+ if (val & FW_IND_EVENT_PENDING)
+ break;
+
+ if (val & FW_IND_INITIALIZED)
+ break;
+
+ if (ar_pci->oper_irq_mode == ATH10K_PCI_IRQ_LEGACY)
+ /* Fix potential race by repeating CORE_BASE writes */
+ ath10k_pci_enable_legacy_irq(ar);
+
+ mdelay(10);
+ } while (time_before(jiffies, timeout));
+
+ ath10k_pci_disable_and_clear_legacy_irq(ar);
+ ath10k_pci_irq_msi_fw_mask(ar);
+
+ if (val == 0xffffffff) {
+ ath10k_err(ar, "failed to read device register, device is gone\n");
+ return -EIO;
+ }
+
+ if (val & FW_IND_EVENT_PENDING) {
+ ath10k_warn(ar, "device has crashed during init\n");
+ return -ECOMM;
+ }
+
+ if (!(val & FW_IND_INITIALIZED)) {
+ ath10k_err(ar, "failed to receive initialized event from target: %08x\n",
+ val);
+ return -ETIMEDOUT;
+ }
+
+ ath10k_dbg(ar, ATH10K_DBG_BOOT, "boot target initialised\n");
+ return 0;
+}
+
+static int ath10k_pci_cold_reset(struct ath10k *ar)
+{
+ u32 val;
+
+ ath10k_dbg(ar, ATH10K_DBG_BOOT, "boot cold reset\n");
+
+ spin_lock_bh(&ar->data_lock);
+
+ ar->stats.fw_cold_reset_counter++;
+
+ spin_unlock_bh(&ar->data_lock);
+
+ /* Put Target, including PCIe, into RESET. */
+ val = ath10k_pci_reg_read32(ar, SOC_GLOBAL_RESET_ADDRESS);
+ val |= 1;
+ ath10k_pci_reg_write32(ar, SOC_GLOBAL_RESET_ADDRESS, val);
+
+ /* After writing into SOC_GLOBAL_RESET to put device into
+ * reset and pulling out of reset pcie may not be stable
+ * for any immediate pcie register access and cause bus error,
+ * add delay before any pcie access request to fix this issue.
+ */
+ msleep(20);
+
+ /* Pull Target, including PCIe, out of RESET. */
+ val &= ~1;
+ ath10k_pci_reg_write32(ar, SOC_GLOBAL_RESET_ADDRESS, val);
+
+ msleep(20);
+
+ ath10k_dbg(ar, ATH10K_DBG_BOOT, "boot cold reset complete\n");
+
+ return 0;
+}
+
+static int ath10k_pci_claim(struct ath10k *ar)
+{
+ struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
+ struct pci_dev *pdev = ar_pci->pdev;
+ int ret;
+
+ pci_set_drvdata(pdev, ar);
+
+ ret = pci_enable_device(pdev);
+ if (ret) {
+ ath10k_err(ar, "failed to enable pci device: %d\n", ret);
+ return ret;
+ }
+
+ ret = pci_request_region(pdev, BAR_NUM, "ath");
+ if (ret) {
+ ath10k_err(ar, "failed to request region BAR%d: %d\n", BAR_NUM,
+ ret);
+ goto err_device;
+ }
+
+ /* Target expects 32 bit DMA. Enforce it. */
+ ret = pci_set_dma_mask(pdev, DMA_BIT_MASK(32));
+ if (ret) {
+ ath10k_err(ar, "failed to set dma mask to 32-bit: %d\n", ret);
+ goto err_region;
+ }
+
+ ret = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(32));
+ if (ret) {
+ ath10k_err(ar, "failed to set consistent dma mask to 32-bit: %d\n",
+ ret);
+ goto err_region;
+ }
+
+ pci_set_master(pdev);
+
+ /* Arrange for access to Target SoC registers. */
+ ar_pci->mem_len = pci_resource_len(pdev, BAR_NUM);
+ ar_pci->mem = pci_iomap(pdev, BAR_NUM, 0);
+ if (!ar_pci->mem) {
+ ath10k_err(ar, "failed to iomap BAR%d\n", BAR_NUM);
+ ret = -EIO;
+ goto err_master;
+ }
+
+ ath10k_dbg(ar, ATH10K_DBG_BOOT, "boot pci_mem 0x%pK\n", ar_pci->mem);
+ return 0;
+
+err_master:
+ pci_clear_master(pdev);
+
+err_region:
+ pci_release_region(pdev, BAR_NUM);
+
+err_device:
+ pci_disable_device(pdev);
+
+ return ret;
+}
+
+static void ath10k_pci_release(struct ath10k *ar)
+{
+ struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
+ struct pci_dev *pdev = ar_pci->pdev;
+
+ pci_iounmap(pdev, ar_pci->mem);
+ pci_release_region(pdev, BAR_NUM);
+ pci_clear_master(pdev);
+ pci_disable_device(pdev);
+}
+
+static bool ath10k_pci_chip_is_supported(u32 dev_id, u32 chip_id)
+{
+ const struct ath10k_pci_supp_chip *supp_chip;
+ int i;
+ u32 rev_id = MS(chip_id, SOC_CHIP_ID_REV);
+
+ for (i = 0; i < ARRAY_SIZE(ath10k_pci_supp_chips); i++) {
+ supp_chip = &ath10k_pci_supp_chips[i];
+
+ if (supp_chip->dev_id == dev_id &&
+ supp_chip->rev_id == rev_id)
+ return true;
+ }
+
+ return false;
+}
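+
+/* A match is a (PCI device id, silicon rev) pair; e.g. a hypothetical
+ * ath10k_pci_supp_chips[] entry for QCA988X 2.0 would read:
+ *
+ *	{ QCA988X_2_0_DEVICE_ID, QCA988X_HW_2_0_CHIP_ID_REV },
+ */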
+
+int ath10k_pci_setup_resource(struct ath10k *ar)
+{
+ struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
+ struct ath10k_ce *ce = ath10k_ce_priv(ar);
+ int ret;
+
+ spin_lock_init(&ce->ce_lock);
+ spin_lock_init(&ar_pci->ps_lock);
+
+ timer_setup(&ar_pci->rx_post_retry, ath10k_pci_rx_replenish_retry, 0);
+
+ if (QCA_REV_6174(ar) || QCA_REV_9377(ar))
+ ath10k_pci_override_ce_config(ar);
+
+ ret = ath10k_pci_alloc_pipes(ar);
+ if (ret) {
+ ath10k_err(ar, "failed to allocate copy engine pipes: %d\n",
+ ret);
+ return ret;
+ }
+
+ return 0;
+}
+
+void ath10k_pci_release_resource(struct ath10k *ar)
+{
+ ath10k_pci_rx_retry_sync(ar);
+ netif_napi_del(&ar->napi);
+ ath10k_pci_ce_deinit(ar);
+ ath10k_pci_free_pipes(ar);
+}
+
+static const struct ath10k_bus_ops ath10k_pci_bus_ops = {
+ .read32 = ath10k_bus_pci_read32,
+ .write32 = ath10k_bus_pci_write32,
+ .get_num_banks = ath10k_pci_get_num_banks,
+};
+
+static int ath10k_pci_probe(struct pci_dev *pdev,
+ const struct pci_device_id *pci_dev)
+{
+ int ret = 0;
+ struct ath10k *ar;
+ struct ath10k_pci *ar_pci;
+ enum ath10k_hw_rev hw_rev;
+ u32 chip_id;
+ bool pci_ps;
+ int (*pci_soft_reset)(struct ath10k *ar);
+ int (*pci_hard_reset)(struct ath10k *ar);
+ u32 (*targ_cpu_to_ce_addr)(struct ath10k *ar, u32 addr);
+
+ switch (pci_dev->device) {
+ case QCA988X_2_0_DEVICE_ID_UBNT:
+ case QCA988X_2_0_DEVICE_ID:
+ hw_rev = ATH10K_HW_QCA988X;
+ pci_ps = false;
+ pci_soft_reset = ath10k_pci_warm_reset;
+ pci_hard_reset = ath10k_pci_qca988x_chip_reset;
+ targ_cpu_to_ce_addr = ath10k_pci_qca988x_targ_cpu_to_ce_addr;
+ break;
+ case QCA9887_1_0_DEVICE_ID:
+ hw_rev = ATH10K_HW_QCA9887;
+ pci_ps = false;
+ pci_soft_reset = ath10k_pci_warm_reset;
+ pci_hard_reset = ath10k_pci_qca988x_chip_reset;
+ targ_cpu_to_ce_addr = ath10k_pci_qca988x_targ_cpu_to_ce_addr;
+ break;
+ case QCA6164_2_1_DEVICE_ID:
+ case QCA6174_2_1_DEVICE_ID:
+ hw_rev = ATH10K_HW_QCA6174;
+ pci_ps = true;
+ pci_soft_reset = ath10k_pci_warm_reset;
+ pci_hard_reset = ath10k_pci_qca6174_chip_reset;
+ targ_cpu_to_ce_addr = ath10k_pci_qca988x_targ_cpu_to_ce_addr;
+ break;
+ case QCA99X0_2_0_DEVICE_ID:
+ hw_rev = ATH10K_HW_QCA99X0;
+ pci_ps = false;
+ pci_soft_reset = ath10k_pci_qca99x0_soft_chip_reset;
+ pci_hard_reset = ath10k_pci_qca99x0_chip_reset;
+ targ_cpu_to_ce_addr = ath10k_pci_qca99x0_targ_cpu_to_ce_addr;
+ break;
+ case QCA9984_1_0_DEVICE_ID:
+ hw_rev = ATH10K_HW_QCA9984;
+ pci_ps = false;
+ pci_soft_reset = ath10k_pci_qca99x0_soft_chip_reset;
+ pci_hard_reset = ath10k_pci_qca99x0_chip_reset;
+ targ_cpu_to_ce_addr = ath10k_pci_qca99x0_targ_cpu_to_ce_addr;
+ break;
+ case QCA9888_2_0_DEVICE_ID:
+ hw_rev = ATH10K_HW_QCA9888;
+ pci_ps = false;
+ pci_soft_reset = ath10k_pci_qca99x0_soft_chip_reset;
+ pci_hard_reset = ath10k_pci_qca99x0_chip_reset;
+ targ_cpu_to_ce_addr = ath10k_pci_qca99x0_targ_cpu_to_ce_addr;
+ break;
+ case QCA9377_1_0_DEVICE_ID:
+ hw_rev = ATH10K_HW_QCA9377;
+ pci_ps = true;
+ pci_soft_reset = NULL;
+ pci_hard_reset = ath10k_pci_qca6174_chip_reset;
+ targ_cpu_to_ce_addr = ath10k_pci_qca988x_targ_cpu_to_ce_addr;
+ break;
+ default:
+ WARN_ON(1);
+ return -ENOTSUPP;
+ }
+
+ ar = ath10k_core_create(sizeof(*ar_pci), &pdev->dev, ATH10K_BUS_PCI,
+ hw_rev, &ath10k_pci_hif_ops);
+ if (!ar) {
+ dev_err(&pdev->dev, "failed to allocate core\n");
+ return -ENOMEM;
+ }
+
+ ath10k_dbg(ar, ATH10K_DBG_BOOT, "pci probe %04x:%04x %04x:%04x\n",
+ pdev->vendor, pdev->device,
+ pdev->subsystem_vendor, pdev->subsystem_device);
+
+ ar_pci = ath10k_pci_priv(ar);
+ ar_pci->pdev = pdev;
+ ar_pci->dev = &pdev->dev;
+ ar_pci->ar = ar;
+ ar->dev_id = pci_dev->device;
+ ar_pci->pci_ps = pci_ps;
+ ar_pci->ce.bus_ops = &ath10k_pci_bus_ops;
+ ar_pci->pci_soft_reset = pci_soft_reset;
+ ar_pci->pci_hard_reset = pci_hard_reset;
+ ar_pci->targ_cpu_to_ce_addr = targ_cpu_to_ce_addr;
+ ar->ce_priv = &ar_pci->ce;
+
+ ar->id.vendor = pdev->vendor;
+ ar->id.device = pdev->device;
+ ar->id.subsystem_vendor = pdev->subsystem_vendor;
+ ar->id.subsystem_device = pdev->subsystem_device;
+
+ timer_setup(&ar_pci->ps_timer, ath10k_pci_ps_timer, 0);
+
+ ret = ath10k_pci_setup_resource(ar);
+ if (ret) {
+ ath10k_err(ar, "failed to setup resource: %d\n", ret);
+ goto err_core_destroy;
+ }
+
+ ret = ath10k_pci_claim(ar);
+ if (ret) {
+ ath10k_err(ar, "failed to claim device: %d\n", ret);
+ goto err_free_pipes;
+ }
+
+ ret = ath10k_pci_force_wake(ar);
+ if (ret) {
+ ath10k_warn(ar, "failed to wake up device : %d\n", ret);
+ goto err_sleep;
+ }
+
+ ath10k_pci_ce_deinit(ar);
+ ath10k_pci_irq_disable(ar);
+
+ ret = ath10k_pci_init_irq(ar);
+ if (ret) {
+ ath10k_err(ar, "failed to init irqs: %d\n", ret);
+ goto err_sleep;
+ }
+
+ ath10k_info(ar, "pci irq %s oper_irq_mode %d irq_mode %d reset_mode %d\n",
+ ath10k_pci_get_irq_method(ar), ar_pci->oper_irq_mode,
+ ath10k_pci_irq_mode, ath10k_pci_reset_mode);
+
+ ret = ath10k_pci_request_irq(ar);
+ if (ret) {
+ ath10k_warn(ar, "failed to request irqs: %d\n", ret);
+ goto err_deinit_irq;
+ }
+
+ ret = ath10k_pci_chip_reset(ar);
+ if (ret) {
+ ath10k_err(ar, "failed to reset chip: %d\n", ret);
+ goto err_free_irq;
+ }
+
+ chip_id = ath10k_pci_soc_read32(ar, SOC_CHIP_ID_ADDRESS);
+ if (chip_id == 0xffffffff) {
+ ath10k_err(ar, "failed to get chip id\n");
+ ret = -ENODEV;
+ goto err_free_irq;
+ }
+
+ if (!ath10k_pci_chip_is_supported(pdev->device, chip_id)) {
+ ath10k_err(ar, "device %04x with chip_id %08x isn't supported\n",
+ pdev->device, chip_id);
+ ret = -ENODEV;
+ goto err_free_irq;
+ }
+
+ ret = ath10k_core_register(ar, chip_id);
+ if (ret) {
+ ath10k_err(ar, "failed to register driver core: %d\n", ret);
+ goto err_free_irq;
+ }
+
+ return 0;
+
+err_free_irq:
+ ath10k_pci_free_irq(ar);
+ ath10k_pci_rx_retry_sync(ar);
+
+err_deinit_irq:
+ ath10k_pci_deinit_irq(ar);
+
+err_sleep:
+ ath10k_pci_sleep_sync(ar);
+ ath10k_pci_release(ar);
+
+err_free_pipes:
+ ath10k_pci_free_pipes(ar);
+
+err_core_destroy:
+ ath10k_core_destroy(ar);
+
+ return ret;
+}
+
+static void ath10k_pci_remove(struct pci_dev *pdev)
+{
+ struct ath10k *ar = pci_get_drvdata(pdev);
+ struct ath10k_pci *ar_pci;
+
+ if (!ar)
+ return;
+
+ ath10k_dbg(ar, ATH10K_DBG_PCI, "pci remove\n");
+
+ ar_pci = ath10k_pci_priv(ar);
+
+ if (!ar_pci)
+ return;
+
+ ath10k_core_unregister(ar);
+ ath10k_pci_free_irq(ar);
+ ath10k_pci_deinit_irq(ar);
+ ath10k_pci_release_resource(ar);
+ ath10k_pci_sleep_sync(ar);
+ ath10k_pci_release(ar);
+ ath10k_core_destroy(ar);
+}
+
+MODULE_DEVICE_TABLE(pci, ath10k_pci_id_table);
+
+static __maybe_unused int ath10k_pci_pm_suspend(struct device *dev)
+{
+ struct ath10k *ar = dev_get_drvdata(dev);
+ int ret;
+
+ ret = ath10k_pci_suspend(ar);
+ if (ret)
+ ath10k_warn(ar, "failed to suspend hif: %d\n", ret);
+
+ return ret;
+}
+
+static __maybe_unused int ath10k_pci_pm_resume(struct device *dev)
+{
+ struct ath10k *ar = dev_get_drvdata(dev);
+ int ret;
+
+ ret = ath10k_pci_resume(ar);
+ if (ret)
+ ath10k_warn(ar, "failed to resume hif: %d\n", ret);
+
+ return ret;
+}
+
+static SIMPLE_DEV_PM_OPS(ath10k_pci_pm_ops,
+ ath10k_pci_pm_suspend,
+ ath10k_pci_pm_resume);
+
+static struct pci_driver ath10k_pci_driver = {
+ .name = "ath10k_pci",
+ .id_table = ath10k_pci_id_table,
+ .probe = ath10k_pci_probe,
+ .remove = ath10k_pci_remove,
+#ifdef CONFIG_PM
+ .driver.pm = &ath10k_pci_pm_ops,
+#endif
+};
+
+static int __init ath10k_pci_init(void)
+{
+ int ret;
+
+ ret = pci_register_driver(&ath10k_pci_driver);
+ if (ret) {
+ printk(KERN_ERR "failed to register ath10k pci driver: %d\n",
+ ret);
+ return ret;
+ }
+
+ ret = ath10k_ahb_init();
+ if (ret) {
+ printk(KERN_ERR "ahb init failed: %d\n", ret);
+ pci_unregister_driver(&ath10k_pci_driver);
+ }
+
+ return ret;
+}
+module_init(ath10k_pci_init);
+
+static void __exit ath10k_pci_exit(void)
+{
+ pci_unregister_driver(&ath10k_pci_driver);
+ ath10k_ahb_exit();
+}
+
+module_exit(ath10k_pci_exit);
+
+MODULE_AUTHOR("Qualcomm Atheros");
+MODULE_DESCRIPTION("Driver support for Qualcomm Atheros 802.11ac WLAN PCIe/AHB devices");
+MODULE_LICENSE("Dual BSD/GPL");
+
+/* QCA988x 2.0 firmware files */
+MODULE_FIRMWARE(QCA988X_HW_2_0_FW_DIR "/" ATH10K_FW_API2_FILE);
+MODULE_FIRMWARE(QCA988X_HW_2_0_FW_DIR "/" ATH10K_FW_API3_FILE);
+MODULE_FIRMWARE(QCA988X_HW_2_0_FW_DIR "/" ATH10K_FW_API4_FILE);
+MODULE_FIRMWARE(QCA988X_HW_2_0_FW_DIR "/" ATH10K_FW_API5_FILE);
+MODULE_FIRMWARE(QCA988X_HW_2_0_FW_DIR "/" QCA988X_HW_2_0_BOARD_DATA_FILE);
+MODULE_FIRMWARE(QCA988X_HW_2_0_FW_DIR "/" ATH10K_BOARD_API2_FILE);
+
+/* QCA9887 1.0 firmware files */
+MODULE_FIRMWARE(QCA9887_HW_1_0_FW_DIR "/" ATH10K_FW_API5_FILE);
+MODULE_FIRMWARE(QCA9887_HW_1_0_FW_DIR "/" QCA9887_HW_1_0_BOARD_DATA_FILE);
+MODULE_FIRMWARE(QCA9887_HW_1_0_FW_DIR "/" ATH10K_BOARD_API2_FILE);
+
+/* QCA6174 2.1 firmware files */
+MODULE_FIRMWARE(QCA6174_HW_2_1_FW_DIR "/" ATH10K_FW_API4_FILE);
+MODULE_FIRMWARE(QCA6174_HW_2_1_FW_DIR "/" ATH10K_FW_API5_FILE);
+MODULE_FIRMWARE(QCA6174_HW_2_1_FW_DIR "/" QCA6174_HW_2_1_BOARD_DATA_FILE);
+MODULE_FIRMWARE(QCA6174_HW_2_1_FW_DIR "/" ATH10K_BOARD_API2_FILE);
+
+/* QCA6174 3.1 firmware files */
+MODULE_FIRMWARE(QCA6174_HW_3_0_FW_DIR "/" ATH10K_FW_API4_FILE);
+MODULE_FIRMWARE(QCA6174_HW_3_0_FW_DIR "/" ATH10K_FW_API5_FILE);
+MODULE_FIRMWARE(QCA6174_HW_3_0_FW_DIR "/" ATH10K_FW_API6_FILE);
+MODULE_FIRMWARE(QCA6174_HW_3_0_FW_DIR "/" QCA6174_HW_3_0_BOARD_DATA_FILE);
+MODULE_FIRMWARE(QCA6174_HW_3_0_FW_DIR "/" ATH10K_BOARD_API2_FILE);
+
+/* QCA9377 1.0 firmware files */
+MODULE_FIRMWARE(QCA9377_HW_1_0_FW_DIR "/" ATH10K_FW_API6_FILE);
+MODULE_FIRMWARE(QCA9377_HW_1_0_FW_DIR "/" ATH10K_FW_API5_FILE);
+MODULE_FIRMWARE(QCA9377_HW_1_0_FW_DIR "/" QCA9377_HW_1_0_BOARD_DATA_FILE);
diff --git a/drivers/net/wireless/ath/ath10k/pci.h b/drivers/net/wireless/ath/ath10k/pci.h
new file mode 100644
index 000000000..0ed436657
--- /dev/null
+++ b/drivers/net/wireless/ath/ath10k/pci.h
@@ -0,0 +1,260 @@
+/*
+ * Copyright (c) 2005-2011 Atheros Communications Inc.
+ * Copyright (c) 2011-2017 Qualcomm Atheros, Inc.
+ *
+ * Permission to use, copy, modify, and/or distribute this software for any
+ * purpose with or without fee is hereby granted, provided that the above
+ * copyright notice and this permission notice appear in all copies.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
+ * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
+ * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+ * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+ * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
+ * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+ */
+
+#ifndef _PCI_H_
+#define _PCI_H_
+
+#include <linux/interrupt.h>
+
+#include "hw.h"
+#include "ce.h"
+#include "ahb.h"
+
+/*
+ * maximum number of bytes that can be
+ * handled atomically by DiagRead/DiagWrite
+ */
+#define DIAG_TRANSFER_LIMIT 2048
+
+struct bmi_xfer {
+ bool tx_done;
+ bool rx_done;
+ bool wait_for_resp;
+ u32 resp_len;
+};
+
+/*
+ * PCI-specific Target state
+ *
+ * NOTE: Structure is shared between Host software and Target firmware!
+ *
+ * Much of this may be of interest to the Host so
+ * HOST_INTEREST->hi_interconnect_state points here
+ * (and all members are 32-bit quantities in order to
+ * facilitate Host access). In particular, Host software is
+ * required to initialize pipe_cfg_addr and svc_to_pipe_map.
+ */
+struct pcie_state {
+ /* Pipe configuration Target address */
+ /* NB: ce_pipe_config[CE_COUNT] */
+ u32 pipe_cfg_addr;
+
+ /* Service to pipe map Target address */
+ /* NB: service_to_pipe[PIPE_TO_CE_MAP_CN] */
+ u32 svc_to_pipe_map;
+
+ /* number of MSI interrupts requested */
+ u32 msi_requested;
+
+ /* number of MSI interrupts granted */
+ u32 msi_granted;
+
+ /* Message Signalled Interrupt address */
+ u32 msi_addr;
+
+ /* Base data */
+ u32 msi_data;
+
+ /*
+ * Data for firmware interrupt;
+ * MSI data for other interrupts are
+ * in various SoC registers
+ */
+ u32 msi_fw_intr_data;
+
+ /* PCIE_PWR_METHOD_* */
+ u32 power_mgmt_method;
+
+ /* PCIE_CONFIG_FLAG_* */
+ u32 config_flags;
+};
+
+/* PCIE_CONFIG_FLAG definitions */
+#define PCIE_CONFIG_FLAG_ENABLE_L1 0x0000001
+
+/* Per-pipe state. */
+struct ath10k_pci_pipe {
+ /* Handle of underlying Copy Engine */
+ struct ath10k_ce_pipe *ce_hdl;
+
+ /* Our pipe number; facilitates use of pipe_info ptrs. */
+ u8 pipe_num;
+
+ /* Convenience back pointer to hif_ce_state. */
+ struct ath10k *hif_ce_state;
+
+ size_t buf_sz;
+
+ /* protects compl_free and num_send_allowed */
+ spinlock_t pipe_lock;
+};
+
+struct ath10k_pci_supp_chip {
+ u32 dev_id;
+ u32 rev_id;
+};
+
+enum ath10k_pci_irq_mode {
+ ATH10K_PCI_IRQ_AUTO = 0,
+ ATH10K_PCI_IRQ_LEGACY = 1,
+ ATH10K_PCI_IRQ_MSI = 2,
+};
+
+struct ath10k_pci {
+ struct pci_dev *pdev;
+ struct device *dev;
+ struct ath10k *ar;
+ void __iomem *mem;
+ size_t mem_len;
+
+ /* Operating interrupt mode */
+ enum ath10k_pci_irq_mode oper_irq_mode;
+
+ struct ath10k_pci_pipe pipe_info[CE_COUNT_MAX];
+
+ /* Copy Engine used for Diagnostic Accesses */
+ struct ath10k_ce_pipe *ce_diag;
+
+ struct ath10k_ce ce;
+ struct timer_list rx_post_retry;
+
+ /* Due to HW quirks it is recommended to disable ASPM during device
+ * bootup. To do that the original PCI-E Link Control is stored before
+ * device bootup is executed and re-programmed later.
+ */
+ u16 link_ctl;
+
+ /* Protects ps_awake and ps_wake_refcount */
+ spinlock_t ps_lock;
+
+ /* The device has a special powersave-oriented register. When the
+ * device is considered asleep it drains less power and the driver is
+ * forbidden from accessing most MMIO registers. If the host were to
+ * access them without waking the device up first it might scribble
+ * over host memory or return 0xdeadbeef readouts.
+ */
+ unsigned long ps_wake_refcount;
+
+ /* Waking up takes some time (up to 2ms in some cases) so it can be bad
+ * for latency. To mitigate this the device isn't immediately allowed
+ * to sleep after all references are undone - instead there's a grace
+ * period after which the powersave register is updated unless some
+ * activity to/from device happened in the meantime.
+ *
+ * Also see comments on ATH10K_PCI_SLEEP_GRACE_PERIOD_MSEC.
+ */
+ struct timer_list ps_timer;
+
+ /* MMIO registers are used to communicate with the device. With
+ * intensive traffic, accessing the powersave register would be
+ * wasteful overhead and would needlessly stall the CPU. It is far
+ * more efficient to rely on a variable in RAM and update it only
+ * upon powersave register state changes.
+ */
+ bool ps_awake;
+
+ /* PCI power save, disabled for QCA988X and QCA99X0.
+ * Setting this to 'false' avoids frequent locking
+ * on MMIO read/write.
+ */
+ bool pci_ps;
+
+ /* Chip specific pci reset routine used to do a safe reset */
+ int (*pci_soft_reset)(struct ath10k *ar);
+
+ /* Chip specific pci full reset function */
+ int (*pci_hard_reset)(struct ath10k *ar);
+
+ /* chip specific methods for converting target CPU virtual address
+ * space to CE address space
+ */
+ u32 (*targ_cpu_to_ce_addr)(struct ath10k *ar, u32 addr);
+
+ /* Keep this entry last; memory for struct ath10k_ahb is
+ * allocated (when AHB support is enabled) as a continuation of
+ * this struct.
+ */
+ struct ath10k_ahb ahb[0];
+};
+
+static inline struct ath10k_pci *ath10k_pci_priv(struct ath10k *ar)
+{
+ return (struct ath10k_pci *)ar->drv_priv;
+}
+
+#define ATH10K_PCI_RX_POST_RETRY_MS 50
+#define ATH_PCI_RESET_WAIT_MAX 10 /* ms */
+#define PCIE_WAKE_TIMEOUT 30000 /* 30ms */
+#define PCIE_WAKE_LATE_US 10000 /* 10ms */
+
+#define BAR_NUM 0
+
+#define CDC_WAR_MAGIC_STR 0xceef0000
+#define CDC_WAR_DATA_CE 4
+
+/* Wait up to this many ms for a Diagnostic Access CE operation to complete */
+#define DIAG_ACCESS_CE_TIMEOUT_MS 10
+
+void ath10k_pci_write32(struct ath10k *ar, u32 offset, u32 value);
+void ath10k_pci_soc_write32(struct ath10k *ar, u32 addr, u32 val);
+void ath10k_pci_reg_write32(struct ath10k *ar, u32 addr, u32 val);
+
+u32 ath10k_pci_read32(struct ath10k *ar, u32 offset);
+u32 ath10k_pci_soc_read32(struct ath10k *ar, u32 addr);
+u32 ath10k_pci_reg_read32(struct ath10k *ar, u32 addr);
+
+int ath10k_pci_hif_tx_sg(struct ath10k *ar, u8 pipe_id,
+ struct ath10k_hif_sg_item *items, int n_items);
+int ath10k_pci_hif_diag_read(struct ath10k *ar, u32 address, void *buf,
+ size_t buf_len);
+int ath10k_pci_diag_write_mem(struct ath10k *ar, u32 address,
+ const void *data, int nbytes);
+int ath10k_pci_hif_exchange_bmi_msg(struct ath10k *ar, void *req, u32 req_len,
+ void *resp, u32 *resp_len);
+int ath10k_pci_hif_map_service_to_pipe(struct ath10k *ar, u16 service_id,
+ u8 *ul_pipe, u8 *dl_pipe);
+void ath10k_pci_hif_get_default_pipe(struct ath10k *ar, u8 *ul_pipe,
+ u8 *dl_pipe);
+void ath10k_pci_hif_send_complete_check(struct ath10k *ar, u8 pipe,
+ int force);
+u16 ath10k_pci_hif_get_free_queue_number(struct ath10k *ar, u8 pipe);
+void ath10k_pci_hif_power_down(struct ath10k *ar);
+int ath10k_pci_alloc_pipes(struct ath10k *ar);
+void ath10k_pci_free_pipes(struct ath10k *ar);
+void ath10k_pci_rx_replenish_retry(struct timer_list *t);
+void ath10k_pci_ce_deinit(struct ath10k *ar);
+void ath10k_pci_init_napi(struct ath10k *ar);
+int ath10k_pci_init_pipes(struct ath10k *ar);
+int ath10k_pci_init_config(struct ath10k *ar);
+void ath10k_pci_rx_post(struct ath10k *ar);
+void ath10k_pci_flush(struct ath10k *ar);
+void ath10k_pci_enable_legacy_irq(struct ath10k *ar);
+bool ath10k_pci_irq_pending(struct ath10k *ar);
+void ath10k_pci_disable_and_clear_legacy_irq(struct ath10k *ar);
+void ath10k_pci_irq_msi_fw_mask(struct ath10k *ar);
+int ath10k_pci_wait_for_target_init(struct ath10k *ar);
+int ath10k_pci_setup_resource(struct ath10k *ar);
+void ath10k_pci_release_resource(struct ath10k *ar);
+
+/* QCA6174 is known to have Tx/Rx issues when SOC_WAKE register is poked too
+ * frequently. To avoid this put SoC to sleep after a very conservative grace
+ * period. Adjust with great care.
+ */
+#define ATH10K_PCI_SLEEP_GRACE_PERIOD_MSEC 60
+
+#endif /* _PCI_H_ */
diff --git a/drivers/net/wireless/ath/ath10k/rx_desc.h b/drivers/net/wireless/ath/ath10k/rx_desc.h
new file mode 100644
index 000000000..ea4075d45
--- /dev/null
+++ b/drivers/net/wireless/ath/ath10k/rx_desc.h
@@ -0,0 +1,1280 @@
+/*
+ * Copyright (c) 2005-2011 Atheros Communications Inc.
+ * Copyright (c) 2011-2017 Qualcomm Atheros, Inc.
+ *
+ * Permission to use, copy, modify, and/or distribute this software for any
+ * purpose with or without fee is hereby granted, provided that the above
+ * copyright notice and this permission notice appear in all copies.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
+ * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
+ * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+ * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+ * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
+ * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+ */
+
+#ifndef _RX_DESC_H_
+#define _RX_DESC_H_
+
+#include <linux/bitops.h>
+
+enum rx_attention_flags {
+ RX_ATTENTION_FLAGS_FIRST_MPDU = BIT(0),
+ RX_ATTENTION_FLAGS_LAST_MPDU = BIT(1),
+ RX_ATTENTION_FLAGS_MCAST_BCAST = BIT(2),
+ RX_ATTENTION_FLAGS_PEER_IDX_INVALID = BIT(3),
+ RX_ATTENTION_FLAGS_PEER_IDX_TIMEOUT = BIT(4),
+ RX_ATTENTION_FLAGS_POWER_MGMT = BIT(5),
+ RX_ATTENTION_FLAGS_NON_QOS = BIT(6),
+ RX_ATTENTION_FLAGS_NULL_DATA = BIT(7),
+ RX_ATTENTION_FLAGS_MGMT_TYPE = BIT(8),
+ RX_ATTENTION_FLAGS_CTRL_TYPE = BIT(9),
+ RX_ATTENTION_FLAGS_MORE_DATA = BIT(10),
+ RX_ATTENTION_FLAGS_EOSP = BIT(11),
+ RX_ATTENTION_FLAGS_U_APSD_TRIGGER = BIT(12),
+ RX_ATTENTION_FLAGS_FRAGMENT = BIT(13),
+ RX_ATTENTION_FLAGS_ORDER = BIT(14),
+ RX_ATTENTION_FLAGS_CLASSIFICATION = BIT(15),
+ RX_ATTENTION_FLAGS_OVERFLOW_ERR = BIT(16),
+ RX_ATTENTION_FLAGS_MSDU_LENGTH_ERR = BIT(17),
+ RX_ATTENTION_FLAGS_TCP_UDP_CHKSUM_FAIL = BIT(18),
+ RX_ATTENTION_FLAGS_IP_CHKSUM_FAIL = BIT(19),
+ RX_ATTENTION_FLAGS_SA_IDX_INVALID = BIT(20),
+ RX_ATTENTION_FLAGS_DA_IDX_INVALID = BIT(21),
+ RX_ATTENTION_FLAGS_SA_IDX_TIMEOUT = BIT(22),
+ RX_ATTENTION_FLAGS_DA_IDX_TIMEOUT = BIT(23),
+ RX_ATTENTION_FLAGS_ENCRYPT_REQUIRED = BIT(24),
+ RX_ATTENTION_FLAGS_DIRECTED = BIT(25),
+ RX_ATTENTION_FLAGS_BUFFER_FRAGMENT = BIT(26),
+ RX_ATTENTION_FLAGS_MPDU_LENGTH_ERR = BIT(27),
+ RX_ATTENTION_FLAGS_TKIP_MIC_ERR = BIT(28),
+ RX_ATTENTION_FLAGS_DECRYPT_ERR = BIT(29),
+ RX_ATTENTION_FLAGS_FCS_ERR = BIT(30),
+ RX_ATTENTION_FLAGS_MSDU_DONE = BIT(31),
+};
+
+struct rx_attention {
+ __le32 flags; /* %RX_ATTENTION_FLAGS_ */
+} __packed;
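+
+/* Illustrative use (assuming an rxd pointer to the full HTT rx descriptor
+ * from htt.h, which embeds struct rx_attention):
+ *
+ *	u32 attn = __le32_to_cpu(rxd->attention.flags);
+ *
+ *	if (!(attn & RX_ATTENTION_FLAGS_MSDU_DONE))
+ *		return;		// descriptor not yet fully written by HW
+ *	if (attn & RX_ATTENTION_FLAGS_FCS_ERR)
+ *		goto drop;	// hypothetical error path
+ */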
+
+/*
+ * first_mpdu
+ * Indicates the first MSDU of the PPDU. If both first_mpdu
+ * and last_mpdu are set in the MSDU then this is not an
+ * A-MPDU frame but a standalone MPDU. Interior MPDUs in an
+ * A-MPDU shall have both first_mpdu and last_mpdu bits set to
+ * 0. The PPDU start status will only be valid when this bit
+ * is set.
+ *
+ * last_mpdu
+ * Indicates the last MSDU of the last MPDU of the PPDU. The
+ * PPDU end status will only be valid when this bit is set.
+ *
+ * mcast_bcast
+ * Multicast / broadcast indicator. Only set when the MAC
+ * address 1 bit 0 is set indicating mcast/bcast and the BSSID
+ * matches one of the 4 BSSID registers. Only set when
+ * first_msdu is set.
+ *
+ * peer_idx_invalid
+ * Indicates no matching entries within the max search
+ * count. Only set when first_msdu is set.
+ *
+ * peer_idx_timeout
+ * Indicates an unsuccessful search for the peer index due to
+ * timeout. Only set when first_msdu is set.
+ *
+ * power_mgmt
+ * Power management bit set in the 802.11 header. Only set
+ * when first_msdu is set.
+ *
+ * non_qos
+ * Set if the packet is not a QoS data frame. Only set when
+ * first_msdu is set.
+ *
+ * null_data
+ * Set if frame type indicates either null data or QoS null
+ * data format. Only set when first_msdu is set.
+ *
+ * mgmt_type
+ * Set if packet is a management packet. Only set when
+ * first_msdu is set.
+ *
+ * ctrl_type
+ * Set if packet is a control packet. Only set when first_msdu
+ * is set.
+ *
+ * more_data
+ * Set if more bit in frame control is set. Only set when
+ * first_msdu is set.
+ *
+ * eosp
+ * Set if the EOSP (end of service period) bit in the QoS
+ * control field is set. Only set when first_msdu is set.
+ *
+ * u_apsd_trigger
+ * Set if packet is U-APSD trigger. Key table will have bits
+ * per TID to indicate U-APSD trigger.
+ *
+ * fragment
+ * Indicates that this is an 802.11 fragment frame. This is
+ * set when either the more_frag bit is set in the frame
+ * control or the fragment number is not zero. Only set when
+ * first_msdu is set.
+ *
+ * order
+ * Set if the order bit in the frame control is set. Only set
+ * when first_msdu is set.
+ *
+ * classification
+ * Indicates that this status has a corresponding MSDU that
+ * requires FW processing. The OLE will have classification
+ * ring mask registers which will indicate the ring(s) for
+ * packets and descriptors which need FW attention.
+ *
+ * overflow_err
+ * PCU Receive FIFO does not have enough space to store the
+ * full receive packet. Enough space is reserved in the
+ * receive FIFO for the status to be written. This MPDU and all
+ * remaining packets in the PPDU will be filtered and no Ack
+ * response will be transmitted.
+ *
+ * msdu_length_err
+ * Indicates that the MSDU length from the 802.3 encapsulated
+ * length field extends beyond the MPDU boundary.
+ *
+ * tcp_udp_chksum_fail
+ * Indicates that the computed checksum (tcp_udp_chksum) did
+ * not match the checksum in the TCP/UDP header.
+ *
+ * ip_chksum_fail
+ * Indicates that the computed checksum did not match the
+ * checksum in the IP header.
+ *
+ * sa_idx_invalid
+ * Indicates no matching entry was found in the address search
+ * table for the source MAC address.
+ *
+ * da_idx_invalid
+ * Indicates no matching entry was found in the address search
+ * table for the destination MAC address.
+ *
+ * sa_idx_timeout
+ * Indicates an unsuccessful search for the source MAC address
+ * due to the expiring of the search timer.
+ *
+ * da_idx_timeout
+ * Indicates an unsuccessful search for the destination MAC
+ * address due to the expiring of the search timer.
+ *
+ * encrypt_required
+ * Indicates that this data type frame is not encrypted even if
+ * the policy for this MPDU requires encryption as indicated in
+ * the peer table key type.
+ *
+ * directed
+ * MPDU is a directed packet which means that the RA matched
+ * our STA addresses. In proxySTA it means that the TA matched
+ * an entry in our address search table with the corresponding
+ * 'no_ack' bit in the address search entry cleared.
+ *
+ * buffer_fragment
+ * Indicates that at least one of the rx buffers has been
+ * fragmented. If set the FW should look at the rx_frag_info
+ * descriptor described below.
+ *
+ * mpdu_length_err
+ * Indicates that the MPDU was prematurely terminated
+ * resulting in a truncated MPDU. Don't trust the MPDU length
+ * field.
+ *
+ * tkip_mic_err
+ * Indicates that the MPDU Michael integrity check failed
+ *
+ * decrypt_err
+ * Indicates that the MPDU decrypt integrity check failed
+ *
+ * fcs_err
+ * Indicates that the MPDU FCS check failed
+ *
+ * msdu_done
+ * If set indicates that the RX packet data, RX header data, RX
+ * PPDU start descriptor, RX MPDU start/end descriptor, RX MSDU
+ * start/end descriptors and RX Attention descriptor are all
+ * valid. This bit must be in the last octet of the
+ * descriptor.
+ */
+
+struct rx_frag_info {
+ u8 ring0_more_count;
+ u8 ring1_more_count;
+ u8 ring2_more_count;
+ u8 ring3_more_count;
+ u8 ring4_more_count;
+ u8 ring5_more_count;
+ u8 ring6_more_count;
+ u8 ring7_more_count;
+} __packed;
+
+/*
+ * ring0_more_count
+ * Indicates the number of more buffers associated with RX DMA
+ * ring 0. Field is filled in by the RX_DMA.
+ *
+ * ring1_more_count
+ * Indicates the number of more buffers associated with RX DMA
+ * ring 1. Field is filled in by the RX_DMA.
+ *
+ * ring2_more_count
+ * Indicates the number of more buffers associated with RX DMA
+ * ring 2. Field is filled in by the RX_DMA.
+ *
+ * ring3_more_count
+ * Indicates the number of more buffers associated with RX DMA
+ * ring 3. Field is filled in by the RX_DMA.
+ */
+
+enum htt_rx_mpdu_encrypt_type {
+ HTT_RX_MPDU_ENCRYPT_WEP40 = 0,
+ HTT_RX_MPDU_ENCRYPT_WEP104 = 1,
+ HTT_RX_MPDU_ENCRYPT_TKIP_WITHOUT_MIC = 2,
+ HTT_RX_MPDU_ENCRYPT_WEP128 = 3,
+ HTT_RX_MPDU_ENCRYPT_TKIP_WPA = 4,
+ HTT_RX_MPDU_ENCRYPT_WAPI = 5,
+ HTT_RX_MPDU_ENCRYPT_AES_CCM_WPA2 = 6,
+ HTT_RX_MPDU_ENCRYPT_NONE = 7,
+ HTT_RX_MPDU_ENCRYPT_AES_CCM256_WPA2 = 8,
+ HTT_RX_MPDU_ENCRYPT_AES_GCMP_WPA2 = 9,
+ HTT_RX_MPDU_ENCRYPT_AES_GCMP256_WPA2 = 10,
+};
+
+#define RX_MPDU_START_INFO0_PEER_IDX_MASK 0x000007ff
+#define RX_MPDU_START_INFO0_PEER_IDX_LSB 0
+#define RX_MPDU_START_INFO0_SEQ_NUM_MASK 0x0fff0000
+#define RX_MPDU_START_INFO0_SEQ_NUM_LSB 16
+#define RX_MPDU_START_INFO0_ENCRYPT_TYPE_MASK 0xf0000000
+#define RX_MPDU_START_INFO0_ENCRYPT_TYPE_LSB 28
+#define RX_MPDU_START_INFO0_FROM_DS BIT(11)
+#define RX_MPDU_START_INFO0_TO_DS BIT(12)
+#define RX_MPDU_START_INFO0_ENCRYPTED BIT(13)
+#define RX_MPDU_START_INFO0_RETRY BIT(14)
+#define RX_MPDU_START_INFO0_TXBF_H_INFO BIT(15)
+
+#define RX_MPDU_START_INFO1_TID_MASK 0xf0000000
+#define RX_MPDU_START_INFO1_TID_LSB 28
+#define RX_MPDU_START_INFO1_DIRECTED BIT(16)
+
+struct rx_mpdu_start {
+ __le32 info0;
+ union {
+ struct {
+ __le32 pn31_0;
+ __le32 info1; /* %RX_MPDU_START_INFO1_ */
+ } __packed;
+ struct {
+ u8 pn[6];
+ } __packed;
+ } __packed;
+} __packed;
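+
+/* Field extraction sketch using the MS() helper from hw.h:
+ *
+ *	u32 info0 = __le32_to_cpu(mpdu_start->info0);
+ *	u16 peer_idx = MS(info0, RX_MPDU_START_INFO0_PEER_IDX);
+ *	enum htt_rx_mpdu_encrypt_type enctype =
+ *		MS(info0, RX_MPDU_START_INFO0_ENCRYPT_TYPE);
+ */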
+
+/*
+ * peer_idx
+ * The index of the address search table which associated with
+ * the peer table entry corresponding to this MPDU. Only valid
+ * when first_msdu is set.
+ *
+ * fr_ds
+ * Set if the from DS bit is set in the frame control. Only
+ * valid when first_msdu is set.
+ *
+ * to_ds
+ * Set if the to DS bit is set in the frame control. Only
+ * valid when first_msdu is set.
+ *
+ * encrypted
+ * Protected bit from the frame control. Only valid when
+ * first_msdu is set.
+ *
+ * retry
+ * Retry bit from the frame control. Only valid when
+ * first_msdu is set.
+ *
+ * txbf_h_info
+ * The MPDU data will contain H information. Primarily used
+ * for debug.
+ *
+ * seq_num
+ * The sequence number from the 802.11 header. Only valid when
+ * first_msdu is set.
+ *
+ * encrypt_type
+ * Indicates type of decrypt cipher used (as defined in the
+ * peer table)
+ * 0: WEP40
+ * 1: WEP104
+ * 2: TKIP without MIC
+ * 3: WEP128
+ * 4: TKIP (WPA)
+ * 5: WAPI
+ * 6: AES-CCM (WPA2)
+ * 7: No cipher
+ * Only valid when first_msdu is set
+ *
+ * pn_31_0
+ * Bits [31:0] of the PN number extracted from the IV field
+ * WEP: IV = {key_id_octet, pn2, pn1, pn0}. Only pn[23:0] is
+ * valid.
+ * TKIP: IV = {pn5, pn4, pn3, pn2, key_id_octet, pn0,
+ * WEPSeed[1], pn1}. Only pn[47:0] is valid.
+ * AES-CCM: IV = {pn5, pn4, pn3, pn2, key_id_octet, 0x0, pn1,
+ * pn0}. Only pn[47:0] is valid.
+ * WAPI: IV = {key_id_octet, 0x0, pn15, pn14, pn13, pn12, pn11,
+ * pn10, pn9, pn8, pn7, pn6, pn5, pn4, pn3, pn2, pn1, pn0}.
+ * The ext_wapi_pn[127:48] in the rx_msdu_misc descriptor and
+ * pn[47:0] are valid.
+ * Only valid when first_msdu is set.
+ *
+ * pn_47_32
+ * Bits [47:32] of the PN number. See description for
+ * pn_31_0. The remaining PN fields are in the rx_msdu_end
+ * descriptor
+ *
+ * pn
+ * Use this field to access the pn without worrying about
+ * byte-order and bitmasking/bitshifting.
+ *
+ * directed
+ * See definition in RX attention descriptor
+ *
+ * reserved_2
+ * Reserved: HW should fill with zero. FW should ignore.
+ *
+ * tid
+ * The TID field in the QoS control field
+ */
+
+#define RX_MPDU_END_INFO0_RESERVED_0_MASK 0x00001fff
+#define RX_MPDU_END_INFO0_RESERVED_0_LSB 0
+#define RX_MPDU_END_INFO0_POST_DELIM_CNT_MASK 0x0fff0000
+#define RX_MPDU_END_INFO0_POST_DELIM_CNT_LSB 16
+#define RX_MPDU_END_INFO0_OVERFLOW_ERR BIT(13)
+#define RX_MPDU_END_INFO0_LAST_MPDU BIT(14)
+#define RX_MPDU_END_INFO0_POST_DELIM_ERR BIT(15)
+#define RX_MPDU_END_INFO0_MPDU_LENGTH_ERR BIT(28)
+#define RX_MPDU_END_INFO0_TKIP_MIC_ERR BIT(29)
+#define RX_MPDU_END_INFO0_DECRYPT_ERR BIT(30)
+#define RX_MPDU_END_INFO0_FCS_ERR BIT(31)
+
+struct rx_mpdu_end {
+ __le32 info0;
+} __packed;
+
+/*
+ * reserved_0
+ * Reserved
+ *
+ * overflow_err
+ * PCU Receive FIFO does not have enough space to store the
+ * full receive packet. Enough space is reserved in the
+ * receive FIFO for the status to be written. This MPDU and all
+ * remaining packets in the PPDU will be filtered and no Ack
+ * response will be transmitted.
+ *
+ * last_mpdu
+ * Indicates that this is the last MPDU of a PPDU.
+ *
+ * post_delim_err
+ * Indicates that a delimiter FCS error occurred after this
+ * MPDU before the next MPDU. Only valid when last_msdu is
+ * set.
+ *
+ * post_delim_cnt
+ * Count of the delimiters after this MPDU. This requires the
+ * last MPDU to be held until all the EOF descriptors have been
+ * received. This may be inefficient in the future when
+ * ML-MIMO is used. Only valid when last_mpdu is set.
+ *
+ * mpdu_length_err
+ * See definition in RX attention descriptor
+ *
+ * tkip_mic_err
+ * See definition in RX attention descriptor
+ *
+ * decrypt_err
+ * See definition in RX attention descriptor
+ *
+ * fcs_err
+ * See definition in RX attention descriptor
+ */
+
+#define RX_MSDU_START_INFO0_MSDU_LENGTH_MASK 0x00003fff
+#define RX_MSDU_START_INFO0_MSDU_LENGTH_LSB 0
+#define RX_MSDU_START_INFO0_IP_OFFSET_MASK 0x000fc000
+#define RX_MSDU_START_INFO0_IP_OFFSET_LSB 14
+#define RX_MSDU_START_INFO0_RING_MASK_MASK 0x00f00000
+#define RX_MSDU_START_INFO0_RING_MASK_LSB 20
+#define RX_MSDU_START_INFO0_TCP_UDP_OFFSET_MASK 0x7f000000
+#define RX_MSDU_START_INFO0_TCP_UDP_OFFSET_LSB 24
+
+#define RX_MSDU_START_INFO1_MSDU_NUMBER_MASK 0x000000ff
+#define RX_MSDU_START_INFO1_MSDU_NUMBER_LSB 0
+#define RX_MSDU_START_INFO1_DECAP_FORMAT_MASK 0x00000300
+#define RX_MSDU_START_INFO1_DECAP_FORMAT_LSB 8
+#define RX_MSDU_START_INFO1_SA_IDX_MASK 0x07ff0000
+#define RX_MSDU_START_INFO1_SA_IDX_LSB 16
+#define RX_MSDU_START_INFO1_IPV4_PROTO BIT(10)
+#define RX_MSDU_START_INFO1_IPV6_PROTO BIT(11)
+#define RX_MSDU_START_INFO1_TCP_PROTO BIT(12)
+#define RX_MSDU_START_INFO1_UDP_PROTO BIT(13)
+#define RX_MSDU_START_INFO1_IP_FRAG BIT(14)
+#define RX_MSDU_START_INFO1_TCP_ONLY_ACK BIT(15)
+
+#define RX_MSDU_START_INFO2_DA_IDX_MASK 0x000007ff
+#define RX_MSDU_START_INFO2_DA_IDX_LSB 0
+#define RX_MSDU_START_INFO2_IP_PROTO_FIELD_MASK 0x00ff0000
+#define RX_MSDU_START_INFO2_IP_PROTO_FIELD_LSB 16
+#define RX_MSDU_START_INFO2_DA_BCAST_MCAST BIT(11)
+
+/* The decapped header (rx_hdr_status) contains the following:
+ * a) 802.11 header
+ * [padding to 4 bytes]
+ * b) HW crypto parameter
+ * - 0 bytes for no security
+ * - 4 bytes for WEP
+ * - 8 bytes for TKIP, AES
+ * [padding to 4 bytes]
+ * c) A-MSDU subframe header (14 bytes) if applicable
+ * d) LLC/SNAP (RFC1042, 8 bytes)
+ *
+ * In the case of A-MSDU only the first subframe in the sequence contains
+ * (a) and (b).
+ */
+enum rx_msdu_decap_format {
+ RX_MSDU_DECAP_RAW = 0,
+
+ /* Note: QoS frames are reported as non-QoS. The rx_hdr_status in
+ * htt_rx_desc contains the original decapped 802.11 header.
+ */
+ RX_MSDU_DECAP_NATIVE_WIFI = 1,
+
+ /* Payload contains an ethernet header (struct ethhdr). */
+ RX_MSDU_DECAP_ETHERNET2_DIX = 2,
+
+ /* Payload contains two 48-bit addresses and 2-byte length (14 bytes
+ * total), followed by an RFC1042 header (8 bytes).
+ */
+ RX_MSDU_DECAP_8023_SNAP_LLC = 3
+};
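+
+/* Worked example (illustrative): for a TKIP-protected A-MSDU the first
+ * subframe's rx_hdr_status would carry a 26-byte 802.11 QoS header padded
+ * to 28 bytes, an 8-byte crypto parameter, a 14-byte A-MSDU subframe
+ * header and an 8-byte LLC/SNAP header; later subframes carry only the
+ * last two.
+ */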
+
+struct rx_msdu_start_common {
+ __le32 info0; /* %RX_MSDU_START_INFO0_ */
+ __le32 flow_id_crc;
+ __le32 info1; /* %RX_MSDU_START_INFO1_ */
+} __packed;
+
+struct rx_msdu_start_qca99x0 {
+ __le32 info2; /* %RX_MSDU_START_INFO2_ */
+} __packed;
+
+struct rx_msdu_start_wcn3990 {
+ __le32 info2; /* %RX_MSDU_START_INFO2_ */
+ __le32 info3; /* %RX_MSDU_START_INFO3_ */
+} __packed;
+
+struct rx_msdu_start {
+ struct rx_msdu_start_common common;
+ union {
+ struct rx_msdu_start_qca99x0 qca99x0;
+ struct rx_msdu_start_wcn3990 wcn3990;
+ } __packed;
+} __packed;
+
+/*
+ * msdu_length
+ * MSDU length in bytes after decapsulation. This field is
+ * still valid for MPDU frames without A-MSDU. It still
+ * represents MSDU length after decapsulation
+ *
+ * ip_offset
+ * Indicates the IP offset in bytes from the start of the
+ * packet after decapsulation. Only valid if ipv4_proto or
+ * ipv6_proto is set.
+ *
+ * ring_mask
+ * Indicates the destination RX rings for this MSDU.
+ *
+ * tcp_udp_offset
+ * Indicates the offset in bytes to the start of TCP or UDP
+ * header from the start of the IP header after decapsulation.
+ * Only valid if tcp_proto or udp_proto is set. The value 0
+ * indicates that the offset is longer than 127 bytes.
+ *
+ * reserved_0c
+ * Reserved: HW should fill with zero. FW should ignore.
+ *
+ * flow_id_crc
+ * The flow_id_crc runs CRC32 on the following information:
+ * IPv4 option: dest_addr[31:0], src_addr [31:0], {24'b0,
+ * protocol[7:0]}.
+ * IPv6 option: dest_addr[127:0], src_addr [127:0], {24'b0,
+ * next_header[7:0]}
+ * UDP case: src_port[15:0], dest_port[15:0]
+ * TCP case: src_port[15:0], dest_port[15:0],
+ * {header_length[3:0], 6'b0, flags[5:0], window_size[15:0]},
+ * {16'b0, urgent_ptr[15:0]}, all options except 32-bit
+ * timestamp.
+ *
+ * msdu_number
+ * Indicates the MSDU number within an MPDU. This value is
+ * reset to zero at the start of each MPDU. If the number of
+ * MSDUs exceeds 255 this number will wrap using modulo 256.
+ *
+ * decap_format
+ * Indicates the format after decapsulation:
+ * 0: RAW: No decapsulation
+ * 1: Native WiFi
+ * 2: Ethernet 2 (DIX)
+ * 3: 802.3 (SNAP/LLC)
+ *
+ * ipv4_proto
+ * Set if L2 layer indicates IPv4 protocol.
+ *
+ * ipv6_proto
+ * Set if L2 layer indicates IPv6 protocol.
+ *
+ * tcp_proto
+ * Set if the ipv4_proto or ipv6_proto are set and the IP
+ * protocol indicates TCP.
+ *
+ * udp_proto
+ * Set if the ipv4_proto or ipv6_proto are set and the IP
+ * protocol indicates UDP.
+ *
+ * ip_frag
+ * Indicates that either the IP More frag bit is set or IP frag
+ * number is non-zero. If set indicates that this is a
+ * fragmented IP packet.
+ *
+ * tcp_only_ack
+ * Set if only the TCP Ack bit is set in the TCP flags and if
+ * the TCP payload is 0.
+ *
+ * sa_idx
+ * The offset in the address table which matches the MAC source
+ * address.
+ *
+ * reserved_2b
+ * Reserved: HW should fill with zero. FW should ignore.
+ */
+
+#define RX_MSDU_END_INFO0_REPORTED_MPDU_LENGTH_MASK 0x00003fff
+#define RX_MSDU_END_INFO0_REPORTED_MPDU_LENGTH_LSB 0
+#define RX_MSDU_END_INFO0_FIRST_MSDU BIT(14)
+#define RX_MSDU_END_INFO0_LAST_MSDU BIT(15)
+#define RX_MSDU_END_INFO0_PRE_DELIM_ERR BIT(30)
+#define RX_MSDU_END_INFO0_RESERVED_3B BIT(31)
+
+struct rx_msdu_end_common {
+ __le16 ip_hdr_cksum;
+ __le16 tcp_hdr_cksum;
+ u8 key_id_octet;
+ u8 classification_filter;
+ u8 wapi_pn[10];
+ __le32 info0;
+} __packed;
+
+#define RX_MSDU_END_INFO1_TCP_FLAG_MASK 0x000001ff
+#define RX_MSDU_END_INFO1_TCP_FLAG_LSB 0
+#define RX_MSDU_END_INFO1_L3_HDR_PAD_MASK 0x00001c00
+#define RX_MSDU_END_INFO1_L3_HDR_PAD_LSB 10
+#define RX_MSDU_END_INFO1_WINDOW_SIZE_MASK 0xffff0000
+#define RX_MSDU_END_INFO1_WINDOW_SIZE_LSB 16
+#define RX_MSDU_END_INFO1_IRO_ELIGIBLE BIT(9)
+
+#define RX_MSDU_END_INFO2_DA_OFFSET_MASK 0x0000003f
+#define RX_MSDU_END_INFO2_DA_OFFSET_LSB 0
+#define RX_MSDU_END_INFO2_SA_OFFSET_MASK 0x00000fc0
+#define RX_MSDU_END_INFO2_SA_OFFSET_LSB 6
+#define RX_MSDU_END_INFO2_TYPE_OFFSET_MASK 0x0003f000
+#define RX_MSDU_END_INFO2_TYPE_OFFSET_LSB 12
+
+struct rx_msdu_end_qca99x0 {
+ __le32 ipv6_crc;
+ __le32 tcp_seq_no;
+ __le32 tcp_ack_no;
+ __le32 info1;
+ __le32 info2;
+} __packed;
+
+struct rx_msdu_end_wcn3990 {
+ __le32 ipv6_crc;
+ __le32 tcp_seq_no;
+ __le32 tcp_ack_no;
+ __le32 info1;
+ __le32 info2;
+ __le32 rule_indication_0;
+ __le32 rule_indication_1;
+ __le32 rule_indication_2;
+ __le32 rule_indication_3;
+} __packed;
+
+struct rx_msdu_end {
+ struct rx_msdu_end_common common;
+ union {
+ struct rx_msdu_end_qca99x0 qca99x0;
+ struct rx_msdu_end_wcn3990 wcn3990;
+ } __packed;
+} __packed;
+
+/*
+ * ip_hdr_chksum
+ * This can include the IP header checksum or the pseudo header
+ * checksum used by TCP/UDP checksum.
+ *
+ * tcp_udp_chksum
+ * The value of the computed TCP/UDP checksum. A mode bit
+ * selects whether this checksum is the full checksum or the
+ * partial checksum which does not include the pseudo header.
+ *
+ * key_id_octet
+ * The key ID octet from the IV. Only valid when first_msdu is
+ * set.
+ *
+ * classification_filter
+ * Indicates the number of the classification filter rule.
+ *
+ * ext_wapi_pn_63_48
+ * Extension PN (packet number) which is only used by WAPI.
+ * This corresponds to WAPI PN bits [63:48] (pn6 and pn7). The
+ * WAPI PN bits [63:0] are in the pn field of the rx_mpdu_start
+ * descriptor.
+ *
+ * ext_wapi_pn_95_64
+ * Extension PN (packet number) which is only used by WAPI.
+ * This corresponds to WAPI PN bits [95:64] (pn8, pn9, pn10 and
+ * pn11).
+ *
+ * ext_wapi_pn_127_96
+ * Extension PN (packet number) which is only used by WAPI.
+ * This corresponds to WAPI PN bits [127:96] (pn12, pn13, pn14,
+ * pn15).
+ *
+ * reported_mpdu_length
+ * MPDU length before decapsulation. Only valid when
+ * first_msdu is set. This field is taken directly from the
+ * length field of the A-MPDU delimiter or the preamble length
+ * field for non-A-MPDU frames.
+ *
+ * first_msdu
+ * Indicates the first MSDU of A-MSDU. If both first_msdu and
+ * last_msdu are set in the MSDU then this is a non-aggregated
+ * MSDU frame: normal MPDU. Interior MSDUs in an A-MSDU shall
+ * have both first_msdu and last_msdu bits set to 0.
+ *
+ * last_msdu
+ * Indicates the last MSDU of the A-MSDU. MPDU end status is
+ * only valid when last_msdu is set.
+ *
+ * reserved_3a
+ * Reserved: HW should fill with zero. FW should ignore.
+ *
+ * pre_delim_err
+ * Indicates that the first delimiter had an FCS failure. Only
+ * valid when first_mpdu and first_msdu are set.
+ *
+ * reserved_3b
+ * Reserved: HW should fill with zero. FW should ignore.
+ */
+
+#define HTT_RX_PPDU_START_PREAMBLE_LEGACY 0x04
+#define HTT_RX_PPDU_START_PREAMBLE_HT 0x08
+#define HTT_RX_PPDU_START_PREAMBLE_HT_WITH_TXBF 0x09
+#define HTT_RX_PPDU_START_PREAMBLE_VHT 0x0C
+#define HTT_RX_PPDU_START_PREAMBLE_VHT_WITH_TXBF 0x0D
+
+#define RX_PPDU_START_INFO0_IS_GREENFIELD BIT(0)
+
+#define RX_PPDU_START_INFO1_L_SIG_RATE_MASK 0x0000000f
+#define RX_PPDU_START_INFO1_L_SIG_RATE_LSB 0
+#define RX_PPDU_START_INFO1_L_SIG_LENGTH_MASK 0x0001ffe0
+#define RX_PPDU_START_INFO1_L_SIG_LENGTH_LSB 5
+#define RX_PPDU_START_INFO1_L_SIG_TAIL_MASK 0x00fc0000
+#define RX_PPDU_START_INFO1_L_SIG_TAIL_LSB 18
+#define RX_PPDU_START_INFO1_PREAMBLE_TYPE_MASK 0xff000000
+#define RX_PPDU_START_INFO1_PREAMBLE_TYPE_LSB 24
+#define RX_PPDU_START_INFO1_L_SIG_RATE_SELECT BIT(4)
+#define RX_PPDU_START_INFO1_L_SIG_PARITY BIT(17)
+
+#define RX_PPDU_START_INFO2_HT_SIG_VHT_SIG_A_1_MASK 0x00ffffff
+#define RX_PPDU_START_INFO2_HT_SIG_VHT_SIG_A_1_LSB 0
+
+#define RX_PPDU_START_INFO3_HT_SIG_VHT_SIG_A_2_MASK 0x00ffffff
+#define RX_PPDU_START_INFO3_HT_SIG_VHT_SIG_A_2_LSB 0
+#define RX_PPDU_START_INFO3_TXBF_H_INFO BIT(24)
+
+#define RX_PPDU_START_INFO4_VHT_SIG_B_MASK 0x1fffffff
+#define RX_PPDU_START_INFO4_VHT_SIG_B_LSB 0
+
+#define RX_PPDU_START_INFO5_SERVICE_MASK 0x0000ffff
+#define RX_PPDU_START_INFO5_SERVICE_LSB 0
+
+/* No idea what this flag means. It seems to be always set in rate. */
+#define RX_PPDU_START_RATE_FLAG BIT(3)
+
+struct rx_ppdu_start {
+ struct {
+ u8 pri20_mhz;
+ u8 ext20_mhz;
+ u8 ext40_mhz;
+ u8 ext80_mhz;
+ } rssi_chains[4];
+ u8 rssi_comb;
+ __le16 rsvd0;
+ u8 info0; /* %RX_PPDU_START_INFO0_ */
+ __le32 info1; /* %RX_PPDU_START_INFO1_ */
+ __le32 info2; /* %RX_PPDU_START_INFO2_ */
+ __le32 info3; /* %RX_PPDU_START_INFO3_ */
+ __le32 info4; /* %RX_PPDU_START_INFO4_ */
+ __le32 info5; /* %RX_PPDU_START_INFO5_ */
+} __packed;
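+
+/* Signal sketch (assuming ATH10K_DEFAULT_NOISE_FLOOR from the core
+ * headers): RSSI values are relative to the noise floor and 0x80 marks
+ * an invalid measurement:
+ *
+ *	if (ppdu->rssi_comb != 0x80)
+ *		signal_dbm = ATH10K_DEFAULT_NOISE_FLOOR + ppdu->rssi_comb;
+ */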
+
+/*
+ * rssi_chain0_pri20
+ * RSSI of RX PPDU on chain 0 of primary 20 MHz bandwidth.
+ * Value of 0x80 indicates invalid.
+ *
+ * rssi_chain0_sec20
+ * RSSI of RX PPDU on chain 0 of secondary 20 MHz bandwidth.
+ * Value of 0x80 indicates invalid.
+ *
+ * rssi_chain0_sec40
+ * RSSI of RX PPDU on chain 0 of secondary 40 MHz bandwidth.
+ * Value of 0x80 indicates invalid.
+ *
+ * rssi_chain0_sec80
+ * RSSI of RX PPDU on chain 0 of secondary 80 MHz bandwidth.
+ * Value of 0x80 indicates invalid.
+ *
+ * rssi_chain1_pri20
+ * RSSI of RX PPDU on chain 1 of primary 20 MHz bandwidth.
+ * Value of 0x80 indicates invalid.
+ *
+ * rssi_chain1_sec20
+ * RSSI of RX PPDU on chain 1 of secondary 20 MHz bandwidth.
+ * Value of 0x80 indicates invalid.
+ *
+ * rssi_chain1_sec40
+ * RSSI of RX PPDU on chain 1 of secondary 40 MHz bandwidth.
+ * Value of 0x80 indicates invalid.
+ *
+ * rssi_chain1_sec80
+ * RSSI of RX PPDU on chain 1 of secondary 80 MHz bandwidth.
+ * Value of 0x80 indicates invalid.
+ *
+ * rssi_chain2_pri20
+ * RSSI of RX PPDU on chain 2 of primary 20 MHz bandwidth.
+ * Value of 0x80 indicates invalid.
+ *
+ * rssi_chain2_sec20
+ * RSSI of RX PPDU on chain 2 of secondary 20 MHz bandwidth.
+ * Value of 0x80 indicates invalid.
+ *
+ * rssi_chain2_sec40
+ * RSSI of RX PPDU on chain 2 of secondary 40 MHz bandwidth.
+ * Value of 0x80 indicates invalid.
+ *
+ * rssi_chain2_sec80
+ * RSSI of RX PPDU on chain 2 of secondary 80 MHz bandwidth.
+ * Value of 0x80 indicates invalid.
+ *
+ * rssi_chain3_pri20
+ * RSSI of RX PPDU on chain 3 of primary 20 MHz bandwidth.
+ * Value of 0x80 indicates invalid.
+ *
+ * rssi_chain3_sec20
+ * RSSI of RX PPDU on chain 3 of secondary 20 MHz bandwidth.
+ * Value of 0x80 indicates invalid.
+ *
+ * rssi_chain3_sec40
+ * RSSI of RX PPDU on chain 3 of secondary 40 MHz bandwidth.
+ * Value of 0x80 indicates invalid.
+ *
+ * rssi_chain3_sec80
+ * RSSI of RX PPDU on chain 3 of secondary 80 MHz bandwidth.
+ * Value of 0x80 indicates invalid.
+ *
+ * rssi_comb
+ * The combined RSSI of RX PPDU of all active chains and
+ * bandwidths. Value of 0x80 indicates invalid.
+ *
+ * reserved_4a
+ * Reserved: HW should fill with 0, FW should ignore.
+ *
+ * is_greenfield
+ * Do we really support this?
+ *
+ * reserved_4b
+ * Reserved: HW should fill with 0, FW should ignore.
+ *
+ * l_sig_rate
+ * If l_sig_rate_select is 0:
+ * 0x8: OFDM 48 Mbps
+ * 0x9: OFDM 24 Mbps
+ * 0xA: OFDM 12 Mbps
+ * 0xB: OFDM 6 Mbps
+ * 0xC: OFDM 54 Mbps
+ * 0xD: OFDM 36 Mbps
+ * 0xE: OFDM 18 Mbps
+ * 0xF: OFDM 9 Mbps
+ * If l_sig_rate_select is 1:
+ * 0x8: CCK 11 Mbps long preamble
+ * 0x9: CCK 5.5 Mbps long preamble
+ * 0xA: CCK 2 Mbps long preamble
+ * 0xB: CCK 1 Mbps long preamble
+ * 0xC: CCK 11 Mbps short preamble
+ * 0xD: CCK 5.5 Mbps short preamble
+ * 0xE: CCK 2 Mbps short preamble
+ *
+ * l_sig_rate_select
+ * Legacy signal rate select. If set then l_sig_rate indicates
+ * CCK rates. If clear then l_sig_rate indicates OFDM rates.
+ *
+ * l_sig_length
+ * Length of legacy frame in octets.
+ *
+ * l_sig_parity
+ * Odd parity over l_sig_rate and l_sig_length
+ *
+ * l_sig_tail
+ * Tail bits for Viterbi decoder
+ *
+ * preamble_type
+ * Indicates the type of preamble ahead:
+ * 0x4: Legacy (OFDM/CCK)
+ * 0x8: HT
+ * 0x9: HT with TxBF
+ * 0xC: VHT
+ * 0xD: VHT with TxBF
+ * 0x80 - 0xFF: Reserved for special baseband data types such
+ * as radar and spectral scan.
+ *
+ * ht_sig_vht_sig_a_1
+ * If preamble_type == 0x8 or 0x9
+ * HT-SIG (first 24 bits)
+ * If preamble_type == 0xC or 0xD
+ * VHT-SIG A (first 24 bits)
+ * Else
+ * Reserved
+ *
+ * reserved_6
+ * Reserved: HW should fill with 0, FW should ignore.
+ *
+ * ht_sig_vht_sig_a_2
+ * If preamble_type == 0x8 or 0x9
+ * HT-SIG (last 24 bits)
+ * If preamble_type == 0xC or 0xD
+ * VHT-SIG A (last 24 bits)
+ * Else
+ * Reserved
+ *
+ * txbf_h_info
+ * Indicates that the packet data carries H information which
+ * is used for TxBF debug.
+ *
+ * reserved_7
+ * Reserved: HW should fill with 0, FW should ignore.
+ *
+ * vht_sig_b
+ *		WiFi 1.0 and WiFi 2.0 will likely set this field to all 0s
+ *		since the BB does not plan on decoding VHT SIG-B.
+ *
+ * reserved_8
+ * Reserved: HW should fill with 0, FW should ignore.
+ *
+ * service
+ * Service field from BB for OFDM, HT and VHT packets. CCK
+ * packets will have service field of 0.
+ *
+ * reserved_9
+ * Reserved: HW should fill with 0, FW should ignore.
+ */
+
+#define RX_PPDU_END_FLAGS_PHY_ERR BIT(0)
+#define RX_PPDU_END_FLAGS_RX_LOCATION BIT(1)
+#define RX_PPDU_END_FLAGS_TXBF_H_INFO BIT(2)
+
+#define RX_PPDU_END_INFO0_RX_ANTENNA_MASK 0x00ffffff
+#define RX_PPDU_END_INFO0_RX_ANTENNA_LSB 0
+#define RX_PPDU_END_INFO0_FLAGS_TX_HT_VHT_ACK BIT(24)
+#define RX_PPDU_END_INFO0_BB_CAPTURED_CHANNEL BIT(25)
+
+#define RX_PPDU_END_INFO1_PEER_IDX_MASK 0x1ffc
+#define RX_PPDU_END_INFO1_PEER_IDX_LSB 2
+#define RX_PPDU_END_INFO1_BB_DATA BIT(0)
+#define RX_PPDU_END_INFO1_PEER_IDX_VALID BIT(1)
+#define RX_PPDU_END_INFO1_PPDU_DONE BIT(15)
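+
+/* Illustrative sketch (not part of the upstream driver): the mask/LSB
+ * pairs above are meant to be used together, e.g. to check PPDU
+ * completion and pull the peer index out of info1, assuming "ppdu"
+ * points at a struct rx_ppdu_end_qca988x:
+ *
+ *	u16 info1 = __le16_to_cpu(ppdu->info1);
+ *	bool done = info1 & RX_PPDU_END_INFO1_PPDU_DONE;
+ *	u16 peer_idx = (info1 & RX_PPDU_END_INFO1_PEER_IDX_MASK) >>
+ *		       RX_PPDU_END_INFO1_PEER_IDX_LSB;
+ */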
+
+struct rx_ppdu_end_common {
+ __le32 evm_p0;
+ __le32 evm_p1;
+ __le32 evm_p2;
+ __le32 evm_p3;
+ __le32 evm_p4;
+ __le32 evm_p5;
+ __le32 evm_p6;
+ __le32 evm_p7;
+ __le32 evm_p8;
+ __le32 evm_p9;
+ __le32 evm_p10;
+ __le32 evm_p11;
+ __le32 evm_p12;
+ __le32 evm_p13;
+ __le32 evm_p14;
+ __le32 evm_p15;
+ __le32 tsf_timestamp;
+ __le32 wb_timestamp;
+} __packed;
+
+struct rx_ppdu_end_qca988x {
+ u8 locationing_timestamp;
+ u8 phy_err_code;
+ __le16 flags; /* %RX_PPDU_END_FLAGS_ */
+ __le32 info0; /* %RX_PPDU_END_INFO0_ */
+ __le16 bb_length;
+ __le16 info1; /* %RX_PPDU_END_INFO1_ */
+} __packed;
+
+#define RX_PPDU_END_RTT_CORRELATION_VALUE_MASK 0x00ffffff
+#define RX_PPDU_END_RTT_CORRELATION_VALUE_LSB 0
+#define RX_PPDU_END_RTT_UNUSED_MASK 0x7f000000
+#define RX_PPDU_END_RTT_UNUSED_LSB 24
+#define RX_PPDU_END_RTT_NORMAL_MODE BIT(31)
+
+struct rx_ppdu_end_qca6174 {
+ u8 locationing_timestamp;
+ u8 phy_err_code;
+ __le16 flags; /* %RX_PPDU_END_FLAGS_ */
+ __le32 info0; /* %RX_PPDU_END_INFO0_ */
+ __le32 rtt; /* %RX_PPDU_END_RTT_ */
+ __le16 bb_length;
+ __le16 info1; /* %RX_PPDU_END_INFO1_ */
+} __packed;
+
+#define RX_PKT_END_INFO0_RX_SUCCESS BIT(0)
+#define RX_PKT_END_INFO0_ERR_TX_INTERRUPT_RX BIT(3)
+#define RX_PKT_END_INFO0_ERR_OFDM_POWER_DROP BIT(4)
+#define RX_PKT_END_INFO0_ERR_OFDM_RESTART BIT(5)
+#define RX_PKT_END_INFO0_ERR_CCK_POWER_DROP BIT(6)
+#define RX_PKT_END_INFO0_ERR_CCK_RESTART BIT(7)
+
+#define RX_LOCATION_INFO_RTT_CORR_VAL_MASK 0x0001ffff
+#define RX_LOCATION_INFO_RTT_CORR_VAL_LSB 0
+#define RX_LOCATION_INFO_FAC_STATUS_MASK 0x000c0000
+#define RX_LOCATION_INFO_FAC_STATUS_LSB 18
+#define RX_LOCATION_INFO_PKT_BW_MASK 0x00700000
+#define RX_LOCATION_INFO_PKT_BW_LSB 20
+#define RX_LOCATION_INFO_RTT_TX_FRAME_PHASE_MASK 0x01800000
+#define RX_LOCATION_INFO_RTT_TX_FRAME_PHASE_LSB 23
+#define RX_LOCATION_INFO_CIR_STATUS BIT(17)
+#define RX_LOCATION_INFO_RTT_MAC_PHY_PHASE BIT(25)
+#define RX_LOCATION_INFO_RTT_TX_DATA_START_X BIT(26)
+#define RX_LOCATION_INFO_HW_IFFT_MODE BIT(30)
+#define RX_LOCATION_INFO_RX_LOCATION_VALID BIT(31)
+
+struct rx_pkt_end {
+ __le32 info0; /* %RX_PKT_END_INFO0_ */
+ __le32 phy_timestamp_1;
+ __le32 phy_timestamp_2;
+} __packed;
+
+struct rx_pkt_end_wcn3990 {
+ __le32 info0; /* %RX_PKT_END_INFO0_ */
+ __le64 phy_timestamp_1;
+ __le64 phy_timestamp_2;
+} __packed;
+
+#define RX_LOCATION_INFO0_RTT_FAC_LEGACY_MASK 0x00003fff
+#define RX_LOCATION_INFO0_RTT_FAC_LEGACY_LSB 0
+#define RX_LOCATION_INFO0_RTT_FAC_VHT_MASK 0x1fff8000
+#define RX_LOCATION_INFO0_RTT_FAC_VHT_LSB 15
+#define RX_LOCATION_INFO0_RTT_STRONGEST_CHAIN_MASK 0xc0000000
+#define RX_LOCATION_INFO0_RTT_STRONGEST_CHAIN_LSB 30
+#define RX_LOCATION_INFO0_RTT_FAC_LEGACY_STATUS BIT(14)
+#define RX_LOCATION_INFO0_RTT_FAC_VHT_STATUS BIT(29)
+
+#define RX_LOCATION_INFO1_RTT_PREAMBLE_TYPE_MASK 0x0000000c
+#define RX_LOCATION_INFO1_RTT_PREAMBLE_TYPE_LSB 2
+#define RX_LOCATION_INFO1_PKT_BW_MASK 0x00000030
+#define RX_LOCATION_INFO1_PKT_BW_LSB 4
+#define RX_LOCATION_INFO1_SKIP_P_SKIP_BTCF_MASK 0x0000ff00
+#define RX_LOCATION_INFO1_SKIP_P_SKIP_BTCF_LSB 8
+#define RX_LOCATION_INFO1_RTT_MSC_RATE_MASK 0x000f0000
+#define RX_LOCATION_INFO1_RTT_MSC_RATE_LSB 16
+#define RX_LOCATION_INFO1_RTT_PBD_LEG_BW_MASK 0x00300000
+#define RX_LOCATION_INFO1_RTT_PBD_LEG_BW_LSB 20
+#define RX_LOCATION_INFO1_TIMING_BACKOFF_MASK 0x07c00000
+#define RX_LOCATION_INFO1_TIMING_BACKOFF_LSB 22
+#define RX_LOCATION_INFO1_RTT_TX_FRAME_PHASE_MASK 0x18000000
+#define RX_LOCATION_INFO1_RTT_TX_FRAME_PHASE_LSB 27
+#define RX_LOCATION_INFO1_RTT_CFR_STATUS BIT(0)
+#define RX_LOCATION_INFO1_RTT_CIR_STATUS BIT(1)
+#define RX_LOCATION_INFO1_RTT_GI_TYPE BIT(7)
+#define RX_LOCATION_INFO1_RTT_MAC_PHY_PHASE BIT(29)
+#define RX_LOCATION_INFO1_RTT_TX_DATA_START_X_PHASE BIT(30)
+#define RX_LOCATION_INFO1_RX_LOCATION_VALID BIT(31)
+
+struct rx_location_info {
+ __le32 rx_location_info0; /* %RX_LOCATION_INFO0_ */
+ __le32 rx_location_info1; /* %RX_LOCATION_INFO1_ */
+} __packed;
+
+struct rx_location_info_wcn3990 {
+ __le32 rx_location_info0; /* %RX_LOCATION_INFO0_ */
+ __le32 rx_location_info1; /* %RX_LOCATION_INFO1_ */
+ __le32 rx_location_info2; /* %RX_LOCATION_INFO2_ */
+} __packed;
+
+enum rx_phy_ppdu_end_info0 {
+ RX_PHY_PPDU_END_INFO0_ERR_RADAR = BIT(2),
+ RX_PHY_PPDU_END_INFO0_ERR_RX_ABORT = BIT(3),
+ RX_PHY_PPDU_END_INFO0_ERR_RX_NAP = BIT(4),
+ RX_PHY_PPDU_END_INFO0_ERR_OFDM_TIMING = BIT(5),
+ RX_PHY_PPDU_END_INFO0_ERR_OFDM_PARITY = BIT(6),
+ RX_PHY_PPDU_END_INFO0_ERR_OFDM_RATE = BIT(7),
+ RX_PHY_PPDU_END_INFO0_ERR_OFDM_LENGTH = BIT(8),
+ RX_PHY_PPDU_END_INFO0_ERR_OFDM_RESTART = BIT(9),
+ RX_PHY_PPDU_END_INFO0_ERR_OFDM_SERVICE = BIT(10),
+ RX_PHY_PPDU_END_INFO0_ERR_OFDM_POWER_DROP = BIT(11),
+ RX_PHY_PPDU_END_INFO0_ERR_CCK_BLOCKER = BIT(12),
+ RX_PHY_PPDU_END_INFO0_ERR_CCK_TIMING = BIT(13),
+ RX_PHY_PPDU_END_INFO0_ERR_CCK_HEADER_CRC = BIT(14),
+ RX_PHY_PPDU_END_INFO0_ERR_CCK_RATE = BIT(15),
+ RX_PHY_PPDU_END_INFO0_ERR_CCK_LENGTH = BIT(16),
+ RX_PHY_PPDU_END_INFO0_ERR_CCK_RESTART = BIT(17),
+ RX_PHY_PPDU_END_INFO0_ERR_CCK_SERVICE = BIT(18),
+ RX_PHY_PPDU_END_INFO0_ERR_CCK_POWER_DROP = BIT(19),
+ RX_PHY_PPDU_END_INFO0_ERR_HT_CRC = BIT(20),
+ RX_PHY_PPDU_END_INFO0_ERR_HT_LENGTH = BIT(21),
+ RX_PHY_PPDU_END_INFO0_ERR_HT_RATE = BIT(22),
+ RX_PHY_PPDU_END_INFO0_ERR_HT_ZLF = BIT(23),
+ RX_PHY_PPDU_END_INFO0_ERR_FALSE_RADAR_EXT = BIT(24),
+ RX_PHY_PPDU_END_INFO0_ERR_GREEN_FIELD = BIT(25),
+ RX_PHY_PPDU_END_INFO0_ERR_SPECTRAL_SCAN = BIT(26),
+ RX_PHY_PPDU_END_INFO0_ERR_RX_DYN_BW = BIT(27),
+ RX_PHY_PPDU_END_INFO0_ERR_LEG_HT_MISMATCH = BIT(28),
+ RX_PHY_PPDU_END_INFO0_ERR_VHT_CRC = BIT(29),
+ RX_PHY_PPDU_END_INFO0_ERR_VHT_SIGA = BIT(30),
+ RX_PHY_PPDU_END_INFO0_ERR_VHT_LSIG = BIT(31),
+};
+
+enum rx_phy_ppdu_end_info1 {
+ RX_PHY_PPDU_END_INFO1_ERR_VHT_NDP = BIT(0),
+ RX_PHY_PPDU_END_INFO1_ERR_VHT_NSYM = BIT(1),
+ RX_PHY_PPDU_END_INFO1_ERR_VHT_RX_EXT_SYM = BIT(2),
+ RX_PHY_PPDU_END_INFO1_ERR_VHT_RX_SKIP_ID0 = BIT(3),
+ RX_PHY_PPDU_END_INFO1_ERR_VHT_RX_SKIP_ID1_62 = BIT(4),
+ RX_PHY_PPDU_END_INFO1_ERR_VHT_RX_SKIP_ID63 = BIT(5),
+ RX_PHY_PPDU_END_INFO1_ERR_OFDM_LDPC_DECODER = BIT(6),
+ RX_PHY_PPDU_END_INFO1_ERR_DEFER_NAP = BIT(7),
+ RX_PHY_PPDU_END_INFO1_ERR_FDOMAIN_TIMEOUT = BIT(8),
+ RX_PHY_PPDU_END_INFO1_ERR_LSIG_REL_CHECK = BIT(9),
+ RX_PHY_PPDU_END_INFO1_ERR_BT_COLLISION = BIT(10),
+ RX_PHY_PPDU_END_INFO1_ERR_MU_FEEDBACK = BIT(11),
+ RX_PHY_PPDU_END_INFO1_ERR_TX_INTERRUPT_RX = BIT(12),
+ RX_PHY_PPDU_END_INFO1_ERR_RX_CBF = BIT(13),
+};
+
+struct rx_phy_ppdu_end {
+ __le32 info0; /* %RX_PHY_PPDU_END_INFO0_ */
+ __le32 info1; /* %RX_PHY_PPDU_END_INFO1_ */
+} __packed;
+
+#define RX_PPDU_END_RX_TIMING_OFFSET_MASK 0x00000fff
+#define RX_PPDU_END_RX_TIMING_OFFSET_LSB 0
+
+#define RX_PPDU_END_RX_INFO_RX_ANTENNA_MASK 0x00ffffff
+#define RX_PPDU_END_RX_INFO_RX_ANTENNA_LSB 0
+#define RX_PPDU_END_RX_INFO_TX_HT_VHT_ACK BIT(24)
+#define RX_PPDU_END_RX_INFO_RX_PKT_END_VALID BIT(25)
+#define RX_PPDU_END_RX_INFO_RX_PHY_PPDU_END_VALID BIT(26)
+#define RX_PPDU_END_RX_INFO_RX_TIMING_OFFSET_VALID BIT(27)
+#define RX_PPDU_END_RX_INFO_BB_CAPTURED_CHANNEL BIT(28)
+#define RX_PPDU_END_RX_INFO_UNSUPPORTED_MU_NC BIT(29)
+#define RX_PPDU_END_RX_INFO_OTP_TXBF_DISABLE BIT(30)
+
+struct rx_ppdu_end_qca99x0 {
+ struct rx_pkt_end rx_pkt_end;
+ __le32 rx_location_info; /* %RX_LOCATION_INFO_ */
+ struct rx_phy_ppdu_end rx_phy_ppdu_end;
+ __le32 rx_timing_offset; /* %RX_PPDU_END_RX_TIMING_OFFSET_ */
+ __le32 rx_info; /* %RX_PPDU_END_RX_INFO_ */
+ __le16 bb_length;
+ __le16 info1; /* %RX_PPDU_END_INFO1_ */
+} __packed;
+
+struct rx_ppdu_end_qca9984 {
+ struct rx_pkt_end rx_pkt_end;
+ struct rx_location_info rx_location_info;
+ struct rx_phy_ppdu_end rx_phy_ppdu_end;
+ __le32 rx_timing_offset; /* %RX_PPDU_END_RX_TIMING_OFFSET_ */
+ __le32 rx_info; /* %RX_PPDU_END_RX_INFO_ */
+ __le16 bb_length;
+ __le16 info1; /* %RX_PPDU_END_INFO1_ */
+} __packed;
+
+struct rx_ppdu_end_wcn3990 {
+ struct rx_pkt_end_wcn3990 rx_pkt_end;
+ struct rx_location_info_wcn3990 rx_location_info;
+ struct rx_phy_ppdu_end rx_phy_ppdu_end;
+ __le32 rx_timing_offset;
+ __le32 reserved_info_0;
+ __le32 reserved_info_1;
+ __le32 rx_antenna_info;
+ __le32 rx_coex_info;
+ __le32 rx_mpdu_cnt_info;
+ __le64 phy_timestamp_tx;
+ __le32 rx_bb_length;
+} __packed;
+
+struct rx_ppdu_end {
+ struct rx_ppdu_end_common common;
+ union {
+ struct rx_ppdu_end_qca988x qca988x;
+ struct rx_ppdu_end_qca6174 qca6174;
+ struct rx_ppdu_end_qca99x0 qca99x0;
+ struct rx_ppdu_end_qca9984 qca9984;
+ struct rx_ppdu_end_wcn3990 wcn3990;
+ } __packed;
+} __packed;
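+
+/* Note (illustrative, not from the upstream sources): which union member
+ * is valid depends on the chip the descriptor came from; the common part
+ * (EVM words and timestamps) is always accessible via ->common. A minimal
+ * accessor sketch for QCA988X:
+ *
+ *	static u16 rx_ppdu_end_flags_qca988x(struct rx_ppdu_end *end)
+ *	{
+ *		return __le16_to_cpu(end->qca988x.flags);
+ *	}
+ */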
+
+/*
+ * evm_p0 .. evm_p15
+ *		EVM for pilots 0 through 15. Each word contains the EVM
+ *		for streams 0, 1, 2 and 3.
+ *
+ * tsf_timestamp
+ * Receive TSF timestamp sampled on the rising edge of
+ * rx_clear. For PHY errors this may be the current TSF when
+ * phy_error is asserted if the rx_clear does not assert before
+ * the end of the PHY error.
+ *
+ * wb_timestamp
+ *		WLAN/BT timestamp with 1 usec resolution which, unlike the
+ *		TSF, is not updated based on received beacons. The same
+ *		rules used for capturing tsf_timestamp apply to
+ *		wb_timestamp.
+ *
+ * locationing_timestamp
+ * Timestamp used for locationing. This timestamp is used to
+ * indicate fractions of usec. For example if the MAC clock is
+ * running at 80 MHz, the timestamp will increment every 12.5
+ *		nsec. The value starts at 0, increments to 79, then wraps
+ *		back to 0 and repeats. This information is valid for
+ * every PPDU. This information can be used in conjunction
+ * with wb_timestamp to capture large delta times.
+ *
+ * phy_err_code
+ *		See section 1.10.8.1.2 for the list of PHY error codes.
+ *
+ * phy_err
+ * Indicates a PHY error was detected for this PPDU.
+ *
+ * rx_location
+ * Indicates that location information was requested.
+ *
+ * txbf_h_info
+ * Indicates that the packet data carries H information which
+ * is used for TxBF debug.
+ *
+ * reserved_18
+ * Reserved: HW should fill with 0, FW should ignore.
+ *
+ * rx_antenna
+ * Receive antenna value
+ *
+ * tx_ht_vht_ack
+ * Indicates that a HT or VHT Ack/BA frame was transmitted in
+ * response to this receive packet.
+ *
+ * bb_captured_channel
+ * Indicates that the BB has captured a channel dump. FW can
+ * then read the channel dump memory. This may indicate that
+ *		the channel was captured either by the PCU setting the
+ *		capture_channel bit in the BB descriptor or by the FW
+ *		setting the capture_channel mode bit.
+ *
+ * reserved_19
+ * Reserved: HW should fill with 0, FW should ignore.
+ *
+ * bb_length
+ * Indicates the number of bytes of baseband information for
+ *		PPDUs where the BB descriptor preamble type is 0x80 to 0xFF,
+ * which indicates that this is not a normal PPDU but rather
+ * contains baseband debug information.
+ *
+ * reserved_20
+ * Reserved: HW should fill with 0, FW should ignore.
+ *
+ * ppdu_done
+ * PPDU end status is only valid when ppdu_done bit is set.
+ *		Every time HW sets this bit in memory, FW/SW must clear it
+ *		in memory. FW will initialize all the ppdu_done dwords
+ *		to 0.
+ */
+
+#define FW_RX_DESC_INFO0_DISCARD BIT(0)
+#define FW_RX_DESC_INFO0_FORWARD BIT(1)
+#define FW_RX_DESC_INFO0_INSPECT BIT(5)
+#define FW_RX_DESC_INFO0_EXT_MASK 0xC0
+#define FW_RX_DESC_INFO0_EXT_LSB 6
+
+struct fw_rx_desc_base {
+ u8 info0;
+} __packed;
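+
+/* Illustrative sketch (not part of the driver): decoding info0 with the
+ * masks above, assuming "desc" points at a struct fw_rx_desc_base:
+ *
+ *	bool discard = desc->info0 & FW_RX_DESC_INFO0_DISCARD;
+ *	u8 ext = (desc->info0 & FW_RX_DESC_INFO0_EXT_MASK) >>
+ *		 FW_RX_DESC_INFO0_EXT_LSB;
+ */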
+
+#endif /* _RX_DESC_H_ */
diff --git a/drivers/net/wireless/ath/ath10k/sdio.c b/drivers/net/wireless/ath/ath10k/sdio.c
new file mode 100644
index 000000000..28d86da65
--- /dev/null
+++ b/drivers/net/wireless/ath/ath10k/sdio.c
@@ -0,0 +1,2147 @@
+/*
+ * Copyright (c) 2004-2011 Atheros Communications Inc.
+ * Copyright (c) 2011-2012,2017 Qualcomm Atheros, Inc.
+ * Copyright (c) 2016-2017 Erik Stromdahl <erik.stromdahl@gmail.com>
+ *
+ * Permission to use, copy, modify, and/or distribute this software for any
+ * purpose with or without fee is hereby granted, provided that the above
+ * copyright notice and this permission notice appear in all copies.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
+ * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
+ * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+ * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+ * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
+ * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+ */
+
+#include <linux/module.h>
+#include <linux/mmc/card.h>
+#include <linux/mmc/mmc.h>
+#include <linux/mmc/host.h>
+#include <linux/mmc/sdio_func.h>
+#include <linux/mmc/sdio_ids.h>
+#include <linux/mmc/sdio.h>
+#include <linux/mmc/sd.h>
+#include <linux/bitfield.h>
+#include "core.h"
+#include "bmi.h"
+#include "debug.h"
+#include "hif.h"
+#include "htc.h"
+#include "mac.h"
+#include "targaddrs.h"
+#include "trace.h"
+#include "sdio.h"
+
+/* inlined helper functions */
+
+static inline int ath10k_sdio_calc_txrx_padded_len(struct ath10k_sdio *ar_sdio,
+ size_t len)
+{
+ return __ALIGN_MASK((len), ar_sdio->mbox_info.block_mask);
+}
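+
+/* __ALIGN_MASK(len, mask) rounds len up to the next multiple of
+ * (mask + 1). As a worked example (assuming a 256 byte block size, so a
+ * block mask of 0xff): a 300 byte message is padded to
+ * (300 + 0xff) & ~0xff = 512 bytes.
+ */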
+
+static inline enum ath10k_htc_ep_id pipe_id_to_eid(u8 pipe_id)
+{
+ return (enum ath10k_htc_ep_id)pipe_id;
+}
+
+static inline void ath10k_sdio_mbox_free_rx_pkt(struct ath10k_sdio_rx_data *pkt)
+{
+ dev_kfree_skb(pkt->skb);
+ pkt->skb = NULL;
+ pkt->alloc_len = 0;
+ pkt->act_len = 0;
+ pkt->trailer_only = false;
+}
+
+static inline int ath10k_sdio_mbox_alloc_rx_pkt(struct ath10k_sdio_rx_data *pkt,
+ size_t act_len, size_t full_len,
+ bool part_of_bundle,
+ bool last_in_bundle)
+{
+ pkt->skb = dev_alloc_skb(full_len);
+ if (!pkt->skb)
+ return -ENOMEM;
+
+ pkt->act_len = act_len;
+ pkt->alloc_len = full_len;
+ pkt->part_of_bundle = part_of_bundle;
+ pkt->last_in_bundle = last_in_bundle;
+ pkt->trailer_only = false;
+
+ return 0;
+}
+
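+/* A message is "trailer only" when the HTC payload consists solely of a
+ * trailer (e.g. lookahead/credit reports) and carries no data for the
+ * endpoint; as seen in ath10k_sdio_mbox_rx_process_packets() below, such
+ * packets are consumed by HTC and freed instead of being passed to the
+ * endpoint RX completion handler.
+ */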
+static inline bool is_trailer_only_msg(struct ath10k_sdio_rx_data *pkt)
+{
+ bool trailer_only = false;
+ struct ath10k_htc_hdr *htc_hdr =
+ (struct ath10k_htc_hdr *)pkt->skb->data;
+ u16 len = __le16_to_cpu(htc_hdr->len);
+
+ if (len == htc_hdr->trailer_len)
+ trailer_only = true;
+
+ return trailer_only;
+}
+
+/* sdio/mmc functions */
+
+static inline void ath10k_sdio_set_cmd52_arg(u32 *arg, u8 write, u8 raw,
+ unsigned int address,
+ unsigned char val)
+{
+ *arg = FIELD_PREP(BIT(31), write) |
+ FIELD_PREP(BIT(27), raw) |
+ FIELD_PREP(BIT(26), 1) |
+ FIELD_PREP(GENMASK(25, 9), address) |
+ FIELD_PREP(BIT(8), 1) |
+ FIELD_PREP(GENMASK(7, 0), val);
+}
+
+static int ath10k_sdio_func0_cmd52_wr_byte(struct mmc_card *card,
+ unsigned int address,
+ unsigned char byte)
+{
+ struct mmc_command io_cmd;
+
+ memset(&io_cmd, 0, sizeof(io_cmd));
+ ath10k_sdio_set_cmd52_arg(&io_cmd.arg, 1, 0, address, byte);
+ io_cmd.opcode = SD_IO_RW_DIRECT;
+ io_cmd.flags = MMC_RSP_R5 | MMC_CMD_AC;
+
+ return mmc_wait_for_cmd(card->host, &io_cmd, 0);
+}
+
+static int ath10k_sdio_func0_cmd52_rd_byte(struct mmc_card *card,
+ unsigned int address,
+ unsigned char *byte)
+{
+ struct mmc_command io_cmd;
+ int ret;
+
+ memset(&io_cmd, 0, sizeof(io_cmd));
+ ath10k_sdio_set_cmd52_arg(&io_cmd.arg, 0, 0, address, 0);
+ io_cmd.opcode = SD_IO_RW_DIRECT;
+ io_cmd.flags = MMC_RSP_R5 | MMC_CMD_AC;
+
+ ret = mmc_wait_for_cmd(card->host, &io_cmd, 0);
+ if (!ret)
+ *byte = io_cmd.resp[0];
+
+ return ret;
+}
+
+static int ath10k_sdio_config(struct ath10k *ar)
+{
+ struct ath10k_sdio *ar_sdio = ath10k_sdio_priv(ar);
+ struct sdio_func *func = ar_sdio->func;
+ unsigned char byte, asyncintdelay = 2;
+ int ret;
+
+ ath10k_dbg(ar, ATH10K_DBG_BOOT, "sdio configuration\n");
+
+ sdio_claim_host(func);
+
+ byte = 0;
+ ret = ath10k_sdio_func0_cmd52_rd_byte(func->card,
+ SDIO_CCCR_DRIVE_STRENGTH,
+ &byte);
+
+ byte &= ~ATH10K_SDIO_DRIVE_DTSX_MASK;
+ byte |= FIELD_PREP(ATH10K_SDIO_DRIVE_DTSX_MASK,
+ ATH10K_SDIO_DRIVE_DTSX_TYPE_D);
+
+ ret = ath10k_sdio_func0_cmd52_wr_byte(func->card,
+ SDIO_CCCR_DRIVE_STRENGTH,
+ byte);
+
+ byte = 0;
+ ret = ath10k_sdio_func0_cmd52_rd_byte(
+ func->card,
+ CCCR_SDIO_DRIVER_STRENGTH_ENABLE_ADDR,
+ &byte);
+
+ byte |= (CCCR_SDIO_DRIVER_STRENGTH_ENABLE_A |
+ CCCR_SDIO_DRIVER_STRENGTH_ENABLE_C |
+ CCCR_SDIO_DRIVER_STRENGTH_ENABLE_D);
+
+ ret = ath10k_sdio_func0_cmd52_wr_byte(func->card,
+ CCCR_SDIO_DRIVER_STRENGTH_ENABLE_ADDR,
+ byte);
+ if (ret) {
+ ath10k_warn(ar, "failed to enable driver strength: %d\n", ret);
+ goto out;
+ }
+
+ byte = 0;
+ ret = ath10k_sdio_func0_cmd52_rd_byte(func->card,
+ CCCR_SDIO_IRQ_MODE_REG_SDIO3,
+ &byte);
+
+ byte |= SDIO_IRQ_MODE_ASYNC_4BIT_IRQ_SDIO3;
+
+ ret = ath10k_sdio_func0_cmd52_wr_byte(func->card,
+ CCCR_SDIO_IRQ_MODE_REG_SDIO3,
+ byte);
+ if (ret) {
+ ath10k_warn(ar, "failed to enable 4-bit async irq mode: %d\n",
+ ret);
+ goto out;
+ }
+
+ byte = 0;
+ ret = ath10k_sdio_func0_cmd52_rd_byte(func->card,
+ CCCR_SDIO_ASYNC_INT_DELAY_ADDRESS,
+ &byte);
+
+ byte &= ~CCCR_SDIO_ASYNC_INT_DELAY_MASK;
+ byte |= FIELD_PREP(CCCR_SDIO_ASYNC_INT_DELAY_MASK, asyncintdelay);
+
+ ret = ath10k_sdio_func0_cmd52_wr_byte(func->card,
+ CCCR_SDIO_ASYNC_INT_DELAY_ADDRESS,
+ byte);
+
+ /* give us some time to enable, in ms */
+ func->enable_timeout = 100;
+
+ ret = sdio_set_block_size(func, ar_sdio->mbox_info.block_size);
+ if (ret) {
+ ath10k_warn(ar, "failed to set sdio block size to %d: %d\n",
+ ar_sdio->mbox_info.block_size, ret);
+ goto out;
+ }
+
+out:
+ sdio_release_host(func);
+ return ret;
+}
+
+static int ath10k_sdio_write32(struct ath10k *ar, u32 addr, u32 val)
+{
+ struct ath10k_sdio *ar_sdio = ath10k_sdio_priv(ar);
+ struct sdio_func *func = ar_sdio->func;
+ int ret;
+
+ sdio_claim_host(func);
+
+ sdio_writel(func, val, addr, &ret);
+ if (ret) {
+ ath10k_warn(ar, "failed to write 0x%x to address 0x%x: %d\n",
+ val, addr, ret);
+ goto out;
+ }
+
+ ath10k_dbg(ar, ATH10K_DBG_SDIO, "sdio write32 addr 0x%x val 0x%x\n",
+ addr, val);
+
+out:
+ sdio_release_host(func);
+
+ return ret;
+}
+
+static int ath10k_sdio_writesb32(struct ath10k *ar, u32 addr, u32 val)
+{
+ struct ath10k_sdio *ar_sdio = ath10k_sdio_priv(ar);
+ struct sdio_func *func = ar_sdio->func;
+ __le32 *buf;
+ int ret;
+
+ buf = kzalloc(sizeof(*buf), GFP_KERNEL);
+ if (!buf)
+ return -ENOMEM;
+
+ *buf = cpu_to_le32(val);
+
+ sdio_claim_host(func);
+
+ ret = sdio_writesb(func, addr, buf, sizeof(*buf));
+ if (ret) {
+ ath10k_warn(ar, "failed to write value 0x%x to fixed sb address 0x%x: %d\n",
+ val, addr, ret);
+ goto out;
+ }
+
+ ath10k_dbg(ar, ATH10K_DBG_SDIO, "sdio writesb32 addr 0x%x val 0x%x\n",
+ addr, val);
+
+out:
+ sdio_release_host(func);
+
+ kfree(buf);
+
+ return ret;
+}
+
+static int ath10k_sdio_read32(struct ath10k *ar, u32 addr, u32 *val)
+{
+ struct ath10k_sdio *ar_sdio = ath10k_sdio_priv(ar);
+ struct sdio_func *func = ar_sdio->func;
+ int ret;
+
+ sdio_claim_host(func);
+ *val = sdio_readl(func, addr, &ret);
+ if (ret) {
+ ath10k_warn(ar, "failed to read from address 0x%x: %d\n",
+ addr, ret);
+ goto out;
+ }
+
+ ath10k_dbg(ar, ATH10K_DBG_SDIO, "sdio read32 addr 0x%x val 0x%x\n",
+ addr, *val);
+
+out:
+ sdio_release_host(func);
+
+ return ret;
+}
+
+static int ath10k_sdio_read(struct ath10k *ar, u32 addr, void *buf, size_t len)
+{
+ struct ath10k_sdio *ar_sdio = ath10k_sdio_priv(ar);
+ struct sdio_func *func = ar_sdio->func;
+ int ret;
+
+ sdio_claim_host(func);
+
+ ret = sdio_memcpy_fromio(func, buf, addr, len);
+ if (ret) {
+ ath10k_warn(ar, "failed to read from address 0x%x: %d\n",
+ addr, ret);
+ goto out;
+ }
+
+ ath10k_dbg(ar, ATH10K_DBG_SDIO, "sdio read addr 0x%x buf 0x%p len %zu\n",
+ addr, buf, len);
+ ath10k_dbg_dump(ar, ATH10K_DBG_SDIO_DUMP, NULL, "sdio read ", buf, len);
+
+out:
+ sdio_release_host(func);
+
+ return ret;
+}
+
+static int ath10k_sdio_write(struct ath10k *ar, u32 addr, const void *buf, size_t len)
+{
+ struct ath10k_sdio *ar_sdio = ath10k_sdio_priv(ar);
+ struct sdio_func *func = ar_sdio->func;
+ int ret;
+
+ sdio_claim_host(func);
+
+ /* For some reason toio() doesn't have const for the buffer, need
+	 * an ugly hack to work around that.
+ */
+ ret = sdio_memcpy_toio(func, addr, (void *)buf, len);
+ if (ret) {
+ ath10k_warn(ar, "failed to write to address 0x%x: %d\n",
+ addr, ret);
+ goto out;
+ }
+
+ ath10k_dbg(ar, ATH10K_DBG_SDIO, "sdio write addr 0x%x buf 0x%p len %zu\n",
+ addr, buf, len);
+ ath10k_dbg_dump(ar, ATH10K_DBG_SDIO_DUMP, NULL, "sdio write ", buf, len);
+
+out:
+ sdio_release_host(func);
+
+ return ret;
+}
+
+static int ath10k_sdio_readsb(struct ath10k *ar, u32 addr, void *buf, size_t len)
+{
+ struct ath10k_sdio *ar_sdio = ath10k_sdio_priv(ar);
+ struct sdio_func *func = ar_sdio->func;
+ int ret;
+
+ sdio_claim_host(func);
+
+ len = round_down(len, ar_sdio->mbox_info.block_size);
+
+ ret = sdio_readsb(func, buf, addr, len);
+ if (ret) {
+ ath10k_warn(ar, "failed to read from fixed (sb) address 0x%x: %d\n",
+ addr, ret);
+ goto out;
+ }
+
+ ath10k_dbg(ar, ATH10K_DBG_SDIO, "sdio readsb addr 0x%x buf 0x%p len %zu\n",
+ addr, buf, len);
+ ath10k_dbg_dump(ar, ATH10K_DBG_SDIO_DUMP, NULL, "sdio readsb ", buf, len);
+
+out:
+ sdio_release_host(func);
+
+ return ret;
+}
+
+/* HIF mbox functions */
+
+static int ath10k_sdio_mbox_rx_process_packet(struct ath10k *ar,
+ struct ath10k_sdio_rx_data *pkt,
+ u32 *lookaheads,
+ int *n_lookaheads)
+{
+ struct ath10k_htc *htc = &ar->htc;
+ struct sk_buff *skb = pkt->skb;
+ struct ath10k_htc_hdr *htc_hdr = (struct ath10k_htc_hdr *)skb->data;
+ bool trailer_present = htc_hdr->flags & ATH10K_HTC_FLAG_TRAILER_PRESENT;
+ enum ath10k_htc_ep_id eid;
+ u8 *trailer;
+ int ret;
+
+ if (trailer_present) {
+ trailer = skb->data + skb->len - htc_hdr->trailer_len;
+
+ eid = pipe_id_to_eid(htc_hdr->eid);
+
+ ret = ath10k_htc_process_trailer(htc,
+ trailer,
+ htc_hdr->trailer_len,
+ eid,
+ lookaheads,
+ n_lookaheads);
+ if (ret)
+ return ret;
+
+ if (is_trailer_only_msg(pkt))
+ pkt->trailer_only = true;
+
+ skb_trim(skb, skb->len - htc_hdr->trailer_len);
+ }
+
+ skb_pull(skb, sizeof(*htc_hdr));
+
+ return 0;
+}
+
+static int ath10k_sdio_mbox_rx_process_packets(struct ath10k *ar,
+ u32 lookaheads[],
+ int *n_lookahead)
+{
+ struct ath10k_sdio *ar_sdio = ath10k_sdio_priv(ar);
+ struct ath10k_htc *htc = &ar->htc;
+ struct ath10k_sdio_rx_data *pkt;
+ struct ath10k_htc_ep *ep;
+ enum ath10k_htc_ep_id id;
+ int ret, i, *n_lookahead_local;
+ u32 *lookaheads_local;
+ int lookahead_idx = 0;
+
+ for (i = 0; i < ar_sdio->n_rx_pkts; i++) {
+ lookaheads_local = lookaheads;
+ n_lookahead_local = n_lookahead;
+
+ id = ((struct ath10k_htc_hdr *)
+ &lookaheads[lookahead_idx++])->eid;
+
+ if (id >= ATH10K_HTC_EP_COUNT) {
+ ath10k_warn(ar, "invalid endpoint in look-ahead: %d\n",
+ id);
+ ret = -ENOMEM;
+ goto out;
+ }
+
+ ep = &htc->endpoint[id];
+
+ if (ep->service_id == 0) {
+ ath10k_warn(ar, "ep %d is not connected\n", id);
+ ret = -ENOMEM;
+ goto out;
+ }
+
+ pkt = &ar_sdio->rx_pkts[i];
+
+ if (pkt->part_of_bundle && !pkt->last_in_bundle) {
+			/* Only read lookaheads from RX trailers
+ * for the last packet in a bundle.
+ */
+ lookahead_idx--;
+ lookaheads_local = NULL;
+ n_lookahead_local = NULL;
+ }
+
+ ret = ath10k_sdio_mbox_rx_process_packet(ar,
+ pkt,
+ lookaheads_local,
+ n_lookahead_local);
+ if (ret)
+ goto out;
+
+ if (!pkt->trailer_only)
+ ep->ep_ops.ep_rx_complete(ar_sdio->ar, pkt->skb);
+ else
+ kfree_skb(pkt->skb);
+
+ /* The RX complete handler now owns the skb...*/
+ pkt->skb = NULL;
+ pkt->alloc_len = 0;
+ }
+
+ ret = 0;
+
+out:
+	/* Free all packets that were not passed on to the RX completion
+ * handler...
+ */
+ for (; i < ar_sdio->n_rx_pkts; i++)
+ ath10k_sdio_mbox_free_rx_pkt(&ar_sdio->rx_pkts[i]);
+
+ return ret;
+}
+
+static int ath10k_sdio_mbox_alloc_pkt_bundle(struct ath10k *ar,
+ struct ath10k_sdio_rx_data *rx_pkts,
+ struct ath10k_htc_hdr *htc_hdr,
+ size_t full_len, size_t act_len,
+ size_t *bndl_cnt)
+{
+ int ret, i;
+
+ *bndl_cnt = FIELD_GET(ATH10K_HTC_FLAG_BUNDLE_MASK, htc_hdr->flags);
+
+ if (*bndl_cnt > HTC_HOST_MAX_MSG_PER_RX_BUNDLE) {
+ ath10k_warn(ar,
+ "HTC bundle length %u exceeds maximum %u\n",
+ le16_to_cpu(htc_hdr->len),
+ HTC_HOST_MAX_MSG_PER_RX_BUNDLE);
+ return -ENOMEM;
+ }
+
+	/* Allocate bndl_cnt extra skbs for the bundle. The packet
+	 * containing the ATH10K_HTC_FLAG_BUNDLE_MASK flag is not
+	 * included in bndl_cnt. The skb for that packet will be
+	 * allocated separately.
+ */
+ for (i = 0; i < *bndl_cnt; i++) {
+ ret = ath10k_sdio_mbox_alloc_rx_pkt(&rx_pkts[i],
+ act_len,
+ full_len,
+ true,
+ false);
+ if (ret)
+ return ret;
+ }
+
+ return 0;
+}
+
+static int ath10k_sdio_mbox_rx_alloc(struct ath10k *ar,
+ u32 lookaheads[], int n_lookaheads)
+{
+ struct ath10k_sdio *ar_sdio = ath10k_sdio_priv(ar);
+ struct ath10k_htc_hdr *htc_hdr;
+ size_t full_len, act_len;
+ bool last_in_bundle;
+ int ret, i;
+
+ if (n_lookaheads > ATH10K_SDIO_MAX_RX_MSGS) {
+ ath10k_warn(ar,
+ "the total number of pkgs to be fetched (%u) exceeds maximum %u\n",
+ n_lookaheads,
+ ATH10K_SDIO_MAX_RX_MSGS);
+ ret = -ENOMEM;
+ goto err;
+ }
+
+ for (i = 0; i < n_lookaheads; i++) {
+ htc_hdr = (struct ath10k_htc_hdr *)&lookaheads[i];
+ last_in_bundle = false;
+
+ if (le16_to_cpu(htc_hdr->len) >
+ ATH10K_HTC_MBOX_MAX_PAYLOAD_LENGTH) {
+ ath10k_warn(ar,
+ "payload length %d exceeds max htc length: %zu\n",
+ le16_to_cpu(htc_hdr->len),
+ ATH10K_HTC_MBOX_MAX_PAYLOAD_LENGTH);
+ ret = -ENOMEM;
+
+ queue_work(ar->workqueue, &ar->restart_work);
+ ath10k_warn(ar, "exceeds length, start recovery\n");
+
+ goto err;
+ }
+
+ act_len = le16_to_cpu(htc_hdr->len) + sizeof(*htc_hdr);
+ full_len = ath10k_sdio_calc_txrx_padded_len(ar_sdio, act_len);
+
+ if (full_len > ATH10K_SDIO_MAX_BUFFER_SIZE) {
+ ath10k_warn(ar,
+ "rx buffer requested with invalid htc_hdr length (%d, 0x%x): %d\n",
+ htc_hdr->eid, htc_hdr->flags,
+ le16_to_cpu(htc_hdr->len));
+ ret = -EINVAL;
+ goto err;
+ }
+
+ if (htc_hdr->flags & ATH10K_HTC_FLAG_BUNDLE_MASK) {
+ /* HTC header indicates that every packet to follow
+ * has the same padded length so that it can be
+ * optimally fetched as a full bundle.
+ */
+ size_t bndl_cnt;
+
+ ret = ath10k_sdio_mbox_alloc_pkt_bundle(ar,
+ &ar_sdio->rx_pkts[i],
+ htc_hdr,
+ full_len,
+ act_len,
+ &bndl_cnt);
+
+ n_lookaheads += bndl_cnt;
+ i += bndl_cnt;
+			/* Next buffer will be the last in the bundle */
+ last_in_bundle = true;
+ }
+
+ /* Allocate skb for packet. If the packet had the
+ * ATH10K_HTC_FLAG_BUNDLE_MASK flag set, all bundled
+ * packet skb's have been allocated in the previous step.
+ */
+ if (htc_hdr->flags & ATH10K_HTC_FLAGS_RECV_1MORE_BLOCK)
+ full_len += ATH10K_HIF_MBOX_BLOCK_SIZE;
+
+ ret = ath10k_sdio_mbox_alloc_rx_pkt(&ar_sdio->rx_pkts[i],
+ act_len,
+ full_len,
+ last_in_bundle,
+ last_in_bundle);
+ if (ret) {
+ ath10k_warn(ar, "alloc_rx_pkt error %d\n", ret);
+ goto err;
+ }
+ }
+
+ ar_sdio->n_rx_pkts = i;
+
+ return 0;
+
+err:
+ for (i = 0; i < ATH10K_SDIO_MAX_RX_MSGS; i++) {
+ if (!ar_sdio->rx_pkts[i].alloc_len)
+ break;
+ ath10k_sdio_mbox_free_rx_pkt(&ar_sdio->rx_pkts[i]);
+ }
+
+ return ret;
+}
+
+static int ath10k_sdio_mbox_rx_packet(struct ath10k *ar,
+ struct ath10k_sdio_rx_data *pkt)
+{
+ struct ath10k_sdio *ar_sdio = ath10k_sdio_priv(ar);
+ struct sk_buff *skb = pkt->skb;
+ struct ath10k_htc_hdr *htc_hdr;
+ int ret;
+
+ ret = ath10k_sdio_readsb(ar, ar_sdio->mbox_info.htc_addr,
+ skb->data, pkt->alloc_len);
+ if (ret)
+ goto out;
+
+ /* Update actual length. The original length may be incorrect,
+ * as the FW will bundle multiple packets as long as their sizes
+ * fit within the same aligned length (pkt->alloc_len).
+ */
+ htc_hdr = (struct ath10k_htc_hdr *)skb->data;
+ pkt->act_len = le16_to_cpu(htc_hdr->len) + sizeof(*htc_hdr);
+ if (pkt->act_len > pkt->alloc_len) {
+ ath10k_warn(ar, "rx packet too large (%zu > %zu)\n",
+ pkt->act_len, pkt->alloc_len);
+ ret = -EMSGSIZE;
+ goto out;
+ }
+
+ skb_put(skb, pkt->act_len);
+
+out:
+ pkt->status = ret;
+
+ return ret;
+}
+
+static int ath10k_sdio_mbox_rx_fetch(struct ath10k *ar)
+{
+ struct ath10k_sdio *ar_sdio = ath10k_sdio_priv(ar);
+ int ret, i;
+
+ for (i = 0; i < ar_sdio->n_rx_pkts; i++) {
+ ret = ath10k_sdio_mbox_rx_packet(ar,
+ &ar_sdio->rx_pkts[i]);
+ if (ret)
+ goto err;
+ }
+
+ return 0;
+
+err:
+	/* Free all packets that were not successfully fetched. */
+ for (; i < ar_sdio->n_rx_pkts; i++)
+ ath10k_sdio_mbox_free_rx_pkt(&ar_sdio->rx_pkts[i]);
+
+ return ret;
+}
+
+/* This is the timeout for mailbox processing done in the sdio irq
+ * handler. The timeout is deliberately set quite high since SDIO dump logs
+ * over the serial port can add substantial overhead to the processing
+ * (if enabled).
+ */
+#define SDIO_MBOX_PROCESSING_TIMEOUT_HZ (20 * HZ)
+
+static int ath10k_sdio_mbox_rxmsg_pending_handler(struct ath10k *ar,
+ u32 msg_lookahead, bool *done)
+{
+ struct ath10k_sdio *ar_sdio = ath10k_sdio_priv(ar);
+ u32 lookaheads[ATH10K_SDIO_MAX_RX_MSGS];
+ int n_lookaheads = 1;
+ unsigned long timeout;
+ int ret;
+
+ *done = true;
+
+ /* Copy the lookahead obtained from the HTC register table into our
+ * temp array as a start value.
+ */
+ lookaheads[0] = msg_lookahead;
+
+ timeout = jiffies + SDIO_MBOX_PROCESSING_TIMEOUT_HZ;
+ do {
+ /* Try to allocate as many HTC RX packets indicated by
+ * n_lookaheads.
+ */
+ ret = ath10k_sdio_mbox_rx_alloc(ar, lookaheads,
+ n_lookaheads);
+ if (ret)
+ break;
+
+ if (ar_sdio->n_rx_pkts >= 2)
+ /* A recv bundle was detected, force IRQ status
+ * re-check again.
+ */
+ *done = false;
+
+ ret = ath10k_sdio_mbox_rx_fetch(ar);
+
+ /* Process fetched packets. This will potentially update
+ * n_lookaheads depending on if the packets contain lookahead
+ * reports.
+ */
+ n_lookaheads = 0;
+ ret = ath10k_sdio_mbox_rx_process_packets(ar,
+ lookaheads,
+ &n_lookaheads);
+
+ if (!n_lookaheads || ret)
+ break;
+
+ /* For SYNCH processing, if we get here, we are running
+ * through the loop again due to updated lookaheads. Set
+		 * the flag to re-check the IRQ status registers again
+		 * before leaving IRQ processing; this can net better
+		 * performance in high throughput situations.
+ */
+ *done = false;
+ } while (time_before(jiffies, timeout));
+
+ if (ret && (ret != -ECANCELED))
+ ath10k_warn(ar, "failed to get pending recv messages: %d\n",
+ ret);
+
+ return ret;
+}
+
+static int ath10k_sdio_mbox_proc_dbg_intr(struct ath10k *ar)
+{
+ u32 val;
+ int ret;
+
+ /* TODO: Add firmware crash handling */
+ ath10k_warn(ar, "firmware crashed\n");
+
+ /* read counter to clear the interrupt, the debug error interrupt is
+ * counter 0.
+ */
+ ret = ath10k_sdio_read32(ar, MBOX_COUNT_DEC_ADDRESS, &val);
+ if (ret)
+ ath10k_warn(ar, "failed to clear debug interrupt: %d\n", ret);
+
+ return ret;
+}
+
+static int ath10k_sdio_mbox_proc_counter_intr(struct ath10k *ar)
+{
+ struct ath10k_sdio *ar_sdio = ath10k_sdio_priv(ar);
+ struct ath10k_sdio_irq_data *irq_data = &ar_sdio->irq_data;
+ u8 counter_int_status;
+ int ret;
+
+ mutex_lock(&irq_data->mtx);
+ counter_int_status = irq_data->irq_proc_reg->counter_int_status &
+ irq_data->irq_en_reg->cntr_int_status_en;
+
+ /* NOTE: other modules like GMBOX may use the counter interrupt for
+	 * credit flow control on other counters; we only need to check for
+ * the debug assertion counter interrupt.
+ */
+ if (counter_int_status & ATH10K_SDIO_TARGET_DEBUG_INTR_MASK)
+ ret = ath10k_sdio_mbox_proc_dbg_intr(ar);
+ else
+ ret = 0;
+
+ mutex_unlock(&irq_data->mtx);
+
+ return ret;
+}
+
+static int ath10k_sdio_mbox_proc_err_intr(struct ath10k *ar)
+{
+ struct ath10k_sdio *ar_sdio = ath10k_sdio_priv(ar);
+ struct ath10k_sdio_irq_data *irq_data = &ar_sdio->irq_data;
+ u8 error_int_status;
+ int ret;
+
+ ath10k_dbg(ar, ATH10K_DBG_SDIO, "sdio error interrupt\n");
+
+ error_int_status = irq_data->irq_proc_reg->error_int_status & 0x0F;
+ if (!error_int_status) {
+ ath10k_warn(ar, "invalid error interrupt status: 0x%x\n",
+ error_int_status);
+ return -EIO;
+ }
+
+ ath10k_dbg(ar, ATH10K_DBG_SDIO,
+ "sdio error_int_status 0x%x\n", error_int_status);
+
+ if (FIELD_GET(MBOX_ERROR_INT_STATUS_WAKEUP_MASK,
+ error_int_status))
+ ath10k_dbg(ar, ATH10K_DBG_SDIO, "sdio interrupt error wakeup\n");
+
+ if (FIELD_GET(MBOX_ERROR_INT_STATUS_RX_UNDERFLOW_MASK,
+ error_int_status))
+ ath10k_warn(ar, "rx underflow interrupt error\n");
+
+ if (FIELD_GET(MBOX_ERROR_INT_STATUS_TX_OVERFLOW_MASK,
+ error_int_status))
+ ath10k_warn(ar, "tx overflow interrupt error\n");
+
+ /* Clear the interrupt */
+ irq_data->irq_proc_reg->error_int_status &= ~error_int_status;
+
+ /* set W1C value to clear the interrupt, this hits the register first */
+ ret = ath10k_sdio_writesb32(ar, MBOX_ERROR_INT_STATUS_ADDRESS,
+ error_int_status);
+ if (ret) {
+ ath10k_warn(ar, "unable to write to error int status address: %d\n",
+ ret);
+ return ret;
+ }
+
+ return 0;
+}
+
+static int ath10k_sdio_mbox_proc_cpu_intr(struct ath10k *ar)
+{
+ struct ath10k_sdio *ar_sdio = ath10k_sdio_priv(ar);
+ struct ath10k_sdio_irq_data *irq_data = &ar_sdio->irq_data;
+ u8 cpu_int_status;
+ int ret;
+
+ mutex_lock(&irq_data->mtx);
+ cpu_int_status = irq_data->irq_proc_reg->cpu_int_status &
+ irq_data->irq_en_reg->cpu_int_status_en;
+ if (!cpu_int_status) {
+ ath10k_warn(ar, "CPU interrupt status is zero\n");
+ ret = -EIO;
+ goto out;
+ }
+
+ /* Clear the interrupt */
+ irq_data->irq_proc_reg->cpu_int_status &= ~cpu_int_status;
+
+ /* Set up the register transfer buffer to hit the register 4 times,
+ * this is done to make the access 4-byte aligned to mitigate issues
+ * with host bus interconnects that restrict bus transfer lengths to
+ * be a multiple of 4-bytes.
+ *
+ * Set W1C value to clear the interrupt, this hits the register first.
+ */
+ ret = ath10k_sdio_writesb32(ar, MBOX_CPU_INT_STATUS_ADDRESS,
+ cpu_int_status);
+ if (ret) {
+ ath10k_warn(ar, "unable to write to cpu interrupt status address: %d\n",
+ ret);
+ goto out;
+ }
+
+out:
+ mutex_unlock(&irq_data->mtx);
+ return ret;
+}
+
+static int ath10k_sdio_mbox_read_int_status(struct ath10k *ar,
+ u8 *host_int_status,
+ u32 *lookahead)
+{
+ struct ath10k_sdio *ar_sdio = ath10k_sdio_priv(ar);
+ struct ath10k_sdio_irq_data *irq_data = &ar_sdio->irq_data;
+ struct ath10k_sdio_irq_proc_regs *irq_proc_reg = irq_data->irq_proc_reg;
+ struct ath10k_sdio_irq_enable_regs *irq_en_reg = irq_data->irq_en_reg;
+ u8 htc_mbox = FIELD_PREP(ATH10K_HTC_MAILBOX_MASK, 1);
+ int ret;
+
+ mutex_lock(&irq_data->mtx);
+
+ *lookahead = 0;
+ *host_int_status = 0;
+
+	/* int_status_en is supposed to be non-zero, otherwise interrupts
+	 * shouldn't be enabled. There is, however, a short time frame
+	 * during initialization, between the irq registration and the
+	 * int_status_en init, where this can happen.
+ * We silently ignore this condition.
+ */
+ if (!irq_en_reg->int_status_en) {
+ ret = 0;
+ goto out;
+ }
+
+ /* Read the first sizeof(struct ath10k_irq_proc_registers)
+ * bytes of the HTC register table. This
+ * will yield us the value of different int status
+ * registers and the lookahead registers.
+ */
+ ret = ath10k_sdio_read(ar, MBOX_HOST_INT_STATUS_ADDRESS,
+ irq_proc_reg, sizeof(*irq_proc_reg));
+ if (ret)
+ goto out;
+
+ /* Update only those registers that are enabled */
+ *host_int_status = irq_proc_reg->host_int_status &
+ irq_en_reg->int_status_en;
+
+ /* Look at mbox status */
+ if (!(*host_int_status & htc_mbox)) {
+ *lookahead = 0;
+ ret = 0;
+ goto out;
+ }
+
+	/* Mask out the pending mbox value; we use the lookahead as
+	 * the real flag for mbox processing.
+ */
+ *host_int_status &= ~htc_mbox;
+ if (irq_proc_reg->rx_lookahead_valid & htc_mbox) {
+ *lookahead = le32_to_cpu(
+ irq_proc_reg->rx_lookahead[ATH10K_HTC_MAILBOX]);
+ if (!*lookahead)
+ ath10k_warn(ar, "sdio mbox lookahead is zero\n");
+ }
+
+out:
+ mutex_unlock(&irq_data->mtx);
+ return ret;
+}
+
+static int ath10k_sdio_mbox_proc_pending_irqs(struct ath10k *ar,
+ bool *done)
+{
+ u8 host_int_status;
+ u32 lookahead;
+ int ret;
+
+ /* NOTE: HIF implementation guarantees that the context of this
+ * call allows us to perform SYNCHRONOUS I/O, that is we can block,
+ * sleep or call any API that can block or switch thread/task
+ * contexts. This is a fully schedulable context.
+ */
+
+ ret = ath10k_sdio_mbox_read_int_status(ar,
+ &host_int_status,
+ &lookahead);
+ if (ret) {
+ *done = true;
+ goto out;
+ }
+
+ if (!host_int_status && !lookahead) {
+ ret = 0;
+ *done = true;
+ goto out;
+ }
+
+ if (lookahead) {
+ ath10k_dbg(ar, ATH10K_DBG_SDIO,
+ "sdio pending mailbox msg lookahead 0x%08x\n",
+ lookahead);
+
+ ret = ath10k_sdio_mbox_rxmsg_pending_handler(ar,
+ lookahead,
+ done);
+ if (ret)
+ goto out;
+ }
+
+ /* now, handle the rest of the interrupts */
+ ath10k_dbg(ar, ATH10K_DBG_SDIO,
+ "sdio host_int_status 0x%x\n", host_int_status);
+
+ if (FIELD_GET(MBOX_HOST_INT_STATUS_CPU_MASK, host_int_status)) {
+ /* CPU Interrupt */
+ ret = ath10k_sdio_mbox_proc_cpu_intr(ar);
+ if (ret)
+ goto out;
+ }
+
+ if (FIELD_GET(MBOX_HOST_INT_STATUS_ERROR_MASK, host_int_status)) {
+ /* Error Interrupt */
+ ret = ath10k_sdio_mbox_proc_err_intr(ar);
+ if (ret)
+ goto out;
+ }
+
+ if (FIELD_GET(MBOX_HOST_INT_STATUS_COUNTER_MASK, host_int_status))
+ /* Counter Interrupt */
+ ret = ath10k_sdio_mbox_proc_counter_intr(ar);
+
+ ret = 0;
+
+out:
+	/* An optimization to bypass reading the IRQ status registers
+	 * unnecessarily, which can re-wake the target: if upper layers
+	 * determine that we are in a low-throughput mode, we can rely on
+	 * taking another interrupt rather than re-checking the status
+	 * registers, which can re-wake the target.
+	 *
+	 * NOTE: host interfaces that detect pending mbox messages at the
+	 * HIF level cannot use this optimization due to possible side
+	 * effects; SPI, for instance, requires the host to drain all
+	 * messages from the mailbox before exiting the ISR routine.
+ */
+
+ ath10k_dbg(ar, ATH10K_DBG_SDIO,
+ "sdio pending irqs done %d status %d",
+ *done, ret);
+
+ return ret;
+}
+
+static void ath10k_sdio_set_mbox_info(struct ath10k *ar)
+{
+ struct ath10k_sdio *ar_sdio = ath10k_sdio_priv(ar);
+ struct ath10k_mbox_info *mbox_info = &ar_sdio->mbox_info;
+ u16 device = ar_sdio->func->device, dev_id_base, dev_id_chiprev;
+
+ mbox_info->htc_addr = ATH10K_HIF_MBOX_BASE_ADDR;
+ mbox_info->block_size = ATH10K_HIF_MBOX_BLOCK_SIZE;
+ mbox_info->block_mask = ATH10K_HIF_MBOX_BLOCK_SIZE - 1;
+ mbox_info->gmbox_addr = ATH10K_HIF_GMBOX_BASE_ADDR;
+ mbox_info->gmbox_sz = ATH10K_HIF_GMBOX_WIDTH;
+
+ mbox_info->ext_info[0].htc_ext_addr = ATH10K_HIF_MBOX0_EXT_BASE_ADDR;
+
+ dev_id_base = FIELD_GET(QCA_MANUFACTURER_ID_BASE, device);
+ dev_id_chiprev = FIELD_GET(QCA_MANUFACTURER_ID_REV_MASK, device);
+ switch (dev_id_base) {
+ case QCA_MANUFACTURER_ID_AR6005_BASE:
+ if (dev_id_chiprev < 4)
+ mbox_info->ext_info[0].htc_ext_sz =
+ ATH10K_HIF_MBOX0_EXT_WIDTH;
+ else
+ /* from QCA6174 2.0(0x504), the width has been extended
+ * to 56K
+ */
+ mbox_info->ext_info[0].htc_ext_sz =
+ ATH10K_HIF_MBOX0_EXT_WIDTH_ROME_2_0;
+ break;
+ case QCA_MANUFACTURER_ID_QCA9377_BASE:
+ mbox_info->ext_info[0].htc_ext_sz =
+ ATH10K_HIF_MBOX0_EXT_WIDTH_ROME_2_0;
+ break;
+ default:
+ mbox_info->ext_info[0].htc_ext_sz =
+ ATH10K_HIF_MBOX0_EXT_WIDTH;
+ }
+
+ mbox_info->ext_info[1].htc_ext_addr =
+ mbox_info->ext_info[0].htc_ext_addr +
+ mbox_info->ext_info[0].htc_ext_sz +
+ ATH10K_HIF_MBOX_DUMMY_SPACE_SIZE;
+ mbox_info->ext_info[1].htc_ext_sz = ATH10K_HIF_MBOX1_EXT_WIDTH;
+}
+
+/* BMI functions */
+
+static int ath10k_sdio_bmi_credits(struct ath10k *ar)
+{
+ u32 addr, cmd_credits;
+ unsigned long timeout;
+ int ret;
+
+ /* Read the counter register to get the command credits */
+ addr = MBOX_COUNT_DEC_ADDRESS + ATH10K_HIF_MBOX_NUM_MAX * 4;
+ timeout = jiffies + BMI_COMMUNICATION_TIMEOUT_HZ;
+ cmd_credits = 0;
+
+ while (time_before(jiffies, timeout) && !cmd_credits) {
+ /* Hit the credit counter with a 4-byte access, the first byte
+ * read will hit the counter and cause a decrement, while the
+		 * remaining 3 bytes have no effect. The rationale behind this
+ * is to make all HIF accesses 4-byte aligned.
+ */
+ ret = ath10k_sdio_read32(ar, addr, &cmd_credits);
+ if (ret) {
+ ath10k_warn(ar,
+ "unable to decrement the command credit count register: %d\n",
+ ret);
+ return ret;
+ }
+
+ /* The counter is only 8 bits.
+		 * Ignore anything in the upper 3 bytes.
+ */
+ cmd_credits &= 0xFF;
+ }
+
+ if (!cmd_credits) {
+ ath10k_warn(ar, "bmi communication timeout\n");
+ return -ETIMEDOUT;
+ }
+
+ return 0;
+}
+
+static int ath10k_sdio_bmi_get_rx_lookahead(struct ath10k *ar)
+{
+ unsigned long timeout;
+ u32 rx_word;
+ int ret;
+
+ timeout = jiffies + BMI_COMMUNICATION_TIMEOUT_HZ;
+ rx_word = 0;
+
+ while ((time_before(jiffies, timeout)) && !rx_word) {
+ ret = ath10k_sdio_read32(ar,
+ MBOX_HOST_INT_STATUS_ADDRESS,
+ &rx_word);
+ if (ret) {
+ ath10k_warn(ar, "unable to read RX_LOOKAHEAD_VALID: %d\n", ret);
+ return ret;
+ }
+
+ /* all we really want is one bit */
+ rx_word &= 1;
+ }
+
+ if (!rx_word) {
+ ath10k_warn(ar, "bmi_recv_buf FIFO empty\n");
+ return -EINVAL;
+ }
+
+ return ret;
+}
+
+static int ath10k_sdio_bmi_exchange_msg(struct ath10k *ar,
+ void *req, u32 req_len,
+ void *resp, u32 *resp_len)
+{
+ struct ath10k_sdio *ar_sdio = ath10k_sdio_priv(ar);
+ u32 addr;
+ int ret;
+
+ if (req) {
+ ret = ath10k_sdio_bmi_credits(ar);
+ if (ret)
+ return ret;
+
+ addr = ar_sdio->mbox_info.htc_addr;
+
+ memcpy(ar_sdio->bmi_buf, req, req_len);
+ ret = ath10k_sdio_write(ar, addr, ar_sdio->bmi_buf, req_len);
+ if (ret) {
+ ath10k_warn(ar,
+ "unable to send the bmi data to the device: %d\n",
+ ret);
+ return ret;
+ }
+ }
+
+ if (!resp || !resp_len)
+ /* No response expected */
+ return 0;
+
+ /* During normal bootup, small reads may be required.
+ * Rather than issue an HIF Read and then wait as the Target
+ * adds successive bytes to the FIFO, we wait here until
+ * we know that response data is available.
+ *
+ * This allows us to cleanly timeout on an unexpected
+ * Target failure rather than risk problems at the HIF level.
+ * In particular, this avoids SDIO timeouts and possibly garbage
+ * data on some host controllers. And on an interconnect
+ * such as Compact Flash (as well as some SDIO masters) which
+ * does not provide any indication on data timeout, it avoids
+ * a potential hang or garbage response.
+ *
+ * Synchronization is more difficult for reads larger than the
+ * size of the MBOX FIFO (128B), because the Target is unable
+ * to push the 129th byte of data until AFTER the Host posts an
+ * HIF Read and removes some FIFO data. So for large reads the
+ * Host proceeds to post an HIF Read BEFORE all the data is
+ * actually available to read. Fortunately, large BMI reads do
+ * not occur in practice -- they're supported for debug/development.
+ *
+ * So Host/Target BMI synchronization is divided into these cases:
+ * CASE 1: length < 4
+ * Should not happen
+ *
+ * CASE 2: 4 <= length <= 128
+ * Wait for first 4 bytes to be in FIFO
+ * If CONSERVATIVE_BMI_READ is enabled, also wait for
+ * a BMI command credit, which indicates that the ENTIRE
+	 *        response is available in the FIFO
+ *
+ * CASE 3: length > 128
+ * Wait for the first 4 bytes to be in FIFO
+ *
+ * For most uses, a small timeout should be sufficient and we will
+ * usually see a response quickly; but there may be some unusual
+	 * (debug) cases of BMI_EXECUTE where we want a larger timeout.
+ * For now, we use an unbounded busy loop while waiting for
+ * BMI_EXECUTE.
+ *
+ * If BMI_EXECUTE ever needs to support longer-latency execution,
+ * especially in production, this code needs to be enhanced to sleep
+ * and yield. Also note that BMI_COMMUNICATION_TIMEOUT is currently
+ * a function of Host processor speed.
+ */
+ ret = ath10k_sdio_bmi_get_rx_lookahead(ar);
+ if (ret)
+ return ret;
+
+ /* We always read from the start of the mbox address */
+ addr = ar_sdio->mbox_info.htc_addr;
+ ret = ath10k_sdio_read(ar, addr, ar_sdio->bmi_buf, *resp_len);
+ if (ret) {
+ ath10k_warn(ar,
+ "unable to read the bmi data from the device: %d\n",
+ ret);
+ return ret;
+ }
+
+ memcpy(resp, ar_sdio->bmi_buf, *resp_len);
+
+ return 0;
+}
+
+/* sdio async handling functions */
+
+static struct ath10k_sdio_bus_request
+*ath10k_sdio_alloc_busreq(struct ath10k *ar)
+{
+ struct ath10k_sdio *ar_sdio = ath10k_sdio_priv(ar);
+ struct ath10k_sdio_bus_request *bus_req;
+
+ spin_lock_bh(&ar_sdio->lock);
+
+ if (list_empty(&ar_sdio->bus_req_freeq)) {
+ bus_req = NULL;
+ goto out;
+ }
+
+ bus_req = list_first_entry(&ar_sdio->bus_req_freeq,
+ struct ath10k_sdio_bus_request, list);
+ list_del(&bus_req->list);
+
+out:
+ spin_unlock_bh(&ar_sdio->lock);
+ return bus_req;
+}
+
+static void ath10k_sdio_free_bus_req(struct ath10k *ar,
+ struct ath10k_sdio_bus_request *bus_req)
+{
+ struct ath10k_sdio *ar_sdio = ath10k_sdio_priv(ar);
+
+ memset(bus_req, 0, sizeof(*bus_req));
+
+ spin_lock_bh(&ar_sdio->lock);
+ list_add_tail(&bus_req->list, &ar_sdio->bus_req_freeq);
+ spin_unlock_bh(&ar_sdio->lock);
+}
+
+static void __ath10k_sdio_write_async(struct ath10k *ar,
+ struct ath10k_sdio_bus_request *req)
+{
+ struct ath10k_htc_ep *ep;
+ struct sk_buff *skb;
+ int ret;
+
+ skb = req->skb;
+ ret = ath10k_sdio_write(ar, req->address, skb->data, skb->len);
+ if (ret)
+ ath10k_warn(ar, "failed to write skb to 0x%x asynchronously: %d",
+ req->address, ret);
+
+ if (req->htc_msg) {
+ ep = &ar->htc.endpoint[req->eid];
+ ath10k_htc_notify_tx_completion(ep, skb);
+ } else if (req->comp) {
+ complete(req->comp);
+ }
+
+ ath10k_sdio_free_bus_req(ar, req);
+}
+
+static void ath10k_sdio_write_async_work(struct work_struct *work)
+{
+ struct ath10k_sdio *ar_sdio = container_of(work, struct ath10k_sdio,
+ wr_async_work);
+ struct ath10k *ar = ar_sdio->ar;
+ struct ath10k_sdio_bus_request *req, *tmp_req;
+
+ spin_lock_bh(&ar_sdio->wr_async_lock);
+
+ list_for_each_entry_safe(req, tmp_req, &ar_sdio->wr_asyncq, list) {
+ list_del(&req->list);
+ spin_unlock_bh(&ar_sdio->wr_async_lock);
+ __ath10k_sdio_write_async(ar, req);
+ spin_lock_bh(&ar_sdio->wr_async_lock);
+ }
+
+ spin_unlock_bh(&ar_sdio->wr_async_lock);
+}
+
+static int ath10k_sdio_prep_async_req(struct ath10k *ar, u32 addr,
+ struct sk_buff *skb,
+ struct completion *comp,
+ bool htc_msg, enum ath10k_htc_ep_id eid)
+{
+ struct ath10k_sdio *ar_sdio = ath10k_sdio_priv(ar);
+ struct ath10k_sdio_bus_request *bus_req;
+
+ /* Allocate a bus request for the message and queue it on the
+ * SDIO workqueue.
+ */
+ bus_req = ath10k_sdio_alloc_busreq(ar);
+ if (!bus_req) {
+ ath10k_warn(ar,
+ "unable to allocate bus request for async request\n");
+ return -ENOMEM;
+ }
+
+ bus_req->skb = skb;
+ bus_req->eid = eid;
+ bus_req->address = addr;
+ bus_req->htc_msg = htc_msg;
+ bus_req->comp = comp;
+
+ spin_lock_bh(&ar_sdio->wr_async_lock);
+ list_add_tail(&bus_req->list, &ar_sdio->wr_asyncq);
+ spin_unlock_bh(&ar_sdio->wr_async_lock);
+
+ return 0;
+}
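+
+/* Illustrative usage sketch (not from the upstream sources): a caller
+ * that needs a synchronous write can pair the request with a completion
+ * and kick the async worker, much like ath10k_sdio_irq_disable() below:
+ *
+ *	struct completion comp;
+ *
+ *	init_completion(&comp);
+ *	ret = ath10k_sdio_prep_async_req(ar, addr, skb, &comp, false, 0);
+ *	if (!ret) {
+ *		queue_work(ar_sdio->workqueue, &ar_sdio->wr_async_work);
+ *		wait_for_completion_timeout(&comp, HZ);
+ *	}
+ */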
+
+/* IRQ handler */
+
+static void ath10k_sdio_irq_handler(struct sdio_func *func)
+{
+ struct ath10k_sdio *ar_sdio = sdio_get_drvdata(func);
+ struct ath10k *ar = ar_sdio->ar;
+ unsigned long timeout;
+ bool done = false;
+ int ret;
+
+ /* Release the host during interrupts so we can pick it back up when
+ * we process commands.
+ */
+ sdio_release_host(ar_sdio->func);
+
+ timeout = jiffies + ATH10K_SDIO_HIF_COMMUNICATION_TIMEOUT_HZ;
+ do {
+ ret = ath10k_sdio_mbox_proc_pending_irqs(ar, &done);
+ if (ret)
+ break;
+ } while (time_before(jiffies, timeout) && !done);
+
+ ath10k_mac_tx_push_pending(ar);
+
+ sdio_claim_host(ar_sdio->func);
+
+ if (ret && ret != -ECANCELED)
+ ath10k_warn(ar, "failed to process pending SDIO interrupts: %d\n",
+ ret);
+}
+
+/* sdio HIF functions */
+
+static int ath10k_sdio_hif_disable_intrs(struct ath10k *ar)
+{
+ struct ath10k_sdio *ar_sdio = ath10k_sdio_priv(ar);
+ struct ath10k_sdio_irq_data *irq_data = &ar_sdio->irq_data;
+ struct ath10k_sdio_irq_enable_regs *regs = irq_data->irq_en_reg;
+ int ret;
+
+ mutex_lock(&irq_data->mtx);
+
+ memset(regs, 0, sizeof(*regs));
+ ret = ath10k_sdio_write(ar, MBOX_INT_STATUS_ENABLE_ADDRESS,
+ &regs->int_status_en, sizeof(*regs));
+ if (ret)
+ ath10k_warn(ar, "unable to disable sdio interrupts: %d\n", ret);
+
+ mutex_unlock(&irq_data->mtx);
+
+ return ret;
+}
+
+static int ath10k_sdio_hif_power_up(struct ath10k *ar)
+{
+ struct ath10k_sdio *ar_sdio = ath10k_sdio_priv(ar);
+ struct sdio_func *func = ar_sdio->func;
+ int ret;
+
+ if (!ar_sdio->is_disabled)
+ return 0;
+
+ ath10k_dbg(ar, ATH10K_DBG_BOOT, "sdio power on\n");
+
+ sdio_claim_host(func);
+
+ ret = sdio_enable_func(func);
+ if (ret) {
+ ath10k_warn(ar, "unable to enable sdio function: %d)\n", ret);
+ sdio_release_host(func);
+ return ret;
+ }
+
+ sdio_release_host(func);
+
+ /* Wait for hardware to initialise. It should take a lot less than
+ * 20 ms but let's be conservative here.
+ */
+ msleep(20);
+
+ ar_sdio->is_disabled = false;
+
+ ret = ath10k_sdio_hif_disable_intrs(ar);
+ if (ret)
+ return ret;
+
+ return 0;
+}
+
+static void ath10k_sdio_hif_power_down(struct ath10k *ar)
+{
+ struct ath10k_sdio *ar_sdio = ath10k_sdio_priv(ar);
+ int ret;
+
+ if (ar_sdio->is_disabled)
+ return;
+
+ ath10k_dbg(ar, ATH10K_DBG_BOOT, "sdio power off\n");
+
+ /* Disable the card */
+ sdio_claim_host(ar_sdio->func);
+ ret = sdio_disable_func(ar_sdio->func);
+ sdio_release_host(ar_sdio->func);
+
+ if (ret)
+ ath10k_warn(ar, "unable to disable sdio function: %d\n", ret);
+
+ ar_sdio->is_disabled = true;
+}
+
+static int ath10k_sdio_hif_tx_sg(struct ath10k *ar, u8 pipe_id,
+ struct ath10k_hif_sg_item *items, int n_items)
+{
+ struct ath10k_sdio *ar_sdio = ath10k_sdio_priv(ar);
+ enum ath10k_htc_ep_id eid;
+ struct sk_buff *skb;
+ int ret, i;
+
+ eid = pipe_id_to_eid(pipe_id);
+
+ for (i = 0; i < n_items; i++) {
+ size_t padded_len;
+ u32 address;
+
+ skb = items[i].transfer_context;
+ padded_len = ath10k_sdio_calc_txrx_padded_len(ar_sdio,
+ skb->len);
+ skb_trim(skb, padded_len);
+
+ /* Write TX data to the end of the mbox address space */
+ address = ar_sdio->mbox_addr[eid] + ar_sdio->mbox_size[eid] -
+ skb->len;
+ ret = ath10k_sdio_prep_async_req(ar, address, skb,
+ NULL, true, eid);
+ if (ret)
+ return ret;
+ }
+
+ queue_work(ar_sdio->workqueue, &ar_sdio->wr_async_work);
+
+ return 0;
+}
+
+static int ath10k_sdio_hif_enable_intrs(struct ath10k *ar)
+{
+ struct ath10k_sdio *ar_sdio = ath10k_sdio_priv(ar);
+ struct ath10k_sdio_irq_data *irq_data = &ar_sdio->irq_data;
+ struct ath10k_sdio_irq_enable_regs *regs = irq_data->irq_en_reg;
+ int ret;
+
+ mutex_lock(&irq_data->mtx);
+
+ /* Enable all but CPU interrupts */
+ regs->int_status_en = FIELD_PREP(MBOX_INT_STATUS_ENABLE_ERROR_MASK, 1) |
+ FIELD_PREP(MBOX_INT_STATUS_ENABLE_CPU_MASK, 1) |
+ FIELD_PREP(MBOX_INT_STATUS_ENABLE_COUNTER_MASK, 1);
+
+ /* NOTE: There are some cases where HIF can do detection of
+ * pending mbox messages which is disabled now.
+ */
+ regs->int_status_en |=
+ FIELD_PREP(MBOX_INT_STATUS_ENABLE_MBOX_DATA_MASK, 1);
+
+ /* Set up the CPU Interrupt status Register */
+ regs->cpu_int_status_en = 0;
+
+ /* Set up the Error Interrupt status Register */
+ regs->err_int_status_en =
+ FIELD_PREP(MBOX_ERROR_STATUS_ENABLE_RX_UNDERFLOW_MASK, 1) |
+ FIELD_PREP(MBOX_ERROR_STATUS_ENABLE_TX_OVERFLOW_MASK, 1);
+
+ /* Enable Counter interrupt status register to get fatal errors for
+ * debugging.
+ */
+ regs->cntr_int_status_en =
+ FIELD_PREP(MBOX_COUNTER_INT_STATUS_ENABLE_BIT_MASK,
+ ATH10K_SDIO_TARGET_DEBUG_INTR_MASK);
+
+ ret = ath10k_sdio_write(ar, MBOX_INT_STATUS_ENABLE_ADDRESS,
+ &regs->int_status_en, sizeof(*regs));
+ if (ret)
+ ath10k_warn(ar,
+ "failed to update mbox interrupt status register : %d\n",
+ ret);
+
+ mutex_unlock(&irq_data->mtx);
+ return ret;
+}
+
+static int ath10k_sdio_hif_set_mbox_sleep(struct ath10k *ar, bool enable_sleep)
+{
+ u32 val;
+ int ret;
+
+ ret = ath10k_sdio_read32(ar, ATH10K_FIFO_TIMEOUT_AND_CHIP_CONTROL, &val);
+ if (ret) {
+ ath10k_warn(ar, "failed to read fifo/chip control register: %d\n",
+ ret);
+ return ret;
+ }
+
+ if (enable_sleep)
+ val &= ATH10K_FIFO_TIMEOUT_AND_CHIP_CONTROL_DISABLE_SLEEP_OFF;
+ else
+ val |= ATH10K_FIFO_TIMEOUT_AND_CHIP_CONTROL_DISABLE_SLEEP_ON;
+
+ ret = ath10k_sdio_write32(ar, ATH10K_FIFO_TIMEOUT_AND_CHIP_CONTROL, val);
+ if (ret) {
+ ath10k_warn(ar, "failed to write to FIFO_TIMEOUT_AND_CHIP_CONTROL: %d",
+ ret);
+ return ret;
+ }
+
+ return 0;
+}
+
+/* HIF diagnostics */
+
+static int ath10k_sdio_hif_diag_read(struct ath10k *ar, u32 address, void *buf,
+ size_t buf_len)
+{
+ int ret;
+ void *mem;
+
+ mem = kzalloc(buf_len, GFP_KERNEL);
+ if (!mem)
+ return -ENOMEM;
+
+ /* set window register to start read cycle */
+ ret = ath10k_sdio_write32(ar, MBOX_WINDOW_READ_ADDR_ADDRESS, address);
+ if (ret) {
+ ath10k_warn(ar, "failed to set mbox window read address: %d", ret);
+ goto out;
+ }
+
+ /* read the data */
+ ret = ath10k_sdio_read(ar, MBOX_WINDOW_DATA_ADDRESS, mem, buf_len);
+ if (ret) {
+ ath10k_warn(ar, "failed to read from mbox window data address: %d\n",
+ ret);
+ goto out;
+ }
+
+ memcpy(buf, mem, buf_len);
+
+out:
+ kfree(mem);
+
+ return ret;
+}
+
+static int ath10k_sdio_hif_diag_read32(struct ath10k *ar, u32 address,
+ u32 *value)
+{
+ __le32 *val;
+ int ret;
+
+ val = kzalloc(sizeof(*val), GFP_KERNEL);
+ if (!val)
+ return -ENOMEM;
+
+ ret = ath10k_sdio_hif_diag_read(ar, address, val, sizeof(*val));
+ if (ret)
+ goto out;
+
+ *value = __le32_to_cpu(*val);
+
+out:
+ kfree(val);
+
+ return ret;
+}
+
+static int ath10k_sdio_hif_diag_write_mem(struct ath10k *ar, u32 address,
+ const void *data, int nbytes)
+{
+ int ret;
+
+ /* set write data */
+ ret = ath10k_sdio_write(ar, MBOX_WINDOW_DATA_ADDRESS, data, nbytes);
+ if (ret) {
+ ath10k_warn(ar,
+ "failed to write 0x%p to mbox window data address: %d\n",
+ data, ret);
+ return ret;
+ }
+
+ /* set window register, which starts the write cycle */
+ ret = ath10k_sdio_write32(ar, MBOX_WINDOW_WRITE_ADDR_ADDRESS, address);
+ if (ret) {
+ ath10k_warn(ar, "failed to set mbox window write address: %d", ret);
+ return ret;
+ }
+
+ return 0;
+}
+
+/* HIF start/stop */
+
+static int ath10k_sdio_hif_start(struct ath10k *ar)
+{
+ struct ath10k_sdio *ar_sdio = ath10k_sdio_priv(ar);
+ u32 addr, val;
+ int ret;
+
+ /* Sleep 20 ms before HIF interrupts are disabled.
+	 * This will give the target plenty of time to process the BMI done
+ * request before interrupts are disabled.
+ */
+ msleep(20);
+ ret = ath10k_sdio_hif_disable_intrs(ar);
+ if (ret)
+ return ret;
+
+ /* eid 0 always uses the lower part of the extended mailbox address
+ * space (ext_info[0].htc_ext_addr).
+ */
+ ar_sdio->mbox_addr[0] = ar_sdio->mbox_info.ext_info[0].htc_ext_addr;
+ ar_sdio->mbox_size[0] = ar_sdio->mbox_info.ext_info[0].htc_ext_sz;
+
+ sdio_claim_host(ar_sdio->func);
+
+ /* Register the isr */
+ ret = sdio_claim_irq(ar_sdio->func, ath10k_sdio_irq_handler);
+ if (ret) {
+ ath10k_warn(ar, "failed to claim sdio interrupt: %d\n", ret);
+ sdio_release_host(ar_sdio->func);
+ return ret;
+ }
+
+ sdio_release_host(ar_sdio->func);
+
+ ret = ath10k_sdio_hif_enable_intrs(ar);
+ if (ret)
+ ath10k_warn(ar, "failed to enable sdio interrupts: %d\n", ret);
+
+ addr = host_interest_item_address(HI_ITEM(hi_acs_flags));
+
+ ret = ath10k_sdio_hif_diag_read32(ar, addr, &val);
+ if (ret) {
+ ath10k_warn(ar, "unable to read hi_acs_flags address: %d\n", ret);
+ return ret;
+ }
+
+ if (val & HI_ACS_FLAGS_SDIO_SWAP_MAILBOX_FW_ACK) {
+ ath10k_dbg(ar, ATH10K_DBG_SDIO,
+ "sdio mailbox swap service enabled\n");
+ ar_sdio->swap_mbox = true;
+ }
+
+ /* Enable sleep and then disable it again */
+ ret = ath10k_sdio_hif_set_mbox_sleep(ar, true);
+ if (ret)
+ return ret;
+
+ /* Wait for 20ms for the written value to take effect */
+ msleep(20);
+
+ ret = ath10k_sdio_hif_set_mbox_sleep(ar, false);
+ if (ret)
+ return ret;
+
+ return 0;
+}
+
+#define SDIO_IRQ_DISABLE_TIMEOUT_HZ (3 * HZ)
+
+static void ath10k_sdio_irq_disable(struct ath10k *ar)
+{
+ struct ath10k_sdio *ar_sdio = ath10k_sdio_priv(ar);
+ struct ath10k_sdio_irq_data *irq_data = &ar_sdio->irq_data;
+ struct ath10k_sdio_irq_enable_regs *regs = irq_data->irq_en_reg;
+ struct sk_buff *skb;
+ struct completion irqs_disabled_comp;
+ int ret;
+
+ skb = dev_alloc_skb(sizeof(*regs));
+ if (!skb)
+ return;
+
+ mutex_lock(&irq_data->mtx);
+
+ memset(regs, 0, sizeof(*regs)); /* disable all interrupts */
+ memcpy(skb->data, regs, sizeof(*regs));
+ skb_put(skb, sizeof(*regs));
+
+ mutex_unlock(&irq_data->mtx);
+
+ init_completion(&irqs_disabled_comp);
+ ret = ath10k_sdio_prep_async_req(ar, MBOX_INT_STATUS_ENABLE_ADDRESS,
+ skb, &irqs_disabled_comp, false, 0);
+ if (ret)
+ goto out;
+
+ queue_work(ar_sdio->workqueue, &ar_sdio->wr_async_work);
+
+ /* Wait for the completion of the IRQ disable request.
+ * If there is a timeout we will try to disable the IRQs anyway.
+ */
+ ret = wait_for_completion_timeout(&irqs_disabled_comp,
+ SDIO_IRQ_DISABLE_TIMEOUT_HZ);
+ if (!ret)
+ ath10k_warn(ar, "sdio irq disable request timed out\n");
+
+ sdio_claim_host(ar_sdio->func);
+
+ ret = sdio_release_irq(ar_sdio->func);
+ if (ret)
+ ath10k_warn(ar, "failed to release sdio interrupt: %d\n", ret);
+
+ sdio_release_host(ar_sdio->func);
+
+out:
+ kfree_skb(skb);
+}
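+
+/* Editor's note: an illustrative sketch (compiled out with #if 0) of the
+ * same async-write-and-wait pattern for an arbitrary register block. It
+ * assumes the ath10k_sdio_prep_async_req(ar, addr, skb, comp, htc_msg, eid)
+ * signature seen in ath10k_sdio_irq_disable() above.
+ */
+#if 0
+static int ath10k_sdio_example_async_write(struct ath10k *ar, u32 addr,
+ const void *buf, size_t len)
+{
+ struct ath10k_sdio *ar_sdio = ath10k_sdio_priv(ar);
+ struct completion done;
+ struct sk_buff *skb;
+ int ret;
+
+ skb = dev_alloc_skb(len);
+ if (!skb)
+ return -ENOMEM;
+
+ memcpy(skb_put(skb, len), buf, len);
+
+ init_completion(&done);
+ ret = ath10k_sdio_prep_async_req(ar, addr, skb, &done, false, 0);
+ if (ret) {
+ kfree_skb(skb);
+ return ret;
+ }
+
+ queue_work(ar_sdio->workqueue, &ar_sdio->wr_async_work);
+
+ if (!wait_for_completion_timeout(&done, SDIO_IRQ_DISABLE_TIMEOUT_HZ))
+ ret = -ETIMEDOUT;
+
+ kfree_skb(skb);
+
+ return ret;
+}
+#endif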
+
+static void ath10k_sdio_hif_stop(struct ath10k *ar)
+{
+ struct ath10k_sdio_bus_request *req, *tmp_req;
+ struct ath10k_sdio *ar_sdio = ath10k_sdio_priv(ar);
+
+ ath10k_sdio_irq_disable(ar);
+
+ cancel_work_sync(&ar_sdio->wr_async_work);
+
+ spin_lock_bh(&ar_sdio->wr_async_lock);
+
+ /* Free all bus requests that have not been handled */
+ list_for_each_entry_safe(req, tmp_req, &ar_sdio->wr_asyncq, list) {
+ struct ath10k_htc_ep *ep;
+
+ list_del(&req->list);
+
+ if (req->htc_msg) {
+ ep = &ar->htc.endpoint[req->eid];
+ ath10k_htc_notify_tx_completion(ep, req->skb);
+ } else if (req->skb) {
+ kfree_skb(req->skb);
+ }
+ ath10k_sdio_free_bus_req(ar, req);
+ }
+
+ spin_unlock_bh(&ar_sdio->wr_async_lock);
+}
+
+#ifdef CONFIG_PM
+
+static int ath10k_sdio_hif_suspend(struct ath10k *ar)
+{
+ return -EOPNOTSUPP;
+}
+
+static int ath10k_sdio_hif_resume(struct ath10k *ar)
+{
+ switch (ar->state) {
+ case ATH10K_STATE_OFF:
+ ath10k_dbg(ar, ATH10K_DBG_SDIO,
+ "sdio resume configuring sdio\n");
+
+ /* sdio settings need to be reapplied after power is cut from the card */
+ ath10k_sdio_config(ar);
+ break;
+
+ case ATH10K_STATE_ON:
+ default:
+ break;
+ }
+
+ return 0;
+}
+#endif
+
+static int ath10k_sdio_hif_map_service_to_pipe(struct ath10k *ar,
+ u16 service_id,
+ u8 *ul_pipe, u8 *dl_pipe)
+{
+ struct ath10k_sdio *ar_sdio = ath10k_sdio_priv(ar);
+ struct ath10k_htc *htc = &ar->htc;
+ u32 htt_addr, wmi_addr, htt_mbox_size, wmi_mbox_size;
+ enum ath10k_htc_ep_id eid;
+ bool ep_found = false;
+ int i;
+
+ /* For sdio, we are interested in the mapping between eid
+ * and pipeid rather than between service_id and pipe_id.
+ * First we find out which eid has been allocated to the
+ * service...
+ */
+ for (i = 0; i < ATH10K_HTC_EP_COUNT; i++) {
+ if (htc->endpoint[i].service_id == service_id) {
+ eid = htc->endpoint[i].eid;
+ ep_found = true;
+ break;
+ }
+ }
+
+ if (!ep_found)
+ return -EINVAL;
+
+ /* Then we create the simplest mapping possible between pipeid
+ * and eid
+ */
+ *ul_pipe = *dl_pipe = (u8)eid;
+
+ /* Normally, HTT will use the upper part of the extended
+ * mailbox address space (ext_info[1].htc_ext_addr) and WMI ctrl
+ * the lower part (ext_info[0].htc_ext_addr).
+ * If fw wants swapping of mailbox addresses, the opposite is true.
+ */
+ if (ar_sdio->swap_mbox) {
+ htt_addr = ar_sdio->mbox_info.ext_info[0].htc_ext_addr;
+ wmi_addr = ar_sdio->mbox_info.ext_info[1].htc_ext_addr;
+ htt_mbox_size = ar_sdio->mbox_info.ext_info[0].htc_ext_sz;
+ wmi_mbox_size = ar_sdio->mbox_info.ext_info[1].htc_ext_sz;
+ } else {
+ htt_addr = ar_sdio->mbox_info.ext_info[1].htc_ext_addr;
+ wmi_addr = ar_sdio->mbox_info.ext_info[0].htc_ext_addr;
+ htt_mbox_size = ar_sdio->mbox_info.ext_info[1].htc_ext_sz;
+ wmi_mbox_size = ar_sdio->mbox_info.ext_info[0].htc_ext_sz;
+ }
+
+ switch (service_id) {
+ case ATH10K_HTC_SVC_ID_RSVD_CTRL:
+ /* HTC ctrl ep mbox address has already been set up in
+ * ath10k_sdio_hif_start
+ */
+ break;
+ case ATH10K_HTC_SVC_ID_WMI_CONTROL:
+ ar_sdio->mbox_addr[eid] = wmi_addr;
+ ar_sdio->mbox_size[eid] = wmi_mbox_size;
+ ath10k_dbg(ar, ATH10K_DBG_SDIO,
+ "sdio wmi ctrl mbox_addr 0x%x mbox_size %d\n",
+ ar_sdio->mbox_addr[eid], ar_sdio->mbox_size[eid]);
+ break;
+ case ATH10K_HTC_SVC_ID_HTT_DATA_MSG:
+ ar_sdio->mbox_addr[eid] = htt_addr;
+ ar_sdio->mbox_size[eid] = htt_mbox_size;
+ ath10k_dbg(ar, ATH10K_DBG_SDIO,
+ "sdio htt data mbox_addr 0x%x mbox_size %d\n",
+ ar_sdio->mbox_addr[eid], ar_sdio->mbox_size[eid]);
+ break;
+ default:
+ ath10k_warn(ar, "unsupported HTC service id: %d\n",
+ service_id);
+ return -EINVAL;
+ }
+
+ return 0;
+}
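+
+/* For example, when the firmware has acked mailbox swapping (swap_mbox set
+ * in ath10k_sdio_hif_start()), the HTT data endpoint above ends up served
+ * from ext_info[0] (the lower extended region) and WMI control from
+ * ext_info[1]; without the swap the assignment is the other way around.
+ */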
+
+static void ath10k_sdio_hif_get_default_pipe(struct ath10k *ar,
+ u8 *ul_pipe, u8 *dl_pipe)
+{
+ ath10k_dbg(ar, ATH10K_DBG_SDIO, "sdio hif get default pipe\n");
+
+ /* HTC ctrl ep (SVC id 1) always has eid (and pipe_id in our
+ * case) == 0
+ */
+ *ul_pipe = 0;
+ *dl_pipe = 0;
+}
+
+/* This op is currently only used by htc_wait_target if the HTC ready
+ * message times out. It is not applicable for SDIO since there is nothing
+ * we can do if the HTC ready message does not arrive in time.
+ * TODO: Make this op non-mandatory by introducing a NULL check in the
+ * hif op wrapper.
+ */
+static void ath10k_sdio_hif_send_complete_check(struct ath10k *ar,
+ u8 pipe, int force)
+{
+}
+
+static const struct ath10k_hif_ops ath10k_sdio_hif_ops = {
+ .tx_sg = ath10k_sdio_hif_tx_sg,
+ .diag_read = ath10k_sdio_hif_diag_read,
+ .diag_write = ath10k_sdio_hif_diag_write_mem,
+ .exchange_bmi_msg = ath10k_sdio_bmi_exchange_msg,
+ .start = ath10k_sdio_hif_start,
+ .stop = ath10k_sdio_hif_stop,
+ .map_service_to_pipe = ath10k_sdio_hif_map_service_to_pipe,
+ .get_default_pipe = ath10k_sdio_hif_get_default_pipe,
+ .send_complete_check = ath10k_sdio_hif_send_complete_check,
+ .power_up = ath10k_sdio_hif_power_up,
+ .power_down = ath10k_sdio_hif_power_down,
+#ifdef CONFIG_PM
+ .suspend = ath10k_sdio_hif_suspend,
+ .resume = ath10k_sdio_hif_resume,
+#endif
+};
+
+#ifdef CONFIG_PM_SLEEP
+
+/* Empty handlers so that the mmc subsystem doesn't remove us entirely
+ * during suspend. We instead follow the cfg80211 suspend/resume handlers.
+ */
+static int ath10k_sdio_pm_suspend(struct device *device)
+{
+ return 0;
+}
+
+static int ath10k_sdio_pm_resume(struct device *device)
+{
+ return 0;
+}
+
+static SIMPLE_DEV_PM_OPS(ath10k_sdio_pm_ops, ath10k_sdio_pm_suspend,
+ ath10k_sdio_pm_resume);
+
+#define ATH10K_SDIO_PM_OPS (&ath10k_sdio_pm_ops)
+
+#else
+
+#define ATH10K_SDIO_PM_OPS NULL
+
+#endif /* CONFIG_PM_SLEEP */
+
+static int ath10k_sdio_probe(struct sdio_func *func,
+ const struct sdio_device_id *id)
+{
+ struct ath10k_sdio *ar_sdio;
+ struct ath10k *ar;
+ enum ath10k_hw_rev hw_rev;
+ u32 chip_id, dev_id_base;
+ int ret, i;
+
+ /* Assumption: all SDIO based chipsets (so far) are QCA6174 based.
+ * If newer chipsets appear that do not use the hw reg setup defined
+ * in qca6174_regs and qca6174_values, this assumption no longer
+ * holds and hw_rev must be set up differently depending on the
+ * chipset.
+ */
+ hw_rev = ATH10K_HW_QCA6174;
+
+ ar = ath10k_core_create(sizeof(*ar_sdio), &func->dev, ATH10K_BUS_SDIO,
+ hw_rev, &ath10k_sdio_hif_ops);
+ if (!ar) {
+ dev_err(&func->dev, "failed to allocate core\n");
+ return -ENOMEM;
+ }
+
+ ath10k_dbg(ar, ATH10K_DBG_BOOT,
+ "sdio new func %d vendor 0x%x device 0x%x block 0x%x/0x%x\n",
+ func->num, func->vendor, func->device,
+ func->max_blksize, func->cur_blksize);
+
+ ar_sdio = ath10k_sdio_priv(ar);
+
+ ar_sdio->irq_data.irq_proc_reg =
+ devm_kzalloc(ar->dev, sizeof(struct ath10k_sdio_irq_proc_regs),
+ GFP_KERNEL);
+ if (!ar_sdio->irq_data.irq_proc_reg) {
+ ret = -ENOMEM;
+ goto err_core_destroy;
+ }
+
+ ar_sdio->irq_data.irq_en_reg =
+ devm_kzalloc(ar->dev, sizeof(struct ath10k_sdio_irq_enable_regs),
+ GFP_KERNEL);
+ if (!ar_sdio->irq_data.irq_en_reg) {
+ ret = -ENOMEM;
+ goto err_core_destroy;
+ }
+
+ ar_sdio->bmi_buf = devm_kzalloc(ar->dev, BMI_MAX_CMDBUF_SIZE, GFP_KERNEL);
+ if (!ar_sdio->bmi_buf) {
+ ret = -ENOMEM;
+ goto err_core_destroy;
+ }
+
+ ar_sdio->func = func;
+ sdio_set_drvdata(func, ar_sdio);
+
+ ar_sdio->is_disabled = true;
+ ar_sdio->ar = ar;
+
+ spin_lock_init(&ar_sdio->lock);
+ spin_lock_init(&ar_sdio->wr_async_lock);
+ mutex_init(&ar_sdio->irq_data.mtx);
+
+ INIT_LIST_HEAD(&ar_sdio->bus_req_freeq);
+ INIT_LIST_HEAD(&ar_sdio->wr_asyncq);
+
+ INIT_WORK(&ar_sdio->wr_async_work, ath10k_sdio_write_async_work);
+ ar_sdio->workqueue = create_singlethread_workqueue("ath10k_sdio_wq");
+ if (!ar_sdio->workqueue) {
+ ret = -ENOMEM;
+ goto err_core_destroy;
+ }
+
+ for (i = 0; i < ATH10K_SDIO_BUS_REQUEST_MAX_NUM; i++)
+ ath10k_sdio_free_bus_req(ar, &ar_sdio->bus_req[i]);
+
+ dev_id_base = FIELD_GET(QCA_MANUFACTURER_ID_BASE, id->device);
+ switch (dev_id_base) {
+ case QCA_MANUFACTURER_ID_AR6005_BASE:
+ case QCA_MANUFACTURER_ID_QCA9377_BASE:
+ ar->dev_id = QCA9377_1_0_DEVICE_ID;
+ break;
+ default:
+ ret = -ENODEV;
+ ath10k_err(ar, "unsupported device id %u (0x%x)\n",
+ dev_id_base, id->device);
+ goto err_free_wq;
+ }
+
+ ar->id.vendor = id->vendor;
+ ar->id.device = id->device;
+
+ ath10k_sdio_set_mbox_info(ar);
+
+ ret = ath10k_sdio_config(ar);
+ if (ret) {
+ ath10k_err(ar, "failed to config sdio: %d\n", ret);
+ goto err_free_wq;
+ }
+
+ /* TODO: don't know yet how to get chip_id with SDIO */
+ chip_id = 0;
+ ret = ath10k_core_register(ar, chip_id);
+ if (ret) {
+ ath10k_err(ar, "failed to register driver core: %d\n", ret);
+ goto err_free_wq;
+ }
+
+ /* TODO: remove this once SDIO support is fully implemented */
+ ath10k_warn(ar, "WARNING: ath10k SDIO support is incomplete, don't expect anything to work!\n");
+
+ return 0;
+
+err_free_wq:
+ destroy_workqueue(ar_sdio->workqueue);
+err_core_destroy:
+ ath10k_core_destroy(ar);
+
+ return ret;
+}
+
+static void ath10k_sdio_remove(struct sdio_func *func)
+{
+ struct ath10k_sdio *ar_sdio = sdio_get_drvdata(func);
+ struct ath10k *ar = ar_sdio->ar;
+
+ ath10k_dbg(ar, ATH10K_DBG_BOOT,
+ "sdio removed func %d vendor 0x%x device 0x%x\n",
+ func->num, func->vendor, func->device);
+
+ (void)ath10k_sdio_hif_disable_intrs(ar);
+ cancel_work_sync(&ar_sdio->wr_async_work);
+ ath10k_core_unregister(ar);
+ ath10k_core_destroy(ar);
+
+ flush_workqueue(ar_sdio->workqueue);
+ destroy_workqueue(ar_sdio->workqueue);
+}
+
+static const struct sdio_device_id ath10k_sdio_devices[] = {
+ {SDIO_DEVICE(QCA_MANUFACTURER_CODE,
+ (QCA_SDIO_ID_AR6005_BASE | 0xA))},
+ {SDIO_DEVICE(QCA_MANUFACTURER_CODE,
+ (QCA_SDIO_ID_QCA9377_BASE | 0x1))},
+ {},
+};
+
+MODULE_DEVICE_TABLE(sdio, ath10k_sdio_devices);
+
+static struct sdio_driver ath10k_sdio_driver = {
+ .name = "ath10k_sdio",
+ .id_table = ath10k_sdio_devices,
+ .probe = ath10k_sdio_probe,
+ .remove = ath10k_sdio_remove,
+ .drv.pm = ATH10K_SDIO_PM_OPS,
+};
+
+static int __init ath10k_sdio_init(void)
+{
+ int ret;
+
+ ret = sdio_register_driver(&ath10k_sdio_driver);
+ if (ret)
+ pr_err("sdio driver registration failed: %d\n", ret);
+
+ return ret;
+}
+
+static void __exit ath10k_sdio_exit(void)
+{
+ sdio_unregister_driver(&ath10k_sdio_driver);
+}
+
+module_init(ath10k_sdio_init);
+module_exit(ath10k_sdio_exit);
+
+MODULE_AUTHOR("Qualcomm Atheros");
+MODULE_DESCRIPTION("Driver support for Qualcomm Atheros 802.11ac WLAN SDIO devices");
+MODULE_LICENSE("Dual BSD/GPL");
diff --git a/drivers/net/wireless/ath/ath10k/sdio.h b/drivers/net/wireless/ath/ath10k/sdio.h
new file mode 100644
index 000000000..453eb6263
--- /dev/null
+++ b/drivers/net/wireless/ath/ath10k/sdio.h
@@ -0,0 +1,227 @@
+/*
+ * Copyright (c) 2004-2011 Atheros Communications Inc.
+ * Copyright (c) 2011-2012 Qualcomm Atheros, Inc.
+ * Copyright (c) 2016-2017 Erik Stromdahl <erik.stromdahl@gmail.com>
+ *
+ * Permission to use, copy, modify, and/or distribute this software for any
+ * purpose with or without fee is hereby granted, provided that the above
+ * copyright notice and this permission notice appear in all copies.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
+ * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
+ * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+ * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+ * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
+ * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+ */
+
+#ifndef _SDIO_H_
+#define _SDIO_H_
+
+#define ATH10K_HIF_MBOX_BLOCK_SIZE 256
+
+#define QCA_MANUFACTURER_ID_BASE GENMASK(11, 8)
+#define QCA_MANUFACTURER_ID_AR6005_BASE 0x5
+#define QCA_MANUFACTURER_ID_QCA9377_BASE 0x7
+#define QCA_SDIO_ID_AR6005_BASE 0x500
+#define QCA_SDIO_ID_QCA9377_BASE 0x700
+#define QCA_MANUFACTURER_ID_REV_MASK 0x00FF
+#define QCA_MANUFACTURER_CODE 0x271 /* Qualcomm/Atheros */
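+
+/* For example, the QCA9377 id in ath10k_sdio_devices is
+ * (QCA_SDIO_ID_QCA9377_BASE | 0x1) == 0x701, and
+ * FIELD_GET(QCA_MANUFACTURER_ID_BASE, 0x701) extracts bits 11:8, i.e.
+ * 0x7 == QCA_MANUFACTURER_ID_QCA9377_BASE, which is how
+ * ath10k_sdio_probe() classifies the chipset.
+ */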
+
+#define ATH10K_SDIO_MAX_BUFFER_SIZE 4096 /* unsure of this constant */
+
+/* Mailbox address in SDIO address space */
+#define ATH10K_HIF_MBOX_BASE_ADDR 0x1000
+#define ATH10K_HIF_MBOX_WIDTH 0x800
+
+#define ATH10K_HIF_MBOX_TOT_WIDTH \
+ (ATH10K_HIF_MBOX_NUM_MAX * ATH10K_HIF_MBOX_WIDTH)
+
+#define ATH10K_HIF_MBOX0_EXT_BASE_ADDR 0x5000
+#define ATH10K_HIF_MBOX0_EXT_WIDTH (36 * 1024)
+#define ATH10K_HIF_MBOX0_EXT_WIDTH_ROME_2_0 (56 * 1024)
+#define ATH10K_HIF_MBOX1_EXT_WIDTH (36 * 1024)
+#define ATH10K_HIF_MBOX_DUMMY_SPACE_SIZE (2 * 1024)
+
+#define ATH10K_HTC_MBOX_MAX_PAYLOAD_LENGTH \
+ (ATH10K_SDIO_MAX_BUFFER_SIZE - sizeof(struct ath10k_htc_hdr))
+
+#define ATH10K_HIF_MBOX_NUM_MAX 4
+#define ATH10K_SDIO_BUS_REQUEST_MAX_NUM 64
+
+#define ATH10K_SDIO_HIF_COMMUNICATION_TIMEOUT_HZ (100 * HZ)
+
+/* HTC runs over mailbox 0 */
+#define ATH10K_HTC_MAILBOX 0
+#define ATH10K_HTC_MAILBOX_MASK BIT(ATH10K_HTC_MAILBOX)
+
+/* GMBOX addresses */
+#define ATH10K_HIF_GMBOX_BASE_ADDR 0x7000
+#define ATH10K_HIF_GMBOX_WIDTH 0x4000
+
+/* Modified versions of the sdio.h macros.
+ * The macros in sdio.h can't be used easily with the FIELD_{PREP|GET}
+ * macros in bitfield.h, so we define our own macros here.
+ */
+#define ATH10K_SDIO_DRIVE_DTSX_MASK \
+ (SDIO_DRIVE_DTSx_MASK << SDIO_DRIVE_DTSx_SHIFT)
+
+#define ATH10K_SDIO_DRIVE_DTSX_TYPE_B 0
+#define ATH10K_SDIO_DRIVE_DTSX_TYPE_A 1
+#define ATH10K_SDIO_DRIVE_DTSX_TYPE_C 2
+#define ATH10K_SDIO_DRIVE_DTSX_TYPE_D 3
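+
+/* Editor's note: with the combined mask above, the bitfield helpers compose
+ * directly, e.g. a drive strength update in the style of the sdio config
+ * code (illustrative only):
+ *
+ * val &= ~ATH10K_SDIO_DRIVE_DTSX_MASK;
+ * val |= FIELD_PREP(ATH10K_SDIO_DRIVE_DTSX_MASK,
+ * ATH10K_SDIO_DRIVE_DTSX_TYPE_D);
+ */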
+
+/* SDIO CCCR register definitions */
+#define CCCR_SDIO_IRQ_MODE_REG 0xF0
+#define CCCR_SDIO_IRQ_MODE_REG_SDIO3 0x16
+
+#define CCCR_SDIO_DRIVER_STRENGTH_ENABLE_ADDR 0xF2
+
+#define CCCR_SDIO_DRIVER_STRENGTH_ENABLE_A 0x02
+#define CCCR_SDIO_DRIVER_STRENGTH_ENABLE_C 0x04
+#define CCCR_SDIO_DRIVER_STRENGTH_ENABLE_D 0x08
+
+#define CCCR_SDIO_ASYNC_INT_DELAY_ADDRESS 0xF0
+#define CCCR_SDIO_ASYNC_INT_DELAY_MASK 0xC0
+
+/* mode to enable special 4-bit interrupt assertion without clock */
+#define SDIO_IRQ_MODE_ASYNC_4BIT_IRQ BIT(0)
+#define SDIO_IRQ_MODE_ASYNC_4BIT_IRQ_SDIO3 BIT(1)
+
+#define ATH10K_SDIO_TARGET_DEBUG_INTR_MASK 0x01
+
+/* The theoretical maximum number of RX messages that can be fetched
+ * from the mbox interrupt handler in one loop is derived in the following
+ * way:
+ *
+ * Let's assume that each packet in a bundle of the maximum bundle size
+ * (HTC_HOST_MAX_MSG_PER_RX_BUNDLE) has the HTC header bundle count set
+ * to the maximum value (HTC_HOST_MAX_MSG_PER_RX_BUNDLE).
+ *
+ * In this case the driver must allocate
+ * (HTC_HOST_MAX_MSG_PER_RX_BUNDLE * HTC_HOST_MAX_MSG_PER_RX_BUNDLE) skbs.
+ */
+#define ATH10K_SDIO_MAX_RX_MSGS \
+ (HTC_HOST_MAX_MSG_PER_RX_BUNDLE * HTC_HOST_MAX_MSG_PER_RX_BUNDLE)
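+
+/* For instance, a maximum bundle size of 8 would require 8 * 8 == 64
+ * preallocated rx_pkts slots; the value scales quadratically with
+ * HTC_HOST_MAX_MSG_PER_RX_BUNDLE.
+ */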
+
+#define ATH10K_FIFO_TIMEOUT_AND_CHIP_CONTROL 0x00000868u
+#define ATH10K_FIFO_TIMEOUT_AND_CHIP_CONTROL_DISABLE_SLEEP_OFF 0xFFFEFFFF
+#define ATH10K_FIFO_TIMEOUT_AND_CHIP_CONTROL_DISABLE_SLEEP_ON 0x10000
+
+struct ath10k_sdio_bus_request {
+ struct list_head list;
+
+ /* sdio address */
+ u32 address;
+
+ struct sk_buff *skb;
+ enum ath10k_htc_ep_id eid;
+ int status;
+ /* Specifies if the current request is an HTC message.
+ * If not, the eid is not applicable and the TX completion handler
+ * associated with the endpoint will not be invoked.
+ */
+ bool htc_msg;
+ /* Completion that (if set) will be invoked for non HTC requests
+ * (htc_msg == false) when the request has been processed.
+ */
+ struct completion *comp;
+};
+
+struct ath10k_sdio_rx_data {
+ struct sk_buff *skb;
+ size_t alloc_len;
+ size_t act_len;
+ enum ath10k_htc_ep_id eid;
+ bool part_of_bundle;
+ bool last_in_bundle;
+ bool trailer_only;
+ int status;
+};
+
+struct ath10k_sdio_irq_proc_regs {
+ u8 host_int_status;
+ u8 cpu_int_status;
+ u8 error_int_status;
+ u8 counter_int_status;
+ u8 mbox_frame;
+ u8 rx_lookahead_valid;
+ u8 host_int_status2;
+ u8 gmbox_rx_avail;
+ __le32 rx_lookahead[2];
+ __le32 rx_gmbox_lookahead_alias[2];
+};
+
+struct ath10k_sdio_irq_enable_regs {
+ u8 int_status_en;
+ u8 cpu_int_status_en;
+ u8 err_int_status_en;
+ u8 cntr_int_status_en;
+};
+
+struct ath10k_sdio_irq_data {
+ /* protects irq_proc_reg and irq_en_reg below.
+ * We use a mutex here and not a spinlock since we will hold the
+ * lock while calling the sdio_memcpy_ functions. These functions
+ * require a non-atomic context, and hence spinlocks cannot be
+ * held while calling them.
+ */
+ struct mutex mtx;
+ struct ath10k_sdio_irq_proc_regs *irq_proc_reg;
+ struct ath10k_sdio_irq_enable_regs *irq_en_reg;
+};
+
+struct ath10k_mbox_ext_info {
+ u32 htc_ext_addr;
+ u32 htc_ext_sz;
+};
+
+struct ath10k_mbox_info {
+ u32 htc_addr;
+ struct ath10k_mbox_ext_info ext_info[2];
+ u32 block_size;
+ u32 block_mask;
+ u32 gmbox_addr;
+ u32 gmbox_sz;
+};
+
+struct ath10k_sdio {
+ struct sdio_func *func;
+
+ struct ath10k_mbox_info mbox_info;
+ bool swap_mbox;
+ u32 mbox_addr[ATH10K_HTC_EP_COUNT];
+ u32 mbox_size[ATH10K_HTC_EP_COUNT];
+
+ /* available bus requests */
+ struct ath10k_sdio_bus_request bus_req[ATH10K_SDIO_BUS_REQUEST_MAX_NUM];
+ /* free list of bus requests */
+ struct list_head bus_req_freeq;
+ /* protects access to bus_req_freeq */
+ spinlock_t lock;
+
+ struct ath10k_sdio_rx_data rx_pkts[ATH10K_SDIO_MAX_RX_MSGS];
+ size_t n_rx_pkts;
+
+ struct ath10k *ar;
+ struct ath10k_sdio_irq_data irq_data;
+
+ /* temporary buffer for BMI requests */
+ u8 *bmi_buf;
+
+ bool is_disabled;
+
+ struct workqueue_struct *workqueue;
+ struct work_struct wr_async_work;
+ struct list_head wr_asyncq;
+ /* protects access to wr_asyncq */
+ spinlock_t wr_async_lock;
+};
+
+static inline struct ath10k_sdio *ath10k_sdio_priv(struct ath10k *ar)
+{
+ return (struct ath10k_sdio *)ar->drv_priv;
+}
+
+#endif
diff --git a/drivers/net/wireless/ath/ath10k/snoc.c b/drivers/net/wireless/ath/ath10k/snoc.c
new file mode 100644
index 000000000..241e6f0e1
--- /dev/null
+++ b/drivers/net/wireless/ath/ath10k/snoc.c
@@ -0,0 +1,1397 @@
+/*
+ * Copyright (c) 2018 The Linux Foundation. All rights reserved.
+ *
+ * Permission to use, copy, modify, and/or distribute this software for any
+ * purpose with or without fee is hereby granted, provided that the above
+ * copyright notice and this permission notice appear in all copies.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
+ * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
+ * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+ * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+ * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
+ * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+ */
+
+#include <linux/clk.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/of.h>
+#include <linux/of_device.h>
+#include <linux/platform_device.h>
+#include <linux/regulator/consumer.h>
+
+#include "ce.h"
+#include "debug.h"
+#include "hif.h"
+#include "htc.h"
+#include "snoc.h"
+
+#define ATH10K_SNOC_RX_POST_RETRY_MS 50
+#define CE_POLL_PIPE 4
+
+static char *const ce_name[] = {
+ "WLAN_CE_0",
+ "WLAN_CE_1",
+ "WLAN_CE_2",
+ "WLAN_CE_3",
+ "WLAN_CE_4",
+ "WLAN_CE_5",
+ "WLAN_CE_6",
+ "WLAN_CE_7",
+ "WLAN_CE_8",
+ "WLAN_CE_9",
+ "WLAN_CE_10",
+ "WLAN_CE_11",
+};
+
+static struct ath10k_wcn3990_vreg_info vreg_cfg[] = {
+ {NULL, "vdd-0.8-cx-mx", 800000, 800000, 0, 0, false},
+ {NULL, "vdd-1.8-xo", 1800000, 1800000, 0, 0, false},
+ {NULL, "vdd-1.3-rfa", 1304000, 1304000, 0, 0, false},
+ {NULL, "vdd-3.3-ch0", 3312000, 3312000, 0, 0, false},
+};
+
+static struct ath10k_wcn3990_clk_info clk_cfg[] = {
+ {NULL, "cxo_ref_clk_pin", 0, false},
+};
+
+static void ath10k_snoc_htc_tx_cb(struct ath10k_ce_pipe *ce_state);
+static void ath10k_snoc_htt_tx_cb(struct ath10k_ce_pipe *ce_state);
+static void ath10k_snoc_htc_rx_cb(struct ath10k_ce_pipe *ce_state);
+static void ath10k_snoc_htt_rx_cb(struct ath10k_ce_pipe *ce_state);
+static void ath10k_snoc_htt_htc_rx_cb(struct ath10k_ce_pipe *ce_state);
+
+static const struct ath10k_snoc_drv_priv drv_priv = {
+ .hw_rev = ATH10K_HW_WCN3990,
+ .dma_mask = DMA_BIT_MASK(37),
+};
+
+static struct ce_attr host_ce_config_wlan[] = {
+ /* CE0: host->target HTC control streams */
+ {
+ .flags = CE_ATTR_FLAGS,
+ .src_nentries = 16,
+ .src_sz_max = 2048,
+ .dest_nentries = 0,
+ .send_cb = ath10k_snoc_htc_tx_cb,
+ },
+
+ /* CE1: target->host HTT + HTC control */
+ {
+ .flags = CE_ATTR_FLAGS,
+ .src_nentries = 0,
+ .src_sz_max = 2048,
+ .dest_nentries = 512,
+ .recv_cb = ath10k_snoc_htt_htc_rx_cb,
+ },
+
+ /* CE2: target->host WMI */
+ {
+ .flags = CE_ATTR_FLAGS,
+ .src_nentries = 0,
+ .src_sz_max = 2048,
+ .dest_nentries = 64,
+ .recv_cb = ath10k_snoc_htc_rx_cb,
+ },
+
+ /* CE3: host->target WMI */
+ {
+ .flags = CE_ATTR_FLAGS,
+ .src_nentries = 32,
+ .src_sz_max = 2048,
+ .dest_nentries = 0,
+ .send_cb = ath10k_snoc_htc_tx_cb,
+ },
+
+ /* CE4: host->target HTT */
+ {
+ .flags = CE_ATTR_FLAGS | CE_ATTR_DIS_INTR,
+ .src_nentries = 256,
+ .src_sz_max = 256,
+ .dest_nentries = 0,
+ .send_cb = ath10k_snoc_htt_tx_cb,
+ },
+
+ /* CE5: target->host HTT (ipa_uc->target) */
+ {
+ .flags = CE_ATTR_FLAGS,
+ .src_nentries = 0,
+ .src_sz_max = 512,
+ .dest_nentries = 512,
+ .recv_cb = ath10k_snoc_htt_rx_cb,
+ },
+
+ /* CE6: target autonomous hif_memcpy */
+ {
+ .flags = CE_ATTR_FLAGS,
+ .src_nentries = 0,
+ .src_sz_max = 0,
+ .dest_nentries = 0,
+ },
+
+ /* CE7: ce_diag, the Diagnostic Window */
+ {
+ .flags = CE_ATTR_FLAGS,
+ .src_nentries = 2,
+ .src_sz_max = 2048,
+ .dest_nentries = 2,
+ },
+
+ /* CE8: Target to uMC */
+ {
+ .flags = CE_ATTR_FLAGS,
+ .src_nentries = 0,
+ .src_sz_max = 2048,
+ .dest_nentries = 128,
+ },
+
+ /* CE9 target->host HTT */
+ {
+ .flags = CE_ATTR_FLAGS,
+ .src_nentries = 0,
+ .src_sz_max = 2048,
+ .dest_nentries = 512,
+ .recv_cb = ath10k_snoc_htt_htc_rx_cb,
+ },
+
+ /* CE10: target->host HTT */
+ {
+ .flags = CE_ATTR_FLAGS,
+ .src_nentries = 0,
+ .src_sz_max = 2048,
+ .dest_nentries = 512,
+ .recv_cb = ath10k_snoc_htt_htc_rx_cb,
+ },
+
+ /* CE11: target -> host PKTLOG */
+ {
+ .flags = CE_ATTR_FLAGS,
+ .src_nentries = 0,
+ .src_sz_max = 2048,
+ .dest_nentries = 512,
+ .recv_cb = ath10k_snoc_htt_htc_rx_cb,
+ },
+};
+
+static struct service_to_pipe target_service_to_ce_map_wlan[] = {
+ {
+ __cpu_to_le32(ATH10K_HTC_SVC_ID_WMI_DATA_VO),
+ __cpu_to_le32(PIPEDIR_OUT), /* out = UL = host -> target */
+ __cpu_to_le32(3),
+ },
+ {
+ __cpu_to_le32(ATH10K_HTC_SVC_ID_WMI_DATA_VO),
+ __cpu_to_le32(PIPEDIR_IN), /* in = DL = target -> host */
+ __cpu_to_le32(2),
+ },
+ {
+ __cpu_to_le32(ATH10K_HTC_SVC_ID_WMI_DATA_BK),
+ __cpu_to_le32(PIPEDIR_OUT), /* out = UL = host -> target */
+ __cpu_to_le32(3),
+ },
+ {
+ __cpu_to_le32(ATH10K_HTC_SVC_ID_WMI_DATA_BK),
+ __cpu_to_le32(PIPEDIR_IN), /* in = DL = target -> host */
+ __cpu_to_le32(2),
+ },
+ {
+ __cpu_to_le32(ATH10K_HTC_SVC_ID_WMI_DATA_BE),
+ __cpu_to_le32(PIPEDIR_OUT), /* out = UL = host -> target */
+ __cpu_to_le32(3),
+ },
+ {
+ __cpu_to_le32(ATH10K_HTC_SVC_ID_WMI_DATA_BE),
+ __cpu_to_le32(PIPEDIR_IN), /* in = DL = target -> host */
+ __cpu_to_le32(2),
+ },
+ {
+ __cpu_to_le32(ATH10K_HTC_SVC_ID_WMI_DATA_VI),
+ __cpu_to_le32(PIPEDIR_OUT), /* out = UL = host -> target */
+ __cpu_to_le32(3),
+ },
+ {
+ __cpu_to_le32(ATH10K_HTC_SVC_ID_WMI_DATA_VI),
+ __cpu_to_le32(PIPEDIR_IN), /* in = DL = target -> host */
+ __cpu_to_le32(2),
+ },
+ {
+ __cpu_to_le32(ATH10K_HTC_SVC_ID_WMI_CONTROL),
+ __cpu_to_le32(PIPEDIR_OUT), /* out = UL = host -> target */
+ __cpu_to_le32(3),
+ },
+ {
+ __cpu_to_le32(ATH10K_HTC_SVC_ID_WMI_CONTROL),
+ __cpu_to_le32(PIPEDIR_IN), /* in = DL = target -> host */
+ __cpu_to_le32(2),
+ },
+ {
+ __cpu_to_le32(ATH10K_HTC_SVC_ID_RSVD_CTRL),
+ __cpu_to_le32(PIPEDIR_OUT), /* out = UL = host -> target */
+ __cpu_to_le32(0),
+ },
+ {
+ __cpu_to_le32(ATH10K_HTC_SVC_ID_RSVD_CTRL),
+ __cpu_to_le32(PIPEDIR_IN), /* in = DL = target -> host */
+ __cpu_to_le32(2),
+ },
+ { /* not used */
+ __cpu_to_le32(ATH10K_HTC_SVC_ID_TEST_RAW_STREAMS),
+ __cpu_to_le32(PIPEDIR_OUT), /* out = UL = host -> target */
+ __cpu_to_le32(0),
+ },
+ { /* not used */
+ __cpu_to_le32(ATH10K_HTC_SVC_ID_TEST_RAW_STREAMS),
+ __cpu_to_le32(PIPEDIR_IN), /* in = DL = target -> host */
+ __cpu_to_le32(2),
+ },
+ {
+ __cpu_to_le32(ATH10K_HTC_SVC_ID_HTT_DATA_MSG),
+ __cpu_to_le32(PIPEDIR_OUT), /* out = UL = host -> target */
+ __cpu_to_le32(4),
+ },
+ {
+ __cpu_to_le32(ATH10K_HTC_SVC_ID_HTT_DATA_MSG),
+ __cpu_to_le32(PIPEDIR_IN), /* in = DL = target -> host */
+ __cpu_to_le32(1),
+ },
+ { /* not used */
+ __cpu_to_le32(ATH10K_HTC_SVC_ID_TEST_RAW_STREAMS),
+ __cpu_to_le32(PIPEDIR_OUT),
+ __cpu_to_le32(5),
+ },
+ { /* in = DL = target -> host */
+ __cpu_to_le32(ATH10K_HTC_SVC_ID_HTT_DATA2_MSG),
+ __cpu_to_le32(PIPEDIR_IN), /* in = DL = target -> host */
+ __cpu_to_le32(9),
+ },
+ { /* in = DL = target -> host */
+ __cpu_to_le32(ATH10K_HTC_SVC_ID_HTT_DATA3_MSG),
+ __cpu_to_le32(PIPEDIR_IN), /* in = DL = target -> host */
+ __cpu_to_le32(10),
+ },
+ { /* in = DL = target -> host pktlog */
+ __cpu_to_le32(ATH10K_HTC_SVC_ID_HTT_LOG_MSG),
+ __cpu_to_le32(PIPEDIR_IN), /* in = DL = target -> host */
+ __cpu_to_le32(11),
+ },
+ /* (Additions here) */
+
+ { /* must be last */
+ __cpu_to_le32(0),
+ __cpu_to_le32(0),
+ __cpu_to_le32(0),
+ },
+};
+
+void ath10k_snoc_write32(struct ath10k *ar, u32 offset, u32 value)
+{
+ struct ath10k_snoc *ar_snoc = ath10k_snoc_priv(ar);
+
+ iowrite32(value, ar_snoc->mem + offset);
+}
+
+u32 ath10k_snoc_read32(struct ath10k *ar, u32 offset)
+{
+ struct ath10k_snoc *ar_snoc = ath10k_snoc_priv(ar);
+ u32 val;
+
+ val = ioread32(ar_snoc->mem + offset);
+
+ return val;
+}
+
+static int __ath10k_snoc_rx_post_buf(struct ath10k_snoc_pipe *pipe)
+{
+ struct ath10k_ce_pipe *ce_pipe = pipe->ce_hdl;
+ struct ath10k *ar = pipe->hif_ce_state;
+ struct ath10k_ce *ce = ath10k_ce_priv(ar);
+ struct sk_buff *skb;
+ dma_addr_t paddr;
+ int ret;
+
+ skb = dev_alloc_skb(pipe->buf_sz);
+ if (!skb)
+ return -ENOMEM;
+
+ WARN_ONCE((unsigned long)skb->data & 3, "unaligned skb");
+
+ paddr = dma_map_single(ar->dev, skb->data,
+ skb->len + skb_tailroom(skb),
+ DMA_FROM_DEVICE);
+ if (unlikely(dma_mapping_error(ar->dev, paddr))) {
+ ath10k_warn(ar, "failed to dma map snoc rx buf\n");
+ dev_kfree_skb_any(skb);
+ return -EIO;
+ }
+
+ ATH10K_SKB_RXCB(skb)->paddr = paddr;
+
+ spin_lock_bh(&ce->ce_lock);
+ ret = ce_pipe->ops->ce_rx_post_buf(ce_pipe, skb, paddr);
+ spin_unlock_bh(&ce->ce_lock);
+ if (ret) {
+ dma_unmap_single(ar->dev, paddr, skb->len + skb_tailroom(skb),
+ DMA_FROM_DEVICE);
+ dev_kfree_skb_any(skb);
+ return ret;
+ }
+
+ return 0;
+}
+
+static void ath10k_snoc_rx_post_pipe(struct ath10k_snoc_pipe *pipe)
+{
+ struct ath10k *ar = pipe->hif_ce_state;
+ struct ath10k_ce *ce = ath10k_ce_priv(ar);
+ struct ath10k_snoc *ar_snoc = ath10k_snoc_priv(ar);
+ struct ath10k_ce_pipe *ce_pipe = pipe->ce_hdl;
+ int ret, num;
+
+ if (pipe->buf_sz == 0)
+ return;
+
+ if (!ce_pipe->dest_ring)
+ return;
+
+ spin_lock_bh(&ce->ce_lock);
+ num = __ath10k_ce_rx_num_free_bufs(ce_pipe);
+ spin_unlock_bh(&ce->ce_lock);
+ while (num--) {
+ ret = __ath10k_snoc_rx_post_buf(pipe);
+ if (ret) {
+ if (ret == -ENOSPC)
+ break;
+ ath10k_warn(ar, "failed to post rx buf: %d\n", ret);
+ mod_timer(&ar_snoc->rx_post_retry, jiffies +
+ ATH10K_SNOC_RX_POST_RETRY_MS);
+ break;
+ }
+ }
+}
+
+static void ath10k_snoc_rx_post(struct ath10k *ar)
+{
+ struct ath10k_snoc *ar_snoc = ath10k_snoc_priv(ar);
+ int i;
+
+ for (i = 0; i < CE_COUNT; i++)
+ ath10k_snoc_rx_post_pipe(&ar_snoc->pipe_info[i]);
+}
+
+static void ath10k_snoc_process_rx_cb(struct ath10k_ce_pipe *ce_state,
+ void (*callback)(struct ath10k *ar,
+ struct sk_buff *skb))
+{
+ struct ath10k *ar = ce_state->ar;
+ struct ath10k_snoc *ar_snoc = ath10k_snoc_priv(ar);
+ struct ath10k_snoc_pipe *pipe_info = &ar_snoc->pipe_info[ce_state->id];
+ struct sk_buff *skb;
+ struct sk_buff_head list;
+ void *transfer_context;
+ unsigned int nbytes, max_nbytes;
+
+ __skb_queue_head_init(&list);
+ while (ath10k_ce_completed_recv_next(ce_state, &transfer_context,
+ &nbytes) == 0) {
+ skb = transfer_context;
+ max_nbytes = skb->len + skb_tailroom(skb);
+ dma_unmap_single(ar->dev, ATH10K_SKB_RXCB(skb)->paddr,
+ max_nbytes, DMA_FROM_DEVICE);
+
+ if (unlikely(max_nbytes < nbytes)) {
+ ath10k_warn(ar, "rxed more than expected (nbytes %d, max %d)",
+ nbytes, max_nbytes);
+ dev_kfree_skb_any(skb);
+ continue;
+ }
+
+ skb_put(skb, nbytes);
+ __skb_queue_tail(&list, skb);
+ }
+
+ while ((skb = __skb_dequeue(&list))) {
+ ath10k_dbg(ar, ATH10K_DBG_SNOC, "snoc rx ce pipe %d len %d\n",
+ ce_state->id, skb->len);
+
+ callback(ar, skb);
+ }
+
+ ath10k_snoc_rx_post_pipe(pipe_info);
+}
+
+static void ath10k_snoc_htc_rx_cb(struct ath10k_ce_pipe *ce_state)
+{
+ ath10k_snoc_process_rx_cb(ce_state, ath10k_htc_rx_completion_handler);
+}
+
+static void ath10k_snoc_htt_htc_rx_cb(struct ath10k_ce_pipe *ce_state)
+{
+ /* CE4 polling needs to be done whenever a CE pipe which transports
+ * HTT RX (target->host) is processed.
+ */
+ ath10k_ce_per_engine_service(ce_state->ar, CE_POLL_PIPE);
+
+ ath10k_snoc_process_rx_cb(ce_state, ath10k_htc_rx_completion_handler);
+}
+
+static void ath10k_snoc_htt_rx_deliver(struct ath10k *ar, struct sk_buff *skb)
+{
+ skb_pull(skb, sizeof(struct ath10k_htc_hdr));
+ ath10k_htt_t2h_msg_handler(ar, skb);
+}
+
+static void ath10k_snoc_htt_rx_cb(struct ath10k_ce_pipe *ce_state)
+{
+ ath10k_ce_per_engine_service(ce_state->ar, CE_POLL_PIPE);
+ ath10k_snoc_process_rx_cb(ce_state, ath10k_snoc_htt_rx_deliver);
+}
+
+static void ath10k_snoc_rx_replenish_retry(struct timer_list *t)
+{
+ struct ath10k_snoc *ar_snoc = from_timer(ar_snoc, t, rx_post_retry);
+ struct ath10k *ar = ar_snoc->ar;
+
+ ath10k_snoc_rx_post(ar);
+}
+
+static void ath10k_snoc_htc_tx_cb(struct ath10k_ce_pipe *ce_state)
+{
+ struct ath10k *ar = ce_state->ar;
+ struct sk_buff_head list;
+ struct sk_buff *skb;
+
+ __skb_queue_head_init(&list);
+ while (ath10k_ce_completed_send_next(ce_state, (void **)&skb) == 0) {
+ if (!skb)
+ continue;
+
+ __skb_queue_tail(&list, skb);
+ }
+
+ while ((skb = __skb_dequeue(&list)))
+ ath10k_htc_tx_completion_handler(ar, skb);
+}
+
+static void ath10k_snoc_htt_tx_cb(struct ath10k_ce_pipe *ce_state)
+{
+ struct ath10k *ar = ce_state->ar;
+ struct sk_buff *skb;
+
+ while (ath10k_ce_completed_send_next(ce_state, (void **)&skb) == 0) {
+ if (!skb)
+ continue;
+
+ dma_unmap_single(ar->dev, ATH10K_SKB_CB(skb)->paddr,
+ skb->len, DMA_TO_DEVICE);
+ ath10k_htt_hif_tx_complete(ar, skb);
+ }
+}
+
+static int ath10k_snoc_hif_tx_sg(struct ath10k *ar, u8 pipe_id,
+ struct ath10k_hif_sg_item *items, int n_items)
+{
+ struct ath10k_snoc *ar_snoc = ath10k_snoc_priv(ar);
+ struct ath10k_ce *ce = ath10k_ce_priv(ar);
+ struct ath10k_snoc_pipe *snoc_pipe;
+ struct ath10k_ce_pipe *ce_pipe;
+ int err, i = 0;
+
+ snoc_pipe = &ar_snoc->pipe_info[pipe_id];
+ ce_pipe = snoc_pipe->ce_hdl;
+ spin_lock_bh(&ce->ce_lock);
+
+ for (i = 0; i < n_items - 1; i++) {
+ ath10k_dbg(ar, ATH10K_DBG_SNOC,
+ "snoc tx item %d paddr %pad len %d n_items %d\n",
+ i, &items[i].paddr, items[i].len, n_items);
+
+ err = ath10k_ce_send_nolock(ce_pipe,
+ items[i].transfer_context,
+ items[i].paddr,
+ items[i].len,
+ items[i].transfer_id,
+ CE_SEND_FLAG_GATHER);
+ if (err)
+ goto err;
+ }
+
+ ath10k_dbg(ar, ATH10K_DBG_SNOC,
+ "snoc tx item %d paddr %pad len %d n_items %d\n",
+ i, &items[i].paddr, items[i].len, n_items);
+
+ err = ath10k_ce_send_nolock(ce_pipe,
+ items[i].transfer_context,
+ items[i].paddr,
+ items[i].len,
+ items[i].transfer_id,
+ 0);
+ if (err)
+ goto err;
+
+ spin_unlock_bh(&ce->ce_lock);
+
+ return 0;
+
+err:
+ for (; i > 0; i--)
+ __ath10k_ce_send_revert(ce_pipe);
+
+ spin_unlock_bh(&ce->ce_lock);
+ return err;
+}
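+
+/* Editor's note: an illustrative sketch (compiled out with #if 0) of a
+ * caller handing a two-element scatter list to ath10k_snoc_hif_tx_sg().
+ * All items but the last are sent with CE_SEND_FLAG_GATHER above, so the
+ * copy engine treats the whole list as one transfer. Field names follow
+ * struct ath10k_hif_sg_item in hif.h; the buffers are assumed to be
+ * DMA-mapped already.
+ */
+#if 0
+static int ath10k_snoc_example_send_two(struct ath10k *ar, u8 pipe_id,
+ struct sk_buff *hdr, struct sk_buff *payload)
+{
+ struct ath10k_hif_sg_item items[] = {
+ {
+ .transfer_id = 0,
+ .transfer_context = hdr,
+ .vaddr = hdr->data,
+ .paddr = ATH10K_SKB_CB(hdr)->paddr,
+ .len = hdr->len,
+ },
+ {
+ .transfer_id = 0,
+ .transfer_context = payload,
+ .vaddr = payload->data,
+ .paddr = ATH10K_SKB_CB(payload)->paddr,
+ .len = payload->len,
+ },
+ };
+
+ return ath10k_snoc_hif_tx_sg(ar, pipe_id, items, ARRAY_SIZE(items));
+}
+#endif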
+
+static int ath10k_snoc_hif_get_target_info(struct ath10k *ar,
+ struct bmi_target_info *target_info)
+{
+ target_info->version = ATH10K_HW_WCN3990;
+ target_info->type = ATH10K_HW_WCN3990;
+
+ return 0;
+}
+
+static u16 ath10k_snoc_hif_get_free_queue_number(struct ath10k *ar, u8 pipe)
+{
+ struct ath10k_snoc *ar_snoc = ath10k_snoc_priv(ar);
+
+ ath10k_dbg(ar, ATH10K_DBG_SNOC, "hif get free queue number\n");
+
+ return ath10k_ce_num_free_src_entries(ar_snoc->pipe_info[pipe].ce_hdl);
+}
+
+static void ath10k_snoc_hif_send_complete_check(struct ath10k *ar, u8 pipe,
+ int force)
+{
+ int resources;
+
+ ath10k_dbg(ar, ATH10K_DBG_SNOC, "snoc hif send complete check\n");
+
+ if (!force) {
+ resources = ath10k_snoc_hif_get_free_queue_number(ar, pipe);
+
+ if (resources > (host_ce_config_wlan[pipe].src_nentries >> 1))
+ return;
+ }
+ ath10k_ce_per_engine_service(ar, pipe);
+}
+
+static int ath10k_snoc_hif_map_service_to_pipe(struct ath10k *ar,
+ u16 service_id,
+ u8 *ul_pipe, u8 *dl_pipe)
+{
+ const struct service_to_pipe *entry;
+ bool ul_set = false, dl_set = false;
+ int i;
+
+ ath10k_dbg(ar, ATH10K_DBG_SNOC, "snoc hif map service\n");
+
+ for (i = 0; i < ARRAY_SIZE(target_service_to_ce_map_wlan); i++) {
+ entry = &target_service_to_ce_map_wlan[i];
+
+ if (__le32_to_cpu(entry->service_id) != service_id)
+ continue;
+
+ switch (__le32_to_cpu(entry->pipedir)) {
+ case PIPEDIR_NONE:
+ break;
+ case PIPEDIR_IN:
+ WARN_ON(dl_set);
+ *dl_pipe = __le32_to_cpu(entry->pipenum);
+ dl_set = true;
+ break;
+ case PIPEDIR_OUT:
+ WARN_ON(ul_set);
+ *ul_pipe = __le32_to_cpu(entry->pipenum);
+ ul_set = true;
+ break;
+ case PIPEDIR_INOUT:
+ WARN_ON(dl_set);
+ WARN_ON(ul_set);
+ *dl_pipe = __le32_to_cpu(entry->pipenum);
+ *ul_pipe = __le32_to_cpu(entry->pipenum);
+ dl_set = true;
+ ul_set = true;
+ break;
+ }
+ }
+
+ if (WARN_ON(!ul_set || !dl_set))
+ return -ENOENT;
+
+ return 0;
+}
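+
+/* For example, a lookup of ATH10K_HTC_SVC_ID_WMI_CONTROL walks the map
+ * above and returns *ul_pipe = 3 (the PIPEDIR_OUT entry) and
+ * *dl_pipe = 2 (the PIPEDIR_IN entry), matching CE3 (host->target WMI)
+ * and CE2 (target->host WMI) in host_ce_config_wlan.
+ */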
+
+static void ath10k_snoc_hif_get_default_pipe(struct ath10k *ar,
+ u8 *ul_pipe, u8 *dl_pipe)
+{
+ ath10k_dbg(ar, ATH10K_DBG_SNOC, "snoc hif get default pipe\n");
+
+ (void)ath10k_snoc_hif_map_service_to_pipe(ar,
+ ATH10K_HTC_SVC_ID_RSVD_CTRL,
+ ul_pipe, dl_pipe);
+}
+
+static inline void ath10k_snoc_irq_disable(struct ath10k *ar)
+{
+ ath10k_ce_disable_interrupts(ar);
+}
+
+static inline void ath10k_snoc_irq_enable(struct ath10k *ar)
+{
+ ath10k_ce_enable_interrupts(ar);
+}
+
+static void ath10k_snoc_rx_pipe_cleanup(struct ath10k_snoc_pipe *snoc_pipe)
+{
+ struct ath10k_ce_pipe *ce_pipe;
+ struct ath10k_ce_ring *ce_ring;
+ struct sk_buff *skb;
+ struct ath10k *ar;
+ int i;
+
+ ar = snoc_pipe->hif_ce_state;
+ ce_pipe = snoc_pipe->ce_hdl;
+ ce_ring = ce_pipe->dest_ring;
+
+ if (!ce_ring)
+ return;
+
+ if (!snoc_pipe->buf_sz)
+ return;
+
+ for (i = 0; i < ce_ring->nentries; i++) {
+ skb = ce_ring->per_transfer_context[i];
+ if (!skb)
+ continue;
+
+ ce_ring->per_transfer_context[i] = NULL;
+
+ dma_unmap_single(ar->dev, ATH10K_SKB_RXCB(skb)->paddr,
+ skb->len + skb_tailroom(skb),
+ DMA_FROM_DEVICE);
+ dev_kfree_skb_any(skb);
+ }
+}
+
+static void ath10k_snoc_tx_pipe_cleanup(struct ath10k_snoc_pipe *snoc_pipe)
+{
+ struct ath10k_ce_pipe *ce_pipe;
+ struct ath10k_ce_ring *ce_ring;
+ struct ath10k_snoc *ar_snoc;
+ struct sk_buff *skb;
+ struct ath10k *ar;
+ int i;
+
+ ar = snoc_pipe->hif_ce_state;
+ ar_snoc = ath10k_snoc_priv(ar);
+ ce_pipe = snoc_pipe->ce_hdl;
+ ce_ring = ce_pipe->src_ring;
+
+ if (!ce_ring)
+ return;
+
+ if (!snoc_pipe->buf_sz)
+ return;
+
+ for (i = 0; i < ce_ring->nentries; i++) {
+ skb = ce_ring->per_transfer_context[i];
+ if (!skb)
+ continue;
+
+ ce_ring->per_transfer_context[i] = NULL;
+
+ ath10k_htc_tx_completion_handler(ar, skb);
+ }
+}
+
+static void ath10k_snoc_buffer_cleanup(struct ath10k *ar)
+{
+ struct ath10k_snoc *ar_snoc = ath10k_snoc_priv(ar);
+ struct ath10k_snoc_pipe *pipe_info;
+ int pipe_num;
+
+ del_timer_sync(&ar_snoc->rx_post_retry);
+ for (pipe_num = 0; pipe_num < CE_COUNT; pipe_num++) {
+ pipe_info = &ar_snoc->pipe_info[pipe_num];
+ ath10k_snoc_rx_pipe_cleanup(pipe_info);
+ ath10k_snoc_tx_pipe_cleanup(pipe_info);
+ }
+}
+
+static void ath10k_snoc_hif_stop(struct ath10k *ar)
+{
+ ath10k_snoc_irq_disable(ar);
+ ath10k_snoc_buffer_cleanup(ar);
+ napi_synchronize(&ar->napi);
+ napi_disable(&ar->napi);
+ ath10k_dbg(ar, ATH10K_DBG_BOOT, "boot hif stop\n");
+}
+
+static int ath10k_snoc_hif_start(struct ath10k *ar)
+{
+ ath10k_snoc_irq_enable(ar);
+ ath10k_snoc_rx_post(ar);
+
+ ath10k_dbg(ar, ATH10K_DBG_BOOT, "boot hif start\n");
+
+ return 0;
+}
+
+static int ath10k_snoc_init_pipes(struct ath10k *ar)
+{
+ int i, ret;
+
+ for (i = 0; i < CE_COUNT; i++) {
+ ret = ath10k_ce_init_pipe(ar, i, &host_ce_config_wlan[i]);
+ if (ret) {
+ ath10k_err(ar, "failed to initialize copy engine pipe %d: %d\n",
+ i, ret);
+ return ret;
+ }
+ }
+
+ return 0;
+}
+
+static int ath10k_snoc_wlan_enable(struct ath10k *ar)
+{
+ return 0;
+}
+
+static void ath10k_snoc_wlan_disable(struct ath10k *ar)
+{
+}
+
+static void ath10k_snoc_hif_power_down(struct ath10k *ar)
+{
+ ath10k_dbg(ar, ATH10K_DBG_BOOT, "boot hif power down\n");
+
+ ath10k_snoc_wlan_disable(ar);
+ ath10k_ce_free_rri(ar);
+}
+
+static int ath10k_snoc_hif_power_up(struct ath10k *ar)
+{
+ int ret;
+
+ ath10k_dbg(ar, ATH10K_DBG_SNOC, "%s:WCN3990 driver state = %d\n",
+ __func__, ar->state);
+
+ ret = ath10k_snoc_wlan_enable(ar);
+ if (ret) {
+ ath10k_err(ar, "failed to enable wcn3990: %d\n", ret);
+ return ret;
+ }
+
+ ath10k_ce_alloc_rri(ar);
+
+ ret = ath10k_snoc_init_pipes(ar);
+ if (ret) {
+ ath10k_err(ar, "failed to initialize CE: %d\n", ret);
+ goto err_free_rri;
+ }
+
+ napi_enable(&ar->napi);
+ return 0;
+
+err_free_rri:
+ ath10k_ce_free_rri(ar);
+ ath10k_snoc_wlan_disable(ar);
+
+ return ret;
+}
+
+static const struct ath10k_hif_ops ath10k_snoc_hif_ops = {
+ .read32 = ath10k_snoc_read32,
+ .write32 = ath10k_snoc_write32,
+ .start = ath10k_snoc_hif_start,
+ .stop = ath10k_snoc_hif_stop,
+ .map_service_to_pipe = ath10k_snoc_hif_map_service_to_pipe,
+ .get_default_pipe = ath10k_snoc_hif_get_default_pipe,
+ .power_up = ath10k_snoc_hif_power_up,
+ .power_down = ath10k_snoc_hif_power_down,
+ .tx_sg = ath10k_snoc_hif_tx_sg,
+ .send_complete_check = ath10k_snoc_hif_send_complete_check,
+ .get_free_queue_number = ath10k_snoc_hif_get_free_queue_number,
+ .get_target_info = ath10k_snoc_hif_get_target_info,
+};
+
+static const struct ath10k_bus_ops ath10k_snoc_bus_ops = {
+ .read32 = ath10k_snoc_read32,
+ .write32 = ath10k_snoc_write32,
+};
+
+static int ath10k_snoc_get_ce_id_from_irq(struct ath10k *ar, int irq)
+{
+ struct ath10k_snoc *ar_snoc = ath10k_snoc_priv(ar);
+ int i;
+
+ for (i = 0; i < CE_COUNT_MAX; i++) {
+ if (ar_snoc->ce_irqs[i].irq_line == irq)
+ return i;
+ }
+ ath10k_err(ar, "No matching CE id for irq %d\n", irq);
+
+ return -EINVAL;
+}
+
+static irqreturn_t ath10k_snoc_per_engine_handler(int irq, void *arg)
+{
+ struct ath10k *ar = arg;
+ struct ath10k_snoc *ar_snoc = ath10k_snoc_priv(ar);
+ int ce_id = ath10k_snoc_get_ce_id_from_irq(ar, irq);
+
+ if (ce_id < 0 || ce_id >= ARRAY_SIZE(ar_snoc->pipe_info)) {
+ ath10k_warn(ar, "unexpected/invalid irq %d ce_id %d\n", irq,
+ ce_id);
+ return IRQ_HANDLED;
+ }
+
+ ath10k_snoc_irq_disable(ar);
+ napi_schedule(&ar->napi);
+
+ return IRQ_HANDLED;
+}
+
+static int ath10k_snoc_napi_poll(struct napi_struct *ctx, int budget)
+{
+ struct ath10k *ar = container_of(ctx, struct ath10k, napi);
+ int done = 0;
+
+ ath10k_ce_per_engine_service_any(ar);
+ done = ath10k_htt_txrx_compl_task(ar, budget);
+
+ if (done < budget) {
+ napi_complete(ctx);
+ ath10k_snoc_irq_enable(ar);
+ }
+
+ return done;
+}
+
+static void ath10k_snoc_init_napi(struct ath10k *ar)
+{
+ netif_napi_add(&ar->napi_dev, &ar->napi, ath10k_snoc_napi_poll,
+ ATH10K_NAPI_BUDGET);
+}
+
+static int ath10k_snoc_request_irq(struct ath10k *ar)
+{
+ struct ath10k_snoc *ar_snoc = ath10k_snoc_priv(ar);
+ int irqflags = IRQF_TRIGGER_RISING;
+ int ret, id;
+
+ for (id = 0; id < CE_COUNT_MAX; id++) {
+ ret = request_irq(ar_snoc->ce_irqs[id].irq_line,
+ ath10k_snoc_per_engine_handler,
+ irqflags, ce_name[id], ar);
+ if (ret) {
+ ath10k_err(ar,
+ "failed to register IRQ handler for CE %d: %d",
+ id, ret);
+ goto err_irq;
+ }
+ }
+
+ return 0;
+
+err_irq:
+ for (id -= 1; id >= 0; id--)
+ free_irq(ar_snoc->ce_irqs[id].irq_line, ar);
+
+ return ret;
+}
+
+static void ath10k_snoc_free_irq(struct ath10k *ar)
+{
+ struct ath10k_snoc *ar_snoc = ath10k_snoc_priv(ar);
+ int id;
+
+ for (id = 0; id < CE_COUNT_MAX; id++)
+ free_irq(ar_snoc->ce_irqs[id].irq_line, ar);
+}
+
+static int ath10k_snoc_resource_init(struct ath10k *ar)
+{
+ struct ath10k_snoc *ar_snoc = ath10k_snoc_priv(ar);
+ struct platform_device *pdev;
+ struct resource *res;
+ int i, ret = 0;
+
+ pdev = ar_snoc->dev;
+ res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "membase");
+ if (!res) {
+ ath10k_err(ar, "Memory base not found in DT\n");
+ return -EINVAL;
+ }
+
+ ar_snoc->mem_pa = res->start;
+ ar_snoc->mem = devm_ioremap(&pdev->dev, ar_snoc->mem_pa,
+ resource_size(res));
+ if (!ar_snoc->mem) {
+ ath10k_err(ar, "Memory base ioremap failed with physical address %pa\n",
+ &ar_snoc->mem_pa);
+ return -EINVAL;
+ }
+
+ for (i = 0; i < CE_COUNT; i++) {
+ res = platform_get_resource(ar_snoc->dev, IORESOURCE_IRQ, i);
+ if (!res) {
+ ath10k_err(ar, "failed to get IRQ%d\n", i);
+ ret = -ENODEV;
+ goto out;
+ }
+ ar_snoc->ce_irqs[i].irq_line = res->start;
+ }
+
+out:
+ return ret;
+}
+
+static int ath10k_snoc_setup_resource(struct ath10k *ar)
+{
+ struct ath10k_snoc *ar_snoc = ath10k_snoc_priv(ar);
+ struct ath10k_ce *ce = ath10k_ce_priv(ar);
+ struct ath10k_snoc_pipe *pipe;
+ int i, ret;
+
+ timer_setup(&ar_snoc->rx_post_retry, ath10k_snoc_rx_replenish_retry, 0);
+ spin_lock_init(&ce->ce_lock);
+ for (i = 0; i < CE_COUNT; i++) {
+ pipe = &ar_snoc->pipe_info[i];
+ pipe->ce_hdl = &ce->ce_states[i];
+ pipe->pipe_num = i;
+ pipe->hif_ce_state = ar;
+
+ ret = ath10k_ce_alloc_pipe(ar, i, &host_ce_config_wlan[i]);
+ if (ret) {
+ ath10k_err(ar, "failed to allocate copy engine pipe %d: %d\n",
+ i, ret);
+ return ret;
+ }
+
+ pipe->buf_sz = host_ce_config_wlan[i].src_sz_max;
+ }
+ ath10k_snoc_init_napi(ar);
+
+ return 0;
+}
+
+static void ath10k_snoc_release_resource(struct ath10k *ar)
+{
+ int i;
+
+ netif_napi_del(&ar->napi);
+ for (i = 0; i < CE_COUNT; i++)
+ ath10k_ce_free_pipe(ar, i);
+}
+
+static int ath10k_get_vreg_info(struct ath10k *ar, struct device *dev,
+ struct ath10k_wcn3990_vreg_info *vreg_info)
+{
+ struct regulator *reg;
+ int ret = 0;
+
+ reg = devm_regulator_get_optional(dev, vreg_info->name);
+
+ if (IS_ERR(reg)) {
+ ret = PTR_ERR(reg);
+
+ if (ret == -EPROBE_DEFER) {
+ ath10k_err(ar, "EPROBE_DEFER for regulator: %s\n",
+ vreg_info->name);
+ return ret;
+ }
+ if (vreg_info->required) {
+ ath10k_err(ar, "Regulator %s doesn't exist: %d\n",
+ vreg_info->name, ret);
+ return ret;
+ }
+ ath10k_dbg(ar, ATH10K_DBG_SNOC,
+ "Optional regulator %s doesn't exist: %d\n",
+ vreg_info->name, ret);
+ goto done;
+ }
+
+ vreg_info->reg = reg;
+
+done:
+ ath10k_dbg(ar, ATH10K_DBG_SNOC,
+ "snog vreg %s min_v %u max_v %u load_ua %u settle_delay %lu\n",
+ vreg_info->name, vreg_info->min_v, vreg_info->max_v,
+ vreg_info->load_ua, vreg_info->settle_delay);
+
+ return 0;
+}
+
+static int ath10k_get_clk_info(struct ath10k *ar, struct device *dev,
+ struct ath10k_wcn3990_clk_info *clk_info)
+{
+ struct clk *handle;
+ int ret = 0;
+
+ handle = devm_clk_get(dev, clk_info->name);
+ if (IS_ERR(handle)) {
+ ret = PTR_ERR(handle);
+ if (clk_info->required) {
+ ath10k_err(ar, "snoc clock %s isn't available: %d\n",
+ clk_info->name, ret);
+ return ret;
+ }
+ ath10k_dbg(ar, ATH10K_DBG_SNOC, "snoc ignoring clock %s: %d\n",
+ clk_info->name,
+ ret);
+ return 0;
+ }
+
+ ath10k_dbg(ar, ATH10K_DBG_SNOC, "snoc clock %s freq %u\n",
+ clk_info->name, clk_info->freq);
+
+ clk_info->handle = handle;
+
+ return ret;
+}
+
+static int ath10k_wcn3990_vreg_on(struct ath10k *ar)
+{
+ struct ath10k_snoc *ar_snoc = ath10k_snoc_priv(ar);
+ struct ath10k_wcn3990_vreg_info *vreg_info;
+ int ret = 0;
+ int i;
+
+ for (i = 0; i < ARRAY_SIZE(vreg_cfg); i++) {
+ vreg_info = &ar_snoc->vreg[i];
+
+ if (!vreg_info->reg)
+ continue;
+
+ ath10k_dbg(ar, ATH10K_DBG_SNOC, "snoc regulator %s being enabled\n",
+ vreg_info->name);
+
+ ret = regulator_set_voltage(vreg_info->reg, vreg_info->min_v,
+ vreg_info->max_v);
+ if (ret) {
+ ath10k_err(ar,
+ "failed to set regulator %s voltage-min: %d voltage-max: %d\n",
+ vreg_info->name, vreg_info->min_v, vreg_info->max_v);
+ goto err_reg_config;
+ }
+
+ if (vreg_info->load_ua) {
+ ret = regulator_set_load(vreg_info->reg,
+ vreg_info->load_ua);
+ if (ret < 0) {
+ ath10k_err(ar,
+ "failed to set regulator %s load: %d\n",
+ vreg_info->name,
+ vreg_info->load_ua);
+ goto err_reg_config;
+ }
+ }
+
+ ret = regulator_enable(vreg_info->reg);
+ if (ret) {
+ ath10k_err(ar, "failed to enable regulator %s\n",
+ vreg_info->name);
+ goto err_reg_config;
+ }
+
+ if (vreg_info->settle_delay)
+ udelay(vreg_info->settle_delay);
+ }
+
+ return 0;
+
+err_reg_config:
+ /* skip entry 'i': it failed before being enabled */
+ for (i = i - 1; i >= 0; i--) {
+ vreg_info = &ar_snoc->vreg[i];
+
+ if (!vreg_info->reg)
+ continue;
+
+ regulator_disable(vreg_info->reg);
+ regulator_set_load(vreg_info->reg, 0);
+ regulator_set_voltage(vreg_info->reg, 0, vreg_info->max_v);
+ }
+
+ return ret;
+}
+
+static int ath10k_wcn3990_vreg_off(struct ath10k *ar)
+{
+ struct ath10k_snoc *ar_snoc = ath10k_snoc_priv(ar);
+ struct ath10k_wcn3990_vreg_info *vreg_info;
+ int ret = 0;
+ int i;
+
+ for (i = ARRAY_SIZE(vreg_cfg) - 1; i >= 0; i--) {
+ vreg_info = &ar_snoc->vreg[i];
+
+ if (!vreg_info->reg)
+ continue;
+
+ ath10k_dbg(ar, ATH10K_DBG_SNOC, "snoc regulator %s being disabled\n",
+ vreg_info->name);
+
+ ret = regulator_disable(vreg_info->reg);
+ if (ret)
+ ath10k_err(ar, "failed to disable regulator %s\n",
+ vreg_info->name);
+
+ ret = regulator_set_load(vreg_info->reg, 0);
+ if (ret < 0)
+ ath10k_err(ar, "failed to set load %s\n",
+ vreg_info->name);
+
+ ret = regulator_set_voltage(vreg_info->reg, 0,
+ vreg_info->max_v);
+ if (ret)
+ ath10k_err(ar, "failed to set voltage %s\n",
+ vreg_info->name);
+ }
+
+ return ret;
+}
+
+static int ath10k_wcn3990_clk_init(struct ath10k *ar)
+{
+ struct ath10k_snoc *ar_snoc = ath10k_snoc_priv(ar);
+ struct ath10k_wcn3990_clk_info *clk_info;
+ int ret = 0;
+ int i;
+
+ for (i = 0; i < ARRAY_SIZE(clk_cfg); i++) {
+ clk_info = &ar_snoc->clk[i];
+
+ if (!clk_info->handle)
+ continue;
+
+ ath10k_dbg(ar, ATH10K_DBG_SNOC, "snoc clock %s being enabled\n",
+ clk_info->name);
+
+ if (clk_info->freq) {
+ ret = clk_set_rate(clk_info->handle, clk_info->freq);
+
+ if (ret) {
+ ath10k_err(ar, "failed to set clock %s freq %u\n",
+ clk_info->name, clk_info->freq);
+ goto err_clock_config;
+ }
+ }
+
+ ret = clk_prepare_enable(clk_info->handle);
+ if (ret) {
+ ath10k_err(ar, "failed to enable clock %s\n",
+ clk_info->name);
+ goto err_clock_config;
+ }
+ }
+
+ return 0;
+
+err_clock_config:
+ for (i = i - 1; i >= 0; i--) {
+ clk_info = &ar_snoc->clk[i];
+
+ if (!clk_info->handle)
+ continue;
+
+ clk_disable_unprepare(clk_info->handle);
+ }
+
+ return ret;
+}
+
+static int ath10k_wcn3990_clk_deinit(struct ath10k *ar)
+{
+ struct ath10k_snoc *ar_snoc = ath10k_snoc_priv(ar);
+ struct ath10k_wcn3990_clk_info *clk_info;
+ int i;
+
+ for (i = 0; i < ARRAY_SIZE(clk_cfg); i++) {
+ clk_info = &ar_snoc->clk[i];
+
+ if (!clk_info->handle)
+ continue;
+
+ ath10k_dbg(ar, ATH10K_DBG_SNOC, "snoc clock %s being disabled\n",
+ clk_info->name);
+
+ clk_disable_unprepare(clk_info->handle);
+ }
+
+ return 0;
+}
+
+static int ath10k_hw_power_on(struct ath10k *ar)
+{
+ int ret;
+
+ ath10k_dbg(ar, ATH10K_DBG_SNOC, "soc power on\n");
+
+ ret = ath10k_wcn3990_vreg_on(ar);
+ if (ret)
+ return ret;
+
+ ret = ath10k_wcn3990_clk_init(ar);
+ if (ret)
+ goto vreg_off;
+
+ return ret;
+
+vreg_off:
+ ath10k_wcn3990_vreg_off(ar);
+ return ret;
+}
+
+static int ath10k_hw_power_off(struct ath10k *ar)
+{
+ int ret;
+
+ ath10k_dbg(ar, ATH10K_DBG_SNOC, "soc power off\n");
+
+ ath10k_wcn3990_clk_deinit(ar);
+
+ ret = ath10k_wcn3990_vreg_off(ar);
+
+ return ret;
+}
+
+static const struct of_device_id ath10k_snoc_dt_match[] = {
+ { .compatible = "qcom,wcn3990-wifi",
+ .data = &drv_priv,
+ },
+ { }
+};
+MODULE_DEVICE_TABLE(of, ath10k_snoc_dt_match);
+
+static int ath10k_snoc_probe(struct platform_device *pdev)
+{
+ const struct ath10k_snoc_drv_priv *drv_data;
+ const struct of_device_id *of_id;
+ struct ath10k_snoc *ar_snoc;
+ struct device *dev;
+ struct ath10k *ar;
+ int ret;
+ u32 i;
+
+ of_id = of_match_device(ath10k_snoc_dt_match, &pdev->dev);
+ if (!of_id) {
+ dev_err(&pdev->dev, "failed to find matching device tree id\n");
+ return -EINVAL;
+ }
+
+ drv_data = of_id->data;
+ dev = &pdev->dev;
+
+ ret = dma_set_mask_and_coherent(dev, drv_data->dma_mask);
+ if (ret) {
+ dev_err(dev, "failed to set dma mask: %d", ret);
+ return ret;
+ }
+
+ ar = ath10k_core_create(sizeof(*ar_snoc), dev, ATH10K_BUS_SNOC,
+ drv_data->hw_rev, &ath10k_snoc_hif_ops);
+ if (!ar) {
+ dev_err(dev, "failed to allocate core\n");
+ return -ENOMEM;
+ }
+
+ ar_snoc = ath10k_snoc_priv(ar);
+ ar_snoc->dev = pdev;
+ platform_set_drvdata(pdev, ar);
+ ar_snoc->ar = ar;
+ ar_snoc->ce.bus_ops = &ath10k_snoc_bus_ops;
+ ar->ce_priv = &ar_snoc->ce;
+
+ ret = ath10k_snoc_resource_init(ar);
+ if (ret) {
+ ath10k_warn(ar, "failed to initialize resource: %d\n", ret);
+ goto err_core_destroy;
+ }
+
+ ret = ath10k_snoc_setup_resource(ar);
+ if (ret) {
+ ath10k_warn(ar, "failed to setup resource: %d\n", ret);
+ goto err_core_destroy;
+ }
+ ret = ath10k_snoc_request_irq(ar);
+ if (ret) {
+ ath10k_warn(ar, "failed to request irqs: %d\n", ret);
+ goto err_release_resource;
+ }
+
+ ar_snoc->vreg = vreg_cfg;
+ for (i = 0; i < ARRAY_SIZE(vreg_cfg); i++) {
+ ret = ath10k_get_vreg_info(ar, dev, &ar_snoc->vreg[i]);
+ if (ret)
+ goto err_free_irq;
+ }
+
+ ar_snoc->clk = clk_cfg;
+ for (i = 0; i < ARRAY_SIZE(clk_cfg); i++) {
+ ret = ath10k_get_clk_info(ar, dev, &ar_snoc->clk[i]);
+ if (ret)
+ goto err_free_irq;
+ }
+
+ ret = ath10k_hw_power_on(ar);
+ if (ret) {
+ ath10k_err(ar, "failed to power on device: %d\n", ret);
+ goto err_free_irq;
+ }
+
+ ret = ath10k_core_register(ar, drv_data->hw_rev);
+ if (ret) {
+ ath10k_err(ar, "failed to register driver core: %d\n", ret);
+ goto err_hw_power_off;
+ }
+
+ ath10k_dbg(ar, ATH10K_DBG_SNOC, "snoc probe\n");
+ ath10k_warn(ar, "Warning: SNOC support is still work-in-progress, it will not work properly!");
+
+ return 0;
+
+err_hw_power_off:
+ ath10k_hw_power_off(ar);
+
+err_free_irq:
+ ath10k_snoc_free_irq(ar);
+
+err_release_resource:
+ ath10k_snoc_release_resource(ar);
+
+err_core_destroy:
+ ath10k_core_destroy(ar);
+
+ return ret;
+}
+
+static int ath10k_snoc_remove(struct platform_device *pdev)
+{
+ struct ath10k *ar = platform_get_drvdata(pdev);
+
+ ath10k_dbg(ar, ATH10K_DBG_SNOC, "snoc remove\n");
+ ath10k_core_unregister(ar);
+ ath10k_hw_power_off(ar);
+ ath10k_snoc_free_irq(ar);
+ ath10k_snoc_release_resource(ar);
+ ath10k_core_destroy(ar);
+
+ return 0;
+}
+
+static struct platform_driver ath10k_snoc_driver = {
+ .probe = ath10k_snoc_probe,
+ .remove = ath10k_snoc_remove,
+ .driver = {
+ .name = "ath10k_snoc",
+ .of_match_table = ath10k_snoc_dt_match,
+ },
+};
+module_platform_driver(ath10k_snoc_driver);
+
+MODULE_AUTHOR("Qualcomm");
+MODULE_LICENSE("Dual BSD/GPL");
+MODULE_DESCRIPTION("Driver support for Atheros WCN3990 SNOC devices");
diff --git a/drivers/net/wireless/ath/ath10k/snoc.h b/drivers/net/wireless/ath/ath10k/snoc.h
new file mode 100644
index 000000000..f9e530189
--- /dev/null
+++ b/drivers/net/wireless/ath/ath10k/snoc.h
@@ -0,0 +1,94 @@
+/*
+ * Copyright (c) 2018 The Linux Foundation. All rights reserved.
+ *
+ * Permission to use, copy, modify, and/or distribute this software for any
+ * purpose with or without fee is hereby granted, provided that the above
+ * copyright notice and this permission notice appear in all copies.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
+ * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
+ * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+ * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+ * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
+ * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+ */
+
+#ifndef _SNOC_H_
+#define _SNOC_H_
+
+#include "hw.h"
+#include "ce.h"
+
+struct ath10k_snoc_drv_priv {
+ enum ath10k_hw_rev hw_rev;
+ u64 dma_mask;
+};
+
+struct snoc_state {
+ u32 pipe_cfg_addr;
+ u32 svc_to_pipe_map;
+};
+
+struct ath10k_snoc_pipe {
+ struct ath10k_ce_pipe *ce_hdl;
+ u8 pipe_num;
+ struct ath10k *hif_ce_state;
+ size_t buf_sz;
+ /* protect ce info */
+ spinlock_t pipe_lock;
+ struct ath10k_snoc *ar_snoc;
+};
+
+struct ath10k_snoc_target_info {
+ u32 target_version;
+ u32 target_type;
+ u32 target_revision;
+ u32 soc_version;
+};
+
+struct ath10k_snoc_ce_irq {
+ u32 irq_line;
+};
+
+struct ath10k_wcn3990_vreg_info {
+ struct regulator *reg;
+ const char *name;
+ u32 min_v;
+ u32 max_v;
+ u32 load_ua;
+ unsigned long settle_delay;
+ bool required;
+};
+
+struct ath10k_wcn3990_clk_info {
+ struct clk *handle;
+ const char *name;
+ u32 freq;
+ bool required;
+};
+
+struct ath10k_snoc {
+ struct platform_device *dev;
+ struct ath10k *ar;
+ void __iomem *mem;
+ dma_addr_t mem_pa;
+ struct ath10k_snoc_target_info target_info;
+ size_t mem_len;
+ struct ath10k_snoc_pipe pipe_info[CE_COUNT_MAX];
+ struct ath10k_snoc_ce_irq ce_irqs[CE_COUNT_MAX];
+ struct ath10k_ce ce;
+ struct timer_list rx_post_retry;
+ struct ath10k_wcn3990_vreg_info *vreg;
+ struct ath10k_wcn3990_clk_info *clk;
+};
+
+static inline struct ath10k_snoc *ath10k_snoc_priv(struct ath10k *ar)
+{
+ return (struct ath10k_snoc *)ar->drv_priv;
+}
+
+void ath10k_snoc_write32(struct ath10k *ar, u32 offset, u32 value);
+u32 ath10k_snoc_read32(struct ath10k *ar, u32 offset);
+
+#endif /* _SNOC_H_ */
diff --git a/drivers/net/wireless/ath/ath10k/spectral.c b/drivers/net/wireless/ath/ath10k/spectral.c
new file mode 100644
index 000000000..653b6d013
--- /dev/null
+++ b/drivers/net/wireless/ath/ath10k/spectral.c
@@ -0,0 +1,568 @@
+/*
+ * Copyright (c) 2013-2017 Qualcomm Atheros, Inc.
+ *
+ * Permission to use, copy, modify, and/or distribute this software for any
+ * purpose with or without fee is hereby granted, provided that the above
+ * copyright notice and this permission notice appear in all copies.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
+ * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
+ * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+ * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+ * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
+ * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+ */
+
+#include <linux/relay.h>
+#include "core.h"
+#include "debug.h"
+#include "wmi-ops.h"
+
+static void send_fft_sample(struct ath10k *ar,
+ const struct fft_sample_tlv *fft_sample_tlv)
+{
+ int length;
+
+ if (!ar->spectral.rfs_chan_spec_scan)
+ return;
+
+ length = __be16_to_cpu(fft_sample_tlv->length) +
+ sizeof(*fft_sample_tlv);
+ relay_write(ar->spectral.rfs_chan_spec_scan, fft_sample_tlv, length);
+}
+
+static uint8_t get_max_exp(s8 max_index, u16 max_magnitude, size_t bin_len,
+ u8 *data)
+{
+ int dc_pos;
+ u8 max_exp;
+
+ dc_pos = bin_len / 2;
+
+ /* peak index outside of bins */
+ if (dc_pos < max_index || -dc_pos >= max_index)
+ return 0;
+
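+	/* The FFT bins store the magnitude right-shifted by max_exp; recover
+	 * max_exp by finding the shift that reproduces the stored value.
+	 */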
+ for (max_exp = 0; max_exp < 8; max_exp++) {
+ if (data[dc_pos + max_index] == (max_magnitude >> max_exp))
+ break;
+ }
+
+ /* max_exp not found */
+ if (data[dc_pos + max_index] != (max_magnitude >> max_exp))
+ return 0;
+
+ return max_exp;
+}
+
+static inline size_t ath10k_spectral_fix_bin_size(struct ath10k *ar,
+ size_t bin_len)
+{
+	/* Some chipsets report the bin size as 2^n bytes + 'm' bytes in
+	 * report mode 2. The first 2^n bytes carry the inband tones and the
+	 * last 'm' bytes carry band edge detection data, mainly used for
+	 * radar detection. Strip the last 'm' bytes to make the bin size
+	 * valid. 'm' can take the values 4 or 12.
+	 */
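+	/* e.g. a reported bin_len of 68 with a 4-byte discard yields 64 bins
+	 * (illustrative values)
+	 */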
+ if (!is_power_of_2(bin_len))
+ bin_len -= ar->hw_params.spectral_bin_discard;
+
+ return bin_len;
+}
+
+int ath10k_spectral_process_fft(struct ath10k *ar,
+ struct wmi_phyerr_ev_arg *phyerr,
+ const struct phyerr_fft_report *fftr,
+ size_t bin_len, u64 tsf)
+{
+ struct fft_sample_ath10k *fft_sample;
+ u8 buf[sizeof(*fft_sample) + SPECTRAL_ATH10K_MAX_NUM_BINS];
+ u16 freq1, freq2, total_gain_db, base_pwr_db, length, peak_mag;
+ u32 reg0, reg1;
+ u8 chain_idx, *bins;
+ int dc_pos;
+
+ fft_sample = (struct fft_sample_ath10k *)&buf;
+
+ bin_len = ath10k_spectral_fix_bin_size(ar, bin_len);
+
+ if (bin_len < 64 || bin_len > SPECTRAL_ATH10K_MAX_NUM_BINS)
+ return -EINVAL;
+
+ reg0 = __le32_to_cpu(fftr->reg0);
+ reg1 = __le32_to_cpu(fftr->reg1);
+
+ length = sizeof(*fft_sample) - sizeof(struct fft_sample_tlv) + bin_len;
+ fft_sample->tlv.type = ATH_FFT_SAMPLE_ATH10K;
+ fft_sample->tlv.length = __cpu_to_be16(length);
+
+	/* TODO: there might be a reason why the hardware reports 20/40/80 MHz,
+	 * but the results/plots suggest that it's actually 22/44/88 MHz.
+	 */
+ switch (phyerr->chan_width_mhz) {
+ case 20:
+ fft_sample->chan_width_mhz = 22;
+ break;
+ case 40:
+ fft_sample->chan_width_mhz = 44;
+ break;
+ case 80:
+		/* TODO: As experiments with an analogue sender and various
+		 * configurations (FFT sizes of 64/128/256 and 20/40/80 MHz)
+		 * show, the particular configuration of 80 MHz/64 bins does
+		 * not match the other samples at all. Until the reason for
+		 * that is found, don't report these samples.
+		 */
+ if (bin_len == 64)
+ return -EINVAL;
+ fft_sample->chan_width_mhz = 88;
+ break;
+ default:
+ fft_sample->chan_width_mhz = phyerr->chan_width_mhz;
+ }
+
+ fft_sample->relpwr_db = MS(reg1, SEARCH_FFT_REPORT_REG1_RELPWR_DB);
+ fft_sample->avgpwr_db = MS(reg1, SEARCH_FFT_REPORT_REG1_AVGPWR_DB);
+
+ peak_mag = MS(reg1, SEARCH_FFT_REPORT_REG1_PEAK_MAG);
+ fft_sample->max_magnitude = __cpu_to_be16(peak_mag);
+ fft_sample->max_index = MS(reg0, SEARCH_FFT_REPORT_REG0_PEAK_SIDX);
+ fft_sample->rssi = phyerr->rssi_combined;
+
+ total_gain_db = MS(reg0, SEARCH_FFT_REPORT_REG0_TOTAL_GAIN_DB);
+ base_pwr_db = MS(reg0, SEARCH_FFT_REPORT_REG0_BASE_PWR_DB);
+ fft_sample->total_gain_db = __cpu_to_be16(total_gain_db);
+ fft_sample->base_pwr_db = __cpu_to_be16(base_pwr_db);
+
+ freq1 = phyerr->freq1;
+ freq2 = phyerr->freq2;
+ fft_sample->freq1 = __cpu_to_be16(freq1);
+ fft_sample->freq2 = __cpu_to_be16(freq2);
+
+ chain_idx = MS(reg0, SEARCH_FFT_REPORT_REG0_FFT_CHN_IDX);
+
+ fft_sample->noise = __cpu_to_be16(phyerr->nf_chains[chain_idx]);
+
+ bins = (u8 *)fftr;
+ bins += sizeof(*fftr) + ar->hw_params.spectral_bin_offset;
+
+ fft_sample->tsf = __cpu_to_be64(tsf);
+
+	/* max_exp was reported directly by previous hardware (ath9k);
+	 * maybe it's possible to get it by other means?
+	 */
+ fft_sample->max_exp = get_max_exp(fft_sample->max_index, peak_mag,
+ bin_len, bins);
+
+ memcpy(fft_sample->data, bins, bin_len);
+
+	/* The DC value (the value in the middle) is the blind spot of the
+	 * spectral sample and is invalid, so interpolate it.
+	 */
+ dc_pos = bin_len / 2;
+ fft_sample->data[dc_pos] = (fft_sample->data[dc_pos + 1] +
+ fft_sample->data[dc_pos - 1]) / 2;
+
+ send_fft_sample(ar, &fft_sample->tlv);
+
+ return 0;
+}
+
+static struct ath10k_vif *ath10k_get_spectral_vdev(struct ath10k *ar)
+{
+ struct ath10k_vif *arvif;
+
+ lockdep_assert_held(&ar->conf_mutex);
+
+ if (list_empty(&ar->arvifs))
+ return NULL;
+
+ /* if there already is a vif doing spectral, return that. */
+ list_for_each_entry(arvif, &ar->arvifs, list)
+ if (arvif->spectral_enabled)
+ return arvif;
+
+ /* otherwise, return the first vif. */
+ return list_first_entry(&ar->arvifs, typeof(*arvif), list);
+}
+
+static int ath10k_spectral_scan_trigger(struct ath10k *ar)
+{
+ struct ath10k_vif *arvif;
+ int res;
+ int vdev_id;
+
+ lockdep_assert_held(&ar->conf_mutex);
+
+ arvif = ath10k_get_spectral_vdev(ar);
+ if (!arvif)
+ return -ENODEV;
+ vdev_id = arvif->vdev_id;
+
+ if (ar->spectral.mode == SPECTRAL_DISABLED)
+ return 0;
+
+ res = ath10k_wmi_vdev_spectral_enable(ar, vdev_id,
+ WMI_SPECTRAL_TRIGGER_CMD_CLEAR,
+ WMI_SPECTRAL_ENABLE_CMD_ENABLE);
+ if (res < 0)
+ return res;
+
+ res = ath10k_wmi_vdev_spectral_enable(ar, vdev_id,
+ WMI_SPECTRAL_TRIGGER_CMD_TRIGGER,
+ WMI_SPECTRAL_ENABLE_CMD_ENABLE);
+ if (res < 0)
+ return res;
+
+ return 0;
+}
+
+static int ath10k_spectral_scan_config(struct ath10k *ar,
+ enum ath10k_spectral_mode mode)
+{
+ struct wmi_vdev_spectral_conf_arg arg;
+ struct ath10k_vif *arvif;
+ int vdev_id, count, res = 0;
+
+ lockdep_assert_held(&ar->conf_mutex);
+
+ arvif = ath10k_get_spectral_vdev(ar);
+ if (!arvif)
+ return -ENODEV;
+
+ vdev_id = arvif->vdev_id;
+
+ arvif->spectral_enabled = (mode != SPECTRAL_DISABLED);
+ ar->spectral.mode = mode;
+
+ res = ath10k_wmi_vdev_spectral_enable(ar, vdev_id,
+ WMI_SPECTRAL_TRIGGER_CMD_CLEAR,
+ WMI_SPECTRAL_ENABLE_CMD_DISABLE);
+ if (res < 0) {
+		ath10k_warn(ar, "failed to disable spectral scan: %d\n", res);
+ return res;
+ }
+
+ if (mode == SPECTRAL_DISABLED)
+ return 0;
+
+ if (mode == SPECTRAL_BACKGROUND)
+ count = WMI_SPECTRAL_COUNT_DEFAULT;
+ else
+ count = max_t(u8, 1, ar->spectral.config.count);
+
+ arg.vdev_id = vdev_id;
+ arg.scan_count = count;
+ arg.scan_period = WMI_SPECTRAL_PERIOD_DEFAULT;
+ arg.scan_priority = WMI_SPECTRAL_PRIORITY_DEFAULT;
+ arg.scan_fft_size = ar->spectral.config.fft_size;
+ arg.scan_gc_ena = WMI_SPECTRAL_GC_ENA_DEFAULT;
+ arg.scan_restart_ena = WMI_SPECTRAL_RESTART_ENA_DEFAULT;
+ arg.scan_noise_floor_ref = WMI_SPECTRAL_NOISE_FLOOR_REF_DEFAULT;
+ arg.scan_init_delay = WMI_SPECTRAL_INIT_DELAY_DEFAULT;
+ arg.scan_nb_tone_thr = WMI_SPECTRAL_NB_TONE_THR_DEFAULT;
+ arg.scan_str_bin_thr = WMI_SPECTRAL_STR_BIN_THR_DEFAULT;
+ arg.scan_wb_rpt_mode = WMI_SPECTRAL_WB_RPT_MODE_DEFAULT;
+ arg.scan_rssi_rpt_mode = WMI_SPECTRAL_RSSI_RPT_MODE_DEFAULT;
+ arg.scan_rssi_thr = WMI_SPECTRAL_RSSI_THR_DEFAULT;
+ arg.scan_pwr_format = WMI_SPECTRAL_PWR_FORMAT_DEFAULT;
+ arg.scan_rpt_mode = WMI_SPECTRAL_RPT_MODE_DEFAULT;
+ arg.scan_bin_scale = WMI_SPECTRAL_BIN_SCALE_DEFAULT;
+ arg.scan_dbm_adj = WMI_SPECTRAL_DBM_ADJ_DEFAULT;
+ arg.scan_chn_mask = WMI_SPECTRAL_CHN_MASK_DEFAULT;
+
+ res = ath10k_wmi_vdev_spectral_conf(ar, &arg);
+ if (res < 0) {
+ ath10k_warn(ar, "failed to configure spectral scan: %d\n", res);
+ return res;
+ }
+
+ return 0;
+}
+
+static ssize_t read_file_spec_scan_ctl(struct file *file, char __user *user_buf,
+ size_t count, loff_t *ppos)
+{
+ struct ath10k *ar = file->private_data;
+ char *mode = "";
+ size_t len;
+ enum ath10k_spectral_mode spectral_mode;
+
+ mutex_lock(&ar->conf_mutex);
+ spectral_mode = ar->spectral.mode;
+ mutex_unlock(&ar->conf_mutex);
+
+ switch (spectral_mode) {
+ case SPECTRAL_DISABLED:
+ mode = "disable";
+ break;
+ case SPECTRAL_BACKGROUND:
+ mode = "background";
+ break;
+ case SPECTRAL_MANUAL:
+ mode = "manual";
+ break;
+ }
+
+ len = strlen(mode);
+ return simple_read_from_buffer(user_buf, count, ppos, mode, len);
+}
+
+static ssize_t write_file_spec_scan_ctl(struct file *file,
+ const char __user *user_buf,
+ size_t count, loff_t *ppos)
+{
+ struct ath10k *ar = file->private_data;
+ char buf[32];
+ ssize_t len;
+ int res;
+
+ len = min(count, sizeof(buf) - 1);
+ if (copy_from_user(buf, user_buf, len))
+ return -EFAULT;
+
+ buf[len] = '\0';
+
+ mutex_lock(&ar->conf_mutex);
+
+ if (strncmp("trigger", buf, 7) == 0) {
+ if (ar->spectral.mode == SPECTRAL_MANUAL ||
+ ar->spectral.mode == SPECTRAL_BACKGROUND) {
+ /* reset the configuration to adopt possibly changed
+ * debugfs parameters
+ */
+ res = ath10k_spectral_scan_config(ar,
+ ar->spectral.mode);
+ if (res < 0) {
+ ath10k_warn(ar, "failed to reconfigure spectral scan: %d\n",
+ res);
+ }
+ res = ath10k_spectral_scan_trigger(ar);
+ if (res < 0) {
+ ath10k_warn(ar, "failed to trigger spectral scan: %d\n",
+ res);
+ }
+ } else {
+ res = -EINVAL;
+ }
+ } else if (strncmp("background", buf, 10) == 0) {
+ res = ath10k_spectral_scan_config(ar, SPECTRAL_BACKGROUND);
+ } else if (strncmp("manual", buf, 6) == 0) {
+ res = ath10k_spectral_scan_config(ar, SPECTRAL_MANUAL);
+ } else if (strncmp("disable", buf, 7) == 0) {
+ res = ath10k_spectral_scan_config(ar, SPECTRAL_DISABLED);
+ } else {
+ res = -EINVAL;
+ }
+
+ mutex_unlock(&ar->conf_mutex);
+
+ if (res < 0)
+ return res;
+
+ return count;
+}
+
+static const struct file_operations fops_spec_scan_ctl = {
+ .read = read_file_spec_scan_ctl,
+ .write = write_file_spec_scan_ctl,
+ .open = simple_open,
+ .owner = THIS_MODULE,
+ .llseek = default_llseek,
+};
+
+static ssize_t read_file_spectral_count(struct file *file,
+ char __user *user_buf,
+ size_t count, loff_t *ppos)
+{
+ struct ath10k *ar = file->private_data;
+ char buf[32];
+ size_t len;
+ u8 spectral_count;
+
+ mutex_lock(&ar->conf_mutex);
+ spectral_count = ar->spectral.config.count;
+ mutex_unlock(&ar->conf_mutex);
+
+ len = sprintf(buf, "%d\n", spectral_count);
+ return simple_read_from_buffer(user_buf, count, ppos, buf, len);
+}
+
+static ssize_t write_file_spectral_count(struct file *file,
+ const char __user *user_buf,
+ size_t count, loff_t *ppos)
+{
+ struct ath10k *ar = file->private_data;
+ unsigned long val;
+ char buf[32];
+ ssize_t len;
+
+ len = min(count, sizeof(buf) - 1);
+ if (copy_from_user(buf, user_buf, len))
+ return -EFAULT;
+
+ buf[len] = '\0';
+ if (kstrtoul(buf, 0, &val))
+ return -EINVAL;
+
+ if (val > 255)
+ return -EINVAL;
+
+ mutex_lock(&ar->conf_mutex);
+ ar->spectral.config.count = val;
+ mutex_unlock(&ar->conf_mutex);
+
+ return count;
+}
+
+static const struct file_operations fops_spectral_count = {
+ .read = read_file_spectral_count,
+ .write = write_file_spectral_count,
+ .open = simple_open,
+ .owner = THIS_MODULE,
+ .llseek = default_llseek,
+};
+
+static ssize_t read_file_spectral_bins(struct file *file,
+ char __user *user_buf,
+ size_t count, loff_t *ppos)
+{
+ struct ath10k *ar = file->private_data;
+ char buf[32];
+ unsigned int bins, fft_size, bin_scale;
+ size_t len;
+
+ mutex_lock(&ar->conf_mutex);
+
+ fft_size = ar->spectral.config.fft_size;
+ bin_scale = WMI_SPECTRAL_BIN_SCALE_DEFAULT;
+ bins = 1 << (fft_size - bin_scale);
+
+ mutex_unlock(&ar->conf_mutex);
+
+ len = sprintf(buf, "%d\n", bins);
+ return simple_read_from_buffer(user_buf, count, ppos, buf, len);
+}
+
+static ssize_t write_file_spectral_bins(struct file *file,
+ const char __user *user_buf,
+ size_t count, loff_t *ppos)
+{
+ struct ath10k *ar = file->private_data;
+ unsigned long val;
+ char buf[32];
+ ssize_t len;
+
+ len = min(count, sizeof(buf) - 1);
+ if (copy_from_user(buf, user_buf, len))
+ return -EFAULT;
+
+ buf[len] = '\0';
+ if (kstrtoul(buf, 0, &val))
+ return -EINVAL;
+
+ if (val < 64 || val > SPECTRAL_ATH10K_MAX_NUM_BINS)
+ return -EINVAL;
+
+ if (!is_power_of_2(val))
+ return -EINVAL;
+
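+	/* e.g. with a default bin scale of 1, writing 128 stores
+	 * fft_size = ilog2(128) + 1 = 8
+	 */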
+ mutex_lock(&ar->conf_mutex);
+ ar->spectral.config.fft_size = ilog2(val);
+ ar->spectral.config.fft_size += WMI_SPECTRAL_BIN_SCALE_DEFAULT;
+ mutex_unlock(&ar->conf_mutex);
+
+ return count;
+}
+
+static const struct file_operations fops_spectral_bins = {
+ .read = read_file_spectral_bins,
+ .write = write_file_spectral_bins,
+ .open = simple_open,
+ .owner = THIS_MODULE,
+ .llseek = default_llseek,
+};
+
+static struct dentry *create_buf_file_handler(const char *filename,
+ struct dentry *parent,
+ umode_t mode,
+ struct rchan_buf *buf,
+ int *is_global)
+{
+ struct dentry *buf_file;
+
+ buf_file = debugfs_create_file(filename, mode, parent, buf,
+ &relay_file_operations);
+ *is_global = 1;
+ return buf_file;
+}
+
+static int remove_buf_file_handler(struct dentry *dentry)
+{
+ debugfs_remove(dentry);
+
+ return 0;
+}
+
+static struct rchan_callbacks rfs_spec_scan_cb = {
+ .create_buf_file = create_buf_file_handler,
+ .remove_buf_file = remove_buf_file_handler,
+};
+
+int ath10k_spectral_start(struct ath10k *ar)
+{
+ struct ath10k_vif *arvif;
+
+ lockdep_assert_held(&ar->conf_mutex);
+
+ list_for_each_entry(arvif, &ar->arvifs, list)
+ arvif->spectral_enabled = 0;
+
+ ar->spectral.mode = SPECTRAL_DISABLED;
+ ar->spectral.config.count = WMI_SPECTRAL_COUNT_DEFAULT;
+ ar->spectral.config.fft_size = WMI_SPECTRAL_FFT_SIZE_DEFAULT;
+
+ return 0;
+}
+
+int ath10k_spectral_vif_stop(struct ath10k_vif *arvif)
+{
+ if (!arvif->spectral_enabled)
+ return 0;
+
+ return ath10k_spectral_scan_config(arvif->ar, SPECTRAL_DISABLED);
+}
+
+int ath10k_spectral_create(struct ath10k *ar)
+{
+	/* The buffer size covers whole channels in dual band up to 128 bins.
+	 * Scans with more than 128 bins need to be run on a single band at a
+	 * time.
+	 */
+ ar->spectral.rfs_chan_spec_scan = relay_open("spectral_scan",
+ ar->debug.debugfs_phy,
+ 1140, 2500,
+ &rfs_spec_scan_cb, NULL);
+ debugfs_create_file("spectral_scan_ctl",
+ 0600,
+ ar->debug.debugfs_phy, ar,
+ &fops_spec_scan_ctl);
+ debugfs_create_file("spectral_count",
+ 0600,
+ ar->debug.debugfs_phy, ar,
+ &fops_spectral_count);
+ debugfs_create_file("spectral_bins",
+ 0600,
+ ar->debug.debugfs_phy, ar,
+ &fops_spectral_bins);
+
+ return 0;
+}
+
+void ath10k_spectral_destroy(struct ath10k *ar)
+{
+ if (ar->spectral.rfs_chan_spec_scan) {
+ relay_close(ar->spectral.rfs_chan_spec_scan);
+ ar->spectral.rfs_chan_spec_scan = NULL;
+ }
+}
diff --git a/drivers/net/wireless/ath/ath10k/spectral.h b/drivers/net/wireless/ath/ath10k/spectral.h
new file mode 100644
index 000000000..13276f4dc
--- /dev/null
+++ b/drivers/net/wireless/ath/ath10k/spectral.h
@@ -0,0 +1,90 @@
+/*
+ * Copyright (c) 2013-2015 Qualcomm Atheros, Inc.
+ *
+ * Permission to use, copy, modify, and/or distribute this software for any
+ * purpose with or without fee is hereby granted, provided that the above
+ * copyright notice and this permission notice appear in all copies.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
+ * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
+ * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+ * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+ * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
+ * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+ */
+
+#ifndef SPECTRAL_H
+#define SPECTRAL_H
+
+#include "../spectral_common.h"
+
+/**
+ * struct ath10k_spec_scan - parameters for Atheros spectral scan
+ *
+ * @count: number of scan results requested for manual mode
+ * @fft_size: number of bins to be requested = 2^(fft_size - bin_scale)
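+ *	(e.g. fft_size 8 with a bin scale of 1 requests 128 bins)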
+ */
+struct ath10k_spec_scan {
+ u8 count;
+ u8 fft_size;
+};
+
+/* enum ath10k_spectral_mode:
+ *
+ * @SPECTRAL_DISABLED: spectral mode is disabled
+ * @SPECTRAL_BACKGROUND: hardware sends samples when it is not busy with
+ * something else.
+ * @SPECTRAL_MANUAL: spectral scan is enabled, triggering for samples
+ * is performed manually.
+ */
+enum ath10k_spectral_mode {
+ SPECTRAL_DISABLED = 0,
+ SPECTRAL_BACKGROUND,
+ SPECTRAL_MANUAL,
+};
+
+#ifdef CONFIG_ATH10K_SPECTRAL
+
+int ath10k_spectral_process_fft(struct ath10k *ar,
+ struct wmi_phyerr_ev_arg *phyerr,
+ const struct phyerr_fft_report *fftr,
+ size_t bin_len, u64 tsf);
+int ath10k_spectral_start(struct ath10k *ar);
+int ath10k_spectral_vif_stop(struct ath10k_vif *arvif);
+int ath10k_spectral_create(struct ath10k *ar);
+void ath10k_spectral_destroy(struct ath10k *ar);
+
+#else
+
+static inline int
+ath10k_spectral_process_fft(struct ath10k *ar,
+ struct wmi_phyerr_ev_arg *phyerr,
+ const struct phyerr_fft_report *fftr,
+ size_t bin_len, u64 tsf)
+{
+ return 0;
+}
+
+static inline int ath10k_spectral_start(struct ath10k *ar)
+{
+ return 0;
+}
+
+static inline int ath10k_spectral_vif_stop(struct ath10k_vif *arvif)
+{
+ return 0;
+}
+
+static inline int ath10k_spectral_create(struct ath10k *ar)
+{
+ return 0;
+}
+
+static inline void ath10k_spectral_destroy(struct ath10k *ar)
+{
+}
+
+#endif /* CONFIG_ATH10K_SPECTRAL */
+
+#endif /* SPECTRAL_H */
diff --git a/drivers/net/wireless/ath/ath10k/swap.c b/drivers/net/wireless/ath/ath10k/swap.c
new file mode 100644
index 000000000..e7f57efad
--- /dev/null
+++ b/drivers/net/wireless/ath/ath10k/swap.c
@@ -0,0 +1,208 @@
+/*
+ * Copyright (c) 2015-2016 Qualcomm Atheros, Inc.
+ *
+ * Permission to use, copy, modify, and/or distribute this software for any
+ * purpose with or without fee is hereby granted, provided that the above
+ * copyright notice and this permission notice appear in all copies.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
+ * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
+ * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+ * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+ * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
+ * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+ */
+
+/* This file implements the code swap logic. With the code swap feature,
+ * the target can run the firmware binary with an even smaller IRAM by
+ * using host memory to store some of the code segments.
+ */
+
+#include "core.h"
+#include "bmi.h"
+#include "debug.h"
+
+static int ath10k_swap_code_seg_fill(struct ath10k *ar,
+ struct ath10k_swap_code_seg_info *seg_info,
+ const void *data, size_t data_len)
+{
+ u8 *virt_addr = seg_info->virt_address[0];
+ u8 swap_magic[ATH10K_SWAP_CODE_SEG_MAGIC_BYTES_SZ] = {};
+ const u8 *fw_data = data;
+ union ath10k_swap_code_seg_item *swap_item;
+ u32 length = 0;
+ u32 payload_len;
+ u32 total_payload_len = 0;
+ u32 size_left = data_len;
+
+	/* Parse the swap bin and copy its content to host allocated memory.
+	 * Each item consists of an address, a length and the data. The last
+	 * 4 bytes are the target write address. Currently the address field
+	 * is not used.
+	 */
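+	/* A zero-length item marks the tail, which carries the magic
+	 * signature and the BMI write address.
+	 */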
+ seg_info->target_addr = -1;
+ while (size_left >= sizeof(*swap_item)) {
+ swap_item = (union ath10k_swap_code_seg_item *)fw_data;
+ payload_len = __le32_to_cpu(swap_item->tlv.length);
+ if ((payload_len > size_left) ||
+ (payload_len == 0 &&
+ size_left != sizeof(struct ath10k_swap_code_seg_tail))) {
+ ath10k_err(ar, "refusing to parse invalid tlv length %d\n",
+ payload_len);
+ return -EINVAL;
+ }
+
+ if (payload_len == 0) {
+ if (memcmp(swap_item->tail.magic_signature, swap_magic,
+ ATH10K_SWAP_CODE_SEG_MAGIC_BYTES_SZ)) {
+ ath10k_err(ar, "refusing an invalid swap file\n");
+ return -EINVAL;
+ }
+ seg_info->target_addr =
+ __le32_to_cpu(swap_item->tail.bmi_write_addr);
+ break;
+ }
+
+ memcpy(virt_addr, swap_item->tlv.data, payload_len);
+ virt_addr += payload_len;
+ length = payload_len + sizeof(struct ath10k_swap_code_seg_tlv);
+ size_left -= length;
+ fw_data += length;
+ total_payload_len += payload_len;
+ }
+
+ if (seg_info->target_addr == -1) {
+		ath10k_err(ar, "failed to parse swap file: tail not found\n");
+ return -EINVAL;
+ }
+ seg_info->seg_hw_info.swap_size = __cpu_to_le32(total_payload_len);
+
+ return 0;
+}
+
+static void
+ath10k_swap_code_seg_free(struct ath10k *ar,
+ struct ath10k_swap_code_seg_info *seg_info)
+{
+ u32 seg_size;
+
+ if (!seg_info)
+ return;
+
+ if (!seg_info->virt_address[0])
+ return;
+
+ seg_size = __le32_to_cpu(seg_info->seg_hw_info.size);
+ dma_free_coherent(ar->dev, seg_size, seg_info->virt_address[0],
+ seg_info->paddr[0]);
+}
+
+static struct ath10k_swap_code_seg_info *
+ath10k_swap_code_seg_alloc(struct ath10k *ar, size_t swap_bin_len)
+{
+ struct ath10k_swap_code_seg_info *seg_info;
+ void *virt_addr;
+ dma_addr_t paddr;
+
+ swap_bin_len = roundup(swap_bin_len, 2);
+ if (swap_bin_len > ATH10K_SWAP_CODE_SEG_BIN_LEN_MAX) {
+ ath10k_err(ar, "refusing code swap bin because it is too big %zu > %d\n",
+ swap_bin_len, ATH10K_SWAP_CODE_SEG_BIN_LEN_MAX);
+ return NULL;
+ }
+
+ seg_info = devm_kzalloc(ar->dev, sizeof(*seg_info), GFP_KERNEL);
+ if (!seg_info)
+ return NULL;
+
+ virt_addr = dma_alloc_coherent(ar->dev, swap_bin_len, &paddr,
+ GFP_KERNEL);
+ if (!virt_addr) {
+ ath10k_err(ar, "failed to allocate dma coherent memory\n");
+ return NULL;
+ }
+
+ seg_info->seg_hw_info.bus_addr[0] = __cpu_to_le32(paddr);
+ seg_info->seg_hw_info.size = __cpu_to_le32(swap_bin_len);
+ seg_info->seg_hw_info.swap_size = __cpu_to_le32(swap_bin_len);
+ seg_info->seg_hw_info.num_segs =
+ __cpu_to_le32(ATH10K_SWAP_CODE_SEG_NUM_SUPPORTED);
+ seg_info->seg_hw_info.size_log2 = __cpu_to_le32(ilog2(swap_bin_len));
+ seg_info->virt_address[0] = virt_addr;
+ seg_info->paddr[0] = paddr;
+
+ return seg_info;
+}
+
+int ath10k_swap_code_seg_configure(struct ath10k *ar,
+ const struct ath10k_fw_file *fw_file)
+{
+ int ret;
+ struct ath10k_swap_code_seg_info *seg_info = NULL;
+
+ if (!fw_file->firmware_swap_code_seg_info)
+ return 0;
+
+ ath10k_dbg(ar, ATH10K_DBG_BOOT, "boot found firmware code swap binary\n");
+
+ seg_info = fw_file->firmware_swap_code_seg_info;
+
+ ret = ath10k_bmi_write_memory(ar, seg_info->target_addr,
+ &seg_info->seg_hw_info,
+ sizeof(seg_info->seg_hw_info));
+ if (ret) {
+		ath10k_err(ar, "failed to write code swap segment information (%d)\n",
+ ret);
+ return ret;
+ }
+
+ return 0;
+}
+
+void ath10k_swap_code_seg_release(struct ath10k *ar,
+ struct ath10k_fw_file *fw_file)
+{
+ ath10k_swap_code_seg_free(ar, fw_file->firmware_swap_code_seg_info);
+
+ /* FIXME: these two assignments look to bein wrong place! Shouldn't
+ * they be in ath10k_core_free_firmware_files() like the rest?
+ */
+ fw_file->codeswap_data = NULL;
+ fw_file->codeswap_len = 0;
+
+ fw_file->firmware_swap_code_seg_info = NULL;
+}
+
+int ath10k_swap_code_seg_init(struct ath10k *ar, struct ath10k_fw_file *fw_file)
+{
+ int ret;
+ struct ath10k_swap_code_seg_info *seg_info;
+ const void *codeswap_data;
+ size_t codeswap_len;
+
+ codeswap_data = fw_file->codeswap_data;
+ codeswap_len = fw_file->codeswap_len;
+
+ if (!codeswap_len || !codeswap_data)
+ return 0;
+
+ seg_info = ath10k_swap_code_seg_alloc(ar, codeswap_len);
+ if (!seg_info) {
+ ath10k_err(ar, "failed to allocate fw code swap segment\n");
+ return -ENOMEM;
+ }
+
+ ret = ath10k_swap_code_seg_fill(ar, seg_info,
+ codeswap_data, codeswap_len);
+
+ if (ret) {
+ ath10k_warn(ar, "failed to initialize fw code swap segment: %d\n",
+ ret);
+ ath10k_swap_code_seg_free(ar, seg_info);
+ return ret;
+ }
+
+ fw_file->firmware_swap_code_seg_info = seg_info;
+
+ return 0;
+}
diff --git a/drivers/net/wireless/ath/ath10k/swap.h b/drivers/net/wireless/ath/ath10k/swap.h
new file mode 100644
index 000000000..fa602f15f
--- /dev/null
+++ b/drivers/net/wireless/ath/ath10k/swap.h
@@ -0,0 +1,70 @@
+/*
+ * Copyright (c) 2015-2016 Qualcomm Atheros, Inc.
+ *
+ * Permission to use, copy, modify, and/or distribute this software for any
+ * purpose with or without fee is hereby granted, provided that the above
+ * copyright notice and this permission notice appear in all copies.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
+ * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
+ * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+ * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+ * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
+ * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+ */
+
+#ifndef _SWAP_H_
+#define _SWAP_H_
+
+#define ATH10K_SWAP_CODE_SEG_BIN_LEN_MAX (512 * 1024)
+#define ATH10K_SWAP_CODE_SEG_MAGIC_BYTES_SZ 12
+#define ATH10K_SWAP_CODE_SEG_NUM_MAX 16
+/* Currently only one swap segment is supported */
+#define ATH10K_SWAP_CODE_SEG_NUM_SUPPORTED 1
+
+struct ath10k_fw_file;
+
+struct ath10k_swap_code_seg_tlv {
+ __le32 address;
+ __le32 length;
+ u8 data[0];
+} __packed;
+
+struct ath10k_swap_code_seg_tail {
+ u8 magic_signature[ATH10K_SWAP_CODE_SEG_MAGIC_BYTES_SZ];
+ __le32 bmi_write_addr;
+} __packed;
+
+union ath10k_swap_code_seg_item {
+ struct ath10k_swap_code_seg_tlv tlv;
+ struct ath10k_swap_code_seg_tail tail;
+} __packed;
+
+struct ath10k_swap_code_seg_hw_info {
+ /* Swap binary image size */
+ __le32 swap_size;
+ __le32 num_segs;
+
+ /* Swap data size */
+ __le32 size;
+ __le32 size_log2;
+ __le32 bus_addr[ATH10K_SWAP_CODE_SEG_NUM_MAX];
+ __le64 reserved[ATH10K_SWAP_CODE_SEG_NUM_MAX];
+} __packed;
+
+struct ath10k_swap_code_seg_info {
+ struct ath10k_swap_code_seg_hw_info seg_hw_info;
+ void *virt_address[ATH10K_SWAP_CODE_SEG_NUM_SUPPORTED];
+ u32 target_addr;
+ dma_addr_t paddr[ATH10K_SWAP_CODE_SEG_NUM_SUPPORTED];
+};
+
+int ath10k_swap_code_seg_configure(struct ath10k *ar,
+ const struct ath10k_fw_file *fw_file);
+void ath10k_swap_code_seg_release(struct ath10k *ar,
+ struct ath10k_fw_file *fw_file);
+int ath10k_swap_code_seg_init(struct ath10k *ar,
+ struct ath10k_fw_file *fw_file);
+
+#endif
diff --git a/drivers/net/wireless/ath/ath10k/targaddrs.h b/drivers/net/wireless/ath/ath10k/targaddrs.h
new file mode 100644
index 000000000..c2b5bad04
--- /dev/null
+++ b/drivers/net/wireless/ath/ath10k/targaddrs.h
@@ -0,0 +1,490 @@
+/*
+ * Copyright (c) 2005-2011 Atheros Communications Inc.
+ * Copyright (c) 2011-2016 Qualcomm Atheros, Inc.
+ *
+ * Permission to use, copy, modify, and/or distribute this software for any
+ * purpose with or without fee is hereby granted, provided that the above
+ * copyright notice and this permission notice appear in all copies.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
+ * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
+ * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+ * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+ * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
+ * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+ */
+
+#ifndef __TARGADDRS_H__
+#define __TARGADDRS_H__
+
+#include "hw.h"
+
+/*
+ * xxx_HOST_INTEREST_ADDRESS is the address in Target RAM of the
+ * host_interest structure. It must match the address of the _host_interest
+ * symbol (see linker script).
+ *
+ * Host Interest is shared between Host and Target in order to coordinate
+ * between the two, and is intended to remain constant (with additions only
+ * at the end) across software releases.
+ *
+ * All addresses are available here so that it's possible to
+ * write a single binary that works with all Target Types.
+ * May be used in assembler code as well as C.
+ */
+#define QCA988X_HOST_INTEREST_ADDRESS 0x00400800
+#define HOST_INTEREST_MAX_SIZE 0x200
+
+/*
+ * These are items that the Host may need to access via BMI or via the
+ * Diagnostic Window. The position of items in this structure must remain
+ * constant across firmware revisions! Types for each item must be fixed
+ * size across target and host platforms. More items may be added at the end.
+ */
+struct host_interest {
+ /*
+ * Pointer to application-defined area, if any.
+ * Set by Target application during startup.
+ */
+ u32 hi_app_host_interest; /* 0x00 */
+
+ /* Pointer to register dump area, valid after Target crash. */
+ u32 hi_failure_state; /* 0x04 */
+
+ /* Pointer to debug logging header */
+ u32 hi_dbglog_hdr; /* 0x08 */
+
+ u32 hi_unused0c; /* 0x0c */
+
+ /*
+ * General-purpose flag bits, similar to SOC_OPTION_* flags.
+ * Can be used by application rather than by OS.
+ */
+ u32 hi_option_flag; /* 0x10 */
+
+ /*
+ * Boolean that determines whether or not to
+ * display messages on the serial port.
+ */
+ u32 hi_serial_enable; /* 0x14 */
+
+ /* Start address of DataSet index, if any */
+ u32 hi_dset_list_head; /* 0x18 */
+
+ /* Override Target application start address */
+ u32 hi_app_start; /* 0x1c */
+
+ /* Clock and voltage tuning */
+ u32 hi_skip_clock_init; /* 0x20 */
+ u32 hi_core_clock_setting; /* 0x24 */
+ u32 hi_cpu_clock_setting; /* 0x28 */
+ u32 hi_system_sleep_setting; /* 0x2c */
+ u32 hi_xtal_control_setting; /* 0x30 */
+ u32 hi_pll_ctrl_setting_24ghz; /* 0x34 */
+ u32 hi_pll_ctrl_setting_5ghz; /* 0x38 */
+ u32 hi_ref_voltage_trim_setting; /* 0x3c */
+ u32 hi_clock_info; /* 0x40 */
+
+ /* Host uses BE CPU or not */
+ u32 hi_be; /* 0x44 */
+
+ u32 hi_stack; /* normal stack */ /* 0x48 */
+ u32 hi_err_stack; /* error stack */ /* 0x4c */
+ u32 hi_desired_cpu_speed_hz; /* 0x50 */
+
+ /* Pointer to Board Data */
+ u32 hi_board_data; /* 0x54 */
+
+ /*
+ * Indication of Board Data state:
+ * 0: board data is not yet initialized.
+ * 1: board data is initialized; unknown size
+ * >1: number of bytes of initialized board data
+ */
+ u32 hi_board_data_initialized; /* 0x58 */
+
+ u32 hi_dset_ram_index_table; /* 0x5c */
+
+ u32 hi_desired_baud_rate; /* 0x60 */
+ u32 hi_dbglog_config; /* 0x64 */
+ u32 hi_end_ram_reserve_sz; /* 0x68 */
+ u32 hi_mbox_io_block_sz; /* 0x6c */
+
+ u32 hi_num_bpatch_streams; /* 0x70 -- unused */
+ u32 hi_mbox_isr_yield_limit; /* 0x74 */
+
+ u32 hi_refclk_hz; /* 0x78 */
+ u32 hi_ext_clk_detected; /* 0x7c */
+ u32 hi_dbg_uart_txpin; /* 0x80 */
+ u32 hi_dbg_uart_rxpin; /* 0x84 */
+ u32 hi_hci_uart_baud; /* 0x88 */
+ u32 hi_hci_uart_pin_assignments; /* 0x8C */
+
+ u32 hi_hci_uart_baud_scale_val; /* 0x90 */
+ u32 hi_hci_uart_baud_step_val; /* 0x94 */
+
+ u32 hi_allocram_start; /* 0x98 */
+ u32 hi_allocram_sz; /* 0x9c */
+ u32 hi_hci_bridge_flags; /* 0xa0 */
+ u32 hi_hci_uart_support_pins; /* 0xa4 */
+
+ u32 hi_hci_uart_pwr_mgmt_params; /* 0xa8 */
+
+ /*
+ * 0xa8 - [1]: 0 = UART FC active low, 1 = UART FC active high
+ * [31:16]: wakeup timeout in ms
+ */
+ /* Pointer to extended board Data */
+ u32 hi_board_ext_data; /* 0xac */
+ u32 hi_board_ext_data_config; /* 0xb0 */
+ /*
+	 * Bit  [0]    : valid
+	 * Bits [31:16]: size
+ */
+	/*
+	 * hi_reset_flag controls what is done on a target reset, such as
+	 * restoring app_start after a warm reset, or preserving the host
+	 * interest area, ROM data, literals etc.
+	 */
+ u32 hi_reset_flag; /* 0xb4 */
+ /* indicate hi_reset_flag is valid */
+ u32 hi_reset_flag_valid; /* 0xb8 */
+ u32 hi_hci_uart_pwr_mgmt_params_ext; /* 0xbc */
+ /* 0xbc - [31:0]: idle timeout in ms */
+ /* ACS flags */
+ u32 hi_acs_flags; /* 0xc0 */
+ u32 hi_console_flags; /* 0xc4 */
+ u32 hi_nvram_state; /* 0xc8 */
+ u32 hi_option_flag2; /* 0xcc */
+
+ /* If non-zero, override values sent to Host in WMI_READY event. */
+ u32 hi_sw_version_override; /* 0xd0 */
+ u32 hi_abi_version_override; /* 0xd4 */
+
+ /*
+ * Percentage of high priority RX traffic to total expected RX traffic
+ * applicable only to ar6004
+ */
+ u32 hi_hp_rx_traffic_ratio; /* 0xd8 */
+
+ /* test applications flags */
+ u32 hi_test_apps_related; /* 0xdc */
+ /* location of test script */
+ u32 hi_ota_testscript; /* 0xe0 */
+ /* location of CAL data */
+ u32 hi_cal_data; /* 0xe4 */
+
+ /* Number of packet log buffers */
+ u32 hi_pktlog_num_buffers; /* 0xe8 */
+
+ /* wow extension configuration */
+ u32 hi_wow_ext_config; /* 0xec */
+ u32 hi_pwr_save_flags; /* 0xf0 */
+
+ /* Spatial Multiplexing Power Save (SMPS) options */
+ u32 hi_smps_options; /* 0xf4 */
+
+ /* Interconnect-specific state */
+ u32 hi_interconnect_state; /* 0xf8 */
+
+ /* Coex configuration flags */
+ u32 hi_coex_config; /* 0xfc */
+
+ /* Early allocation support */
+ u32 hi_early_alloc; /* 0x100 */
+ /* FW swap field */
+ /*
+ * Bits of this 32bit word will be used to pass specific swap
+ * instruction to FW
+ */
+ /*
+ * Bit 0 -- AP Nart descriptor no swap. When this bit is set
+ * FW will not swap TX descriptor. Meaning packets are formed
+ * on the target processor.
+ */
+ /* Bit 1 - unused */
+ u32 hi_fw_swap; /* 0x104 */
+
+ /* global arenas pointer address, used by host driver debug */
+ u32 hi_dynamic_mem_arenas_addr; /* 0x108 */
+
+ /* allocated bytes of DRAM use by allocated */
+ u32 hi_dynamic_mem_allocated; /* 0x10C */
+
+ /* remaining bytes of DRAM */
+ u32 hi_dynamic_mem_remaining; /* 0x110 */
+
+ /* memory track count, configured by host */
+ u32 hi_dynamic_mem_track_max; /* 0x114 */
+
+ /* minidump buffer */
+ u32 hi_minidump; /* 0x118 */
+
+ /* bdata's sig and key addr */
+ u32 hi_bd_sig_key; /* 0x11c */
+} __packed;
+
+#define HI_ITEM(item) offsetof(struct host_interest, item)
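+/* e.g. HI_ITEM(hi_board_data) evaluates to 0x54, matching the offsets
+ * noted in the structure above
+ */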
+
+/* Bits defined in hi_option_flag */
+
+/* Enable timer workaround */
+#define HI_OPTION_TIMER_WAR 0x01
+/* Limit BMI command credits */
+#define HI_OPTION_BMI_CRED_LIMIT 0x02
+/* Relay Dot11 hdr to/from host */
+#define HI_OPTION_RELAY_DOT11_HDR 0x04
+/* MAC addr method: 0 - locally administered, 1 - globally unique addrs */
+#define HI_OPTION_MAC_ADDR_METHOD 0x08
+/* Firmware Bridging */
+#define HI_OPTION_FW_BRIDGE 0x10
+/* Enable CPU profiling */
+#define HI_OPTION_ENABLE_PROFILE 0x20
+/* Disable debug logging */
+#define HI_OPTION_DISABLE_DBGLOG 0x40
+/* Skip Era Tracking */
+#define HI_OPTION_SKIP_ERA_TRACKING 0x80
+/* Disable PAPRD (debug) */
+#define HI_OPTION_PAPRD_DISABLE 0x100
+#define HI_OPTION_NUM_DEV_LSB 0x200
+#define HI_OPTION_NUM_DEV_MSB 0x800
+#define HI_OPTION_DEV_MODE_LSB 0x1000
+#define HI_OPTION_DEV_MODE_MSB 0x8000000
+/* Disable LowFreq Timer Stabilization */
+#define HI_OPTION_NO_LFT_STBL 0x10000000
+/* Skip regulatory scan */
+#define HI_OPTION_SKIP_REG_SCAN 0x20000000
+/*
+ * Do regulatory scan during init before
+ * sending WMI ready event to host
+ */
+#define HI_OPTION_INIT_REG_SCAN 0x40000000
+
+/* REV6: Do not adjust memory map */
+#define HI_OPTION_SKIP_MEMMAP 0x80000000
+
+#define HI_OPTION_MAC_ADDR_METHOD_SHIFT 3
+
+/* 2 bits of hi_option_flag are used to represent 4 modes */
+#define HI_OPTION_FW_MODE_IBSS 0x0 /* IBSS Mode */
+#define HI_OPTION_FW_MODE_BSS_STA 0x1 /* STA Mode */
+#define HI_OPTION_FW_MODE_AP 0x2 /* AP Mode */
+#define HI_OPTION_FW_MODE_BT30AMP 0x3 /* BT30 AMP Mode */
+
+/* 2 bits of hi_option_flag are used to represent 4 submodes */
+#define HI_OPTION_FW_SUBMODE_NONE 0x0 /* Normal mode */
+#define HI_OPTION_FW_SUBMODE_P2PDEV 0x1 /* p2p device mode */
+#define HI_OPTION_FW_SUBMODE_P2PCLIENT 0x2 /* p2p client mode */
+#define HI_OPTION_FW_SUBMODE_P2PGO 0x3 /* p2p go mode */
+
+/* Num dev Mask */
+#define HI_OPTION_NUM_DEV_MASK 0x7
+#define HI_OPTION_NUM_DEV_SHIFT 0x9
+
+/* firmware bridging */
+#define HI_OPTION_FW_BRIDGE_SHIFT 0x04
+
+/*
+ * Fw Mode/SubMode Mask
+ *-----------------------------------------------------------------------------
+ * SUB | SUB | SUB | SUB | | | |
+ *MODE[3] | MODE[2] | MODE[1] | MODE[0] | MODE[3] | MODE[2] | MODE[1] | MODE[0]
+ * (2) | (2) | (2) | (2) | (2) | (2) | (2) | (2)
+ *-----------------------------------------------------------------------------
+ */
+#define HI_OPTION_FW_MODE_BITS 0x2
+#define HI_OPTION_FW_MODE_MASK 0x3
+#define HI_OPTION_FW_MODE_SHIFT 0xC
+#define HI_OPTION_ALL_FW_MODE_MASK 0xFF
+
+#define HI_OPTION_FW_SUBMODE_BITS 0x2
+#define HI_OPTION_FW_SUBMODE_MASK 0x3
+#define HI_OPTION_FW_SUBMODE_SHIFT 0x14
+#define HI_OPTION_ALL_FW_SUBMODE_MASK 0xFF00
+#define HI_OPTION_ALL_FW_SUBMODE_SHIFT 0x8
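+/* e.g. selecting AP mode for the first device (illustrative):
+ * hi_option_flag |= HI_OPTION_FW_MODE_AP << HI_OPTION_FW_MODE_SHIFT;
+ */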
+
+/* hi_option_flag2 options */
+#define HI_OPTION_OFFLOAD_AMSDU 0x01
+#define HI_OPTION_DFS_SUPPORT 0x02 /* Enable DFS support */
+#define HI_OPTION_ENABLE_RFKILL 0x04 /* RFKill enable feature */
+#define HI_OPTION_RADIO_RETENTION_DISABLE 0x08 /* Disable radio retention */
+#define HI_OPTION_EARLY_CFG_DONE 0x10 /* Early configuration is complete */
+
+#define HI_OPTION_RF_KILL_SHIFT 0x2
+#define HI_OPTION_RF_KILL_MASK 0x1
+
+/* hi_reset_flag */
+/* preserve App Start address */
+#define HI_RESET_FLAG_PRESERVE_APP_START 0x01
+/* preserve host interest */
+#define HI_RESET_FLAG_PRESERVE_HOST_INTEREST 0x02
+/* preserve ROM data */
+#define HI_RESET_FLAG_PRESERVE_ROMDATA 0x04
+#define HI_RESET_FLAG_PRESERVE_NVRAM_STATE 0x08
+#define HI_RESET_FLAG_PRESERVE_BOOT_INFO 0x10
+#define HI_RESET_FLAG_WARM_RESET 0x20
+
+/* define hi_fw_swap bits */
+#define HI_DESC_IN_FW_BIT 0x01
+
+/* indicate the reset flag is valid */
+#define HI_RESET_FLAG_IS_VALID 0x12345678
+
+/* ACS is enabled */
+#define HI_ACS_FLAGS_ENABLED (1 << 0)
+/* Use physical WWAN device */
+#define HI_ACS_FLAGS_USE_WWAN (1 << 1)
+/* Use test VAP */
+#define HI_ACS_FLAGS_TEST_VAP (1 << 2)
+/* SDIO/mailbox ACS flag definitions */
+#define HI_ACS_FLAGS_SDIO_SWAP_MAILBOX_SET (1 << 0)
+#define HI_ACS_FLAGS_SDIO_REDUCE_TX_COMPL_SET (1 << 1)
+#define HI_ACS_FLAGS_ALT_DATA_CREDIT_SIZE (1 << 2)
+#define HI_ACS_FLAGS_SDIO_SWAP_MAILBOX_FW_ACK (1 << 16)
+#define HI_ACS_FLAGS_SDIO_REDUCE_TX_COMPL_FW_ACK (1 << 17)
+
+/*
+ * CONSOLE FLAGS
+ *
+ * Bit Range Meaning
+ * --------- --------------------------------
+ * 2..0 UART ID (0 = Default)
+ * 3 Baud Select (0 = 9600, 1 = 115200)
+ * 30..4 Reserved
+ * 31 Enable Console
+ *
+ */
+
+#define HI_CONSOLE_FLAGS_ENABLE (1 << 31)
+#define HI_CONSOLE_FLAGS_UART_MASK (0x7)
+#define HI_CONSOLE_FLAGS_UART_SHIFT 0
+#define HI_CONSOLE_FLAGS_BAUD_SELECT (1 << 3)
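+/* e.g. console enabled on the default UART at 115200 baud (illustrative):
+ * hi_console_flags = HI_CONSOLE_FLAGS_ENABLE | HI_CONSOLE_FLAGS_BAUD_SELECT;
+ */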
+
+/* SM power save options */
+#define HI_SMPS_ALLOW_MASK (0x00000001)
+#define HI_SMPS_MODE_MASK (0x00000002)
+#define HI_SMPS_MODE_STATIC (0x00000000)
+#define HI_SMPS_MODE_DYNAMIC (0x00000002)
+#define HI_SMPS_DISABLE_AUTO_MODE (0x00000004)
+#define HI_SMPS_DATA_THRESH_MASK (0x000007f8)
+#define HI_SMPS_DATA_THRESH_SHIFT (3)
+#define HI_SMPS_RSSI_THRESH_MASK (0x0007f800)
+#define HI_SMPS_RSSI_THRESH_SHIFT (11)
+#define HI_SMPS_LOWPWR_CM_MASK (0x00380000)
+#define HI_SMPS_LOWPWR_CM_SHIFT (15)
+#define HI_SMPS_HIPWR_CM_MASK (0x03c00000)
+#define HI_SMPS_HIPWR_CM_SHIFT (19)
+
+/*
+ * WOW Extension configuration
+ *
+ * Bit Range Meaning
+ * --------- --------------------------------
+ * 8..0 Size of each WOW pattern (max 511)
+ * 15..9 Number of patterns per list (max 127)
+ * 17..16 Number of lists (max 4)
+ * 30..18 Reserved
+ * 31 Enabled
+ *
+ * set values (except enable) to zeros for default settings
+ */
+
+#define HI_WOW_EXT_ENABLED_MASK (1 << 31)
+#define HI_WOW_EXT_NUM_LIST_SHIFT 16
+#define HI_WOW_EXT_NUM_LIST_MASK (0x3 << HI_WOW_EXT_NUM_LIST_SHIFT)
+#define HI_WOW_EXT_NUM_PATTERNS_SHIFT 9
+#define HI_WOW_EXT_NUM_PATTERNS_MASK (0x7F << HI_WOW_EXT_NUM_PATTERNS_SHIFT)
+#define HI_WOW_EXT_PATTERN_SIZE_SHIFT 0
+#define HI_WOW_EXT_PATTERN_SIZE_MASK (0x1FF << HI_WOW_EXT_PATTERN_SIZE_SHIFT)
+
+#define HI_WOW_EXT_MAKE_CONFIG(num_lists, count, size) \
+ ((((num_lists) << HI_WOW_EXT_NUM_LIST_SHIFT) & \
+ HI_WOW_EXT_NUM_LIST_MASK) | \
+ (((count) << HI_WOW_EXT_NUM_PATTERNS_SHIFT) & \
+ HI_WOW_EXT_NUM_PATTERNS_MASK) | \
+ (((size) << HI_WOW_EXT_PATTERN_SIZE_SHIFT) & \
+ HI_WOW_EXT_PATTERN_SIZE_MASK))
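+/* e.g. HI_WOW_EXT_MAKE_CONFIG(2, 16, 256) encodes two pattern lists with
+ * 16 patterns of up to 256 bytes each (illustrative values; OR in
+ * HI_WOW_EXT_ENABLED_MASK to enable)
+ */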
+
+#define HI_WOW_EXT_GET_NUM_LISTS(config) \
+ (((config) & HI_WOW_EXT_NUM_LIST_MASK) >> HI_WOW_EXT_NUM_LIST_SHIFT)
+#define HI_WOW_EXT_GET_NUM_PATTERNS(config) \
+ (((config) & HI_WOW_EXT_NUM_PATTERNS_MASK) >> \
+ HI_WOW_EXT_NUM_PATTERNS_SHIFT)
+#define HI_WOW_EXT_GET_PATTERN_SIZE(config) \
+ (((config) & HI_WOW_EXT_PATTERN_SIZE_MASK) >> \
+ HI_WOW_EXT_PATTERN_SIZE_SHIFT)
+
+/*
+ * Early allocation configuration
+ * Supports RAM bank configuration before BMI is done; this eases memory
+ * allocation at a very early stage.
+ * Bit Range Meaning
+ * --------- ----------------------------------
+ * [0:3]     number of banks assigned to be IRAM
+ * [4:15]    reserved
+ * [16:31]   magic number
+ *
+ * Note:
+ * 1. The target firmware checks the magic number and, if it matches,
+ *    considers bits [0:15] valid and uses them to calculate the end of
+ *    DRAM. Early allocation is then located in that area and may be
+ *    reclaimed when necessary.
+ * 2. If no magic number is found, early allocation happens at the "_end"
+ *    symbol of ROM, which is located before the app-data and might NOT be
+ *    re-claimable. If this is adopted, the link script should keep this
+ *    in mind to avoid data corruption.
+ */
+#define HI_EARLY_ALLOC_MAGIC 0x6d8a
+#define HI_EARLY_ALLOC_MAGIC_MASK 0xffff0000
+#define HI_EARLY_ALLOC_MAGIC_SHIFT 16
+#define HI_EARLY_ALLOC_IRAM_BANKS_MASK 0x0000000f
+#define HI_EARLY_ALLOC_IRAM_BANKS_SHIFT 0
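+/* e.g. hi_early_alloc = (HI_EARLY_ALLOC_MAGIC << HI_EARLY_ALLOC_MAGIC_SHIFT) | 2
+ * (i.e. 0x6d8a0002) passes HI_EARLY_ALLOC_VALID() and assigns two IRAM banks
+ */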
+
+#define HI_EARLY_ALLOC_VALID() \
+ ((((HOST_INTEREST->hi_early_alloc) & HI_EARLY_ALLOC_MAGIC_MASK) >> \
+ HI_EARLY_ALLOC_MAGIC_SHIFT) == (HI_EARLY_ALLOC_MAGIC))
+#define HI_EARLY_ALLOC_GET_IRAM_BANKS() \
+ (((HOST_INTEREST->hi_early_alloc) & HI_EARLY_ALLOC_IRAM_BANKS_MASK) \
+ >> HI_EARLY_ALLOC_IRAM_BANKS_SHIFT)
+
+/* power save flag bit definitions */
+#define HI_PWR_SAVE_LPL_ENABLED 0x1
+/* b1-b3 reserved */
+/* b4-b5: dev0 LPL type: 0 - none
+ *                       1 - Reduce Pwr Search
+ *                       2 - Reduce Pwr Listen
+ */
+/* b6-b7: dev1 LPL type, and so on for a max of 8 devices */
+#define HI_PWR_SAVE_LPL_DEV0_LSB 4
+#define HI_PWR_SAVE_LPL_DEV_MASK 0x3
+/* power save related utility macros */
+#define HI_LPL_ENABLED() \
+ ((HOST_INTEREST->hi_pwr_save_flags & HI_PWR_SAVE_LPL_ENABLED))
+#define HI_DEV_LPL_TYPE_GET(_devix) \
+ (HOST_INTEREST->hi_pwr_save_flags & ((HI_PWR_SAVE_LPL_DEV_MASK) << \
+ (HI_PWR_SAVE_LPL_DEV0_LSB + (_devix) * 2)))
+
+#define HOST_INTEREST_SMPS_IS_ALLOWED() \
+ ((HOST_INTEREST->hi_smps_options & HI_SMPS_ALLOW_MASK))
+
+/* Reserve 1024 bytes for extended board data */
+#define QCA988X_BOARD_DATA_SZ 7168
+#define QCA988X_BOARD_EXT_DATA_SZ 0
+
+#define QCA9887_BOARD_DATA_SZ 7168
+#define QCA9887_BOARD_EXT_DATA_SZ 0
+
+#define QCA6174_BOARD_DATA_SZ 8192
+#define QCA6174_BOARD_EXT_DATA_SZ 0
+
+#define QCA9377_BOARD_DATA_SZ QCA6174_BOARD_DATA_SZ
+#define QCA9377_BOARD_EXT_DATA_SZ 0
+
+#define QCA99X0_BOARD_DATA_SZ 12288
+#define QCA99X0_BOARD_EXT_DATA_SZ 0
+
+#define QCA4019_BOARD_DATA_SZ 12064
+#define QCA4019_BOARD_EXT_DATA_SZ 0
+
+#endif /* __TARGADDRS_H__ */
diff --git a/drivers/net/wireless/ath/ath10k/testmode.c b/drivers/net/wireless/ath/ath10k/testmode.c
new file mode 100644
index 000000000..c24ee6168
--- /dev/null
+++ b/drivers/net/wireless/ath/ath10k/testmode.c
@@ -0,0 +1,465 @@
+/*
+ * Copyright (c) 2014-2017 Qualcomm Atheros, Inc.
+ *
+ * Permission to use, copy, modify, and/or distribute this software for any
+ * purpose with or without fee is hereby granted, provided that the above
+ * copyright notice and this permission notice appear in all copies.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
+ * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
+ * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+ * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+ * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
+ * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+ */
+
+#include "testmode.h"
+
+#include <net/netlink.h>
+#include <linux/firmware.h>
+
+#include "debug.h"
+#include "wmi.h"
+#include "hif.h"
+#include "hw.h"
+#include "core.h"
+
+#include "testmode_i.h"
+
+static const struct nla_policy ath10k_tm_policy[ATH10K_TM_ATTR_MAX + 1] = {
+ [ATH10K_TM_ATTR_CMD] = { .type = NLA_U32 },
+ [ATH10K_TM_ATTR_DATA] = { .type = NLA_BINARY,
+ .len = ATH10K_TM_DATA_MAX_LEN },
+ [ATH10K_TM_ATTR_WMI_CMDID] = { .type = NLA_U32 },
+ [ATH10K_TM_ATTR_VERSION_MAJOR] = { .type = NLA_U32 },
+ [ATH10K_TM_ATTR_VERSION_MINOR] = { .type = NLA_U32 },
+};
+
+/* Returns true if callee consumes the skb and the skb should be discarded.
+ * Returns false if skb is not used. Does not sleep.
+ */
+bool ath10k_tm_event_wmi(struct ath10k *ar, u32 cmd_id, struct sk_buff *skb)
+{
+ struct sk_buff *nl_skb;
+ bool consumed;
+ int ret;
+
+ ath10k_dbg(ar, ATH10K_DBG_TESTMODE,
+ "testmode event wmi cmd_id %d skb %pK skb->len %d\n",
+ cmd_id, skb, skb->len);
+
+ ath10k_dbg_dump(ar, ATH10K_DBG_TESTMODE, NULL, "", skb->data, skb->len);
+
+ spin_lock_bh(&ar->data_lock);
+
+ if (!ar->testmode.utf_monitor) {
+ consumed = false;
+ goto out;
+ }
+
+	/* Only testmode.c should be handling events from the utf firmware,
+	 * otherwise all sorts of problems will arise as mac80211 operations
+	 * are not initialised.
+	 */
+ consumed = true;
+
+ nl_skb = cfg80211_testmode_alloc_event_skb(ar->hw->wiphy,
+ 2 * sizeof(u32) + skb->len,
+ GFP_ATOMIC);
+ if (!nl_skb) {
+ ath10k_warn(ar,
+ "failed to allocate skb for testmode wmi event\n");
+ goto out;
+ }
+
+ ret = nla_put_u32(nl_skb, ATH10K_TM_ATTR_CMD, ATH10K_TM_CMD_WMI);
+ if (ret) {
+ ath10k_warn(ar,
+			    "failed to put testmode wmi event cmd attribute: %d\n",
+ ret);
+ kfree_skb(nl_skb);
+ goto out;
+ }
+
+ ret = nla_put_u32(nl_skb, ATH10K_TM_ATTR_WMI_CMDID, cmd_id);
+ if (ret) {
+ ath10k_warn(ar,
+			    "failed to put testmode wmi event cmd_id: %d\n",
+ ret);
+ kfree_skb(nl_skb);
+ goto out;
+ }
+
+ ret = nla_put(nl_skb, ATH10K_TM_ATTR_DATA, skb->len, skb->data);
+ if (ret) {
+ ath10k_warn(ar,
+ "failed to copy skb to testmode wmi event: %d\n",
+ ret);
+ kfree_skb(nl_skb);
+ goto out;
+ }
+
+ cfg80211_testmode_event(nl_skb, GFP_ATOMIC);
+
+out:
+ spin_unlock_bh(&ar->data_lock);
+
+ return consumed;
+}
+
+static int ath10k_tm_cmd_get_version(struct ath10k *ar, struct nlattr *tb[])
+{
+ struct sk_buff *skb;
+ int ret;
+
+ ath10k_dbg(ar, ATH10K_DBG_TESTMODE,
+ "testmode cmd get version_major %d version_minor %d\n",
+ ATH10K_TESTMODE_VERSION_MAJOR,
+ ATH10K_TESTMODE_VERSION_MINOR);
+
+ skb = cfg80211_testmode_alloc_reply_skb(ar->hw->wiphy,
+ nla_total_size(sizeof(u32)));
+ if (!skb)
+ return -ENOMEM;
+
+ ret = nla_put_u32(skb, ATH10K_TM_ATTR_VERSION_MAJOR,
+ ATH10K_TESTMODE_VERSION_MAJOR);
+ if (ret) {
+ kfree_skb(skb);
+ return ret;
+ }
+
+ ret = nla_put_u32(skb, ATH10K_TM_ATTR_VERSION_MINOR,
+ ATH10K_TESTMODE_VERSION_MINOR);
+ if (ret) {
+ kfree_skb(skb);
+ return ret;
+ }
+
+ ret = nla_put_u32(skb, ATH10K_TM_ATTR_WMI_OP_VERSION,
+ ar->normal_mode_fw.fw_file.wmi_op_version);
+ if (ret) {
+ kfree_skb(skb);
+ return ret;
+ }
+
+ return cfg80211_testmode_reply(skb);
+}
+
+static int ath10k_tm_fetch_utf_firmware_api_1(struct ath10k *ar,
+ struct ath10k_fw_file *fw_file)
+{
+ char filename[100];
+ int ret;
+
+ snprintf(filename, sizeof(filename), "%s/%s",
+ ar->hw_params.fw.dir, ATH10K_FW_UTF_FILE);
+
+ /* load utf firmware image */
+ ret = firmware_request_nowarn(&fw_file->firmware, filename, ar->dev);
+ ath10k_dbg(ar, ATH10K_DBG_TESTMODE, "testmode fw request '%s': %d\n",
+ filename, ret);
+
+ if (ret) {
+ ath10k_warn(ar, "failed to retrieve utf firmware '%s': %d\n",
+ filename, ret);
+ return ret;
+ }
+
+	/* FW UTF API 1 ("utf.bin") does not advertise firmware features.
+	 * Do an ugly hack where we force the firmware features to match
+	 * the 10.1 branch so that wmi.c will use the correct WMI interface.
+	 */
+
+ fw_file->wmi_op_version = ATH10K_FW_WMI_OP_VERSION_10_1;
+ fw_file->htt_op_version = ATH10K_FW_HTT_OP_VERSION_10_1;
+ fw_file->firmware_data = fw_file->firmware->data;
+ fw_file->firmware_len = fw_file->firmware->size;
+
+ return 0;
+}
+
+static int ath10k_tm_fetch_firmware(struct ath10k *ar)
+{
+ struct ath10k_fw_components *utf_mode_fw;
+ int ret;
+
+ ret = ath10k_core_fetch_firmware_api_n(ar, ATH10K_FW_UTF_API2_FILE,
+ &ar->testmode.utf_mode_fw.fw_file);
+ if (ret == 0) {
+ ath10k_dbg(ar, ATH10K_DBG_TESTMODE, "testmode using fw utf api 2");
+ goto out;
+ }
+
+ ret = ath10k_tm_fetch_utf_firmware_api_1(ar, &ar->testmode.utf_mode_fw.fw_file);
+ if (ret) {
+ ath10k_err(ar, "failed to fetch utf firmware binary: %d", ret);
+ return ret;
+ }
+
+ ath10k_dbg(ar, ATH10K_DBG_TESTMODE, "testmode using utf api 1");
+
+out:
+ utf_mode_fw = &ar->testmode.utf_mode_fw;
+
+	/* Use the same board data file as the normal firmware uses (it's
+	 * still "owned" by normal_mode_fw so we shouldn't free it).
+	 */
+ utf_mode_fw->board_data = ar->normal_mode_fw.board_data;
+ utf_mode_fw->board_len = ar->normal_mode_fw.board_len;
+
+ if (!utf_mode_fw->fw_file.otp_data) {
+ ath10k_info(ar, "utf.bin didn't contain otp binary, taking it from the normal mode firmware");
+ utf_mode_fw->fw_file.otp_data = ar->normal_mode_fw.fw_file.otp_data;
+ utf_mode_fw->fw_file.otp_len = ar->normal_mode_fw.fw_file.otp_len;
+ }
+
+ return 0;
+}
+
+static int ath10k_tm_cmd_utf_start(struct ath10k *ar, struct nlattr *tb[])
+{
+ const char *ver;
+ int ret;
+
+ ath10k_dbg(ar, ATH10K_DBG_TESTMODE, "testmode cmd utf start\n");
+
+ mutex_lock(&ar->conf_mutex);
+
+ if (ar->state == ATH10K_STATE_UTF) {
+ ret = -EALREADY;
+ goto err;
+ }
+
+ /* start utf only when the driver is not in use */
+ if (ar->state != ATH10K_STATE_OFF) {
+ ret = -EBUSY;
+ goto err;
+ }
+
+ if (WARN_ON(ar->testmode.utf_mode_fw.fw_file.firmware != NULL)) {
+ /* utf image is already downloaded, it shouldn't be */
+ ret = -EEXIST;
+ goto err;
+ }
+
+ ret = ath10k_tm_fetch_firmware(ar);
+ if (ret) {
+ ath10k_err(ar, "failed to fetch UTF firmware: %d", ret);
+ goto err;
+ }
+
+ if (ar->testmode.utf_mode_fw.fw_file.codeswap_data &&
+ ar->testmode.utf_mode_fw.fw_file.codeswap_len) {
+ ret = ath10k_swap_code_seg_init(ar,
+ &ar->testmode.utf_mode_fw.fw_file);
+ if (ret) {
+ ath10k_warn(ar,
+ "failed to init utf code swap segment: %d\n",
+ ret);
+ goto err_release_utf_mode_fw;
+ }
+ }
+
+ spin_lock_bh(&ar->data_lock);
+ ar->testmode.utf_monitor = true;
+ spin_unlock_bh(&ar->data_lock);
+
+ ath10k_dbg(ar, ATH10K_DBG_TESTMODE, "testmode wmi version %d\n",
+ ar->testmode.utf_mode_fw.fw_file.wmi_op_version);
+
+ ret = ath10k_hif_power_up(ar);
+ if (ret) {
+ ath10k_err(ar, "failed to power up hif (testmode): %d\n", ret);
+ ar->state = ATH10K_STATE_OFF;
+ goto err_release_utf_mode_fw;
+ }
+
+ ret = ath10k_core_start(ar, ATH10K_FIRMWARE_MODE_UTF,
+ &ar->testmode.utf_mode_fw);
+ if (ret) {
+ ath10k_err(ar, "failed to start core (testmode): %d\n", ret);
+ ar->state = ATH10K_STATE_OFF;
+ goto err_power_down;
+ }
+
+ ar->state = ATH10K_STATE_UTF;
+
+ if (strlen(ar->testmode.utf_mode_fw.fw_file.fw_version) > 0)
+ ver = ar->testmode.utf_mode_fw.fw_file.fw_version;
+ else
+ ver = "API 1";
+
+ ath10k_info(ar, "UTF firmware %s started\n", ver);
+
+ mutex_unlock(&ar->conf_mutex);
+
+ return 0;
+
+err_power_down:
+ ath10k_hif_power_down(ar);
+
+err_release_utf_mode_fw:
+ if (ar->testmode.utf_mode_fw.fw_file.codeswap_data &&
+ ar->testmode.utf_mode_fw.fw_file.codeswap_len)
+ ath10k_swap_code_seg_release(ar,
+ &ar->testmode.utf_mode_fw.fw_file);
+
+ release_firmware(ar->testmode.utf_mode_fw.fw_file.firmware);
+ ar->testmode.utf_mode_fw.fw_file.firmware = NULL;
+
+err:
+ mutex_unlock(&ar->conf_mutex);
+
+ return ret;
+}
+
+static void __ath10k_tm_cmd_utf_stop(struct ath10k *ar)
+{
+ lockdep_assert_held(&ar->conf_mutex);
+
+ ath10k_core_stop(ar);
+ ath10k_hif_power_down(ar);
+
+ spin_lock_bh(&ar->data_lock);
+
+ ar->testmode.utf_monitor = false;
+
+ spin_unlock_bh(&ar->data_lock);
+
+ if (ar->testmode.utf_mode_fw.fw_file.codeswap_data &&
+ ar->testmode.utf_mode_fw.fw_file.codeswap_len)
+ ath10k_swap_code_seg_release(ar,
+ &ar->testmode.utf_mode_fw.fw_file);
+
+ release_firmware(ar->testmode.utf_mode_fw.fw_file.firmware);
+ ar->testmode.utf_mode_fw.fw_file.firmware = NULL;
+
+ ar->state = ATH10K_STATE_OFF;
+}
+
+static int ath10k_tm_cmd_utf_stop(struct ath10k *ar, struct nlattr *tb[])
+{
+ int ret;
+
+ ath10k_dbg(ar, ATH10K_DBG_TESTMODE, "testmode cmd utf stop\n");
+
+ mutex_lock(&ar->conf_mutex);
+
+ if (ar->state != ATH10K_STATE_UTF) {
+ ret = -ENETDOWN;
+ goto out;
+ }
+
+ __ath10k_tm_cmd_utf_stop(ar);
+
+ ret = 0;
+
+ ath10k_info(ar, "UTF firmware stopped\n");
+
+out:
+ mutex_unlock(&ar->conf_mutex);
+ return ret;
+}
+
+static int ath10k_tm_cmd_wmi(struct ath10k *ar, struct nlattr *tb[])
+{
+ struct sk_buff *skb;
+ int ret, buf_len;
+ u32 cmd_id;
+ void *buf;
+
+ mutex_lock(&ar->conf_mutex);
+
+ if (ar->state != ATH10K_STATE_UTF) {
+ ret = -ENETDOWN;
+ goto out;
+ }
+
+ if (!tb[ATH10K_TM_ATTR_DATA]) {
+ ret = -EINVAL;
+ goto out;
+ }
+
+ if (!tb[ATH10K_TM_ATTR_WMI_CMDID]) {
+ ret = -EINVAL;
+ goto out;
+ }
+
+ buf = nla_data(tb[ATH10K_TM_ATTR_DATA]);
+ buf_len = nla_len(tb[ATH10K_TM_ATTR_DATA]);
+ cmd_id = nla_get_u32(tb[ATH10K_TM_ATTR_WMI_CMDID]);
+
+ ath10k_dbg(ar, ATH10K_DBG_TESTMODE,
+ "testmode cmd wmi cmd_id %d buf %pK buf_len %d\n",
+ cmd_id, buf, buf_len);
+
+ ath10k_dbg_dump(ar, ATH10K_DBG_TESTMODE, NULL, "", buf, buf_len);
+
+ skb = ath10k_wmi_alloc_skb(ar, buf_len);
+ if (!skb) {
+ ret = -ENOMEM;
+ goto out;
+ }
+
+ memcpy(skb->data, buf, buf_len);
+
+ ret = ath10k_wmi_cmd_send(ar, skb, cmd_id);
+ if (ret) {
+ ath10k_warn(ar, "failed to transmit wmi command (testmode): %d\n",
+ ret);
+ goto out;
+ }
+
+ ret = 0;
+
+out:
+ mutex_unlock(&ar->conf_mutex);
+ return ret;
+}
+
+int ath10k_tm_cmd(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
+ void *data, int len)
+{
+ struct ath10k *ar = hw->priv;
+ struct nlattr *tb[ATH10K_TM_ATTR_MAX + 1];
+ int ret;
+
+ ret = nla_parse(tb, ATH10K_TM_ATTR_MAX, data, len, ath10k_tm_policy,
+ NULL);
+ if (ret)
+ return ret;
+
+ if (!tb[ATH10K_TM_ATTR_CMD])
+ return -EINVAL;
+
+ switch (nla_get_u32(tb[ATH10K_TM_ATTR_CMD])) {
+ case ATH10K_TM_CMD_GET_VERSION:
+ return ath10k_tm_cmd_get_version(ar, tb);
+ case ATH10K_TM_CMD_UTF_START:
+ return ath10k_tm_cmd_utf_start(ar, tb);
+ case ATH10K_TM_CMD_UTF_STOP:
+ return ath10k_tm_cmd_utf_stop(ar, tb);
+ case ATH10K_TM_CMD_WMI:
+ return ath10k_tm_cmd_wmi(ar, tb);
+ default:
+ return -EOPNOTSUPP;
+ }
+}
+
+void ath10k_testmode_destroy(struct ath10k *ar)
+{
+ mutex_lock(&ar->conf_mutex);
+
+ if (ar->state != ATH10K_STATE_UTF) {
+ /* utf firmware is not running, nothing to do */
+ goto out;
+ }
+
+ __ath10k_tm_cmd_utf_stop(ar);
+
+out:
+ mutex_unlock(&ar->conf_mutex);
+}
diff --git a/drivers/net/wireless/ath/ath10k/testmode.h b/drivers/net/wireless/ath/ath10k/testmode.h
new file mode 100644
index 000000000..9cdd15081
--- /dev/null
+++ b/drivers/net/wireless/ath/ath10k/testmode.h
@@ -0,0 +1,46 @@
+/*
+ * Copyright (c) 2014 Qualcomm Atheros, Inc.
+ *
+ * Permission to use, copy, modify, and/or distribute this software for any
+ * purpose with or without fee is hereby granted, provided that the above
+ * copyright notice and this permission notice appear in all copies.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
+ * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
+ * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+ * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+ * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
+ * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+ */
+
+#include "core.h"
+
+#ifdef CONFIG_NL80211_TESTMODE
+
+void ath10k_testmode_destroy(struct ath10k *ar);
+
+bool ath10k_tm_event_wmi(struct ath10k *ar, u32 cmd_id, struct sk_buff *skb);
+int ath10k_tm_cmd(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
+ void *data, int len);
+
+#else
+
+static inline void ath10k_testmode_destroy(struct ath10k *ar)
+{
+}
+
+static inline bool ath10k_tm_event_wmi(struct ath10k *ar, u32 cmd_id,
+ struct sk_buff *skb)
+{
+ return false;
+}
+
+static inline int ath10k_tm_cmd(struct ieee80211_hw *hw,
+ struct ieee80211_vif *vif,
+ void *data, int len)
+{
+ return 0;
+}
+
+#endif
diff --git a/drivers/net/wireless/ath/ath10k/testmode_i.h b/drivers/net/wireless/ath/ath10k/testmode_i.h
new file mode 100644
index 000000000..6514d1a14
--- /dev/null
+++ b/drivers/net/wireless/ath/ath10k/testmode_i.h
@@ -0,0 +1,71 @@
+/*
+ * Copyright (c) 2014,2017 Qualcomm Atheros, Inc.
+ *
+ * Permission to use, copy, modify, and/or distribute this software for any
+ * purpose with or without fee is hereby granted, provided that the above
+ * copyright notice and this permission notice appear in all copies.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
+ * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
+ * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+ * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+ * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
+ * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+ */
+
+/* "API" level of the ath10k testmode interface. Bump it after every
+ * incompatible interface change.
+ */
+#define ATH10K_TESTMODE_VERSION_MAJOR 1
+
+/* Bump this after every _compatible_ interface change, for example
+ * addition of a new command or an attribute.
+ */
+#define ATH10K_TESTMODE_VERSION_MINOR 0
+
+#define ATH10K_TM_DATA_MAX_LEN 5000
+
+enum ath10k_tm_attr {
+ __ATH10K_TM_ATTR_INVALID = 0,
+ ATH10K_TM_ATTR_CMD = 1,
+ ATH10K_TM_ATTR_DATA = 2,
+ ATH10K_TM_ATTR_WMI_CMDID = 3,
+ ATH10K_TM_ATTR_VERSION_MAJOR = 4,
+ ATH10K_TM_ATTR_VERSION_MINOR = 5,
+ ATH10K_TM_ATTR_WMI_OP_VERSION = 6,
+
+ /* keep last */
+ __ATH10K_TM_ATTR_AFTER_LAST,
+ ATH10K_TM_ATTR_MAX = __ATH10K_TM_ATTR_AFTER_LAST - 1,
+};
+
+/* All ath10k testmode interface commands specified in
+ * ATH10K_TM_ATTR_CMD
+ */
+enum ath10k_tm_cmd {
+	/* Returns the supported ath10k testmode interface version in
+	 * ATH10K_TM_ATTR_VERSION_MAJOR and ATH10K_TM_ATTR_VERSION_MINOR.
+	 * Always guaranteed to work. User space uses this to verify it's
+	 * using the correct version of the testmode interface.
+	 */
+ ATH10K_TM_CMD_GET_VERSION = 0,
+
+	/* Boots the UTF firmware; the netdev interface must be down at the
+ * time.
+ */
+ ATH10K_TM_CMD_UTF_START = 1,
+
+ /* Shuts down the UTF firmware and puts the driver back into OFF
+ * state.
+ */
+ ATH10K_TM_CMD_UTF_STOP = 2,
+
+	/* The command used to transmit a WMI command to the firmware and
+	 * the event used to receive WMI events from the firmware. Both
+	 * carry only the WMI payload, without the struct wmi_cmd_hdr
+	 * header. The command id is provided in ATH10K_TM_ATTR_WMI_CMDID
+	 * and the payload in ATH10K_TM_ATTR_DATA.
+	 */
+ ATH10K_TM_CMD_WMI = 3,
+};
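+
+/* A typical ATH10K_TM_CMD_WMI exchange, as seen from user space (a
+ * sketch only, not part of the interface): the testmode message carries
+ * ATH10K_TM_ATTR_CMD = ATH10K_TM_CMD_WMI, the raw WMI command id in
+ * ATH10K_TM_ATTR_WMI_CMDID and the payload (without the wmi_cmd_hdr)
+ * in ATH10K_TM_ATTR_DATA, at most ATH10K_TM_DATA_MAX_LEN bytes.
+ */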
diff --git a/drivers/net/wireless/ath/ath10k/thermal.c b/drivers/net/wireless/ath/ath10k/thermal.c
new file mode 100644
index 000000000..aa8978a8d
--- /dev/null
+++ b/drivers/net/wireless/ath/ath10k/thermal.c
@@ -0,0 +1,221 @@
+/*
+ * Copyright (c) 2014-2015 Qualcomm Atheros, Inc.
+ *
+ * Permission to use, copy, modify, and/or distribute this software for any
+ * purpose with or without fee is hereby granted, provided that the above
+ * copyright notice and this permission notice appear in all copies.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
+ * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
+ * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+ * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+ * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
+ * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+ */
+
+#include <linux/device.h>
+#include <linux/sysfs.h>
+#include <linux/thermal.h>
+#include <linux/hwmon.h>
+#include <linux/hwmon-sysfs.h>
+#include "core.h"
+#include "debug.h"
+#include "wmi-ops.h"
+
+static int
+ath10k_thermal_get_max_throttle_state(struct thermal_cooling_device *cdev,
+ unsigned long *state)
+{
+ *state = ATH10K_THERMAL_THROTTLE_MAX;
+
+ return 0;
+}
+
+static int
+ath10k_thermal_get_cur_throttle_state(struct thermal_cooling_device *cdev,
+ unsigned long *state)
+{
+ struct ath10k *ar = cdev->devdata;
+
+ mutex_lock(&ar->conf_mutex);
+ *state = ar->thermal.throttle_state;
+ mutex_unlock(&ar->conf_mutex);
+
+ return 0;
+}
+
+static int
+ath10k_thermal_set_cur_throttle_state(struct thermal_cooling_device *cdev,
+ unsigned long throttle_state)
+{
+ struct ath10k *ar = cdev->devdata;
+
+ if (throttle_state > ATH10K_THERMAL_THROTTLE_MAX) {
+ ath10k_warn(ar, "throttle state %ld is exceeding the limit %d\n",
+ throttle_state, ATH10K_THERMAL_THROTTLE_MAX);
+ return -EINVAL;
+ }
+ mutex_lock(&ar->conf_mutex);
+ ar->thermal.throttle_state = throttle_state;
+ ath10k_thermal_set_throttling(ar);
+ mutex_unlock(&ar->conf_mutex);
+ return 0;
+}
+
+static const struct thermal_cooling_device_ops ath10k_thermal_ops = {
+ .get_max_state = ath10k_thermal_get_max_throttle_state,
+ .get_cur_state = ath10k_thermal_get_cur_throttle_state,
+ .set_cur_state = ath10k_thermal_set_cur_throttle_state,
+};
+
+static ssize_t ath10k_thermal_show_temp(struct device *dev,
+ struct device_attribute *attr,
+ char *buf)
+{
+ struct ath10k *ar = dev_get_drvdata(dev);
+ int ret, temperature;
+ unsigned long time_left;
+
+ mutex_lock(&ar->conf_mutex);
+
+ /* Can't get temperature when the card is off */
+ if (ar->state != ATH10K_STATE_ON) {
+ ret = -ENETDOWN;
+ goto out;
+ }
+
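+	/* The temperature is reported asynchronously: the WMI event handler
+	 * calls ath10k_thermal_event_temperature(), which stores the value
+	 * and completes wmi_sync, so reset the completion before asking.
+	 */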
+ reinit_completion(&ar->thermal.wmi_sync);
+ ret = ath10k_wmi_pdev_get_temperature(ar);
+ if (ret) {
+ ath10k_warn(ar, "failed to read temperature %d\n", ret);
+ goto out;
+ }
+
+ if (test_bit(ATH10K_FLAG_CRASH_FLUSH, &ar->dev_flags)) {
+ ret = -ESHUTDOWN;
+ goto out;
+ }
+
+ time_left = wait_for_completion_timeout(&ar->thermal.wmi_sync,
+ ATH10K_THERMAL_SYNC_TIMEOUT_HZ);
+ if (!time_left) {
+ ath10k_warn(ar, "failed to synchronize thermal read\n");
+ ret = -ETIMEDOUT;
+ goto out;
+ }
+
+ spin_lock_bh(&ar->data_lock);
+ temperature = ar->thermal.temperature;
+ spin_unlock_bh(&ar->data_lock);
+
+	/* display in millidegrees Celsius */
+ ret = snprintf(buf, PAGE_SIZE, "%d\n", temperature * 1000);
+out:
+ mutex_unlock(&ar->conf_mutex);
+ return ret;
+}
+
+void ath10k_thermal_event_temperature(struct ath10k *ar, int temperature)
+{
+ spin_lock_bh(&ar->data_lock);
+ ar->thermal.temperature = temperature;
+ spin_unlock_bh(&ar->data_lock);
+ complete(&ar->thermal.wmi_sync);
+}
+
+static SENSOR_DEVICE_ATTR(temp1_input, 0444, ath10k_thermal_show_temp,
+ NULL, 0);
+
+static struct attribute *ath10k_hwmon_attrs[] = {
+ &sensor_dev_attr_temp1_input.dev_attr.attr,
+ NULL,
+};
+ATTRIBUTE_GROUPS(ath10k_hwmon);
+
+void ath10k_thermal_set_throttling(struct ath10k *ar)
+{
+ u32 period, duration, enabled;
+ int ret;
+
+ lockdep_assert_held(&ar->conf_mutex);
+
+ if (!ar->wmi.ops->gen_pdev_set_quiet_mode)
+ return;
+
+ if (ar->state != ATH10K_STATE_ON)
+ return;
+
+ period = ar->thermal.quiet_period;
+ duration = (period * ar->thermal.throttle_state) / 100;
+ enabled = duration ? 1 : 0;
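+
+	/* The throttle state acts as a duty-cycle percentage: with the
+	 * default quiet period of 100 (in whatever time unit the firmware
+	 * uses) and throttle_state 40, the radio is quieted for 40 of
+	 * every 100 units.
+	 */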
+
+ ret = ath10k_wmi_pdev_set_quiet_mode(ar, period, duration,
+ ATH10K_QUIET_START_OFFSET,
+ enabled);
+ if (ret) {
+ ath10k_warn(ar, "failed to set quiet mode period %u duarion %u enabled %u ret %d\n",
+ period, duration, enabled, ret);
+ }
+}
+
+int ath10k_thermal_register(struct ath10k *ar)
+{
+ struct thermal_cooling_device *cdev;
+ struct device *hwmon_dev;
+ int ret;
+
+ cdev = thermal_cooling_device_register("ath10k_thermal", ar,
+ &ath10k_thermal_ops);
+
+ if (IS_ERR(cdev)) {
+ ath10k_err(ar, "failed to setup thermal device result: %ld\n",
+ PTR_ERR(cdev));
+ return -EINVAL;
+ }
+
+ ret = sysfs_create_link(&ar->dev->kobj, &cdev->device.kobj,
+ "cooling_device");
+ if (ret) {
+ ath10k_err(ar, "failed to create cooling device symlink\n");
+ goto err_cooling_destroy;
+ }
+
+ ar->thermal.cdev = cdev;
+ ar->thermal.quiet_period = ATH10K_QUIET_PERIOD_DEFAULT;
+
+ /* Do not register hwmon device when temperature reading is not
+ * supported by firmware
+ */
+ if (!(ar->wmi.ops->gen_pdev_get_temperature))
+ return 0;
+
+	/* Avoid a linking error on devm_hwmon_device_register_with_groups
+	 * when CONFIG_HWMON is not reachable; linux/hwmon.h lacks the
+	 * proper no-op stubs.
+	 */
+ if (!IS_REACHABLE(CONFIG_HWMON))
+ return 0;
+
+ hwmon_dev = devm_hwmon_device_register_with_groups(ar->dev,
+ "ath10k_hwmon", ar,
+ ath10k_hwmon_groups);
+ if (IS_ERR(hwmon_dev)) {
+ ath10k_err(ar, "failed to register hwmon device: %ld\n",
+ PTR_ERR(hwmon_dev));
+ ret = -EINVAL;
+ goto err_remove_link;
+ }
+ return 0;
+
+err_remove_link:
+ sysfs_remove_link(&ar->dev->kobj, "cooling_device");
+err_cooling_destroy:
+ thermal_cooling_device_unregister(cdev);
+ return ret;
+}
+
+void ath10k_thermal_unregister(struct ath10k *ar)
+{
+ sysfs_remove_link(&ar->dev->kobj, "cooling_device");
+ thermal_cooling_device_unregister(ar->thermal.cdev);
+}
diff --git a/drivers/net/wireless/ath/ath10k/thermal.h b/drivers/net/wireless/ath/ath10k/thermal.h
new file mode 100644
index 000000000..65e241954
--- /dev/null
+++ b/drivers/net/wireless/ath/ath10k/thermal.h
@@ -0,0 +1,64 @@
+/*
+ * Copyright (c) 2014-2016 Qualcomm Atheros, Inc.
+ *
+ * Permission to use, copy, modify, and/or distribute this software for any
+ * purpose with or without fee is hereby granted, provided that the above
+ * copyright notice and this permission notice appear in all copies.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
+ * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
+ * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+ * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+ * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
+ * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+ */
+#ifndef _THERMAL_
+#define _THERMAL_
+
+#define ATH10K_QUIET_PERIOD_DEFAULT 100
+#define ATH10K_QUIET_PERIOD_MIN 25
+#define ATH10K_QUIET_START_OFFSET 10
+#define ATH10K_HWMON_NAME_LEN 15
+#define ATH10K_THERMAL_SYNC_TIMEOUT_HZ (5 * HZ)
+#define ATH10K_THERMAL_THROTTLE_MAX 100
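+
+/* throttle_state is interpreted as a percentage (0-100) of the quiet
+ * period during which the radio is silenced; see
+ * ath10k_thermal_set_throttling().
+ */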
+
+struct ath10k_thermal {
+ struct thermal_cooling_device *cdev;
+ struct completion wmi_sync;
+
+ /* protected by conf_mutex */
+ u32 throttle_state;
+ u32 quiet_period;
+	/* temperature value in degrees Celsius,
+ * protected by data_lock
+ */
+ int temperature;
+};
+
+#if IS_REACHABLE(CONFIG_THERMAL)
+int ath10k_thermal_register(struct ath10k *ar);
+void ath10k_thermal_unregister(struct ath10k *ar);
+void ath10k_thermal_event_temperature(struct ath10k *ar, int temperature);
+void ath10k_thermal_set_throttling(struct ath10k *ar);
+#else
+static inline int ath10k_thermal_register(struct ath10k *ar)
+{
+ return 0;
+}
+
+static inline void ath10k_thermal_unregister(struct ath10k *ar)
+{
+}
+
+static inline void ath10k_thermal_event_temperature(struct ath10k *ar,
+ int temperature)
+{
+}
+
+static inline void ath10k_thermal_set_throttling(struct ath10k *ar)
+{
+}
+
+#endif
+#endif /* _THERMAL_ */
diff --git a/drivers/net/wireless/ath/ath10k/trace.c b/drivers/net/wireless/ath/ath10k/trace.c
new file mode 100644
index 000000000..4a31e2c6f
--- /dev/null
+++ b/drivers/net/wireless/ath/ath10k/trace.c
@@ -0,0 +1,20 @@
+/*
+ * Copyright (c) 2012 Qualcomm Atheros, Inc.
+ *
+ * Permission to use, copy, modify, and/or distribute this software for any
+ * purpose with or without fee is hereby granted, provided that the above
+ * copyright notice and this permission notice appear in all copies.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
+ * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
+ * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+ * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+ * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
+ * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+ */
+
+#include <linux/module.h>
+
+#define CREATE_TRACE_POINTS
+#include "trace.h"
diff --git a/drivers/net/wireless/ath/ath10k/trace.h b/drivers/net/wireless/ath/ath10k/trace.h
new file mode 100644
index 000000000..7d2fac342
--- /dev/null
+++ b/drivers/net/wireless/ath/ath10k/trace.h
@@ -0,0 +1,544 @@
+/*
+ * Copyright (c) 2005-2011 Atheros Communications Inc.
+ * Copyright (c) 2011-2016 Qualcomm Atheros, Inc.
+ *
+ * Permission to use, copy, modify, and/or distribute this software for any
+ * purpose with or without fee is hereby granted, provided that the above
+ * copyright notice and this permission notice appear in all copies.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
+ * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
+ * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+ * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+ * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
+ * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+ */
+
+#if !defined(_TRACE_H_) || defined(TRACE_HEADER_MULTI_READ)
+
+#include <linux/tracepoint.h>
+#include "core.h"
+
+#if !defined(_TRACE_H_)
+static inline u32 ath10k_frm_hdr_len(const void *buf, size_t len)
+{
+ const struct ieee80211_hdr *hdr = buf;
+
+	/* In some rare cases (e.g. fcs error) the device reports a frame
+	 * buffer shorter than what the frame header implies (e.g. len = 0).
+	 * The buffer can still be accessed so do a simple min() to
+	 * guarantee the caller doesn't get a value greater than len.
+ */
+ return min_t(u32, len, ieee80211_hdrlen(hdr->frame_control));
+}
+#endif
+
+#define _TRACE_H_
+
+/* create empty functions when tracing is disabled */
+#if !defined(CONFIG_ATH10K_TRACING)
+#undef TRACE_EVENT
+#define TRACE_EVENT(name, proto, ...) \
+static inline void trace_ ## name(proto) {}
+#undef DECLARE_EVENT_CLASS
+#define DECLARE_EVENT_CLASS(...)
+#undef DEFINE_EVENT
+#define DEFINE_EVENT(evt_class, name, proto, ...) \
+static inline void trace_ ## name(proto) {}
+#endif /* !CONFIG_ATH10K_TRACING */
+
+#undef TRACE_SYSTEM
+#define TRACE_SYSTEM ath10k
+
+#define ATH10K_MSG_MAX 400
+
+DECLARE_EVENT_CLASS(ath10k_log_event,
+ TP_PROTO(struct ath10k *ar, struct va_format *vaf),
+ TP_ARGS(ar, vaf),
+ TP_STRUCT__entry(
+ __string(device, dev_name(ar->dev))
+ __string(driver, dev_driver_string(ar->dev))
+ __dynamic_array(char, msg, ATH10K_MSG_MAX)
+ ),
+ TP_fast_assign(
+ __assign_str(device, dev_name(ar->dev));
+ __assign_str(driver, dev_driver_string(ar->dev));
+ WARN_ON_ONCE(vsnprintf(__get_dynamic_array(msg),
+ ATH10K_MSG_MAX,
+ vaf->fmt,
+ *vaf->va) >= ATH10K_MSG_MAX);
+ ),
+ TP_printk(
+ "%s %s %s",
+ __get_str(driver),
+ __get_str(device),
+ __get_str(msg)
+ )
+);
+
+DEFINE_EVENT(ath10k_log_event, ath10k_log_err,
+ TP_PROTO(struct ath10k *ar, struct va_format *vaf),
+ TP_ARGS(ar, vaf)
+);
+
+DEFINE_EVENT(ath10k_log_event, ath10k_log_warn,
+ TP_PROTO(struct ath10k *ar, struct va_format *vaf),
+ TP_ARGS(ar, vaf)
+);
+
+DEFINE_EVENT(ath10k_log_event, ath10k_log_info,
+ TP_PROTO(struct ath10k *ar, struct va_format *vaf),
+ TP_ARGS(ar, vaf)
+);
+
+TRACE_EVENT(ath10k_log_dbg,
+ TP_PROTO(struct ath10k *ar, unsigned int level, struct va_format *vaf),
+ TP_ARGS(ar, level, vaf),
+ TP_STRUCT__entry(
+ __string(device, dev_name(ar->dev))
+ __string(driver, dev_driver_string(ar->dev))
+ __field(unsigned int, level)
+ __dynamic_array(char, msg, ATH10K_MSG_MAX)
+ ),
+ TP_fast_assign(
+ __assign_str(device, dev_name(ar->dev));
+ __assign_str(driver, dev_driver_string(ar->dev));
+ __entry->level = level;
+ WARN_ON_ONCE(vsnprintf(__get_dynamic_array(msg),
+ ATH10K_MSG_MAX,
+ vaf->fmt,
+ *vaf->va) >= ATH10K_MSG_MAX);
+ ),
+ TP_printk(
+ "%s %s %s",
+ __get_str(driver),
+ __get_str(device),
+ __get_str(msg)
+ )
+);
+
+TRACE_EVENT(ath10k_log_dbg_dump,
+ TP_PROTO(struct ath10k *ar, const char *msg, const char *prefix,
+ const void *buf, size_t buf_len),
+
+ TP_ARGS(ar, msg, prefix, buf, buf_len),
+
+ TP_STRUCT__entry(
+ __string(device, dev_name(ar->dev))
+ __string(driver, dev_driver_string(ar->dev))
+ __string(msg, msg)
+ __string(prefix, prefix)
+ __field(size_t, buf_len)
+ __dynamic_array(u8, buf, buf_len)
+ ),
+
+ TP_fast_assign(
+ __assign_str(device, dev_name(ar->dev));
+ __assign_str(driver, dev_driver_string(ar->dev));
+ __assign_str(msg, msg);
+ __assign_str(prefix, prefix);
+ __entry->buf_len = buf_len;
+ memcpy(__get_dynamic_array(buf), buf, buf_len);
+ ),
+
+ TP_printk(
+ "%s %s %s/%s\n",
+ __get_str(driver),
+ __get_str(device),
+ __get_str(prefix),
+ __get_str(msg)
+ )
+);
+
+TRACE_EVENT(ath10k_wmi_cmd,
+ TP_PROTO(struct ath10k *ar, int id, const void *buf, size_t buf_len),
+
+ TP_ARGS(ar, id, buf, buf_len),
+
+ TP_STRUCT__entry(
+ __string(device, dev_name(ar->dev))
+ __string(driver, dev_driver_string(ar->dev))
+ __field(unsigned int, id)
+ __field(size_t, buf_len)
+ __dynamic_array(u8, buf, buf_len)
+ ),
+
+ TP_fast_assign(
+ __assign_str(device, dev_name(ar->dev));
+ __assign_str(driver, dev_driver_string(ar->dev));
+ __entry->id = id;
+ __entry->buf_len = buf_len;
+ memcpy(__get_dynamic_array(buf), buf, buf_len);
+ ),
+
+ TP_printk(
+ "%s %s id %d len %zu",
+ __get_str(driver),
+ __get_str(device),
+ __entry->id,
+ __entry->buf_len
+ )
+);
+
+TRACE_EVENT(ath10k_wmi_event,
+ TP_PROTO(struct ath10k *ar, int id, const void *buf, size_t buf_len),
+
+ TP_ARGS(ar, id, buf, buf_len),
+
+ TP_STRUCT__entry(
+ __string(device, dev_name(ar->dev))
+ __string(driver, dev_driver_string(ar->dev))
+ __field(unsigned int, id)
+ __field(size_t, buf_len)
+ __dynamic_array(u8, buf, buf_len)
+ ),
+
+ TP_fast_assign(
+ __assign_str(device, dev_name(ar->dev));
+ __assign_str(driver, dev_driver_string(ar->dev));
+ __entry->id = id;
+ __entry->buf_len = buf_len;
+ memcpy(__get_dynamic_array(buf), buf, buf_len);
+ ),
+
+ TP_printk(
+ "%s %s id %d len %zu",
+ __get_str(driver),
+ __get_str(device),
+ __entry->id,
+ __entry->buf_len
+ )
+);
+
+TRACE_EVENT(ath10k_htt_stats,
+ TP_PROTO(struct ath10k *ar, const void *buf, size_t buf_len),
+
+ TP_ARGS(ar, buf, buf_len),
+
+ TP_STRUCT__entry(
+ __string(device, dev_name(ar->dev))
+ __string(driver, dev_driver_string(ar->dev))
+ __field(size_t, buf_len)
+ __dynamic_array(u8, buf, buf_len)
+ ),
+
+ TP_fast_assign(
+ __assign_str(device, dev_name(ar->dev));
+ __assign_str(driver, dev_driver_string(ar->dev));
+ __entry->buf_len = buf_len;
+ memcpy(__get_dynamic_array(buf), buf, buf_len);
+ ),
+
+ TP_printk(
+ "%s %s len %zu",
+ __get_str(driver),
+ __get_str(device),
+ __entry->buf_len
+ )
+);
+
+TRACE_EVENT(ath10k_wmi_dbglog,
+ TP_PROTO(struct ath10k *ar, const void *buf, size_t buf_len),
+
+ TP_ARGS(ar, buf, buf_len),
+
+ TP_STRUCT__entry(
+ __string(device, dev_name(ar->dev))
+ __string(driver, dev_driver_string(ar->dev))
+		__field(u8, hw_type)
+ __field(size_t, buf_len)
+ __dynamic_array(u8, buf, buf_len)
+ ),
+
+ TP_fast_assign(
+ __assign_str(device, dev_name(ar->dev));
+ __assign_str(driver, dev_driver_string(ar->dev));
+ __entry->hw_type = ar->hw_rev;
+ __entry->buf_len = buf_len;
+ memcpy(__get_dynamic_array(buf), buf, buf_len);
+ ),
+
+ TP_printk(
+ "%s %s %d len %zu",
+ __get_str(driver),
+ __get_str(device),
+ __entry->hw_type,
+ __entry->buf_len
+ )
+);
+
+TRACE_EVENT(ath10k_htt_pktlog,
+ TP_PROTO(struct ath10k *ar, const void *buf, u16 buf_len),
+
+ TP_ARGS(ar, buf, buf_len),
+
+ TP_STRUCT__entry(
+ __string(device, dev_name(ar->dev))
+ __string(driver, dev_driver_string(ar->dev))
+		__field(u8, hw_type)
+ __field(u16, buf_len)
+ __dynamic_array(u8, pktlog, buf_len)
+ ),
+
+ TP_fast_assign(
+ __assign_str(device, dev_name(ar->dev));
+ __assign_str(driver, dev_driver_string(ar->dev));
+ __entry->hw_type = ar->hw_rev;
+ __entry->buf_len = buf_len;
+ memcpy(__get_dynamic_array(pktlog), buf, buf_len);
+ ),
+
+ TP_printk(
+ "%s %s %d size %hu",
+ __get_str(driver),
+ __get_str(device),
+ __entry->hw_type,
+ __entry->buf_len
+ )
+);
+
+TRACE_EVENT(ath10k_htt_tx,
+ TP_PROTO(struct ath10k *ar, u16 msdu_id, u16 msdu_len,
+ u8 vdev_id, u8 tid),
+
+ TP_ARGS(ar, msdu_id, msdu_len, vdev_id, tid),
+
+ TP_STRUCT__entry(
+ __string(device, dev_name(ar->dev))
+ __string(driver, dev_driver_string(ar->dev))
+ __field(u16, msdu_id)
+ __field(u16, msdu_len)
+ __field(u8, vdev_id)
+ __field(u8, tid)
+ ),
+
+ TP_fast_assign(
+ __assign_str(device, dev_name(ar->dev));
+ __assign_str(driver, dev_driver_string(ar->dev));
+ __entry->msdu_id = msdu_id;
+ __entry->msdu_len = msdu_len;
+ __entry->vdev_id = vdev_id;
+ __entry->tid = tid;
+ ),
+
+ TP_printk(
+ "%s %s msdu_id %d msdu_len %d vdev_id %d tid %d",
+ __get_str(driver),
+ __get_str(device),
+ __entry->msdu_id,
+ __entry->msdu_len,
+ __entry->vdev_id,
+ __entry->tid
+ )
+);
+
+TRACE_EVENT(ath10k_txrx_tx_unref,
+ TP_PROTO(struct ath10k *ar, u16 msdu_id),
+
+ TP_ARGS(ar, msdu_id),
+
+ TP_STRUCT__entry(
+ __string(device, dev_name(ar->dev))
+ __string(driver, dev_driver_string(ar->dev))
+ __field(u16, msdu_id)
+ ),
+
+ TP_fast_assign(
+ __assign_str(device, dev_name(ar->dev));
+ __assign_str(driver, dev_driver_string(ar->dev));
+ __entry->msdu_id = msdu_id;
+ ),
+
+ TP_printk(
+ "%s %s msdu_id %d",
+ __get_str(driver),
+ __get_str(device),
+ __entry->msdu_id
+ )
+);
+
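+/* The hdr/payload event classes split a frame at the 802.11 header
+ * boundary so a full frame can be reassembled from the two tracepoints
+ * while each part can still be enabled and filtered separately.
+ */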
+DECLARE_EVENT_CLASS(ath10k_hdr_event,
+ TP_PROTO(struct ath10k *ar, const void *data, size_t len),
+
+ TP_ARGS(ar, data, len),
+
+ TP_STRUCT__entry(
+ __string(device, dev_name(ar->dev))
+ __string(driver, dev_driver_string(ar->dev))
+ __field(size_t, len)
+ __dynamic_array(u8, data, ath10k_frm_hdr_len(data, len))
+ ),
+
+ TP_fast_assign(
+ __assign_str(device, dev_name(ar->dev));
+ __assign_str(driver, dev_driver_string(ar->dev));
+ __entry->len = ath10k_frm_hdr_len(data, len);
+ memcpy(__get_dynamic_array(data), data, __entry->len);
+ ),
+
+ TP_printk(
+ "%s %s len %zu\n",
+ __get_str(driver),
+ __get_str(device),
+ __entry->len
+ )
+);
+
+DECLARE_EVENT_CLASS(ath10k_payload_event,
+ TP_PROTO(struct ath10k *ar, const void *data, size_t len),
+
+ TP_ARGS(ar, data, len),
+
+ TP_STRUCT__entry(
+ __string(device, dev_name(ar->dev))
+ __string(driver, dev_driver_string(ar->dev))
+ __field(size_t, len)
+ __dynamic_array(u8, payload, (len -
+ ath10k_frm_hdr_len(data, len)))
+ ),
+
+ TP_fast_assign(
+ __assign_str(device, dev_name(ar->dev));
+ __assign_str(driver, dev_driver_string(ar->dev));
+ __entry->len = len - ath10k_frm_hdr_len(data, len);
+ memcpy(__get_dynamic_array(payload),
+ data + ath10k_frm_hdr_len(data, len), __entry->len);
+ ),
+
+ TP_printk(
+ "%s %s len %zu\n",
+ __get_str(driver),
+ __get_str(device),
+ __entry->len
+ )
+);
+
+DEFINE_EVENT(ath10k_hdr_event, ath10k_tx_hdr,
+ TP_PROTO(struct ath10k *ar, const void *data, size_t len),
+ TP_ARGS(ar, data, len)
+);
+
+DEFINE_EVENT(ath10k_payload_event, ath10k_tx_payload,
+ TP_PROTO(struct ath10k *ar, const void *data, size_t len),
+ TP_ARGS(ar, data, len)
+);
+
+DEFINE_EVENT(ath10k_hdr_event, ath10k_rx_hdr,
+ TP_PROTO(struct ath10k *ar, const void *data, size_t len),
+ TP_ARGS(ar, data, len)
+);
+
+DEFINE_EVENT(ath10k_payload_event, ath10k_rx_payload,
+ TP_PROTO(struct ath10k *ar, const void *data, size_t len),
+ TP_ARGS(ar, data, len)
+);
+
+TRACE_EVENT(ath10k_htt_rx_desc,
+ TP_PROTO(struct ath10k *ar, const void *data, size_t len),
+
+ TP_ARGS(ar, data, len),
+
+ TP_STRUCT__entry(
+ __string(device, dev_name(ar->dev))
+ __string(driver, dev_driver_string(ar->dev))
+		__field(u8, hw_type)
+ __field(u16, len)
+ __dynamic_array(u8, rxdesc, len)
+ ),
+
+ TP_fast_assign(
+ __assign_str(device, dev_name(ar->dev));
+ __assign_str(driver, dev_driver_string(ar->dev));
+ __entry->hw_type = ar->hw_rev;
+ __entry->len = len;
+ memcpy(__get_dynamic_array(rxdesc), data, len);
+ ),
+
+ TP_printk(
+ "%s %s %d rxdesc len %d",
+ __get_str(driver),
+ __get_str(device),
+ __entry->hw_type,
+ __entry->len
+ )
+);
+
+TRACE_EVENT(ath10k_wmi_diag_container,
+ TP_PROTO(struct ath10k *ar,
+ u8 type,
+ u32 timestamp,
+ u32 code,
+ u16 len,
+ const void *data),
+
+ TP_ARGS(ar, type, timestamp, code, len, data),
+
+ TP_STRUCT__entry(
+ __string(device, dev_name(ar->dev))
+ __string(driver, dev_driver_string(ar->dev))
+ __field(u8, type)
+ __field(u32, timestamp)
+ __field(u32, code)
+ __field(u16, len)
+ __dynamic_array(u8, data, len)
+ ),
+
+ TP_fast_assign(
+ __assign_str(device, dev_name(ar->dev));
+ __assign_str(driver, dev_driver_string(ar->dev));
+ __entry->type = type;
+ __entry->timestamp = timestamp;
+ __entry->code = code;
+ __entry->len = len;
+ memcpy(__get_dynamic_array(data), data, len);
+ ),
+
+ TP_printk(
+ "%s %s diag container type %hhu timestamp %u code %u len %d",
+ __get_str(driver),
+ __get_str(device),
+ __entry->type,
+ __entry->timestamp,
+ __entry->code,
+ __entry->len
+ )
+);
+
+TRACE_EVENT(ath10k_wmi_diag,
+ TP_PROTO(struct ath10k *ar, const void *data, size_t len),
+
+ TP_ARGS(ar, data, len),
+
+ TP_STRUCT__entry(
+ __string(device, dev_name(ar->dev))
+ __string(driver, dev_driver_string(ar->dev))
+ __field(u16, len)
+ __dynamic_array(u8, data, len)
+ ),
+
+ TP_fast_assign(
+ __assign_str(device, dev_name(ar->dev));
+ __assign_str(driver, dev_driver_string(ar->dev));
+ __entry->len = len;
+ memcpy(__get_dynamic_array(data), data, len);
+ ),
+
+ TP_printk(
+ "%s %s tlv diag len %d",
+ __get_str(driver),
+ __get_str(device),
+ __entry->len
+ )
+);
+
+#endif /* _TRACE_H_ || TRACE_HEADER_MULTI_READ */
+
+/* we don't want to use include/trace/events */
+#undef TRACE_INCLUDE_PATH
+#define TRACE_INCLUDE_PATH .
+#undef TRACE_INCLUDE_FILE
+#define TRACE_INCLUDE_FILE trace
+
+/* This part must be outside protection */
+#include <trace/define_trace.h>
diff --git a/drivers/net/wireless/ath/ath10k/txrx.c b/drivers/net/wireless/ath/ath10k/txrx.c
new file mode 100644
index 000000000..888a8f4af
--- /dev/null
+++ b/drivers/net/wireless/ath/ath10k/txrx.c
@@ -0,0 +1,274 @@
+/*
+ * Copyright (c) 2005-2011 Atheros Communications Inc.
+ * Copyright (c) 2011-2016 Qualcomm Atheros, Inc.
+ * Copyright (c) 2018, The Linux Foundation. All rights reserved.
+ *
+ * Permission to use, copy, modify, and/or distribute this software for any
+ * purpose with or without fee is hereby granted, provided that the above
+ * copyright notice and this permission notice appear in all copies.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
+ * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
+ * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+ * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+ * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
+ * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+ */
+
+#include "core.h"
+#include "txrx.h"
+#include "htt.h"
+#include "mac.h"
+#include "debug.h"
+
+static void ath10k_report_offchan_tx(struct ath10k *ar, struct sk_buff *skb)
+{
+ struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
+
+ if (likely(!(info->flags & IEEE80211_TX_CTL_TX_OFFCHAN)))
+ return;
+
+ if (ath10k_mac_tx_frm_has_freq(ar))
+ return;
+
+ /* If the original wait_for_completion() timed out before
+ * {data,mgmt}_tx_completed() was called then we could complete
+ * offchan_tx_completed for a different skb. Prevent this by using
+ * offchan_tx_skb.
+ */
+ spin_lock_bh(&ar->data_lock);
+ if (ar->offchan_tx_skb != skb) {
+ ath10k_warn(ar, "completed old offchannel frame\n");
+ goto out;
+ }
+
+ complete(&ar->offchan_tx_completed);
+ ar->offchan_tx_skb = NULL; /* just for sanity */
+
+ ath10k_dbg(ar, ATH10K_DBG_HTT, "completed offchannel skb %pK\n", skb);
+out:
+ spin_unlock_bh(&ar->data_lock);
+}
+
+int ath10k_txrx_tx_unref(struct ath10k_htt *htt,
+ const struct htt_tx_done *tx_done)
+{
+ struct ath10k *ar = htt->ar;
+ struct device *dev = ar->dev;
+ struct ieee80211_tx_info *info;
+ struct ieee80211_txq *txq;
+ struct ath10k_skb_cb *skb_cb;
+ struct ath10k_txq *artxq;
+ struct sk_buff *msdu;
+
+ ath10k_dbg(ar, ATH10K_DBG_HTT,
+ "htt tx completion msdu_id %u status %d\n",
+ tx_done->msdu_id, tx_done->status);
+
+ if (tx_done->msdu_id >= htt->max_num_pending_tx) {
+ ath10k_warn(ar, "warning: msdu_id %d too big, ignoring\n",
+ tx_done->msdu_id);
+ return -EINVAL;
+ }
+
+ spin_lock_bh(&htt->tx_lock);
+ msdu = idr_find(&htt->pending_tx, tx_done->msdu_id);
+ if (!msdu) {
+ ath10k_warn(ar, "received tx completion for invalid msdu_id: %d\n",
+ tx_done->msdu_id);
+ spin_unlock_bh(&htt->tx_lock);
+ return -ENOENT;
+ }
+
+ skb_cb = ATH10K_SKB_CB(msdu);
+ txq = skb_cb->txq;
+
+ if (txq) {
+ artxq = (void *)txq->drv_priv;
+ artxq->num_fw_queued--;
+ }
+
+ ath10k_htt_tx_free_msdu_id(htt, tx_done->msdu_id);
+ ath10k_htt_tx_dec_pending(htt);
+ spin_unlock_bh(&htt->tx_lock);
+
+ dma_unmap_single(dev, skb_cb->paddr, msdu->len, DMA_TO_DEVICE);
+
+ ath10k_report_offchan_tx(htt->ar, msdu);
+
+ info = IEEE80211_SKB_CB(msdu);
+ memset(&info->status, 0, sizeof(info->status));
+ info->status.rates[0].idx = -1;
+
+ trace_ath10k_txrx_tx_unref(ar, tx_done->msdu_id);
+
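+	/* Ack reporting: default to IEEE80211_TX_STAT_ACK for frames that
+	 * expected an ack, then downgrade based on the HTT completion
+	 * status; no-ack frames that made it out are marked
+	 * NOACK_TRANSMITTED instead.
+	 */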
+ if (!(info->flags & IEEE80211_TX_CTL_NO_ACK))
+ info->flags |= IEEE80211_TX_STAT_ACK;
+
+ if (tx_done->status == HTT_TX_COMPL_STATE_NOACK)
+ info->flags &= ~IEEE80211_TX_STAT_ACK;
+
+ if ((tx_done->status == HTT_TX_COMPL_STATE_ACK) &&
+ (info->flags & IEEE80211_TX_CTL_NO_ACK))
+ info->flags |= IEEE80211_TX_STAT_NOACK_TRANSMITTED;
+
+ if (tx_done->status == HTT_TX_COMPL_STATE_DISCARD) {
+ if (info->flags & IEEE80211_TX_CTL_NO_ACK)
+ info->flags &= ~IEEE80211_TX_STAT_NOACK_TRANSMITTED;
+ else
+ info->flags &= ~IEEE80211_TX_STAT_ACK;
+ }
+
+ if (tx_done->status == HTT_TX_COMPL_STATE_ACK &&
+ tx_done->ack_rssi != ATH10K_INVALID_RSSI) {
+ info->status.ack_signal = ATH10K_DEFAULT_NOISE_FLOOR +
+ tx_done->ack_rssi;
+ info->status.is_valid_ack_signal = true;
+ }
+
+ ieee80211_tx_status(htt->ar->hw, msdu);
+ /* we do not own the msdu anymore */
+
+ return 0;
+}
+
+struct ath10k_peer *ath10k_peer_find(struct ath10k *ar, int vdev_id,
+ const u8 *addr)
+{
+ struct ath10k_peer *peer;
+
+ lockdep_assert_held(&ar->data_lock);
+
+ list_for_each_entry(peer, &ar->peers, list) {
+ if (peer->vdev_id != vdev_id)
+ continue;
+ if (!ether_addr_equal(peer->addr, addr))
+ continue;
+
+ return peer;
+ }
+
+ return NULL;
+}
+
+struct ath10k_peer *ath10k_peer_find_by_id(struct ath10k *ar, int peer_id)
+{
+ struct ath10k_peer *peer;
+
+ if (peer_id >= BITS_PER_TYPE(peer->peer_ids))
+ return NULL;
+
+ lockdep_assert_held(&ar->data_lock);
+
+ list_for_each_entry(peer, &ar->peers, list)
+ if (test_bit(peer_id, peer->peer_ids))
+ return peer;
+
+ return NULL;
+}
+
+static int ath10k_wait_for_peer_common(struct ath10k *ar, int vdev_id,
+ const u8 *addr, bool expect_mapped)
+{
+ long time_left;
+
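+	/* The wait condition is a GCC statement expression: it re-checks
+	 * the peer list under data_lock on every wakeup and also completes
+	 * the wait once a firmware crash flush has been flagged.
+	 */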
+ time_left = wait_event_timeout(ar->peer_mapping_wq, ({
+ bool mapped;
+
+ spin_lock_bh(&ar->data_lock);
+ mapped = !!ath10k_peer_find(ar, vdev_id, addr);
+ spin_unlock_bh(&ar->data_lock);
+
+ (mapped == expect_mapped ||
+ test_bit(ATH10K_FLAG_CRASH_FLUSH, &ar->dev_flags));
+ }), 3 * HZ);
+
+ if (time_left == 0)
+ return -ETIMEDOUT;
+
+ return 0;
+}
+
+int ath10k_wait_for_peer_created(struct ath10k *ar, int vdev_id, const u8 *addr)
+{
+ return ath10k_wait_for_peer_common(ar, vdev_id, addr, true);
+}
+
+int ath10k_wait_for_peer_deleted(struct ath10k *ar, int vdev_id, const u8 *addr)
+{
+ return ath10k_wait_for_peer_common(ar, vdev_id, addr, false);
+}
+
+void ath10k_peer_map_event(struct ath10k_htt *htt,
+ struct htt_peer_map_event *ev)
+{
+ struct ath10k *ar = htt->ar;
+ struct ath10k_peer *peer;
+
+ if (ev->peer_id >= ATH10K_MAX_NUM_PEER_IDS) {
+ ath10k_warn(ar,
+ "received htt peer map event with idx out of bounds: %hu\n",
+ ev->peer_id);
+ return;
+ }
+
+ spin_lock_bh(&ar->data_lock);
+ peer = ath10k_peer_find(ar, ev->vdev_id, ev->addr);
+ if (!peer) {
+ peer = kzalloc(sizeof(*peer), GFP_ATOMIC);
+ if (!peer)
+ goto exit;
+
+ peer->vdev_id = ev->vdev_id;
+ ether_addr_copy(peer->addr, ev->addr);
+ list_add(&peer->list, &ar->peers);
+ wake_up(&ar->peer_mapping_wq);
+ }
+
+ ath10k_dbg(ar, ATH10K_DBG_HTT, "htt peer map vdev %d peer %pM id %d\n",
+ ev->vdev_id, ev->addr, ev->peer_id);
+
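+	/* Firmware may map several HTT peer ids to the same peer entry,
+	 * hence the peer_ids bitmap; the entry is only freed once the last
+	 * id is unmapped (see ath10k_peer_unmap_event()).
+	 */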
+ WARN_ON(ar->peer_map[ev->peer_id] && (ar->peer_map[ev->peer_id] != peer));
+ ar->peer_map[ev->peer_id] = peer;
+ set_bit(ev->peer_id, peer->peer_ids);
+exit:
+ spin_unlock_bh(&ar->data_lock);
+}
+
+void ath10k_peer_unmap_event(struct ath10k_htt *htt,
+ struct htt_peer_unmap_event *ev)
+{
+ struct ath10k *ar = htt->ar;
+ struct ath10k_peer *peer;
+
+ if (ev->peer_id >= ATH10K_MAX_NUM_PEER_IDS) {
+ ath10k_warn(ar,
+ "received htt peer unmap event with idx out of bounds: %hu\n",
+ ev->peer_id);
+ return;
+ }
+
+ spin_lock_bh(&ar->data_lock);
+ peer = ath10k_peer_find_by_id(ar, ev->peer_id);
+ if (!peer) {
+ ath10k_warn(ar, "peer-unmap-event: unknown peer id %d\n",
+ ev->peer_id);
+ goto exit;
+ }
+
+ ath10k_dbg(ar, ATH10K_DBG_HTT, "htt peer unmap vdev %d peer %pM id %d\n",
+ peer->vdev_id, peer->addr, ev->peer_id);
+
+ ar->peer_map[ev->peer_id] = NULL;
+ clear_bit(ev->peer_id, peer->peer_ids);
+
+ if (bitmap_empty(peer->peer_ids, ATH10K_MAX_NUM_PEER_IDS)) {
+ list_del(&peer->list);
+ kfree(peer);
+ wake_up(&ar->peer_mapping_wq);
+ }
+
+exit:
+ spin_unlock_bh(&ar->data_lock);
+}
diff --git a/drivers/net/wireless/ath/ath10k/txrx.h b/drivers/net/wireless/ath/ath10k/txrx.h
new file mode 100644
index 000000000..2bf401e43
--- /dev/null
+++ b/drivers/net/wireless/ath/ath10k/txrx.h
@@ -0,0 +1,38 @@
+/*
+ * Copyright (c) 2005-2011 Atheros Communications Inc.
+ * Copyright (c) 2011-2014,2016 Qualcomm Atheros, Inc.
+ *
+ * Permission to use, copy, modify, and/or distribute this software for any
+ * purpose with or without fee is hereby granted, provided that the above
+ * copyright notice and this permission notice appear in all copies.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
+ * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
+ * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+ * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+ * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
+ * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+ */
+#ifndef _TXRX_H_
+#define _TXRX_H_
+
+#include "htt.h"
+
+int ath10k_txrx_tx_unref(struct ath10k_htt *htt,
+ const struct htt_tx_done *tx_done);
+
+struct ath10k_peer *ath10k_peer_find(struct ath10k *ar, int vdev_id,
+ const u8 *addr);
+struct ath10k_peer *ath10k_peer_find_by_id(struct ath10k *ar, int peer_id);
+int ath10k_wait_for_peer_created(struct ath10k *ar, int vdev_id,
+ const u8 *addr);
+int ath10k_wait_for_peer_deleted(struct ath10k *ar, int vdev_id,
+ const u8 *addr);
+
+void ath10k_peer_map_event(struct ath10k_htt *htt,
+ struct htt_peer_map_event *ev);
+void ath10k_peer_unmap_event(struct ath10k_htt *htt,
+ struct htt_peer_unmap_event *ev);
+
+#endif
diff --git a/drivers/net/wireless/ath/ath10k/usb.c b/drivers/net/wireless/ath/ath10k/usb.c
new file mode 100644
index 000000000..0a76991d0
--- /dev/null
+++ b/drivers/net/wireless/ath/ath10k/usb.c
@@ -0,0 +1,1125 @@
+/*
+ * Copyright (c) 2007-2011 Atheros Communications Inc.
+ * Copyright (c) 2011-2012,2017 Qualcomm Atheros, Inc.
+ * Copyright (c) 2016-2017 Erik Stromdahl <erik.stromdahl@gmail.com>
+ *
+ * Permission to use, copy, modify, and/or distribute this software for any
+ * purpose with or without fee is hereby granted, provided that the above
+ * copyright notice and this permission notice appear in all copies.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
+ * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
+ * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+ * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+ * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
+ * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+ */
+
+#include <linux/module.h>
+#include <linux/usb.h>
+
+#include "debug.h"
+#include "core.h"
+#include "bmi.h"
+#include "hif.h"
+#include "htc.h"
+#include "usb.h"
+
+static void ath10k_usb_post_recv_transfers(struct ath10k *ar,
+ struct ath10k_usb_pipe *recv_pipe);
+
+/* inlined helper functions */
+
+static inline enum ath10k_htc_ep_id
+eid_from_htc_hdr(struct ath10k_htc_hdr *htc_hdr)
+{
+ return (enum ath10k_htc_ep_id)htc_hdr->eid;
+}
+
+static inline bool is_trailer_only_msg(struct ath10k_htc_hdr *htc_hdr)
+{
+ return __le16_to_cpu(htc_hdr->len) == htc_hdr->trailer_len;
+}
+
+/* pipe/urb operations */
+static struct ath10k_urb_context *
+ath10k_usb_alloc_urb_from_pipe(struct ath10k_usb_pipe *pipe)
+{
+ struct ath10k_urb_context *urb_context = NULL;
+ unsigned long flags;
+
+ /* bail if this pipe is not initialized */
+ if (!pipe->ar_usb)
+ return NULL;
+
+ spin_lock_irqsave(&pipe->ar_usb->cs_lock, flags);
+ if (!list_empty(&pipe->urb_list_head)) {
+ urb_context = list_first_entry(&pipe->urb_list_head,
+ struct ath10k_urb_context, link);
+ list_del(&urb_context->link);
+ pipe->urb_cnt--;
+ }
+ spin_unlock_irqrestore(&pipe->ar_usb->cs_lock, flags);
+
+ return urb_context;
+}
+
+static void ath10k_usb_free_urb_to_pipe(struct ath10k_usb_pipe *pipe,
+ struct ath10k_urb_context *urb_context)
+{
+ unsigned long flags;
+
+ /* bail if this pipe is not initialized */
+ if (!pipe->ar_usb)
+ return;
+
+ spin_lock_irqsave(&pipe->ar_usb->cs_lock, flags);
+
+ pipe->urb_cnt++;
+ list_add(&urb_context->link, &pipe->urb_list_head);
+
+ spin_unlock_irqrestore(&pipe->ar_usb->cs_lock, flags);
+}
+
+static void ath10k_usb_cleanup_recv_urb(struct ath10k_urb_context *urb_context)
+{
+ dev_kfree_skb(urb_context->skb);
+ urb_context->skb = NULL;
+
+ ath10k_usb_free_urb_to_pipe(urb_context->pipe, urb_context);
+}
+
+static void ath10k_usb_free_pipe_resources(struct ath10k *ar,
+ struct ath10k_usb_pipe *pipe)
+{
+ struct ath10k_urb_context *urb_context;
+
+ if (!pipe->ar_usb) {
+ /* nothing allocated for this pipe */
+ return;
+ }
+
+ ath10k_dbg(ar, ATH10K_DBG_USB,
+ "usb free resources lpipe %d hpipe 0x%x urbs %d avail %d\n",
+ pipe->logical_pipe_num, pipe->usb_pipe_handle,
+ pipe->urb_alloc, pipe->urb_cnt);
+
+ if (pipe->urb_alloc != pipe->urb_cnt) {
+ ath10k_dbg(ar, ATH10K_DBG_USB,
+ "usb urb leak lpipe %d hpipe 0x%x urbs %d avail %d\n",
+ pipe->logical_pipe_num, pipe->usb_pipe_handle,
+ pipe->urb_alloc, pipe->urb_cnt);
+ }
+
+ for (;;) {
+ urb_context = ath10k_usb_alloc_urb_from_pipe(pipe);
+
+ if (!urb_context)
+ break;
+
+ kfree(urb_context);
+ }
+}
+
+static void ath10k_usb_cleanup_pipe_resources(struct ath10k *ar)
+{
+ struct ath10k_usb *ar_usb = ath10k_usb_priv(ar);
+ int i;
+
+ for (i = 0; i < ATH10K_USB_PIPE_MAX; i++)
+ ath10k_usb_free_pipe_resources(ar, &ar_usb->pipes[i]);
+}
+
+/* hif usb rx/tx completion functions */
+
+static void ath10k_usb_recv_complete(struct urb *urb)
+{
+ struct ath10k_urb_context *urb_context = urb->context;
+ struct ath10k_usb_pipe *pipe = urb_context->pipe;
+ struct ath10k *ar = pipe->ar_usb->ar;
+ struct sk_buff *skb;
+ int status = 0;
+
+ ath10k_dbg(ar, ATH10K_DBG_USB_BULK,
+ "usb recv pipe %d stat %d len %d urb 0x%pK\n",
+ pipe->logical_pipe_num, urb->status, urb->actual_length,
+ urb);
+
+ if (urb->status != 0) {
+ status = -EIO;
+ switch (urb->status) {
+ case -ECONNRESET:
+ case -ENOENT:
+ case -ESHUTDOWN:
+			/* no need to spew these errors when the device is
+			 * removed or the urb is killed during driver shutdown
+ */
+ status = -ECANCELED;
+ break;
+ default:
+ ath10k_dbg(ar, ATH10K_DBG_USB_BULK,
+ "usb recv pipe %d ep 0x%2.2x failed: %d\n",
+ pipe->logical_pipe_num,
+ pipe->ep_address, urb->status);
+ break;
+ }
+ goto cleanup_recv_urb;
+ }
+
+ if (urb->actual_length == 0)
+ goto cleanup_recv_urb;
+
+ skb = urb_context->skb;
+
+ /* we are going to pass it up */
+ urb_context->skb = NULL;
+ skb_put(skb, urb->actual_length);
+
+ /* note: queue implements a lock */
+ skb_queue_tail(&pipe->io_comp_queue, skb);
+ schedule_work(&pipe->io_complete_work);
+
+cleanup_recv_urb:
+ ath10k_usb_cleanup_recv_urb(urb_context);
+
+ if (status == 0 &&
+ pipe->urb_cnt >= pipe->urb_cnt_thresh) {
+ /* our free urbs are piling up, post more transfers */
+ ath10k_usb_post_recv_transfers(ar, pipe);
+ }
+}
+
+static void ath10k_usb_transmit_complete(struct urb *urb)
+{
+ struct ath10k_urb_context *urb_context = urb->context;
+ struct ath10k_usb_pipe *pipe = urb_context->pipe;
+ struct ath10k *ar = pipe->ar_usb->ar;
+ struct sk_buff *skb;
+
+ if (urb->status != 0) {
+ ath10k_dbg(ar, ATH10K_DBG_USB_BULK,
+ "pipe: %d, failed:%d\n",
+ pipe->logical_pipe_num, urb->status);
+ }
+
+ skb = urb_context->skb;
+ urb_context->skb = NULL;
+ ath10k_usb_free_urb_to_pipe(urb_context->pipe, urb_context);
+
+ /* note: queue implements a lock */
+ skb_queue_tail(&pipe->io_comp_queue, skb);
+ schedule_work(&pipe->io_complete_work);
+}
+
+/* pipe operations */
+static void ath10k_usb_post_recv_transfers(struct ath10k *ar,
+ struct ath10k_usb_pipe *recv_pipe)
+{
+ struct ath10k_urb_context *urb_context;
+ struct urb *urb;
+ int usb_status;
+
+ for (;;) {
+ urb_context = ath10k_usb_alloc_urb_from_pipe(recv_pipe);
+ if (!urb_context)
+ break;
+
+ urb_context->skb = dev_alloc_skb(ATH10K_USB_RX_BUFFER_SIZE);
+ if (!urb_context->skb)
+ goto err;
+
+ urb = usb_alloc_urb(0, GFP_ATOMIC);
+ if (!urb)
+ goto err;
+
+ usb_fill_bulk_urb(urb,
+ recv_pipe->ar_usb->udev,
+ recv_pipe->usb_pipe_handle,
+ urb_context->skb->data,
+ ATH10K_USB_RX_BUFFER_SIZE,
+ ath10k_usb_recv_complete, urb_context);
+
+ ath10k_dbg(ar, ATH10K_DBG_USB_BULK,
+ "usb bulk recv submit %d 0x%x ep 0x%2.2x len %d buf 0x%pK\n",
+ recv_pipe->logical_pipe_num,
+ recv_pipe->usb_pipe_handle, recv_pipe->ep_address,
+ ATH10K_USB_RX_BUFFER_SIZE, urb_context->skb);
+
+ usb_anchor_urb(urb, &recv_pipe->urb_submitted);
+ usb_status = usb_submit_urb(urb, GFP_ATOMIC);
+
+ if (usb_status) {
+ ath10k_dbg(ar, ATH10K_DBG_USB_BULK,
+ "usb bulk recv failed: %d\n",
+ usb_status);
+ usb_unanchor_urb(urb);
+ usb_free_urb(urb);
+ goto err;
+ }
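+		/* usb_anchor_urb() took its own reference to the URB, so
+		 * the in-flight transfer stays alive after we drop ours.
+		 */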
+ usb_free_urb(urb);
+ }
+
+ return;
+
+err:
+ ath10k_usb_cleanup_recv_urb(urb_context);
+}
+
+static void ath10k_usb_flush_all(struct ath10k *ar)
+{
+ struct ath10k_usb *ar_usb = ath10k_usb_priv(ar);
+ int i;
+
+ for (i = 0; i < ATH10K_USB_PIPE_MAX; i++) {
+ if (ar_usb->pipes[i].ar_usb) {
+ usb_kill_anchored_urbs(&ar_usb->pipes[i].urb_submitted);
+ cancel_work_sync(&ar_usb->pipes[i].io_complete_work);
+ }
+ }
+}
+
+static void ath10k_usb_start_recv_pipes(struct ath10k *ar)
+{
+ struct ath10k_usb *ar_usb = ath10k_usb_priv(ar);
+
+ ar_usb->pipes[ATH10K_USB_PIPE_RX_DATA].urb_cnt_thresh = 1;
+
+ ath10k_usb_post_recv_transfers(ar,
+ &ar_usb->pipes[ATH10K_USB_PIPE_RX_DATA]);
+}
+
+static void ath10k_usb_tx_complete(struct ath10k *ar, struct sk_buff *skb)
+{
+ struct ath10k_htc_hdr *htc_hdr;
+ struct ath10k_htc_ep *ep;
+
+ htc_hdr = (struct ath10k_htc_hdr *)skb->data;
+ ep = &ar->htc.endpoint[htc_hdr->eid];
+ ath10k_htc_notify_tx_completion(ep, skb);
+ /* The TX complete handler now owns the skb... */
+}
+
+static void ath10k_usb_rx_complete(struct ath10k *ar, struct sk_buff *skb)
+{
+ struct ath10k_htc *htc = &ar->htc;
+ struct ath10k_htc_hdr *htc_hdr;
+ enum ath10k_htc_ep_id eid;
+ struct ath10k_htc_ep *ep;
+ u16 payload_len;
+ u8 *trailer;
+ int ret;
+
+ htc_hdr = (struct ath10k_htc_hdr *)skb->data;
+ eid = eid_from_htc_hdr(htc_hdr);
+ ep = &ar->htc.endpoint[eid];
+
+ if (ep->service_id == 0) {
+ ath10k_warn(ar, "ep %d is not connected\n", eid);
+ goto out_free_skb;
+ }
+
+ payload_len = le16_to_cpu(htc_hdr->len);
+ if (!payload_len) {
+ ath10k_warn(ar, "zero length frame received, firmware crashed?\n");
+ goto out_free_skb;
+ }
+
+ if (payload_len < htc_hdr->trailer_len) {
+ ath10k_warn(ar, "malformed frame received, firmware crashed?\n");
+ goto out_free_skb;
+ }
+
+ if (htc_hdr->flags & ATH10K_HTC_FLAG_TRAILER_PRESENT) {
+ trailer = skb->data + sizeof(*htc_hdr) + payload_len -
+ htc_hdr->trailer_len;
+
+ ret = ath10k_htc_process_trailer(htc,
+ trailer,
+ htc_hdr->trailer_len,
+ eid,
+ NULL,
+ NULL);
+ if (ret)
+ goto out_free_skb;
+
+ if (is_trailer_only_msg(htc_hdr))
+ goto out_free_skb;
+
+ /* strip off the trailer from the skb since it should not
+ * be passed on to upper layers
+ */
+ skb_trim(skb, skb->len - htc_hdr->trailer_len);
+ }
+
+ skb_pull(skb, sizeof(*htc_hdr));
+ ep->ep_ops.ep_rx_complete(ar, skb);
+ /* The RX complete handler now owns the skb... */
+
+ return;
+
+out_free_skb:
+ dev_kfree_skb(skb);
+}
+
+static void ath10k_usb_io_comp_work(struct work_struct *work)
+{
+ struct ath10k_usb_pipe *pipe = container_of(work,
+ struct ath10k_usb_pipe,
+ io_complete_work);
+ struct ath10k *ar = pipe->ar_usb->ar;
+ struct sk_buff *skb;
+
+ while ((skb = skb_dequeue(&pipe->io_comp_queue))) {
+ if (pipe->flags & ATH10K_USB_PIPE_FLAG_TX)
+ ath10k_usb_tx_complete(ar, skb);
+ else
+ ath10k_usb_rx_complete(ar, skb);
+ }
+}
+
+#define ATH10K_USB_MAX_DIAG_CMD (sizeof(struct ath10k_usb_ctrl_diag_cmd_write))
+#define ATH10K_USB_MAX_DIAG_RESP (sizeof(struct ath10k_usb_ctrl_diag_resp_read))
+
+static void ath10k_usb_destroy(struct ath10k *ar)
+{
+ struct ath10k_usb *ar_usb = ath10k_usb_priv(ar);
+
+ ath10k_usb_flush_all(ar);
+ ath10k_usb_cleanup_pipe_resources(ar);
+ usb_set_intfdata(ar_usb->interface, NULL);
+
+ kfree(ar_usb->diag_cmd_buffer);
+ kfree(ar_usb->diag_resp_buffer);
+}
+
+static int ath10k_usb_hif_start(struct ath10k *ar)
+{
+ int i;
+ struct ath10k_usb *ar_usb = ath10k_usb_priv(ar);
+
+ ath10k_usb_start_recv_pipes(ar);
+
+ /* set the TX resource avail threshold for each TX pipe */
+ for (i = ATH10K_USB_PIPE_TX_CTRL;
+ i <= ATH10K_USB_PIPE_TX_DATA_HP; i++) {
+ ar_usb->pipes[i].urb_cnt_thresh =
+ ar_usb->pipes[i].urb_alloc / 2;
+ }
+
+ return 0;
+}
+
+static int ath10k_usb_hif_tx_sg(struct ath10k *ar, u8 pipe_id,
+ struct ath10k_hif_sg_item *items, int n_items)
+{
+ struct ath10k_usb *ar_usb = ath10k_usb_priv(ar);
+ struct ath10k_usb_pipe *pipe = &ar_usb->pipes[pipe_id];
+ struct ath10k_urb_context *urb_context;
+ struct sk_buff *skb;
+ struct urb *urb;
+ int ret, i;
+
+ for (i = 0; i < n_items; i++) {
+ urb_context = ath10k_usb_alloc_urb_from_pipe(pipe);
+ if (!urb_context) {
+ ret = -ENOMEM;
+ goto err;
+ }
+
+ skb = items[i].transfer_context;
+ urb_context->skb = skb;
+
+ urb = usb_alloc_urb(0, GFP_ATOMIC);
+ if (!urb) {
+ ret = -ENOMEM;
+ goto err_free_urb_to_pipe;
+ }
+
+ usb_fill_bulk_urb(urb,
+ ar_usb->udev,
+ pipe->usb_pipe_handle,
+ skb->data,
+ skb->len,
+ ath10k_usb_transmit_complete, urb_context);
+
+ if (!(skb->len % pipe->max_packet_size)) {
+ /* hit a max packet boundary on this pipe */
+ urb->transfer_flags |= URB_ZERO_PACKET;
+ }
+
+ usb_anchor_urb(urb, &pipe->urb_submitted);
+ ret = usb_submit_urb(urb, GFP_ATOMIC);
+ if (ret) {
+ ath10k_dbg(ar, ATH10K_DBG_USB_BULK,
+ "usb bulk transmit failed: %d\n", ret);
+ usb_unanchor_urb(urb);
+ usb_free_urb(urb);
+ ret = -EINVAL;
+ goto err_free_urb_to_pipe;
+ }
+
+ usb_free_urb(urb);
+ }
+
+ return 0;
+
+err_free_urb_to_pipe:
+ ath10k_usb_free_urb_to_pipe(urb_context->pipe, urb_context);
+err:
+ return ret;
+}
+
+static void ath10k_usb_hif_stop(struct ath10k *ar)
+{
+ ath10k_usb_flush_all(ar);
+}
+
+static u16 ath10k_usb_hif_get_free_queue_number(struct ath10k *ar, u8 pipe_id)
+{
+ struct ath10k_usb *ar_usb = ath10k_usb_priv(ar);
+
+ return ar_usb->pipes[pipe_id].urb_cnt;
+}
+
+static int ath10k_usb_submit_ctrl_out(struct ath10k *ar,
+ u8 req, u16 value, u16 index, void *data,
+ u32 size)
+{
+ struct ath10k_usb *ar_usb = ath10k_usb_priv(ar);
+ u8 *buf = NULL;
+ int ret;
+
+ if (size > 0) {
+ buf = kmemdup(data, size, GFP_KERNEL);
+ if (!buf)
+ return -ENOMEM;
+ }
+
+ /* note: if successful returns number of bytes transferred */
+ ret = usb_control_msg(ar_usb->udev,
+ usb_sndctrlpipe(ar_usb->udev, 0),
+ req,
+ USB_DIR_OUT | USB_TYPE_VENDOR |
+ USB_RECIP_DEVICE, value, index, buf,
+ size, 1000);
+
+ if (ret < 0) {
+ ath10k_warn(ar, "Failed to submit usb control message: %d\n",
+ ret);
+ kfree(buf);
+ return ret;
+ }
+
+ kfree(buf);
+
+ return 0;
+}
+
+static int ath10k_usb_submit_ctrl_in(struct ath10k *ar,
+ u8 req, u16 value, u16 index, void *data,
+ u32 size)
+{
+ struct ath10k_usb *ar_usb = ath10k_usb_priv(ar);
+ u8 *buf = NULL;
+ int ret;
+
+ if (size > 0) {
+ buf = kmalloc(size, GFP_KERNEL);
+ if (!buf)
+ return -ENOMEM;
+ }
+
+ /* note: if successful returns number of bytes transferred */
+ ret = usb_control_msg(ar_usb->udev,
+ usb_rcvctrlpipe(ar_usb->udev, 0),
+ req,
+ USB_DIR_IN | USB_TYPE_VENDOR |
+ USB_RECIP_DEVICE, value, index, buf,
+ size, 2000);
+
+ if (ret < 0) {
+ ath10k_warn(ar, "Failed to read usb control message: %d\n",
+ ret);
+ kfree(buf);
+ return ret;
+ }
+
+ memcpy((u8 *)data, buf, size);
+
+ kfree(buf);
+
+ return 0;
+}
+
+static int ath10k_usb_ctrl_msg_exchange(struct ath10k *ar,
+ u8 req_val, u8 *req_buf, u32 req_len,
+ u8 resp_val, u8 *resp_buf,
+ u32 *resp_len)
+{
+ int ret;
+
+ /* send command */
+ ret = ath10k_usb_submit_ctrl_out(ar, req_val, 0, 0,
+ req_buf, req_len);
+ if (ret)
+ goto err;
+
+ /* get response */
+ if (resp_buf) {
+ ret = ath10k_usb_submit_ctrl_in(ar, resp_val, 0, 0,
+ resp_buf, *resp_len);
+ if (ret)
+ goto err;
+ }
+
+ return 0;
+err:
+ return ret;
+}
+
+static int ath10k_usb_hif_diag_read(struct ath10k *ar, u32 address, void *buf,
+ size_t buf_len)
+{
+ struct ath10k_usb *ar_usb = ath10k_usb_priv(ar);
+ struct ath10k_usb_ctrl_diag_cmd_read *cmd;
+ u32 resp_len;
+ int ret;
+
+ if (buf_len < sizeof(struct ath10k_usb_ctrl_diag_resp_read))
+ return -EINVAL;
+
+ cmd = (struct ath10k_usb_ctrl_diag_cmd_read *)ar_usb->diag_cmd_buffer;
+ memset(cmd, 0, sizeof(*cmd));
+ cmd->cmd = ATH10K_USB_CTRL_DIAG_CC_READ;
+ cmd->address = cpu_to_le32(address);
+ resp_len = sizeof(struct ath10k_usb_ctrl_diag_resp_read);
+
+ ret = ath10k_usb_ctrl_msg_exchange(ar,
+ ATH10K_USB_CONTROL_REQ_DIAG_CMD,
+ (u8 *)cmd,
+ sizeof(*cmd),
+ ATH10K_USB_CONTROL_REQ_DIAG_RESP,
+ ar_usb->diag_resp_buffer, &resp_len);
+ if (ret)
+ return ret;
+
+ if (resp_len != sizeof(struct ath10k_usb_ctrl_diag_resp_read))
+ return -EMSGSIZE;
+
+ memcpy(buf, ar_usb->diag_resp_buffer,
+ sizeof(struct ath10k_usb_ctrl_diag_resp_read));
+
+ return 0;
+}
+
+static int ath10k_usb_hif_diag_write(struct ath10k *ar, u32 address,
+ const void *data, int nbytes)
+{
+ struct ath10k_usb *ar_usb = ath10k_usb_priv(ar);
+ struct ath10k_usb_ctrl_diag_cmd_write *cmd;
+ int ret;
+
+ if (nbytes != sizeof(cmd->value))
+ return -EINVAL;
+
+ cmd = (struct ath10k_usb_ctrl_diag_cmd_write *)ar_usb->diag_cmd_buffer;
+ memset(cmd, 0, sizeof(*cmd));
+ cmd->cmd = cpu_to_le32(ATH10K_USB_CTRL_DIAG_CC_WRITE);
+ cmd->address = cpu_to_le32(address);
+ memcpy(&cmd->value, data, nbytes);
+
+ ret = ath10k_usb_ctrl_msg_exchange(ar,
+ ATH10K_USB_CONTROL_REQ_DIAG_CMD,
+ (u8 *)cmd,
+ sizeof(*cmd),
+ 0, NULL, NULL);
+ if (ret)
+ return ret;
+
+ return 0;
+}
+
+static int ath10k_usb_bmi_exchange_msg(struct ath10k *ar,
+ void *req, u32 req_len,
+ void *resp, u32 *resp_len)
+{
+ int ret;
+
+ if (req) {
+ ret = ath10k_usb_submit_ctrl_out(ar,
+ ATH10K_USB_CONTROL_REQ_SEND_BMI_CMD,
+ 0, 0, req, req_len);
+ if (ret) {
+ ath10k_warn(ar,
+ "unable to send the bmi data to the device: %d\n",
+ ret);
+ return ret;
+ }
+ }
+
+ if (resp) {
+ ret = ath10k_usb_submit_ctrl_in(ar,
+ ATH10K_USB_CONTROL_REQ_RECV_BMI_RESP,
+ 0, 0, resp, *resp_len);
+ if (ret) {
+ ath10k_warn(ar,
+ "Unable to read the bmi data from the device: %d\n",
+ ret);
+ return ret;
+ }
+ }
+
+ return 0;
+}
+
+static void ath10k_usb_hif_get_default_pipe(struct ath10k *ar,
+ u8 *ul_pipe, u8 *dl_pipe)
+{
+ *ul_pipe = ATH10K_USB_PIPE_TX_CTRL;
+ *dl_pipe = ATH10K_USB_PIPE_RX_CTRL;
+}
+
+static int ath10k_usb_hif_map_service_to_pipe(struct ath10k *ar, u16 svc_id,
+ u8 *ul_pipe, u8 *dl_pipe)
+{
+ switch (svc_id) {
+ case ATH10K_HTC_SVC_ID_RSVD_CTRL:
+ case ATH10K_HTC_SVC_ID_WMI_CONTROL:
+ *ul_pipe = ATH10K_USB_PIPE_TX_CTRL;
+ /* due to large control packets, shift to data pipe */
+ *dl_pipe = ATH10K_USB_PIPE_RX_DATA;
+ break;
+ case ATH10K_HTC_SVC_ID_HTT_DATA_MSG:
+ *ul_pipe = ATH10K_USB_PIPE_TX_DATA_LP;
+		/* Disable rxdata2 directly; it will be enabled
+		 * if the firmware enables rxdata2
+		 */
+ *dl_pipe = ATH10K_USB_PIPE_RX_DATA;
+ break;
+ default:
+ return -EPERM;
+ }
+
+ return 0;
+}
+
+/* This op is currently only used by htc_wait_target if the HTC ready
+ * message times out. It is not applicable for USB since there is nothing
+ * we can do if the HTC ready message does not arrive in time.
+ * TODO: Make this op non-mandatory by introducing a NULL check in the
+ * hif op wrapper.
+ */
+static void ath10k_usb_hif_send_complete_check(struct ath10k *ar,
+ u8 pipe, int force)
+{
+}
+
+static int ath10k_usb_hif_power_up(struct ath10k *ar)
+{
+ return 0;
+}
+
+static void ath10k_usb_hif_power_down(struct ath10k *ar)
+{
+ ath10k_usb_flush_all(ar);
+}
+
+#ifdef CONFIG_PM
+
+static int ath10k_usb_hif_suspend(struct ath10k *ar)
+{
+ return -EOPNOTSUPP;
+}
+
+static int ath10k_usb_hif_resume(struct ath10k *ar)
+{
+ return -EOPNOTSUPP;
+}
+#endif
+
+static const struct ath10k_hif_ops ath10k_usb_hif_ops = {
+ .tx_sg = ath10k_usb_hif_tx_sg,
+ .diag_read = ath10k_usb_hif_diag_read,
+ .diag_write = ath10k_usb_hif_diag_write,
+ .exchange_bmi_msg = ath10k_usb_bmi_exchange_msg,
+ .start = ath10k_usb_hif_start,
+ .stop = ath10k_usb_hif_stop,
+ .map_service_to_pipe = ath10k_usb_hif_map_service_to_pipe,
+ .get_default_pipe = ath10k_usb_hif_get_default_pipe,
+ .send_complete_check = ath10k_usb_hif_send_complete_check,
+ .get_free_queue_number = ath10k_usb_hif_get_free_queue_number,
+ .power_up = ath10k_usb_hif_power_up,
+ .power_down = ath10k_usb_hif_power_down,
+#ifdef CONFIG_PM
+ .suspend = ath10k_usb_hif_suspend,
+ .resume = ath10k_usb_hif_resume,
+#endif
+};
+
+static u8 ath10k_usb_get_logical_pipe_num(u8 ep_address, int *urb_count)
+{
+ u8 pipe_num = ATH10K_USB_PIPE_INVALID;
+
+ switch (ep_address) {
+ case ATH10K_USB_EP_ADDR_APP_CTRL_IN:
+ pipe_num = ATH10K_USB_PIPE_RX_CTRL;
+ *urb_count = RX_URB_COUNT;
+ break;
+ case ATH10K_USB_EP_ADDR_APP_DATA_IN:
+ pipe_num = ATH10K_USB_PIPE_RX_DATA;
+ *urb_count = RX_URB_COUNT;
+ break;
+ case ATH10K_USB_EP_ADDR_APP_INT_IN:
+ pipe_num = ATH10K_USB_PIPE_RX_INT;
+ *urb_count = RX_URB_COUNT;
+ break;
+ case ATH10K_USB_EP_ADDR_APP_DATA2_IN:
+ pipe_num = ATH10K_USB_PIPE_RX_DATA2;
+ *urb_count = RX_URB_COUNT;
+ break;
+ case ATH10K_USB_EP_ADDR_APP_CTRL_OUT:
+ pipe_num = ATH10K_USB_PIPE_TX_CTRL;
+ *urb_count = TX_URB_COUNT;
+ break;
+ case ATH10K_USB_EP_ADDR_APP_DATA_LP_OUT:
+ pipe_num = ATH10K_USB_PIPE_TX_DATA_LP;
+ *urb_count = TX_URB_COUNT;
+ break;
+ case ATH10K_USB_EP_ADDR_APP_DATA_MP_OUT:
+ pipe_num = ATH10K_USB_PIPE_TX_DATA_MP;
+ *urb_count = TX_URB_COUNT;
+ break;
+ case ATH10K_USB_EP_ADDR_APP_DATA_HP_OUT:
+ pipe_num = ATH10K_USB_PIPE_TX_DATA_HP;
+ *urb_count = TX_URB_COUNT;
+ break;
+ default:
+ /* note: there may be endpoints not currently used */
+ break;
+ }
+
+ return pipe_num;
+}
+
+static int ath10k_usb_alloc_pipe_resources(struct ath10k *ar,
+ struct ath10k_usb_pipe *pipe,
+ int urb_cnt)
+{
+ struct ath10k_urb_context *urb_context;
+ int i;
+
+ INIT_LIST_HEAD(&pipe->urb_list_head);
+ init_usb_anchor(&pipe->urb_submitted);
+
+ for (i = 0; i < urb_cnt; i++) {
+ urb_context = kzalloc(sizeof(*urb_context), GFP_KERNEL);
+ if (!urb_context)
+ return -ENOMEM;
+
+ urb_context->pipe = pipe;
+
+		/* we only allocate the urb contexts here; the actual URBs
+		 * are allocated from the kernel as needed for each
+		 * transaction
+		 */
+ pipe->urb_alloc++;
+ ath10k_usb_free_urb_to_pipe(pipe, urb_context);
+ }
+
+ ath10k_dbg(ar, ATH10K_DBG_USB,
+ "usb alloc resources lpipe %d hpipe 0x%x urbs %d\n",
+ pipe->logical_pipe_num, pipe->usb_pipe_handle,
+ pipe->urb_alloc);
+
+ return 0;
+}
+
+static int ath10k_usb_setup_pipe_resources(struct ath10k *ar,
+ struct usb_interface *interface)
+{
+ struct ath10k_usb *ar_usb = ath10k_usb_priv(ar);
+ struct usb_host_interface *iface_desc = interface->cur_altsetting;
+ struct usb_endpoint_descriptor *endpoint;
+ struct ath10k_usb_pipe *pipe;
+ int ret, i, urbcount;
+ u8 pipe_num;
+
+ ath10k_dbg(ar, ATH10K_DBG_USB, "usb setting up pipes using interface\n");
+
+	/* walk descriptors and set up pipes */
+ for (i = 0; i < iface_desc->desc.bNumEndpoints; ++i) {
+ endpoint = &iface_desc->endpoint[i].desc;
+
+ if (ATH10K_USB_IS_BULK_EP(endpoint->bmAttributes)) {
+ ath10k_dbg(ar, ATH10K_DBG_USB,
+ "usb %s bulk ep 0x%2.2x maxpktsz %d\n",
+ ATH10K_USB_IS_DIR_IN
+ (endpoint->bEndpointAddress) ?
+ "rx" : "tx", endpoint->bEndpointAddress,
+ le16_to_cpu(endpoint->wMaxPacketSize));
+ } else if (ATH10K_USB_IS_INT_EP(endpoint->bmAttributes)) {
+ ath10k_dbg(ar, ATH10K_DBG_USB,
+ "usb %s int ep 0x%2.2x maxpktsz %d interval %d\n",
+ ATH10K_USB_IS_DIR_IN
+ (endpoint->bEndpointAddress) ?
+ "rx" : "tx", endpoint->bEndpointAddress,
+ le16_to_cpu(endpoint->wMaxPacketSize),
+ endpoint->bInterval);
+ } else if (ATH10K_USB_IS_ISOC_EP(endpoint->bmAttributes)) {
+ /* TODO for ISO */
+ ath10k_dbg(ar, ATH10K_DBG_USB,
+ "usb %s isoc ep 0x%2.2x maxpktsz %d interval %d\n",
+ ATH10K_USB_IS_DIR_IN
+ (endpoint->bEndpointAddress) ?
+ "rx" : "tx", endpoint->bEndpointAddress,
+ le16_to_cpu(endpoint->wMaxPacketSize),
+ endpoint->bInterval);
+ }
+
+ /* Ignore broken descriptors. */
+ if (usb_endpoint_maxp(endpoint) == 0)
+ continue;
+
+ urbcount = 0;
+
+ pipe_num =
+ ath10k_usb_get_logical_pipe_num(endpoint->bEndpointAddress,
+ &urbcount);
+ if (pipe_num == ATH10K_USB_PIPE_INVALID)
+ continue;
+
+ pipe = &ar_usb->pipes[pipe_num];
+ if (pipe->ar_usb)
+ /* pipe was already set up */
+ continue;
+
+ pipe->ar_usb = ar_usb;
+ pipe->logical_pipe_num = pipe_num;
+ pipe->ep_address = endpoint->bEndpointAddress;
+ pipe->max_packet_size = le16_to_cpu(endpoint->wMaxPacketSize);
+
+ if (ATH10K_USB_IS_BULK_EP(endpoint->bmAttributes)) {
+ if (ATH10K_USB_IS_DIR_IN(pipe->ep_address)) {
+ pipe->usb_pipe_handle =
+ usb_rcvbulkpipe(ar_usb->udev,
+ pipe->ep_address);
+ } else {
+ pipe->usb_pipe_handle =
+ usb_sndbulkpipe(ar_usb->udev,
+ pipe->ep_address);
+ }
+ } else if (ATH10K_USB_IS_INT_EP(endpoint->bmAttributes)) {
+ if (ATH10K_USB_IS_DIR_IN(pipe->ep_address)) {
+ pipe->usb_pipe_handle =
+ usb_rcvintpipe(ar_usb->udev,
+ pipe->ep_address);
+ } else {
+ pipe->usb_pipe_handle =
+ usb_sndintpipe(ar_usb->udev,
+ pipe->ep_address);
+ }
+ } else if (ATH10K_USB_IS_ISOC_EP(endpoint->bmAttributes)) {
+ /* TODO for ISO */
+ if (ATH10K_USB_IS_DIR_IN(pipe->ep_address)) {
+ pipe->usb_pipe_handle =
+ usb_rcvisocpipe(ar_usb->udev,
+ pipe->ep_address);
+ } else {
+ pipe->usb_pipe_handle =
+ usb_sndisocpipe(ar_usb->udev,
+ pipe->ep_address);
+ }
+ }
+
+ pipe->ep_desc = endpoint;
+
+ if (!ATH10K_USB_IS_DIR_IN(pipe->ep_address))
+ pipe->flags |= ATH10K_USB_PIPE_FLAG_TX;
+
+ ret = ath10k_usb_alloc_pipe_resources(ar, pipe, urbcount);
+ if (ret)
+ return ret;
+ }
+
+ return 0;
+}
+
+static int ath10k_usb_create(struct ath10k *ar,
+ struct usb_interface *interface)
+{
+ struct ath10k_usb *ar_usb = ath10k_usb_priv(ar);
+ struct usb_device *dev = interface_to_usbdev(interface);
+ struct ath10k_usb_pipe *pipe;
+ int ret, i;
+
+ usb_set_intfdata(interface, ar_usb);
+ spin_lock_init(&ar_usb->cs_lock);
+ ar_usb->udev = dev;
+ ar_usb->interface = interface;
+
+ for (i = 0; i < ATH10K_USB_PIPE_MAX; i++) {
+ pipe = &ar_usb->pipes[i];
+ INIT_WORK(&pipe->io_complete_work,
+ ath10k_usb_io_comp_work);
+ skb_queue_head_init(&pipe->io_comp_queue);
+ }
+
+ ar_usb->diag_cmd_buffer = kzalloc(ATH10K_USB_MAX_DIAG_CMD, GFP_KERNEL);
+ if (!ar_usb->diag_cmd_buffer) {
+ ret = -ENOMEM;
+ goto err;
+ }
+
+ ar_usb->diag_resp_buffer = kzalloc(ATH10K_USB_MAX_DIAG_RESP,
+ GFP_KERNEL);
+ if (!ar_usb->diag_resp_buffer) {
+ ret = -ENOMEM;
+ goto err;
+ }
+
+ ret = ath10k_usb_setup_pipe_resources(ar, interface);
+ if (ret)
+ goto err;
+
+ return 0;
+
+err:
+ ath10k_usb_destroy(ar);
+ return ret;
+}
+
+/* ath10k usb driver registered functions */
+static int ath10k_usb_probe(struct usb_interface *interface,
+ const struct usb_device_id *id)
+{
+ struct ath10k *ar;
+ struct ath10k_usb *ar_usb;
+ struct usb_device *dev = interface_to_usbdev(interface);
+ int ret, vendor_id, product_id;
+ enum ath10k_hw_rev hw_rev;
+ u32 chip_id;
+
+ /* Assumption: all USB based chipsets (so far) are QCA9377 based.
+ * If a newer chipset does not use the hw reg setup as defined in
+ * qca6174_regs and qca6174_values, this assumption is no longer
+ * valid and hw_rev must be set up differently depending on the
+ * chipset.
+ */
+ hw_rev = ATH10K_HW_QCA9377;
+
+ ar = ath10k_core_create(sizeof(*ar_usb), &dev->dev, ATH10K_BUS_USB,
+ hw_rev, &ath10k_usb_hif_ops);
+ if (!ar) {
+ dev_err(&dev->dev, "failed to allocate core\n");
+ return -ENOMEM;
+ }
+
+ usb_get_dev(dev);
+ vendor_id = le16_to_cpu(dev->descriptor.idVendor);
+ product_id = le16_to_cpu(dev->descriptor.idProduct);
+
+ ath10k_dbg(ar, ATH10K_DBG_BOOT,
+ "usb new func vendor 0x%04x product 0x%04x\n",
+ vendor_id, product_id);
+
+ ar_usb = ath10k_usb_priv(ar);
+ ret = ath10k_usb_create(ar, interface);
+ if (ret)
+ goto err;
+ ar_usb->ar = ar;
+
+ ar->dev_id = product_id;
+ ar->id.vendor = vendor_id;
+ ar->id.device = product_id;
+
+ /* TODO: don't know yet how to get chip_id with USB */
+ chip_id = 0;
+ ret = ath10k_core_register(ar, chip_id);
+ if (ret) {
+ ath10k_warn(ar, "failed to register driver core: %d\n", ret);
+ goto err_usb_destroy;
+ }
+
+ /* TODO: remove this once USB support is fully implemented */
+ ath10k_warn(ar, "Warning: ath10k USB support is incomplete, don't expect anything to work!\n");
+
+ return 0;
+
+err_usb_destroy:
+ ath10k_usb_destroy(ar);
+
+err:
+ ath10k_core_destroy(ar);
+
+ usb_put_dev(dev);
+
+ return ret;
+}
+
+static void ath10k_usb_remove(struct usb_interface *interface)
+{
+ struct ath10k_usb *ar_usb;
+
+ ar_usb = usb_get_intfdata(interface);
+ if (!ar_usb)
+ return;
+
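+ /* Teardown order matters: ar_usb lives inside ar's private area,
+ * so the usb device reference is dropped before ath10k_core_destroy()
+ * frees ar (and ar_usb along with it).
+ */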
+ ath10k_core_unregister(ar_usb->ar);
+ ath10k_usb_destroy(ar_usb->ar);
+ usb_put_dev(interface_to_usbdev(interface));
+ ath10k_core_destroy(ar_usb->ar);
+}
+
+#ifdef CONFIG_PM
+
+static int ath10k_usb_pm_suspend(struct usb_interface *interface,
+ pm_message_t message)
+{
+ struct ath10k_usb *ar_usb = usb_get_intfdata(interface);
+
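+ /* Kill any in-flight URBs so nothing is left queued on the host
+ * controller across suspend.
+ */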
+ ath10k_usb_flush_all(ar_usb->ar);
+ return 0;
+}
+
+static int ath10k_usb_pm_resume(struct usb_interface *interface)
+{
+ struct ath10k_usb *ar_usb = usb_get_intfdata(interface);
+ struct ath10k *ar = ar_usb->ar;
+
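+ /* RX URBs were killed on suspend; repost receive buffers on the
+ * data pipe so the device can deliver frames again.
+ */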
+ ath10k_usb_post_recv_transfers(ar,
+ &ar_usb->pipes[ATH10K_USB_PIPE_RX_DATA]);
+
+ return 0;
+}
+
+#else
+
+#define ath10k_usb_pm_suspend NULL
+#define ath10k_usb_pm_resume NULL
+
+#endif
+
+/* table of devices that work with this driver */
+static const struct usb_device_id ath10k_usb_ids[] = {
+ {USB_DEVICE(0x13b1, 0x0042)}, /* Linksys WUSB6100M */
+ { /* Terminating entry */ },
+};
+
+MODULE_DEVICE_TABLE(usb, ath10k_usb_ids);
+
+static struct usb_driver ath10k_usb_driver = {
+ .name = "ath10k_usb",
+ .probe = ath10k_usb_probe,
+ .suspend = ath10k_usb_pm_suspend,
+ .resume = ath10k_usb_pm_resume,
+ .disconnect = ath10k_usb_remove,
+ .id_table = ath10k_usb_ids,
+ .supports_autosuspend = true,
+ .disable_hub_initiated_lpm = 1,
+};
+
+module_usb_driver(ath10k_usb_driver);
+
+MODULE_AUTHOR("Atheros Communications, Inc.");
+MODULE_DESCRIPTION("Driver support for Qualcomm Atheros 802.11ac WLAN USB devices");
+MODULE_LICENSE("Dual BSD/GPL");
diff --git a/drivers/net/wireless/ath/ath10k/usb.h b/drivers/net/wireless/ath/ath10k/usb.h
new file mode 100644
index 000000000..f60a3cc7d
--- /dev/null
+++ b/drivers/net/wireless/ath/ath10k/usb.h
@@ -0,0 +1,128 @@
+/*
+ * Copyright (c) 2004-2011 Atheros Communications Inc.
+ * Copyright (c) 2011-2012 Qualcomm Atheros, Inc.
+ * Copyright (c) 2016-2017 Erik Stromdahl <erik.stromdahl@gmail.com>
+ *
+ * Permission to use, copy, modify, and/or distribute this software for any
+ * purpose with or without fee is hereby granted, provided that the above
+ * copyright notice and this permission notice appear in all copies.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
+ * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
+ * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+ * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+ * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
+ * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+ */
+
+#ifndef _USB_H_
+#define _USB_H_
+
+/* constants */
+#define TX_URB_COUNT 32
+#define RX_URB_COUNT 32
+#define ATH10K_USB_RX_BUFFER_SIZE 4096
+
+#define ATH10K_USB_PIPE_INVALID ATH10K_USB_PIPE_MAX
+
+/* USB endpoint definitions */
+#define ATH10K_USB_EP_ADDR_APP_CTRL_IN 0x81
+#define ATH10K_USB_EP_ADDR_APP_DATA_IN 0x82
+#define ATH10K_USB_EP_ADDR_APP_DATA2_IN 0x83
+#define ATH10K_USB_EP_ADDR_APP_INT_IN 0x84
+
+#define ATH10K_USB_EP_ADDR_APP_CTRL_OUT 0x01
+#define ATH10K_USB_EP_ADDR_APP_DATA_LP_OUT 0x02
+#define ATH10K_USB_EP_ADDR_APP_DATA_MP_OUT 0x03
+#define ATH10K_USB_EP_ADDR_APP_DATA_HP_OUT 0x04
+
+/* diagnostic command definitions */
+#define ATH10K_USB_CONTROL_REQ_SEND_BMI_CMD 1
+#define ATH10K_USB_CONTROL_REQ_RECV_BMI_RESP 2
+#define ATH10K_USB_CONTROL_REQ_DIAG_CMD 3
+#define ATH10K_USB_CONTROL_REQ_DIAG_RESP 4
+
+#define ATH10K_USB_CTRL_DIAG_CC_READ 0
+#define ATH10K_USB_CTRL_DIAG_CC_WRITE 1
+
+#define ATH10K_USB_IS_BULK_EP(attr) (((attr) & 3) == 0x02)
+#define ATH10K_USB_IS_INT_EP(attr) (((attr) & 3) == 0x03)
+#define ATH10K_USB_IS_ISOC_EP(attr) (((attr) & 3) == 0x01)
+#define ATH10K_USB_IS_DIR_IN(addr) ((addr) & 0x80)
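+
+/* The IS_*_EP() macros test the transfer-type field of bmAttributes
+ * (bits 1:0) and IS_DIR_IN() tests the direction bit of
+ * bEndpointAddress, as defined by the USB spec. <linux/usb/ch9.h>
+ * provides equivalent helpers taking a descriptor instead of a raw
+ * byte, e.g. for a struct usb_endpoint_descriptor *epd:
+ *
+ *   usb_endpoint_xfer_bulk(epd)   (same check as ATH10K_USB_IS_BULK_EP)
+ *   usb_endpoint_dir_in(epd)      (same check as ATH10K_USB_IS_DIR_IN)
+ */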
+
+struct ath10k_usb_ctrl_diag_cmd_write {
+ __le32 cmd;
+ __le32 address;
+ __le32 value;
+ __le32 padding;
+} __packed;
+
+struct ath10k_usb_ctrl_diag_cmd_read {
+ __le32 cmd;
+ __le32 address;
+} __packed;
+
+struct ath10k_usb_ctrl_diag_resp_read {
+ u8 value[4];
+} __packed;
+
+/* tx/rx pipes for usb */
+enum ath10k_usb_pipe_id {
+ ATH10K_USB_PIPE_TX_CTRL = 0,
+ ATH10K_USB_PIPE_TX_DATA_LP,
+ ATH10K_USB_PIPE_TX_DATA_MP,
+ ATH10K_USB_PIPE_TX_DATA_HP,
+ ATH10K_USB_PIPE_RX_CTRL,
+ ATH10K_USB_PIPE_RX_DATA,
+ ATH10K_USB_PIPE_RX_DATA2,
+ ATH10K_USB_PIPE_RX_INT,
+ ATH10K_USB_PIPE_MAX
+};
+
+struct ath10k_usb_pipe {
+ struct list_head urb_list_head;
+ struct usb_anchor urb_submitted;
+ u32 urb_alloc;
+ u32 urb_cnt;
+ u32 urb_cnt_thresh;
+ unsigned int usb_pipe_handle;
+ u32 flags;
+ u8 ep_address;
+ u8 logical_pipe_num;
+ struct ath10k_usb *ar_usb;
+ u16 max_packet_size;
+ struct work_struct io_complete_work;
+ struct sk_buff_head io_comp_queue;
+ struct usb_endpoint_descriptor *ep_desc;
+};
+
+#define ATH10K_USB_PIPE_FLAG_TX BIT(0)
+
+/* usb device object */
+struct ath10k_usb {
+ /* protects pipe->urb_list_head and pipe->urb_cnt */
+ spinlock_t cs_lock;
+
+ struct usb_device *udev;
+ struct usb_interface *interface;
+ struct ath10k_usb_pipe pipes[ATH10K_USB_PIPE_MAX];
+ u8 *diag_cmd_buffer;
+ u8 *diag_resp_buffer;
+ struct ath10k *ar;
+};
+
+/* usb urb object */
+struct ath10k_urb_context {
+ struct list_head link;
+ struct ath10k_usb_pipe *pipe;
+ struct sk_buff *skb;
+ struct ath10k *ar;
+};
+
+static inline struct ath10k_usb *ath10k_usb_priv(struct ath10k *ar)
+{
+ return (struct ath10k_usb *)ar->drv_priv;
+}
+
+#endif
diff --git a/drivers/net/wireless/ath/ath10k/wmi-ops.h b/drivers/net/wireless/ath/ath10k/wmi-ops.h
new file mode 100644
index 000000000..b6cd33fa7
--- /dev/null
+++ b/drivers/net/wireless/ath/ath10k/wmi-ops.h
@@ -0,0 +1,1568 @@
+/*
+ * Copyright (c) 2005-2011 Atheros Communications Inc.
+ * Copyright (c) 2011-2017 Qualcomm Atheros, Inc.
+ * Copyright (c) 2018, The Linux Foundation. All rights reserved.
+ *
+ * Permission to use, copy, modify, and/or distribute this software for any
+ * purpose with or without fee is hereby granted, provided that the above
+ * copyright notice and this permission notice appear in all copies.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
+ * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
+ * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+ * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+ * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
+ * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+ */
+
+#ifndef _WMI_OPS_H_
+#define _WMI_OPS_H_
+
+struct ath10k;
+struct sk_buff;
+
+struct wmi_ops {
+ void (*rx)(struct ath10k *ar, struct sk_buff *skb);
+ void (*map_svc)(const __le32 *in, unsigned long *out, size_t len);
+ void (*map_svc_ext)(const __le32 *in, unsigned long *out, size_t len);
+
+ int (*pull_scan)(struct ath10k *ar, struct sk_buff *skb,
+ struct wmi_scan_ev_arg *arg);
+ int (*pull_mgmt_rx)(struct ath10k *ar, struct sk_buff *skb,
+ struct wmi_mgmt_rx_ev_arg *arg);
+ int (*pull_mgmt_tx_compl)(struct ath10k *ar, struct sk_buff *skb,
+ struct wmi_tlv_mgmt_tx_compl_ev_arg *arg);
+ int (*pull_ch_info)(struct ath10k *ar, struct sk_buff *skb,
+ struct wmi_ch_info_ev_arg *arg);
+ int (*pull_vdev_start)(struct ath10k *ar, struct sk_buff *skb,
+ struct wmi_vdev_start_ev_arg *arg);
+ int (*pull_peer_kick)(struct ath10k *ar, struct sk_buff *skb,
+ struct wmi_peer_kick_ev_arg *arg);
+ int (*pull_swba)(struct ath10k *ar, struct sk_buff *skb,
+ struct wmi_swba_ev_arg *arg);
+ int (*pull_phyerr_hdr)(struct ath10k *ar, struct sk_buff *skb,
+ struct wmi_phyerr_hdr_arg *arg);
+ int (*pull_phyerr)(struct ath10k *ar, const void *phyerr_buf,
+ int left_len, struct wmi_phyerr_ev_arg *arg);
+ int (*pull_svc_rdy)(struct ath10k *ar, struct sk_buff *skb,
+ struct wmi_svc_rdy_ev_arg *arg);
+ int (*pull_rdy)(struct ath10k *ar, struct sk_buff *skb,
+ struct wmi_rdy_ev_arg *arg);
+ int (*pull_fw_stats)(struct ath10k *ar, struct sk_buff *skb,
+ struct ath10k_fw_stats *stats);
+ int (*pull_roam_ev)(struct ath10k *ar, struct sk_buff *skb,
+ struct wmi_roam_ev_arg *arg);
+ int (*pull_wow_event)(struct ath10k *ar, struct sk_buff *skb,
+ struct wmi_wow_ev_arg *arg);
+ int (*pull_echo_ev)(struct ath10k *ar, struct sk_buff *skb,
+ struct wmi_echo_ev_arg *arg);
+ int (*pull_dfs_status_ev)(struct ath10k *ar, struct sk_buff *skb,
+ struct wmi_dfs_status_ev_arg *arg);
+ int (*pull_svc_avail)(struct ath10k *ar, struct sk_buff *skb,
+ struct wmi_svc_avail_ev_arg *arg);
+
+ enum wmi_txbf_conf (*get_txbf_conf_scheme)(struct ath10k *ar);
+
+ struct sk_buff *(*gen_pdev_suspend)(struct ath10k *ar, u32 suspend_opt);
+ struct sk_buff *(*gen_pdev_resume)(struct ath10k *ar);
+ struct sk_buff *(*gen_pdev_set_rd)(struct ath10k *ar, u16 rd, u16 rd2g,
+ u16 rd5g, u16 ctl2g, u16 ctl5g,
+ enum wmi_dfs_region dfs_reg);
+ struct sk_buff *(*gen_pdev_set_param)(struct ath10k *ar, u32 id,
+ u32 value);
+ struct sk_buff *(*gen_init)(struct ath10k *ar);
+ struct sk_buff *(*gen_start_scan)(struct ath10k *ar,
+ const struct wmi_start_scan_arg *arg);
+ struct sk_buff *(*gen_stop_scan)(struct ath10k *ar,
+ const struct wmi_stop_scan_arg *arg);
+ struct sk_buff *(*gen_vdev_create)(struct ath10k *ar, u32 vdev_id,
+ enum wmi_vdev_type type,
+ enum wmi_vdev_subtype subtype,
+ const u8 macaddr[ETH_ALEN]);
+ struct sk_buff *(*gen_vdev_delete)(struct ath10k *ar, u32 vdev_id);
+ struct sk_buff *(*gen_vdev_start)(struct ath10k *ar,
+ const struct wmi_vdev_start_request_arg *arg,
+ bool restart);
+ struct sk_buff *(*gen_vdev_stop)(struct ath10k *ar, u32 vdev_id);
+ struct sk_buff *(*gen_vdev_up)(struct ath10k *ar, u32 vdev_id, u32 aid,
+ const u8 *bssid);
+ struct sk_buff *(*gen_vdev_down)(struct ath10k *ar, u32 vdev_id);
+ struct sk_buff *(*gen_vdev_set_param)(struct ath10k *ar, u32 vdev_id,
+ u32 param_id, u32 param_value);
+ struct sk_buff *(*gen_vdev_install_key)(struct ath10k *ar,
+ const struct wmi_vdev_install_key_arg *arg);
+ struct sk_buff *(*gen_vdev_spectral_conf)(struct ath10k *ar,
+ const struct wmi_vdev_spectral_conf_arg *arg);
+ struct sk_buff *(*gen_vdev_spectral_enable)(struct ath10k *ar, u32 vdev_id,
+ u32 trigger, u32 enable);
+ struct sk_buff *(*gen_vdev_wmm_conf)(struct ath10k *ar, u32 vdev_id,
+ const struct wmi_wmm_params_all_arg *arg);
+ struct sk_buff *(*gen_peer_create)(struct ath10k *ar, u32 vdev_id,
+ const u8 peer_addr[ETH_ALEN],
+ enum wmi_peer_type peer_type);
+ struct sk_buff *(*gen_peer_delete)(struct ath10k *ar, u32 vdev_id,
+ const u8 peer_addr[ETH_ALEN]);
+ struct sk_buff *(*gen_peer_flush)(struct ath10k *ar, u32 vdev_id,
+ const u8 peer_addr[ETH_ALEN],
+ u32 tid_bitmap);
+ struct sk_buff *(*gen_peer_set_param)(struct ath10k *ar, u32 vdev_id,
+ const u8 *peer_addr,
+ enum wmi_peer_param param_id,
+ u32 param_value);
+ struct sk_buff *(*gen_peer_assoc)(struct ath10k *ar,
+ const struct wmi_peer_assoc_complete_arg *arg);
+ struct sk_buff *(*gen_set_psmode)(struct ath10k *ar, u32 vdev_id,
+ enum wmi_sta_ps_mode psmode);
+ struct sk_buff *(*gen_set_sta_ps)(struct ath10k *ar, u32 vdev_id,
+ enum wmi_sta_powersave_param param_id,
+ u32 value);
+ struct sk_buff *(*gen_set_ap_ps)(struct ath10k *ar, u32 vdev_id,
+ const u8 *mac,
+ enum wmi_ap_ps_peer_param param_id,
+ u32 value);
+ struct sk_buff *(*gen_scan_chan_list)(struct ath10k *ar,
+ const struct wmi_scan_chan_list_arg *arg);
+ struct sk_buff *(*gen_scan_prob_req_oui)(struct ath10k *ar,
+ u32 prob_req_oui);
+ struct sk_buff *(*gen_beacon_dma)(struct ath10k *ar, u32 vdev_id,
+ const void *bcn, size_t bcn_len,
+ u32 bcn_paddr, bool dtim_zero,
+ bool deliver_cab);
+ struct sk_buff *(*gen_pdev_set_wmm)(struct ath10k *ar,
+ const struct wmi_wmm_params_all_arg *arg);
+ struct sk_buff *(*gen_request_stats)(struct ath10k *ar, u32 stats_mask);
+ struct sk_buff *(*gen_force_fw_hang)(struct ath10k *ar,
+ enum wmi_force_fw_hang_type type,
+ u32 delay_ms);
+ struct sk_buff *(*gen_mgmt_tx)(struct ath10k *ar, struct sk_buff *skb);
+ struct sk_buff *(*gen_mgmt_tx_send)(struct ath10k *ar,
+ struct sk_buff *skb,
+ dma_addr_t paddr);
+ int (*cleanup_mgmt_tx_send)(struct ath10k *ar, struct sk_buff *msdu);
+ struct sk_buff *(*gen_dbglog_cfg)(struct ath10k *ar, u64 module_enable,
+ u32 log_level);
+ struct sk_buff *(*gen_pktlog_enable)(struct ath10k *ar, u32 filter);
+ struct sk_buff *(*gen_pktlog_disable)(struct ath10k *ar);
+ struct sk_buff *(*gen_pdev_set_quiet_mode)(struct ath10k *ar,
+ u32 period, u32 duration,
+ u32 next_offset,
+ u32 enabled);
+ struct sk_buff *(*gen_pdev_get_temperature)(struct ath10k *ar);
+ struct sk_buff *(*gen_addba_clear_resp)(struct ath10k *ar, u32 vdev_id,
+ const u8 *mac);
+ struct sk_buff *(*gen_addba_send)(struct ath10k *ar, u32 vdev_id,
+ const u8 *mac, u32 tid, u32 buf_size);
+ struct sk_buff *(*gen_addba_set_resp)(struct ath10k *ar, u32 vdev_id,
+ const u8 *mac, u32 tid,
+ u32 status);
+ struct sk_buff *(*gen_delba_send)(struct ath10k *ar, u32 vdev_id,
+ const u8 *mac, u32 tid, u32 initiator,
+ u32 reason);
+ struct sk_buff *(*gen_bcn_tmpl)(struct ath10k *ar, u32 vdev_id,
+ u32 tim_ie_offset, struct sk_buff *bcn,
+ u32 prb_caps, u32 prb_erp,
+ void *prb_ies, size_t prb_ies_len);
+ struct sk_buff *(*gen_prb_tmpl)(struct ath10k *ar, u32 vdev_id,
+ struct sk_buff *bcn);
+ struct sk_buff *(*gen_p2p_go_bcn_ie)(struct ath10k *ar, u32 vdev_id,
+ const u8 *p2p_ie);
+ struct sk_buff *(*gen_vdev_sta_uapsd)(struct ath10k *ar, u32 vdev_id,
+ const u8 peer_addr[ETH_ALEN],
+ const struct wmi_sta_uapsd_auto_trig_arg *args,
+ u32 num_ac);
+ struct sk_buff *(*gen_sta_keepalive)(struct ath10k *ar,
+ const struct wmi_sta_keepalive_arg *arg);
+ struct sk_buff *(*gen_wow_enable)(struct ath10k *ar);
+ struct sk_buff *(*gen_wow_add_wakeup_event)(struct ath10k *ar, u32 vdev_id,
+ enum wmi_wow_wakeup_event event,
+ u32 enable);
+ struct sk_buff *(*gen_wow_host_wakeup_ind)(struct ath10k *ar);
+ struct sk_buff *(*gen_wow_add_pattern)(struct ath10k *ar, u32 vdev_id,
+ u32 pattern_id,
+ const u8 *pattern,
+ const u8 *mask,
+ int pattern_len,
+ int pattern_offset);
+ struct sk_buff *(*gen_wow_del_pattern)(struct ath10k *ar, u32 vdev_id,
+ u32 pattern_id);
+ struct sk_buff *(*gen_update_fw_tdls_state)(struct ath10k *ar,
+ u32 vdev_id,
+ enum wmi_tdls_state state);
+ struct sk_buff *(*gen_tdls_peer_update)(struct ath10k *ar,
+ const struct wmi_tdls_peer_update_cmd_arg *arg,
+ const struct wmi_tdls_peer_capab_arg *cap,
+ const struct wmi_channel_arg *chan);
+ struct sk_buff *(*gen_radar_found)
+ (struct ath10k *ar,
+ const struct ath10k_radar_found_info *arg);
+ struct sk_buff *(*gen_adaptive_qcs)(struct ath10k *ar, bool enable);
+ struct sk_buff *(*gen_pdev_get_tpc_config)(struct ath10k *ar,
+ u32 param);
+ void (*fw_stats_fill)(struct ath10k *ar,
+ struct ath10k_fw_stats *fw_stats,
+ char *buf);
+ struct sk_buff *(*gen_pdev_enable_adaptive_cca)(struct ath10k *ar,
+ u8 enable,
+ u32 detect_level,
+ u32 detect_margin);
+ struct sk_buff *(*ext_resource_config)(struct ath10k *ar,
+ enum wmi_host_platform_type type,
+ u32 fw_feature_bitmap);
+ int (*get_vdev_subtype)(struct ath10k *ar,
+ enum wmi_vdev_subtype subtype);
+ struct sk_buff *(*gen_pdev_bss_chan_info_req)
+ (struct ath10k *ar,
+ enum wmi_bss_survey_req_type type);
+ struct sk_buff *(*gen_echo)(struct ath10k *ar, u32 value);
+ struct sk_buff *(*gen_pdev_get_tpc_table_cmdid)(struct ath10k *ar,
+ u32 param);
+};
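+
+/* Each WMI ABI flavour (main, 10.x, TLV) fills in one wmi_ops table and
+ * attaches it as ar->wmi.ops; the inline wrappers below guard every call
+ * so that an op a given firmware ABI does not implement fails cleanly
+ * with -EOPNOTSUPP. A minimal sketch of the attach step, with placeholder
+ * names for the backend tables:
+ *
+ *   static const struct wmi_ops my_wmi_ops = {
+ *           .rx = my_wmi_rx,
+ *           .gen_init = my_gen_init,
+ *   };
+ *
+ *   ar->wmi.ops = &my_wmi_ops;
+ *   ar->wmi.cmd = &my_wmi_cmd_map;
+ */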
+
+int ath10k_wmi_cmd_send(struct ath10k *ar, struct sk_buff *skb, u32 cmd_id);
+
+static inline int
+ath10k_wmi_rx(struct ath10k *ar, struct sk_buff *skb)
+{
+ if (WARN_ON_ONCE(!ar->wmi.ops->rx))
+ return -EOPNOTSUPP;
+
+ ar->wmi.ops->rx(ar, skb);
+ return 0;
+}
+
+static inline int
+ath10k_wmi_map_svc(struct ath10k *ar, const __le32 *in, unsigned long *out,
+ size_t len)
+{
+ if (!ar->wmi.ops->map_svc)
+ return -EOPNOTSUPP;
+
+ ar->wmi.ops->map_svc(in, out, len);
+ return 0;
+}
+
+static inline int
+ath10k_wmi_map_svc_ext(struct ath10k *ar, const __le32 *in, unsigned long *out,
+ size_t len)
+{
+ if (!ar->wmi.ops->map_svc_ext)
+ return -EOPNOTSUPP;
+
+ ar->wmi.ops->map_svc_ext(in, out, len);
+ return 0;
+}
+
+static inline int
+ath10k_wmi_pull_scan(struct ath10k *ar, struct sk_buff *skb,
+ struct wmi_scan_ev_arg *arg)
+{
+ if (!ar->wmi.ops->pull_scan)
+ return -EOPNOTSUPP;
+
+ return ar->wmi.ops->pull_scan(ar, skb, arg);
+}
+
+static inline int
+ath10k_wmi_pull_mgmt_tx_compl(struct ath10k *ar, struct sk_buff *skb,
+ struct wmi_tlv_mgmt_tx_compl_ev_arg *arg)
+{
+ if (!ar->wmi.ops->pull_mgmt_tx_compl)
+ return -EOPNOTSUPP;
+
+ return ar->wmi.ops->pull_mgmt_tx_compl(ar, skb, arg);
+}
+
+static inline int
+ath10k_wmi_pull_mgmt_rx(struct ath10k *ar, struct sk_buff *skb,
+ struct wmi_mgmt_rx_ev_arg *arg)
+{
+ if (!ar->wmi.ops->pull_mgmt_rx)
+ return -EOPNOTSUPP;
+
+ return ar->wmi.ops->pull_mgmt_rx(ar, skb, arg);
+}
+
+static inline int
+ath10k_wmi_pull_ch_info(struct ath10k *ar, struct sk_buff *skb,
+ struct wmi_ch_info_ev_arg *arg)
+{
+ if (!ar->wmi.ops->pull_ch_info)
+ return -EOPNOTSUPP;
+
+ return ar->wmi.ops->pull_ch_info(ar, skb, arg);
+}
+
+static inline int
+ath10k_wmi_pull_vdev_start(struct ath10k *ar, struct sk_buff *skb,
+ struct wmi_vdev_start_ev_arg *arg)
+{
+ if (!ar->wmi.ops->pull_vdev_start)
+ return -EOPNOTSUPP;
+
+ return ar->wmi.ops->pull_vdev_start(ar, skb, arg);
+}
+
+static inline int
+ath10k_wmi_pull_peer_kick(struct ath10k *ar, struct sk_buff *skb,
+ struct wmi_peer_kick_ev_arg *arg)
+{
+ if (!ar->wmi.ops->pull_peer_kick)
+ return -EOPNOTSUPP;
+
+ return ar->wmi.ops->pull_peer_kick(ar, skb, arg);
+}
+
+static inline int
+ath10k_wmi_pull_swba(struct ath10k *ar, struct sk_buff *skb,
+ struct wmi_swba_ev_arg *arg)
+{
+ if (!ar->wmi.ops->pull_swba)
+ return -EOPNOTSUPP;
+
+ return ar->wmi.ops->pull_swba(ar, skb, arg);
+}
+
+static inline int
+ath10k_wmi_pull_phyerr_hdr(struct ath10k *ar, struct sk_buff *skb,
+ struct wmi_phyerr_hdr_arg *arg)
+{
+ if (!ar->wmi.ops->pull_phyerr_hdr)
+ return -EOPNOTSUPP;
+
+ return ar->wmi.ops->pull_phyerr_hdr(ar, skb, arg);
+}
+
+static inline int
+ath10k_wmi_pull_phyerr(struct ath10k *ar, const void *phyerr_buf,
+ int left_len, struct wmi_phyerr_ev_arg *arg)
+{
+ if (!ar->wmi.ops->pull_phyerr)
+ return -EOPNOTSUPP;
+
+ return ar->wmi.ops->pull_phyerr(ar, phyerr_buf, left_len, arg);
+}
+
+static inline int
+ath10k_wmi_pull_svc_rdy(struct ath10k *ar, struct sk_buff *skb,
+ struct wmi_svc_rdy_ev_arg *arg)
+{
+ if (!ar->wmi.ops->pull_svc_rdy)
+ return -EOPNOTSUPP;
+
+ return ar->wmi.ops->pull_svc_rdy(ar, skb, arg);
+}
+
+static inline int
+ath10k_wmi_pull_rdy(struct ath10k *ar, struct sk_buff *skb,
+ struct wmi_rdy_ev_arg *arg)
+{
+ if (!ar->wmi.ops->pull_rdy)
+ return -EOPNOTSUPP;
+
+ return ar->wmi.ops->pull_rdy(ar, skb, arg);
+}
+
+static inline int
+ath10k_wmi_pull_svc_avail(struct ath10k *ar, struct sk_buff *skb,
+ struct wmi_svc_avail_ev_arg *arg)
+{
+ if (!ar->wmi.ops->pull_svc_avail)
+ return -EOPNOTSUPP;
+
+ return ar->wmi.ops->pull_svc_avail(ar, skb, arg);
+}
+
+static inline int
+ath10k_wmi_pull_fw_stats(struct ath10k *ar, struct sk_buff *skb,
+ struct ath10k_fw_stats *stats)
+{
+ if (!ar->wmi.ops->pull_fw_stats)
+ return -EOPNOTSUPP;
+
+ return ar->wmi.ops->pull_fw_stats(ar, skb, stats);
+}
+
+static inline int
+ath10k_wmi_pull_roam_ev(struct ath10k *ar, struct sk_buff *skb,
+ struct wmi_roam_ev_arg *arg)
+{
+ if (!ar->wmi.ops->pull_roam_ev)
+ return -EOPNOTSUPP;
+
+ return ar->wmi.ops->pull_roam_ev(ar, skb, arg);
+}
+
+static inline int
+ath10k_wmi_pull_wow_event(struct ath10k *ar, struct sk_buff *skb,
+ struct wmi_wow_ev_arg *arg)
+{
+ if (!ar->wmi.ops->pull_wow_event)
+ return -EOPNOTSUPP;
+
+ return ar->wmi.ops->pull_wow_event(ar, skb, arg);
+}
+
+static inline int
+ath10k_wmi_pull_echo_ev(struct ath10k *ar, struct sk_buff *skb,
+ struct wmi_echo_ev_arg *arg)
+{
+ if (!ar->wmi.ops->pull_echo_ev)
+ return -EOPNOTSUPP;
+
+ return ar->wmi.ops->pull_echo_ev(ar, skb, arg);
+}
+
+static inline int
+ath10k_wmi_pull_dfs_status(struct ath10k *ar, struct sk_buff *skb,
+ struct wmi_dfs_status_ev_arg *arg)
+{
+ if (!ar->wmi.ops->pull_dfs_status_ev)
+ return -EOPNOTSUPP;
+
+ return ar->wmi.ops->pull_dfs_status_ev(ar, skb, arg);
+}
+
+static inline enum wmi_txbf_conf
+ath10k_wmi_get_txbf_conf_scheme(struct ath10k *ar)
+{
+ if (!ar->wmi.ops->get_txbf_conf_scheme)
+ return WMI_TXBF_CONF_UNSUPPORTED;
+
+ return ar->wmi.ops->get_txbf_conf_scheme(ar);
+}
+
+static inline int
+ath10k_wmi_cleanup_mgmt_tx_send(struct ath10k *ar, struct sk_buff *msdu)
+{
+ if (!ar->wmi.ops->cleanup_mgmt_tx_send)
+ return -EOPNOTSUPP;
+
+ return ar->wmi.ops->cleanup_mgmt_tx_send(ar, msdu);
+}
+
+static inline int
+ath10k_wmi_mgmt_tx_send(struct ath10k *ar, struct sk_buff *msdu,
+ dma_addr_t paddr)
+{
+ struct sk_buff *skb;
+ int ret;
+
+ if (!ar->wmi.ops->gen_mgmt_tx_send)
+ return -EOPNOTSUPP;
+
+ skb = ar->wmi.ops->gen_mgmt_tx_send(ar, msdu, paddr);
+ if (IS_ERR(skb))
+ return PTR_ERR(skb);
+
+ ret = ath10k_wmi_cmd_send(ar, skb,
+ ar->wmi.cmd->mgmt_tx_send_cmdid);
+ if (ret)
+ return ret;
+
+ return 0;
+}
+
+static inline int
+ath10k_wmi_mgmt_tx(struct ath10k *ar, struct sk_buff *msdu)
+{
+ struct ieee80211_tx_info *info = IEEE80211_SKB_CB(msdu);
+ struct sk_buff *skb;
+ int ret;
+
+ if (!ar->wmi.ops->gen_mgmt_tx)
+ return -EOPNOTSUPP;
+
+ skb = ar->wmi.ops->gen_mgmt_tx(ar, msdu);
+ if (IS_ERR(skb))
+ return PTR_ERR(skb);
+
+ ret = ath10k_wmi_cmd_send(ar, skb,
+ ar->wmi.cmd->mgmt_tx_cmdid);
+ if (ret)
+ return ret;
+
+ /* FIXME There's no ACK event for Management Tx. This probably
+ * shouldn't be called here either.
+ */
+ info->flags |= IEEE80211_TX_STAT_ACK;
+ ieee80211_tx_status_irqsafe(ar->hw, msdu);
+
+ return 0;
+}
+
+static inline int
+ath10k_wmi_pdev_set_regdomain(struct ath10k *ar, u16 rd, u16 rd2g, u16 rd5g,
+ u16 ctl2g, u16 ctl5g,
+ enum wmi_dfs_region dfs_reg)
+{
+ struct sk_buff *skb;
+
+ if (!ar->wmi.ops->gen_pdev_set_rd)
+ return -EOPNOTSUPP;
+
+ skb = ar->wmi.ops->gen_pdev_set_rd(ar, rd, rd2g, rd5g, ctl2g, ctl5g,
+ dfs_reg);
+ if (IS_ERR(skb))
+ return PTR_ERR(skb);
+
+ return ath10k_wmi_cmd_send(ar, skb,
+ ar->wmi.cmd->pdev_set_regdomain_cmdid);
+}
+
+static inline int
+ath10k_wmi_pdev_suspend_target(struct ath10k *ar, u32 suspend_opt)
+{
+ struct sk_buff *skb;
+
+ if (!ar->wmi.ops->gen_pdev_suspend)
+ return -EOPNOTSUPP;
+
+ skb = ar->wmi.ops->gen_pdev_suspend(ar, suspend_opt);
+ if (IS_ERR(skb))
+ return PTR_ERR(skb);
+
+ return ath10k_wmi_cmd_send(ar, skb, ar->wmi.cmd->pdev_suspend_cmdid);
+}
+
+static inline int
+ath10k_wmi_pdev_resume_target(struct ath10k *ar)
+{
+ struct sk_buff *skb;
+
+ if (!ar->wmi.ops->gen_pdev_resume)
+ return -EOPNOTSUPP;
+
+ skb = ar->wmi.ops->gen_pdev_resume(ar);
+ if (IS_ERR(skb))
+ return PTR_ERR(skb);
+
+ return ath10k_wmi_cmd_send(ar, skb, ar->wmi.cmd->pdev_resume_cmdid);
+}
+
+static inline int
+ath10k_wmi_pdev_set_param(struct ath10k *ar, u32 id, u32 value)
+{
+ struct sk_buff *skb;
+
+ if (!ar->wmi.ops->gen_pdev_set_param)
+ return -EOPNOTSUPP;
+
+ skb = ar->wmi.ops->gen_pdev_set_param(ar, id, value);
+ if (IS_ERR(skb))
+ return PTR_ERR(skb);
+
+ return ath10k_wmi_cmd_send(ar, skb, ar->wmi.cmd->pdev_set_param_cmdid);
+}
+
+static inline int
+ath10k_wmi_cmd_init(struct ath10k *ar)
+{
+ struct sk_buff *skb;
+
+ if (!ar->wmi.ops->gen_init)
+ return -EOPNOTSUPP;
+
+ skb = ar->wmi.ops->gen_init(ar);
+ if (IS_ERR(skb))
+ return PTR_ERR(skb);
+
+ return ath10k_wmi_cmd_send(ar, skb, ar->wmi.cmd->init_cmdid);
+}
+
+static inline int
+ath10k_wmi_start_scan(struct ath10k *ar,
+ const struct wmi_start_scan_arg *arg)
+{
+ struct sk_buff *skb;
+
+ if (!ar->wmi.ops->gen_start_scan)
+ return -EOPNOTSUPP;
+
+ skb = ar->wmi.ops->gen_start_scan(ar, arg);
+ if (IS_ERR(skb))
+ return PTR_ERR(skb);
+
+ return ath10k_wmi_cmd_send(ar, skb, ar->wmi.cmd->start_scan_cmdid);
+}
+
+static inline int
+ath10k_wmi_stop_scan(struct ath10k *ar, const struct wmi_stop_scan_arg *arg)
+{
+ struct sk_buff *skb;
+
+ if (!ar->wmi.ops->gen_stop_scan)
+ return -EOPNOTSUPP;
+
+ skb = ar->wmi.ops->gen_stop_scan(ar, arg);
+ if (IS_ERR(skb))
+ return PTR_ERR(skb);
+
+ return ath10k_wmi_cmd_send(ar, skb, ar->wmi.cmd->stop_scan_cmdid);
+}
+
+static inline int
+ath10k_wmi_vdev_create(struct ath10k *ar, u32 vdev_id,
+ enum wmi_vdev_type type,
+ enum wmi_vdev_subtype subtype,
+ const u8 macaddr[ETH_ALEN])
+{
+ struct sk_buff *skb;
+
+ if (!ar->wmi.ops->gen_vdev_create)
+ return -EOPNOTSUPP;
+
+ skb = ar->wmi.ops->gen_vdev_create(ar, vdev_id, type, subtype, macaddr);
+ if (IS_ERR(skb))
+ return PTR_ERR(skb);
+
+ return ath10k_wmi_cmd_send(ar, skb, ar->wmi.cmd->vdev_create_cmdid);
+}
+
+static inline int
+ath10k_wmi_vdev_delete(struct ath10k *ar, u32 vdev_id)
+{
+ struct sk_buff *skb;
+
+ if (!ar->wmi.ops->gen_vdev_delete)
+ return -EOPNOTSUPP;
+
+ skb = ar->wmi.ops->gen_vdev_delete(ar, vdev_id);
+ if (IS_ERR(skb))
+ return PTR_ERR(skb);
+
+ return ath10k_wmi_cmd_send(ar, skb, ar->wmi.cmd->vdev_delete_cmdid);
+}
+
+static inline int
+ath10k_wmi_vdev_start(struct ath10k *ar,
+ const struct wmi_vdev_start_request_arg *arg)
+{
+ struct sk_buff *skb;
+
+ if (!ar->wmi.ops->gen_vdev_start)
+ return -EOPNOTSUPP;
+
+ skb = ar->wmi.ops->gen_vdev_start(ar, arg, false);
+ if (IS_ERR(skb))
+ return PTR_ERR(skb);
+
+ return ath10k_wmi_cmd_send(ar, skb,
+ ar->wmi.cmd->vdev_start_request_cmdid);
+}
+
+static inline int
+ath10k_wmi_vdev_restart(struct ath10k *ar,
+ const struct wmi_vdev_start_request_arg *arg)
+{
+ struct sk_buff *skb;
+
+ if (!ar->wmi.ops->gen_vdev_start)
+ return -EOPNOTSUPP;
+
+ skb = ar->wmi.ops->gen_vdev_start(ar, arg, true);
+ if (IS_ERR(skb))
+ return PTR_ERR(skb);
+
+ return ath10k_wmi_cmd_send(ar, skb,
+ ar->wmi.cmd->vdev_restart_request_cmdid);
+}
+
+static inline int
+ath10k_wmi_vdev_stop(struct ath10k *ar, u32 vdev_id)
+{
+ struct sk_buff *skb;
+
+ if (!ar->wmi.ops->gen_vdev_stop)
+ return -EOPNOTSUPP;
+
+ skb = ar->wmi.ops->gen_vdev_stop(ar, vdev_id);
+ if (IS_ERR(skb))
+ return PTR_ERR(skb);
+
+ return ath10k_wmi_cmd_send(ar, skb, ar->wmi.cmd->vdev_stop_cmdid);
+}
+
+static inline int
+ath10k_wmi_vdev_up(struct ath10k *ar, u32 vdev_id, u32 aid, const u8 *bssid)
+{
+ struct sk_buff *skb;
+
+ if (!ar->wmi.ops->gen_vdev_up)
+ return -EOPNOTSUPP;
+
+ skb = ar->wmi.ops->gen_vdev_up(ar, vdev_id, aid, bssid);
+ if (IS_ERR(skb))
+ return PTR_ERR(skb);
+
+ return ath10k_wmi_cmd_send(ar, skb, ar->wmi.cmd->vdev_up_cmdid);
+}
+
+static inline int
+ath10k_wmi_vdev_down(struct ath10k *ar, u32 vdev_id)
+{
+ struct sk_buff *skb;
+
+ if (!ar->wmi.ops->gen_vdev_down)
+ return -EOPNOTSUPP;
+
+ skb = ar->wmi.ops->gen_vdev_down(ar, vdev_id);
+ if (IS_ERR(skb))
+ return PTR_ERR(skb);
+
+ return ath10k_wmi_cmd_send(ar, skb, ar->wmi.cmd->vdev_down_cmdid);
+}
+
+static inline int
+ath10k_wmi_vdev_set_param(struct ath10k *ar, u32 vdev_id, u32 param_id,
+ u32 param_value)
+{
+ struct sk_buff *skb;
+
+ if (!ar->wmi.ops->gen_vdev_set_param)
+ return -EOPNOTSUPP;
+
+ skb = ar->wmi.ops->gen_vdev_set_param(ar, vdev_id, param_id,
+ param_value);
+ if (IS_ERR(skb))
+ return PTR_ERR(skb);
+
+ return ath10k_wmi_cmd_send(ar, skb, ar->wmi.cmd->vdev_set_param_cmdid);
+}
+
+static inline int
+ath10k_wmi_vdev_install_key(struct ath10k *ar,
+ const struct wmi_vdev_install_key_arg *arg)
+{
+ struct sk_buff *skb;
+
+ if (!ar->wmi.ops->gen_vdev_install_key)
+ return -EOPNOTSUPP;
+
+ skb = ar->wmi.ops->gen_vdev_install_key(ar, arg);
+ if (IS_ERR(skb))
+ return PTR_ERR(skb);
+
+ return ath10k_wmi_cmd_send(ar, skb,
+ ar->wmi.cmd->vdev_install_key_cmdid);
+}
+
+static inline int
+ath10k_wmi_vdev_spectral_conf(struct ath10k *ar,
+ const struct wmi_vdev_spectral_conf_arg *arg)
+{
+ struct sk_buff *skb;
+ u32 cmd_id;
+
+ if (!ar->wmi.ops->gen_vdev_spectral_conf)
+ return -EOPNOTSUPP;
+
+ skb = ar->wmi.ops->gen_vdev_spectral_conf(ar, arg);
+ if (IS_ERR(skb))
+ return PTR_ERR(skb);
+
+ cmd_id = ar->wmi.cmd->vdev_spectral_scan_configure_cmdid;
+ return ath10k_wmi_cmd_send(ar, skb, cmd_id);
+}
+
+static inline int
+ath10k_wmi_vdev_spectral_enable(struct ath10k *ar, u32 vdev_id, u32 trigger,
+ u32 enable)
+{
+ struct sk_buff *skb;
+ u32 cmd_id;
+
+ if (!ar->wmi.ops->gen_vdev_spectral_enable)
+ return -EOPNOTSUPP;
+
+ skb = ar->wmi.ops->gen_vdev_spectral_enable(ar, vdev_id, trigger,
+ enable);
+ if (IS_ERR(skb))
+ return PTR_ERR(skb);
+
+ cmd_id = ar->wmi.cmd->vdev_spectral_scan_enable_cmdid;
+ return ath10k_wmi_cmd_send(ar, skb, cmd_id);
+}
+
+static inline int
+ath10k_wmi_vdev_sta_uapsd(struct ath10k *ar, u32 vdev_id,
+ const u8 peer_addr[ETH_ALEN],
+ const struct wmi_sta_uapsd_auto_trig_arg *args,
+ u32 num_ac)
+{
+ struct sk_buff *skb;
+ u32 cmd_id;
+
+ if (!ar->wmi.ops->gen_vdev_sta_uapsd)
+ return -EOPNOTSUPP;
+
+ skb = ar->wmi.ops->gen_vdev_sta_uapsd(ar, vdev_id, peer_addr, args,
+ num_ac);
+ if (IS_ERR(skb))
+ return PTR_ERR(skb);
+
+ cmd_id = ar->wmi.cmd->sta_uapsd_auto_trig_cmdid;
+ return ath10k_wmi_cmd_send(ar, skb, cmd_id);
+}
+
+static inline int
+ath10k_wmi_vdev_wmm_conf(struct ath10k *ar, u32 vdev_id,
+ const struct wmi_wmm_params_all_arg *arg)
+{
+ struct sk_buff *skb;
+ u32 cmd_id;
+
+ if (!ar->wmi.ops->gen_vdev_wmm_conf)
+ return -EOPNOTSUPP;
+
+ skb = ar->wmi.ops->gen_vdev_wmm_conf(ar, vdev_id, arg);
+ if (IS_ERR(skb))
+ return PTR_ERR(skb);
+
+ cmd_id = ar->wmi.cmd->vdev_set_wmm_params_cmdid;
+ return ath10k_wmi_cmd_send(ar, skb, cmd_id);
+}
+
+static inline int
+ath10k_wmi_peer_create(struct ath10k *ar, u32 vdev_id,
+ const u8 peer_addr[ETH_ALEN],
+ enum wmi_peer_type peer_type)
+{
+ struct sk_buff *skb;
+
+ if (!ar->wmi.ops->gen_peer_create)
+ return -EOPNOTSUPP;
+
+ skb = ar->wmi.ops->gen_peer_create(ar, vdev_id, peer_addr, peer_type);
+ if (IS_ERR(skb))
+ return PTR_ERR(skb);
+
+ return ath10k_wmi_cmd_send(ar, skb, ar->wmi.cmd->peer_create_cmdid);
+}
+
+static inline int
+ath10k_wmi_peer_delete(struct ath10k *ar, u32 vdev_id,
+ const u8 peer_addr[ETH_ALEN])
+{
+ struct sk_buff *skb;
+
+ if (!ar->wmi.ops->gen_peer_delete)
+ return -EOPNOTSUPP;
+
+ skb = ar->wmi.ops->gen_peer_delete(ar, vdev_id, peer_addr);
+ if (IS_ERR(skb))
+ return PTR_ERR(skb);
+
+ return ath10k_wmi_cmd_send(ar, skb, ar->wmi.cmd->peer_delete_cmdid);
+}
+
+static inline int
+ath10k_wmi_peer_flush(struct ath10k *ar, u32 vdev_id,
+ const u8 peer_addr[ETH_ALEN], u32 tid_bitmap)
+{
+ struct sk_buff *skb;
+
+ if (!ar->wmi.ops->gen_peer_flush)
+ return -EOPNOTSUPP;
+
+ skb = ar->wmi.ops->gen_peer_flush(ar, vdev_id, peer_addr, tid_bitmap);
+ if (IS_ERR(skb))
+ return PTR_ERR(skb);
+
+ return ath10k_wmi_cmd_send(ar, skb, ar->wmi.cmd->peer_flush_tids_cmdid);
+}
+
+static inline int
+ath10k_wmi_peer_set_param(struct ath10k *ar, u32 vdev_id, const u8 *peer_addr,
+ enum wmi_peer_param param_id, u32 param_value)
+{
+ struct sk_buff *skb;
+
+ if (!ar->wmi.ops->gen_peer_set_param)
+ return -EOPNOTSUPP;
+
+ skb = ar->wmi.ops->gen_peer_set_param(ar, vdev_id, peer_addr, param_id,
+ param_value);
+ if (IS_ERR(skb))
+ return PTR_ERR(skb);
+
+ return ath10k_wmi_cmd_send(ar, skb, ar->wmi.cmd->peer_set_param_cmdid);
+}
+
+static inline int
+ath10k_wmi_set_psmode(struct ath10k *ar, u32 vdev_id,
+ enum wmi_sta_ps_mode psmode)
+{
+ struct sk_buff *skb;
+
+ if (!ar->wmi.ops->gen_set_psmode)
+ return -EOPNOTSUPP;
+
+ skb = ar->wmi.ops->gen_set_psmode(ar, vdev_id, psmode);
+ if (IS_ERR(skb))
+ return PTR_ERR(skb);
+
+ return ath10k_wmi_cmd_send(ar, skb,
+ ar->wmi.cmd->sta_powersave_mode_cmdid);
+}
+
+static inline int
+ath10k_wmi_set_sta_ps_param(struct ath10k *ar, u32 vdev_id,
+ enum wmi_sta_powersave_param param_id, u32 value)
+{
+ struct sk_buff *skb;
+
+ if (!ar->wmi.ops->gen_set_sta_ps)
+ return -EOPNOTSUPP;
+
+ skb = ar->wmi.ops->gen_set_sta_ps(ar, vdev_id, param_id, value);
+ if (IS_ERR(skb))
+ return PTR_ERR(skb);
+
+ return ath10k_wmi_cmd_send(ar, skb,
+ ar->wmi.cmd->sta_powersave_param_cmdid);
+}
+
+static inline int
+ath10k_wmi_set_ap_ps_param(struct ath10k *ar, u32 vdev_id, const u8 *mac,
+ enum wmi_ap_ps_peer_param param_id, u32 value)
+{
+ struct sk_buff *skb;
+
+ if (!ar->wmi.ops->gen_set_ap_ps)
+ return -EOPNOTSUPP;
+
+ skb = ar->wmi.ops->gen_set_ap_ps(ar, vdev_id, mac, param_id, value);
+ if (IS_ERR(skb))
+ return PTR_ERR(skb);
+
+ return ath10k_wmi_cmd_send(ar, skb,
+ ar->wmi.cmd->ap_ps_peer_param_cmdid);
+}
+
+static inline int
+ath10k_wmi_scan_chan_list(struct ath10k *ar,
+ const struct wmi_scan_chan_list_arg *arg)
+{
+ struct sk_buff *skb;
+
+ if (!ar->wmi.ops->gen_scan_chan_list)
+ return -EOPNOTSUPP;
+
+ skb = ar->wmi.ops->gen_scan_chan_list(ar, arg);
+ if (IS_ERR(skb))
+ return PTR_ERR(skb);
+
+ return ath10k_wmi_cmd_send(ar, skb, ar->wmi.cmd->scan_chan_list_cmdid);
+}
+
+static inline int
+ath10k_wmi_scan_prob_req_oui(struct ath10k *ar, const u8 mac_addr[ETH_ALEN])
+{
+ struct sk_buff *skb;
+ u32 prob_req_oui;
+
+ prob_req_oui = (((u32)mac_addr[0]) << 16) |
+ (((u32)mac_addr[1]) << 8) | mac_addr[2];
+
+ if (!ar->wmi.ops->gen_scan_prob_req_oui)
+ return -EOPNOTSUPP;
+
+ skb = ar->wmi.ops->gen_scan_prob_req_oui(ar, prob_req_oui);
+ if (IS_ERR(skb))
+ return PTR_ERR(skb);
+
+ return ath10k_wmi_cmd_send(ar, skb,
+ ar->wmi.cmd->scan_prob_req_oui_cmdid);
+}
+
+static inline int
+ath10k_wmi_peer_assoc(struct ath10k *ar,
+ const struct wmi_peer_assoc_complete_arg *arg)
+{
+ struct sk_buff *skb;
+
+ if (!ar->wmi.ops->gen_peer_assoc)
+ return -EOPNOTSUPP;
+
+ skb = ar->wmi.ops->gen_peer_assoc(ar, arg);
+ if (IS_ERR(skb))
+ return PTR_ERR(skb);
+
+ return ath10k_wmi_cmd_send(ar, skb, ar->wmi.cmd->peer_assoc_cmdid);
+}
+
+static inline int
+ath10k_wmi_beacon_send_ref_nowait(struct ath10k *ar, u32 vdev_id,
+ const void *bcn, size_t bcn_len,
+ u32 bcn_paddr, bool dtim_zero,
+ bool deliver_cab)
+{
+ struct sk_buff *skb;
+ int ret;
+
+ if (!ar->wmi.ops->gen_beacon_dma)
+ return -EOPNOTSUPP;
+
+ skb = ar->wmi.ops->gen_beacon_dma(ar, vdev_id, bcn, bcn_len, bcn_paddr,
+ dtim_zero, deliver_cab);
+ if (IS_ERR(skb))
+ return PTR_ERR(skb);
+
+ ret = ath10k_wmi_cmd_send_nowait(ar, skb,
+ ar->wmi.cmd->pdev_send_bcn_cmdid);
+ if (ret) {
+ dev_kfree_skb(skb);
+ return ret;
+ }
+
+ return 0;
+}
+
+static inline int
+ath10k_wmi_pdev_set_wmm_params(struct ath10k *ar,
+ const struct wmi_wmm_params_all_arg *arg)
+{
+ struct sk_buff *skb;
+
+ if (!ar->wmi.ops->gen_pdev_set_wmm)
+ return -EOPNOTSUPP;
+
+ skb = ar->wmi.ops->gen_pdev_set_wmm(ar, arg);
+ if (IS_ERR(skb))
+ return PTR_ERR(skb);
+
+ return ath10k_wmi_cmd_send(ar, skb,
+ ar->wmi.cmd->pdev_set_wmm_params_cmdid);
+}
+
+static inline int
+ath10k_wmi_request_stats(struct ath10k *ar, u32 stats_mask)
+{
+ struct sk_buff *skb;
+
+ if (!ar->wmi.ops->gen_request_stats)
+ return -EOPNOTSUPP;
+
+ skb = ar->wmi.ops->gen_request_stats(ar, stats_mask);
+ if (IS_ERR(skb))
+ return PTR_ERR(skb);
+
+ return ath10k_wmi_cmd_send(ar, skb, ar->wmi.cmd->request_stats_cmdid);
+}
+
+static inline int
+ath10k_wmi_force_fw_hang(struct ath10k *ar,
+ enum wmi_force_fw_hang_type type, u32 delay_ms)
+{
+ struct sk_buff *skb;
+
+ if (!ar->wmi.ops->gen_force_fw_hang)
+ return -EOPNOTSUPP;
+
+ skb = ar->wmi.ops->gen_force_fw_hang(ar, type, delay_ms);
+ if (IS_ERR(skb))
+ return PTR_ERR(skb);
+
+ return ath10k_wmi_cmd_send(ar, skb, ar->wmi.cmd->force_fw_hang_cmdid);
+}
+
+static inline int
+ath10k_wmi_dbglog_cfg(struct ath10k *ar, u64 module_enable, u32 log_level)
+{
+ struct sk_buff *skb;
+
+ if (!ar->wmi.ops->gen_dbglog_cfg)
+ return -EOPNOTSUPP;
+
+ skb = ar->wmi.ops->gen_dbglog_cfg(ar, module_enable, log_level);
+ if (IS_ERR(skb))
+ return PTR_ERR(skb);
+
+ return ath10k_wmi_cmd_send(ar, skb, ar->wmi.cmd->dbglog_cfg_cmdid);
+}
+
+static inline int
+ath10k_wmi_pdev_pktlog_enable(struct ath10k *ar, u32 filter)
+{
+ struct sk_buff *skb;
+
+ if (!ar->wmi.ops->gen_pktlog_enable)
+ return -EOPNOTSUPP;
+
+ skb = ar->wmi.ops->gen_pktlog_enable(ar, filter);
+ if (IS_ERR(skb))
+ return PTR_ERR(skb);
+
+ return ath10k_wmi_cmd_send(ar, skb, ar->wmi.cmd->pdev_pktlog_enable_cmdid);
+}
+
+static inline int
+ath10k_wmi_pdev_pktlog_disable(struct ath10k *ar)
+{
+ struct sk_buff *skb;
+
+ if (!ar->wmi.ops->gen_pktlog_disable)
+ return -EOPNOTSUPP;
+
+ skb = ar->wmi.ops->gen_pktlog_disable(ar);
+ if (IS_ERR(skb))
+ return PTR_ERR(skb);
+
+ return ath10k_wmi_cmd_send(ar, skb,
+ ar->wmi.cmd->pdev_pktlog_disable_cmdid);
+}
+
+static inline int
+ath10k_wmi_pdev_set_quiet_mode(struct ath10k *ar, u32 period, u32 duration,
+ u32 next_offset, u32 enabled)
+{
+ struct sk_buff *skb;
+
+ if (!ar->wmi.ops->gen_pdev_set_quiet_mode)
+ return -EOPNOTSUPP;
+
+ skb = ar->wmi.ops->gen_pdev_set_quiet_mode(ar, period, duration,
+ next_offset, enabled);
+ if (IS_ERR(skb))
+ return PTR_ERR(skb);
+
+ return ath10k_wmi_cmd_send(ar, skb,
+ ar->wmi.cmd->pdev_set_quiet_mode_cmdid);
+}
+
+static inline int
+ath10k_wmi_pdev_get_temperature(struct ath10k *ar)
+{
+ struct sk_buff *skb;
+
+ if (!ar->wmi.ops->gen_pdev_get_temperature)
+ return -EOPNOTSUPP;
+
+ skb = ar->wmi.ops->gen_pdev_get_temperature(ar);
+ if (IS_ERR(skb))
+ return PTR_ERR(skb);
+
+ return ath10k_wmi_cmd_send(ar, skb,
+ ar->wmi.cmd->pdev_get_temperature_cmdid);
+}
+
+static inline int
+ath10k_wmi_addba_clear_resp(struct ath10k *ar, u32 vdev_id, const u8 *mac)
+{
+ struct sk_buff *skb;
+
+ if (!ar->wmi.ops->gen_addba_clear_resp)
+ return -EOPNOTSUPP;
+
+ skb = ar->wmi.ops->gen_addba_clear_resp(ar, vdev_id, mac);
+ if (IS_ERR(skb))
+ return PTR_ERR(skb);
+
+ return ath10k_wmi_cmd_send(ar, skb,
+ ar->wmi.cmd->addba_clear_resp_cmdid);
+}
+
+static inline int
+ath10k_wmi_addba_send(struct ath10k *ar, u32 vdev_id, const u8 *mac,
+ u32 tid, u32 buf_size)
+{
+ struct sk_buff *skb;
+
+ if (!ar->wmi.ops->gen_addba_send)
+ return -EOPNOTSUPP;
+
+ skb = ar->wmi.ops->gen_addba_send(ar, vdev_id, mac, tid, buf_size);
+ if (IS_ERR(skb))
+ return PTR_ERR(skb);
+
+ return ath10k_wmi_cmd_send(ar, skb,
+ ar->wmi.cmd->addba_send_cmdid);
+}
+
+static inline int
+ath10k_wmi_addba_set_resp(struct ath10k *ar, u32 vdev_id, const u8 *mac,
+ u32 tid, u32 status)
+{
+ struct sk_buff *skb;
+
+ if (!ar->wmi.ops->gen_addba_set_resp)
+ return -EOPNOTSUPP;
+
+ skb = ar->wmi.ops->gen_addba_set_resp(ar, vdev_id, mac, tid, status);
+ if (IS_ERR(skb))
+ return PTR_ERR(skb);
+
+ return ath10k_wmi_cmd_send(ar, skb,
+ ar->wmi.cmd->addba_set_resp_cmdid);
+}
+
+static inline int
+ath10k_wmi_delba_send(struct ath10k *ar, u32 vdev_id, const u8 *mac,
+ u32 tid, u32 initiator, u32 reason)
+{
+ struct sk_buff *skb;
+
+ if (!ar->wmi.ops->gen_delba_send)
+ return -EOPNOTSUPP;
+
+ skb = ar->wmi.ops->gen_delba_send(ar, vdev_id, mac, tid, initiator,
+ reason);
+ if (IS_ERR(skb))
+ return PTR_ERR(skb);
+
+ return ath10k_wmi_cmd_send(ar, skb,
+ ar->wmi.cmd->delba_send_cmdid);
+}
+
+static inline int
+ath10k_wmi_bcn_tmpl(struct ath10k *ar, u32 vdev_id, u32 tim_ie_offset,
+ struct sk_buff *bcn, u32 prb_caps, u32 prb_erp,
+ void *prb_ies, size_t prb_ies_len)
+{
+ struct sk_buff *skb;
+
+ if (!ar->wmi.ops->gen_bcn_tmpl)
+ return -EOPNOTSUPP;
+
+ skb = ar->wmi.ops->gen_bcn_tmpl(ar, vdev_id, tim_ie_offset, bcn,
+ prb_caps, prb_erp, prb_ies,
+ prb_ies_len);
+ if (IS_ERR(skb))
+ return PTR_ERR(skb);
+
+ return ath10k_wmi_cmd_send(ar, skb, ar->wmi.cmd->bcn_tmpl_cmdid);
+}
+
+static inline int
+ath10k_wmi_prb_tmpl(struct ath10k *ar, u32 vdev_id, struct sk_buff *prb)
+{
+ struct sk_buff *skb;
+
+ if (!ar->wmi.ops->gen_prb_tmpl)
+ return -EOPNOTSUPP;
+
+ skb = ar->wmi.ops->gen_prb_tmpl(ar, vdev_id, prb);
+ if (IS_ERR(skb))
+ return PTR_ERR(skb);
+
+ return ath10k_wmi_cmd_send(ar, skb, ar->wmi.cmd->prb_tmpl_cmdid);
+}
+
+static inline int
+ath10k_wmi_p2p_go_bcn_ie(struct ath10k *ar, u32 vdev_id, const u8 *p2p_ie)
+{
+ struct sk_buff *skb;
+
+ if (!ar->wmi.ops->gen_p2p_go_bcn_ie)
+ return -EOPNOTSUPP;
+
+ skb = ar->wmi.ops->gen_p2p_go_bcn_ie(ar, vdev_id, p2p_ie);
+ if (IS_ERR(skb))
+ return PTR_ERR(skb);
+
+ return ath10k_wmi_cmd_send(ar, skb, ar->wmi.cmd->p2p_go_set_beacon_ie);
+}
+
+static inline int
+ath10k_wmi_sta_keepalive(struct ath10k *ar,
+ const struct wmi_sta_keepalive_arg *arg)
+{
+ struct sk_buff *skb;
+ u32 cmd_id;
+
+ if (!ar->wmi.ops->gen_sta_keepalive)
+ return -EOPNOTSUPP;
+
+ skb = ar->wmi.ops->gen_sta_keepalive(ar, arg);
+ if (IS_ERR(skb))
+ return PTR_ERR(skb);
+
+ cmd_id = ar->wmi.cmd->sta_keepalive_cmd;
+ return ath10k_wmi_cmd_send(ar, skb, cmd_id);
+}
+
+static inline int
+ath10k_wmi_wow_enable(struct ath10k *ar)
+{
+ struct sk_buff *skb;
+ u32 cmd_id;
+
+ if (!ar->wmi.ops->gen_wow_enable)
+ return -EOPNOTSUPP;
+
+ skb = ar->wmi.ops->gen_wow_enable(ar);
+ if (IS_ERR(skb))
+ return PTR_ERR(skb);
+
+ cmd_id = ar->wmi.cmd->wow_enable_cmdid;
+ return ath10k_wmi_cmd_send(ar, skb, cmd_id);
+}
+
+static inline int
+ath10k_wmi_wow_add_wakeup_event(struct ath10k *ar, u32 vdev_id,
+ enum wmi_wow_wakeup_event event,
+ u32 enable)
+{
+ struct sk_buff *skb;
+ u32 cmd_id;
+
+ if (!ar->wmi.ops->gen_wow_add_wakeup_event)
+ return -EOPNOTSUPP;
+
+ skb = ar->wmi.ops->gen_wow_add_wakeup_event(ar, vdev_id, event, enable);
+ if (IS_ERR(skb))
+ return PTR_ERR(skb);
+
+ cmd_id = ar->wmi.cmd->wow_enable_disable_wake_event_cmdid;
+ return ath10k_wmi_cmd_send(ar, skb, cmd_id);
+}
+
+static inline int
+ath10k_wmi_wow_host_wakeup_ind(struct ath10k *ar)
+{
+ struct sk_buff *skb;
+ u32 cmd_id;
+
+ if (!ar->wmi.ops->gen_wow_host_wakeup_ind)
+ return -EOPNOTSUPP;
+
+ skb = ar->wmi.ops->gen_wow_host_wakeup_ind(ar);
+ if (IS_ERR(skb))
+ return PTR_ERR(skb);
+
+ cmd_id = ar->wmi.cmd->wow_hostwakeup_from_sleep_cmdid;
+ return ath10k_wmi_cmd_send(ar, skb, cmd_id);
+}
+
+static inline int
+ath10k_wmi_wow_add_pattern(struct ath10k *ar, u32 vdev_id, u32 pattern_id,
+ const u8 *pattern, const u8 *mask,
+ int pattern_len, int pattern_offset)
+{
+ struct sk_buff *skb;
+ u32 cmd_id;
+
+ if (!ar->wmi.ops->gen_wow_add_pattern)
+ return -EOPNOTSUPP;
+
+ skb = ar->wmi.ops->gen_wow_add_pattern(ar, vdev_id, pattern_id,
+ pattern, mask, pattern_len,
+ pattern_offset);
+ if (IS_ERR(skb))
+ return PTR_ERR(skb);
+
+ cmd_id = ar->wmi.cmd->wow_add_wake_pattern_cmdid;
+ return ath10k_wmi_cmd_send(ar, skb, cmd_id);
+}
+
+static inline int
+ath10k_wmi_wow_del_pattern(struct ath10k *ar, u32 vdev_id, u32 pattern_id)
+{
+ struct sk_buff *skb;
+ u32 cmd_id;
+
+ if (!ar->wmi.ops->gen_wow_del_pattern)
+ return -EOPNOTSUPP;
+
+ skb = ar->wmi.ops->gen_wow_del_pattern(ar, vdev_id, pattern_id);
+ if (IS_ERR(skb))
+ return PTR_ERR(skb);
+
+ cmd_id = ar->wmi.cmd->wow_del_wake_pattern_cmdid;
+ return ath10k_wmi_cmd_send(ar, skb, cmd_id);
+}
+
+static inline int
+ath10k_wmi_update_fw_tdls_state(struct ath10k *ar, u32 vdev_id,
+ enum wmi_tdls_state state)
+{
+ struct sk_buff *skb;
+
+ if (!ar->wmi.ops->gen_update_fw_tdls_state)
+ return -EOPNOTSUPP;
+
+ skb = ar->wmi.ops->gen_update_fw_tdls_state(ar, vdev_id, state);
+ if (IS_ERR(skb))
+ return PTR_ERR(skb);
+
+ return ath10k_wmi_cmd_send(ar, skb, ar->wmi.cmd->tdls_set_state_cmdid);
+}
+
+static inline int
+ath10k_wmi_tdls_peer_update(struct ath10k *ar,
+ const struct wmi_tdls_peer_update_cmd_arg *arg,
+ const struct wmi_tdls_peer_capab_arg *cap,
+ const struct wmi_channel_arg *chan)
+{
+ struct sk_buff *skb;
+
+ if (!ar->wmi.ops->gen_tdls_peer_update)
+ return -EOPNOTSUPP;
+
+ skb = ar->wmi.ops->gen_tdls_peer_update(ar, arg, cap, chan);
+ if (IS_ERR(skb))
+ return PTR_ERR(skb);
+
+ return ath10k_wmi_cmd_send(ar, skb,
+ ar->wmi.cmd->tdls_peer_update_cmdid);
+}
+
+static inline int
+ath10k_wmi_adaptive_qcs(struct ath10k *ar, bool enable)
+{
+ struct sk_buff *skb;
+
+ if (!ar->wmi.ops->gen_adaptive_qcs)
+ return -EOPNOTSUPP;
+
+ skb = ar->wmi.ops->gen_adaptive_qcs(ar, enable);
+ if (IS_ERR(skb))
+ return PTR_ERR(skb);
+
+ return ath10k_wmi_cmd_send(ar, skb, ar->wmi.cmd->adaptive_qcs_cmdid);
+}
+
+static inline int
+ath10k_wmi_pdev_get_tpc_config(struct ath10k *ar, u32 param)
+{
+ struct sk_buff *skb;
+
+ if (!ar->wmi.ops->gen_pdev_get_tpc_config)
+ return -EOPNOTSUPP;
+
+ skb = ar->wmi.ops->gen_pdev_get_tpc_config(ar, param);
+
+ if (IS_ERR(skb))
+ return PTR_ERR(skb);
+
+ return ath10k_wmi_cmd_send(ar, skb,
+ ar->wmi.cmd->pdev_get_tpc_config_cmdid);
+}
+
+static inline int
+ath10k_wmi_fw_stats_fill(struct ath10k *ar, struct ath10k_fw_stats *fw_stats,
+ char *buf)
+{
+ if (!ar->wmi.ops->fw_stats_fill)
+ return -EOPNOTSUPP;
+
+ ar->wmi.ops->fw_stats_fill(ar, fw_stats, buf);
+ return 0;
+}
+
+static inline int
+ath10k_wmi_pdev_enable_adaptive_cca(struct ath10k *ar, u8 enable,
+ u32 detect_level, u32 detect_margin)
+{
+ struct sk_buff *skb;
+
+ if (!ar->wmi.ops->gen_pdev_enable_adaptive_cca)
+ return -EOPNOTSUPP;
+
+ skb = ar->wmi.ops->gen_pdev_enable_adaptive_cca(ar, enable,
+ detect_level,
+ detect_margin);
+
+ if (IS_ERR(skb))
+ return PTR_ERR(skb);
+
+ return ath10k_wmi_cmd_send(ar, skb,
+ ar->wmi.cmd->pdev_enable_adaptive_cca_cmdid);
+}
+
+static inline int
+ath10k_wmi_ext_resource_config(struct ath10k *ar,
+ enum wmi_host_platform_type type,
+ u32 fw_feature_bitmap)
+{
+ struct sk_buff *skb;
+
+ if (!ar->wmi.ops->ext_resource_config)
+ return -EOPNOTSUPP;
+
+ skb = ar->wmi.ops->ext_resource_config(ar, type,
+ fw_feature_bitmap);
+
+ if (IS_ERR(skb))
+ return PTR_ERR(skb);
+
+ return ath10k_wmi_cmd_send(ar, skb,
+ ar->wmi.cmd->ext_resource_cfg_cmdid);
+}
+
+static inline int
+ath10k_wmi_get_vdev_subtype(struct ath10k *ar, enum wmi_vdev_subtype subtype)
+{
+ if (!ar->wmi.ops->get_vdev_subtype)
+ return -EOPNOTSUPP;
+
+ return ar->wmi.ops->get_vdev_subtype(ar, subtype);
+}
+
+static inline int
+ath10k_wmi_pdev_bss_chan_info_request(struct ath10k *ar,
+ enum wmi_bss_survey_req_type type)
+{
+ struct ath10k_wmi *wmi = &ar->wmi;
+ struct sk_buff *skb;
+
+ if (!wmi->ops->gen_pdev_bss_chan_info_req)
+ return -EOPNOTSUPP;
+
+ skb = wmi->ops->gen_pdev_bss_chan_info_req(ar, type);
+ if (IS_ERR(skb))
+ return PTR_ERR(skb);
+
+ return ath10k_wmi_cmd_send(ar, skb,
+ wmi->cmd->pdev_bss_chan_info_request_cmdid);
+}
+
+static inline int
+ath10k_wmi_echo(struct ath10k *ar, u32 value)
+{
+ struct ath10k_wmi *wmi = &ar->wmi;
+ struct sk_buff *skb;
+
+ if (!wmi->ops->gen_echo)
+ return -EOPNOTSUPP;
+
+ skb = wmi->ops->gen_echo(ar, value);
+ if (IS_ERR(skb))
+ return PTR_ERR(skb);
+
+ return ath10k_wmi_cmd_send(ar, skb, wmi->cmd->echo_cmdid);
+}
+
+static inline int
+ath10k_wmi_pdev_get_tpc_table_cmdid(struct ath10k *ar, u32 param)
+{
+ struct sk_buff *skb;
+
+ if (!ar->wmi.ops->gen_pdev_get_tpc_table_cmdid)
+ return -EOPNOTSUPP;
+
+ skb = ar->wmi.ops->gen_pdev_get_tpc_table_cmdid(ar, param);
+
+ if (IS_ERR(skb))
+ return PTR_ERR(skb);
+
+ return ath10k_wmi_cmd_send(ar, skb,
+ ar->wmi.cmd->pdev_get_tpc_table_cmdid);
+}
+
+static inline int
+ath10k_wmi_report_radar_found(struct ath10k *ar,
+ const struct ath10k_radar_found_info *arg)
+{
+ struct sk_buff *skb;
+
+ if (!ar->wmi.ops->gen_radar_found)
+ return -EOPNOTSUPP;
+
+ skb = ar->wmi.ops->gen_radar_found(ar, arg);
+ if (IS_ERR(skb))
+ return PTR_ERR(skb);
+
+ return ath10k_wmi_cmd_send(ar, skb,
+ ar->wmi.cmd->radar_found_cmdid);
+}
+
+#endif
diff --git a/drivers/net/wireless/ath/ath10k/wmi-tlv.c b/drivers/net/wireless/ath/ath10k/wmi-tlv.c
new file mode 100644
index 000000000..243887fdb
--- /dev/null
+++ b/drivers/net/wireless/ath/ath10k/wmi-tlv.c
@@ -0,0 +1,4034 @@
+/*
+ * Copyright (c) 2005-2011 Atheros Communications Inc.
+ * Copyright (c) 2011-2017 Qualcomm Atheros, Inc.
+ * Copyright (c) 2018, The Linux Foundation. All rights reserved.
+ *
+ * Permission to use, copy, modify, and/or distribute this software for any
+ * purpose with or without fee is hereby granted, provided that the above
+ * copyright notice and this permission notice appear in all copies.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
+ * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
+ * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+ * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+ * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
+ * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+ */
+#include "core.h"
+#include "debug.h"
+#include "mac.h"
+#include "hw.h"
+#include "wmi.h"
+#include "wmi-ops.h"
+#include "wmi-tlv.h"
+#include "p2p.h"
+#include "testmode.h"
+
+/***************/
+/* TLV helpers */
+/***************/
+
+struct wmi_tlv_policy {
+ size_t min_len;
+};
+
+static const struct wmi_tlv_policy wmi_tlv_policies[] = {
+ [WMI_TLV_TAG_ARRAY_BYTE]
+ = { .min_len = 0 },
+ [WMI_TLV_TAG_ARRAY_UINT32]
+ = { .min_len = 0 },
+ [WMI_TLV_TAG_STRUCT_SCAN_EVENT]
+ = { .min_len = sizeof(struct wmi_scan_event) },
+ [WMI_TLV_TAG_STRUCT_MGMT_RX_HDR]
+ = { .min_len = sizeof(struct wmi_tlv_mgmt_rx_ev) },
+ [WMI_TLV_TAG_STRUCT_CHAN_INFO_EVENT]
+ = { .min_len = sizeof(struct wmi_chan_info_event) },
+ [WMI_TLV_TAG_STRUCT_VDEV_START_RESPONSE_EVENT]
+ = { .min_len = sizeof(struct wmi_vdev_start_response_event) },
+ [WMI_TLV_TAG_STRUCT_PEER_STA_KICKOUT_EVENT]
+ = { .min_len = sizeof(struct wmi_peer_sta_kickout_event) },
+ [WMI_TLV_TAG_STRUCT_HOST_SWBA_EVENT]
+ = { .min_len = sizeof(struct wmi_host_swba_event) },
+ [WMI_TLV_TAG_STRUCT_TIM_INFO]
+ = { .min_len = sizeof(struct wmi_tim_info) },
+ [WMI_TLV_TAG_STRUCT_P2P_NOA_INFO]
+ = { .min_len = sizeof(struct wmi_p2p_noa_info) },
+ [WMI_TLV_TAG_STRUCT_SERVICE_READY_EVENT]
+ = { .min_len = sizeof(struct wmi_tlv_svc_rdy_ev) },
+ [WMI_TLV_TAG_STRUCT_HAL_REG_CAPABILITIES]
+ = { .min_len = sizeof(struct hal_reg_capabilities) },
+ [WMI_TLV_TAG_STRUCT_WLAN_HOST_MEM_REQ]
+ = { .min_len = sizeof(struct wlan_host_mem_req) },
+ [WMI_TLV_TAG_STRUCT_READY_EVENT]
+ = { .min_len = sizeof(struct wmi_tlv_rdy_ev) },
+ [WMI_TLV_TAG_STRUCT_OFFLOAD_BCN_TX_STATUS_EVENT]
+ = { .min_len = sizeof(struct wmi_tlv_bcn_tx_status_ev) },
+ [WMI_TLV_TAG_STRUCT_DIAG_DATA_CONTAINER_EVENT]
+ = { .min_len = sizeof(struct wmi_tlv_diag_data_ev) },
+ [WMI_TLV_TAG_STRUCT_P2P_NOA_EVENT]
+ = { .min_len = sizeof(struct wmi_tlv_p2p_noa_ev) },
+ [WMI_TLV_TAG_STRUCT_ROAM_EVENT]
+ = { .min_len = sizeof(struct wmi_tlv_roam_ev) },
+ [WMI_TLV_TAG_STRUCT_WOW_EVENT_INFO]
+ = { .min_len = sizeof(struct wmi_tlv_wow_event_info) },
+ [WMI_TLV_TAG_STRUCT_TX_PAUSE_EVENT]
+ = { .min_len = sizeof(struct wmi_tlv_tx_pause_ev) },
+};
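+
+/* The iterator below checks every TLV against this table before any event
+ * handler sees it: a TLV whose payload is shorter than the min_len
+ * registered for its tag is rejected, so a handler that looks a tag up in
+ * the parsed tb[] table may safely cast the pointer to the corresponding
+ * structure.
+ */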
+
+static int
+ath10k_wmi_tlv_iter(struct ath10k *ar, const void *ptr, size_t len,
+ int (*iter)(struct ath10k *ar, u16 tag, u16 len,
+ const void *ptr, void *data),
+ void *data)
+{
+ const void *begin = ptr;
+ const struct wmi_tlv *tlv;
+ u16 tlv_tag, tlv_len;
+ int ret;
+
+ while (len > 0) {
+ if (len < sizeof(*tlv)) {
+ ath10k_dbg(ar, ATH10K_DBG_WMI,
+ "wmi tlv parse failure at byte %zd (%zu bytes left, %zu expected)\n",
+ ptr - begin, len, sizeof(*tlv));
+ return -EINVAL;
+ }
+
+ tlv = ptr;
+ tlv_tag = __le16_to_cpu(tlv->tag);
+ tlv_len = __le16_to_cpu(tlv->len);
+ ptr += sizeof(*tlv);
+ len -= sizeof(*tlv);
+
+ if (tlv_len > len) {
+ ath10k_dbg(ar, ATH10K_DBG_WMI,
+ "wmi tlv parse failure of tag %u at byte %zd (%zu bytes left, %u expected)\n",
+ tlv_tag, ptr - begin, len, tlv_len);
+ return -EINVAL;
+ }
+
+ if (tlv_tag < ARRAY_SIZE(wmi_tlv_policies) &&
+ wmi_tlv_policies[tlv_tag].min_len &&
+ wmi_tlv_policies[tlv_tag].min_len > tlv_len) {
+ ath10k_dbg(ar, ATH10K_DBG_WMI,
+ "wmi tlv parse failure of tag %u at byte %zd (%u bytes is less than min length %zu)\n",
+ tlv_tag, ptr - begin, tlv_len,
+ wmi_tlv_policies[tlv_tag].min_len);
+ return -EINVAL;
+ }
+
+ ret = iter(ar, tlv_tag, tlv_len, ptr, data);
+ if (ret)
+ return ret;
+
+ ptr += tlv_len;
+ len -= tlv_len;
+ }
+
+ return 0;
+}
+
+static int ath10k_wmi_tlv_iter_parse(struct ath10k *ar, u16 tag, u16 len,
+ const void *ptr, void *data)
+{
+ const void **tb = data;
+
+ if (tag < WMI_TLV_TAG_MAX)
+ tb[tag] = ptr;
+
+ return 0;
+}
+
+static int ath10k_wmi_tlv_parse(struct ath10k *ar, const void **tb,
+ const void *ptr, size_t len)
+{
+ return ath10k_wmi_tlv_iter(ar, ptr, len, ath10k_wmi_tlv_iter_parse,
+ (void *)tb);
+}
+
+static const void **
+ath10k_wmi_tlv_parse_alloc(struct ath10k *ar, const void *ptr,
+ size_t len, gfp_t gfp)
+{
+ const void **tb;
+ int ret;
+
+ tb = kcalloc(WMI_TLV_TAG_MAX, sizeof(*tb), gfp);
+ if (!tb)
+ return ERR_PTR(-ENOMEM);
+
+ ret = ath10k_wmi_tlv_parse(ar, tb, ptr, len);
+ if (ret) {
+ kfree(tb);
+ return ERR_PTR(ret);
+ }
+
+ return tb;
+}
+
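+/* Given a pointer to a TLV payload (e.g. a tb[] entry), read the length
+ * field from the header that immediately precedes it.
+ */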
+static u16 ath10k_wmi_tlv_len(const void *ptr)
+{
+ return __le16_to_cpu((((const struct wmi_tlv *)ptr) - 1)->len);
+}
+
+/**************/
+/* TLV events */
+/**************/
+
+static int ath10k_wmi_tlv_event_bcn_tx_status(struct ath10k *ar,
+ struct sk_buff *skb)
+{
+ const void **tb;
+ const struct wmi_tlv_bcn_tx_status_ev *ev;
+ struct ath10k_vif *arvif;
+ u32 vdev_id, tx_status;
+ int ret;
+
+ tb = ath10k_wmi_tlv_parse_alloc(ar, skb->data, skb->len, GFP_ATOMIC);
+ if (IS_ERR(tb)) {
+ ret = PTR_ERR(tb);
+ ath10k_warn(ar, "failed to parse tlv: %d\n", ret);
+ return ret;
+ }
+
+ ev = tb[WMI_TLV_TAG_STRUCT_OFFLOAD_BCN_TX_STATUS_EVENT];
+ if (!ev) {
+ kfree(tb);
+ return -EPROTO;
+ }
+
+ tx_status = __le32_to_cpu(ev->tx_status);
+ vdev_id = __le32_to_cpu(ev->vdev_id);
+
+ switch (tx_status) {
+ case WMI_TLV_BCN_TX_STATUS_OK:
+ break;
+ case WMI_TLV_BCN_TX_STATUS_XRETRY:
+ case WMI_TLV_BCN_TX_STATUS_DROP:
+ case WMI_TLV_BCN_TX_STATUS_FILTERED:
+ /* FIXME: It's probably worth telling mac80211 to stop the
+ * interface as it is crippled.
+ */
+ ath10k_warn(ar, "received bcn tmpl tx status on vdev %i: %d",
+ vdev_id, tx_status);
+ break;
+ }
+
+ arvif = ath10k_get_arvif(ar, vdev_id);
+ if (arvif && arvif->is_up && arvif->vif->csa_active)
+ ieee80211_queue_work(ar->hw, &arvif->ap_csa_work);
+
+ kfree(tb);
+ return 0;
+}
+
+static int ath10k_wmi_tlv_event_diag_data(struct ath10k *ar,
+ struct sk_buff *skb)
+{
+ const void **tb;
+ const struct wmi_tlv_diag_data_ev *ev;
+ const struct wmi_tlv_diag_item *item;
+ const void *data;
+ int ret, num_items, len;
+
+ tb = ath10k_wmi_tlv_parse_alloc(ar, skb->data, skb->len, GFP_ATOMIC);
+ if (IS_ERR(tb)) {
+ ret = PTR_ERR(tb);
+ ath10k_warn(ar, "failed to parse tlv: %d\n", ret);
+ return ret;
+ }
+
+ ev = tb[WMI_TLV_TAG_STRUCT_DIAG_DATA_CONTAINER_EVENT];
+ data = tb[WMI_TLV_TAG_ARRAY_BYTE];
+ if (!ev || !data) {
+ kfree(tb);
+ return -EPROTO;
+ }
+
+ num_items = __le32_to_cpu(ev->num_items);
+ len = ath10k_wmi_tlv_len(data);
+
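+ /* Each diag item is a fixed header followed by a payload padded to
+ * a 4 byte boundary; walk the byte array until either the advertised
+ * item count or the data is exhausted.
+ */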
+ while (num_items--) {
+ if (len == 0)
+ break;
+ if (len < sizeof(*item)) {
+ ath10k_warn(ar, "failed to parse diag data: can't fit item header\n");
+ break;
+ }
+
+ item = data;
+
+ if (len < sizeof(*item) + __le16_to_cpu(item->len)) {
+ ath10k_warn(ar, "failed to parse diag data: item is too long\n");
+ break;
+ }
+
+ trace_ath10k_wmi_diag_container(ar,
+ item->type,
+ __le32_to_cpu(item->timestamp),
+ __le32_to_cpu(item->code),
+ __le16_to_cpu(item->len),
+ item->payload);
+
+ len -= sizeof(*item);
+ len -= roundup(__le16_to_cpu(item->len), 4);
+
+ data += sizeof(*item);
+ data += roundup(__le16_to_cpu(item->len), 4);
+ }
+
+ if (num_items != -1 || len != 0)
+ ath10k_warn(ar, "failed to parse diag data event: num_items %d len %d\n",
+ num_items, len);
+
+ kfree(tb);
+ return 0;
+}
+
+static int ath10k_wmi_tlv_event_diag(struct ath10k *ar,
+ struct sk_buff *skb)
+{
+ const void **tb;
+ const void *data;
+ int ret, len;
+
+ tb = ath10k_wmi_tlv_parse_alloc(ar, skb->data, skb->len, GFP_ATOMIC);
+ if (IS_ERR(tb)) {
+ ret = PTR_ERR(tb);
+ ath10k_warn(ar, "failed to parse tlv: %d\n", ret);
+ return ret;
+ }
+
+ data = tb[WMI_TLV_TAG_ARRAY_BYTE];
+ if (!data) {
+ kfree(tb);
+ return -EPROTO;
+ }
+ len = ath10k_wmi_tlv_len(data);
+
+ ath10k_dbg(ar, ATH10K_DBG_WMI, "wmi tlv diag event len %d\n", len);
+ trace_ath10k_wmi_diag(ar, data, len);
+
+ kfree(tb);
+ return 0;
+}
+
+static int ath10k_wmi_tlv_event_p2p_noa(struct ath10k *ar,
+ struct sk_buff *skb)
+{
+ const void **tb;
+ const struct wmi_tlv_p2p_noa_ev *ev;
+ const struct wmi_p2p_noa_info *noa;
+ int ret, vdev_id;
+
+ tb = ath10k_wmi_tlv_parse_alloc(ar, skb->data, skb->len, GFP_ATOMIC);
+ if (IS_ERR(tb)) {
+ ret = PTR_ERR(tb);
+ ath10k_warn(ar, "failed to parse tlv: %d\n", ret);
+ return ret;
+ }
+
+ ev = tb[WMI_TLV_TAG_STRUCT_P2P_NOA_EVENT];
+ noa = tb[WMI_TLV_TAG_STRUCT_P2P_NOA_INFO];
+
+ if (!ev || !noa) {
+ kfree(tb);
+ return -EPROTO;
+ }
+
+ vdev_id = __le32_to_cpu(ev->vdev_id);
+
+ ath10k_dbg(ar, ATH10K_DBG_WMI,
+ "wmi tlv p2p noa vdev_id %i descriptors %hhu\n",
+ vdev_id, noa->num_descriptors);
+
+ ath10k_p2p_noa_update_by_vdev_id(ar, vdev_id, noa);
+ kfree(tb);
+ return 0;
+}
+
+static int ath10k_wmi_tlv_event_tx_pause(struct ath10k *ar,
+ struct sk_buff *skb)
+{
+ const void **tb;
+ const struct wmi_tlv_tx_pause_ev *ev;
+ int ret, vdev_id;
+ u32 pause_id, action, vdev_map, peer_id, tid_map;
+
+ tb = ath10k_wmi_tlv_parse_alloc(ar, skb->data, skb->len, GFP_ATOMIC);
+ if (IS_ERR(tb)) {
+ ret = PTR_ERR(tb);
+ ath10k_warn(ar, "failed to parse tlv: %d\n", ret);
+ return ret;
+ }
+
+ ev = tb[WMI_TLV_TAG_STRUCT_TX_PAUSE_EVENT];
+ if (!ev) {
+ kfree(tb);
+ return -EPROTO;
+ }
+
+ pause_id = __le32_to_cpu(ev->pause_id);
+ action = __le32_to_cpu(ev->action);
+ vdev_map = __le32_to_cpu(ev->vdev_map);
+ peer_id = __le32_to_cpu(ev->peer_id);
+ tid_map = __le32_to_cpu(ev->tid_map);
+
+ ath10k_dbg(ar, ATH10K_DBG_WMI,
+ "wmi tlv tx pause pause_id %u action %u vdev_map 0x%08x peer_id %u tid_map 0x%08x\n",
+ pause_id, action, vdev_map, peer_id, tid_map);
+
+ switch (pause_id) {
+ case WMI_TLV_TX_PAUSE_ID_MCC:
+ case WMI_TLV_TX_PAUSE_ID_P2P_CLI_NOA:
+ case WMI_TLV_TX_PAUSE_ID_P2P_GO_PS:
+ case WMI_TLV_TX_PAUSE_ID_AP_PS:
+ case WMI_TLV_TX_PAUSE_ID_IBSS_PS:
+ for (vdev_id = 0; vdev_map; vdev_id++) {
+ if (!(vdev_map & BIT(vdev_id)))
+ continue;
+
+ vdev_map &= ~BIT(vdev_id);
+ ath10k_mac_handle_tx_pause_vdev(ar, vdev_id, pause_id,
+ action);
+ }
+ break;
+ case WMI_TLV_TX_PAUSE_ID_AP_PEER_PS:
+ case WMI_TLV_TX_PAUSE_ID_AP_PEER_UAPSD:
+ case WMI_TLV_TX_PAUSE_ID_STA_ADD_BA:
+ case WMI_TLV_TX_PAUSE_ID_HOST:
+ ath10k_dbg(ar, ATH10K_DBG_MAC,
+ "mac ignoring unsupported tx pause id %d\n",
+ pause_id);
+ break;
+ default:
+ ath10k_dbg(ar, ATH10K_DBG_MAC,
+ "mac ignoring unknown tx pause id %d\n",
+ pause_id);
+ break;
+ }
+
+ kfree(tb);
+ return 0;
+}
+
+static int ath10k_wmi_tlv_event_temperature(struct ath10k *ar,
+ struct sk_buff *skb)
+{
+ const struct wmi_tlv_pdev_temperature_event *ev;
+
+ ev = (struct wmi_tlv_pdev_temperature_event *)skb->data;
+ if (WARN_ON(skb->len < sizeof(*ev)))
+ return -EPROTO;
+
+ ath10k_thermal_event_temperature(ar, __le32_to_cpu(ev->temperature));
+ return 0;
+}
+
+static void ath10k_wmi_event_tdls_peer(struct ath10k *ar, struct sk_buff *skb)
+{
+ struct ieee80211_sta *station;
+ const struct wmi_tlv_tdls_peer_event *ev;
+ const void **tb;
+ struct ath10k_vif *arvif;
+
+ tb = ath10k_wmi_tlv_parse_alloc(ar, skb->data, skb->len, GFP_ATOMIC);
+ if (IS_ERR(tb)) {
+ ath10k_warn(ar, "tdls peer failed to parse tlv");
+ return;
+ }
+ ev = tb[WMI_TLV_TAG_STRUCT_TDLS_PEER_EVENT];
+ if (!ev) {
+ kfree(tb);
+ ath10k_warn(ar, "tdls peer NULL event");
+ return;
+ }
+
+ switch (__le32_to_cpu(ev->peer_reason)) {
+ case WMI_TDLS_TEARDOWN_REASON_TX:
+ case WMI_TDLS_TEARDOWN_REASON_RSSI:
+ case WMI_TDLS_TEARDOWN_REASON_PTR_TIMEOUT:
+ rcu_read_lock();
+ station = ieee80211_find_sta_by_ifaddr(ar->hw,
+ ev->peer_macaddr.addr,
+ NULL);
+ if (!station) {
+ ath10k_warn(ar, "did not find station from tdls peer event");
+ goto exit;
+ }
+ arvif = ath10k_get_arvif(ar, __le32_to_cpu(ev->vdev_id));
+ if (!arvif) {
+ ath10k_warn(ar, "no vif for vdev_id %d found\n",
+ __le32_to_cpu(ev->vdev_id));
+ goto exit;
+ }
+ ieee80211_tdls_oper_request(
+ arvif->vif, station->addr,
+ NL80211_TDLS_TEARDOWN,
+ WLAN_REASON_TDLS_TEARDOWN_UNREACHABLE,
+ GFP_ATOMIC
+ );
+ break;
+ default:
+ kfree(tb);
+ return;
+ }
+
+exit:
+ rcu_read_unlock();
+ kfree(tb);
+}
+
+/***********/
+/* TLV ops */
+/***********/
+
+static void ath10k_wmi_tlv_op_rx(struct ath10k *ar, struct sk_buff *skb)
+{
+ struct wmi_cmd_hdr *cmd_hdr;
+ enum wmi_tlv_event_id id;
+ bool consumed;
+
+ cmd_hdr = (struct wmi_cmd_hdr *)skb->data;
+ id = MS(__le32_to_cpu(cmd_hdr->cmd_id), WMI_CMD_HDR_CMD_ID);
+
+ if (skb_pull(skb, sizeof(struct wmi_cmd_hdr)) == NULL)
+ goto out;
+
+ trace_ath10k_wmi_event(ar, id, skb->data, skb->len);
+
+ consumed = ath10k_tm_event_wmi(ar, id, skb);
+
+ /* The ready event must be handled normally even in UTF mode so that
+ * we know the UTF firmware has booted; all other events are simply
+ * passed through to testmode.
+ */
+ if (consumed && id != WMI_TLV_READY_EVENTID) {
+ ath10k_dbg(ar, ATH10K_DBG_WMI,
+ "wmi tlv testmode consumed 0x%x\n", id);
+ goto out;
+ }
+
+ switch (id) {
+ case WMI_TLV_MGMT_RX_EVENTID:
+ ath10k_wmi_event_mgmt_rx(ar, skb);
+ /* mgmt_rx() owns the skb now! */
+ return;
+ case WMI_TLV_SCAN_EVENTID:
+ ath10k_wmi_event_scan(ar, skb);
+ break;
+ case WMI_TLV_CHAN_INFO_EVENTID:
+ ath10k_wmi_event_chan_info(ar, skb);
+ break;
+ case WMI_TLV_ECHO_EVENTID:
+ ath10k_wmi_event_echo(ar, skb);
+ break;
+ case WMI_TLV_DEBUG_MESG_EVENTID:
+ ath10k_wmi_event_debug_mesg(ar, skb);
+ break;
+ case WMI_TLV_UPDATE_STATS_EVENTID:
+ ath10k_wmi_event_update_stats(ar, skb);
+ break;
+ case WMI_TLV_VDEV_START_RESP_EVENTID:
+ ath10k_wmi_event_vdev_start_resp(ar, skb);
+ break;
+ case WMI_TLV_VDEV_STOPPED_EVENTID:
+ ath10k_wmi_event_vdev_stopped(ar, skb);
+ break;
+ case WMI_TLV_PEER_STA_KICKOUT_EVENTID:
+ ath10k_wmi_event_peer_sta_kickout(ar, skb);
+ break;
+ case WMI_TLV_HOST_SWBA_EVENTID:
+ ath10k_wmi_event_host_swba(ar, skb);
+ break;
+ case WMI_TLV_TBTTOFFSET_UPDATE_EVENTID:
+ ath10k_wmi_event_tbttoffset_update(ar, skb);
+ break;
+ case WMI_TLV_PHYERR_EVENTID:
+ ath10k_wmi_event_phyerr(ar, skb);
+ break;
+ case WMI_TLV_ROAM_EVENTID:
+ ath10k_wmi_event_roam(ar, skb);
+ break;
+ case WMI_TLV_PROFILE_MATCH:
+ ath10k_wmi_event_profile_match(ar, skb);
+ break;
+ case WMI_TLV_DEBUG_PRINT_EVENTID:
+ ath10k_wmi_event_debug_print(ar, skb);
+ break;
+ case WMI_TLV_PDEV_QVIT_EVENTID:
+ ath10k_wmi_event_pdev_qvit(ar, skb);
+ break;
+ case WMI_TLV_WLAN_PROFILE_DATA_EVENTID:
+ ath10k_wmi_event_wlan_profile_data(ar, skb);
+ break;
+ case WMI_TLV_RTT_MEASUREMENT_REPORT_EVENTID:
+ ath10k_wmi_event_rtt_measurement_report(ar, skb);
+ break;
+ case WMI_TLV_TSF_MEASUREMENT_REPORT_EVENTID:
+ ath10k_wmi_event_tsf_measurement_report(ar, skb);
+ break;
+ case WMI_TLV_RTT_ERROR_REPORT_EVENTID:
+ ath10k_wmi_event_rtt_error_report(ar, skb);
+ break;
+ case WMI_TLV_WOW_WAKEUP_HOST_EVENTID:
+ ath10k_wmi_event_wow_wakeup_host(ar, skb);
+ break;
+ case WMI_TLV_DCS_INTERFERENCE_EVENTID:
+ ath10k_wmi_event_dcs_interference(ar, skb);
+ break;
+ case WMI_TLV_PDEV_TPC_CONFIG_EVENTID:
+ ath10k_wmi_event_pdev_tpc_config(ar, skb);
+ break;
+ case WMI_TLV_PDEV_FTM_INTG_EVENTID:
+ ath10k_wmi_event_pdev_ftm_intg(ar, skb);
+ break;
+ case WMI_TLV_GTK_OFFLOAD_STATUS_EVENTID:
+ ath10k_wmi_event_gtk_offload_status(ar, skb);
+ break;
+ case WMI_TLV_GTK_REKEY_FAIL_EVENTID:
+ ath10k_wmi_event_gtk_rekey_fail(ar, skb);
+ break;
+ case WMI_TLV_TX_DELBA_COMPLETE_EVENTID:
+ ath10k_wmi_event_delba_complete(ar, skb);
+ break;
+ case WMI_TLV_TX_ADDBA_COMPLETE_EVENTID:
+ ath10k_wmi_event_addba_complete(ar, skb);
+ break;
+ case WMI_TLV_VDEV_INSTALL_KEY_COMPLETE_EVENTID:
+ ath10k_wmi_event_vdev_install_key_complete(ar, skb);
+ break;
+ case WMI_TLV_SERVICE_READY_EVENTID:
+ ath10k_wmi_event_service_ready(ar, skb);
+ return;
+ case WMI_TLV_READY_EVENTID:
+ ath10k_wmi_event_ready(ar, skb);
+ break;
+ case WMI_TLV_SERVICE_AVAILABLE_EVENTID:
+ ath10k_wmi_event_service_available(ar, skb);
+ break;
+ case WMI_TLV_OFFLOAD_BCN_TX_STATUS_EVENTID:
+ ath10k_wmi_tlv_event_bcn_tx_status(ar, skb);
+ break;
+ case WMI_TLV_DIAG_DATA_CONTAINER_EVENTID:
+ ath10k_wmi_tlv_event_diag_data(ar, skb);
+ break;
+ case WMI_TLV_DIAG_EVENTID:
+ ath10k_wmi_tlv_event_diag(ar, skb);
+ break;
+ case WMI_TLV_P2P_NOA_EVENTID:
+ ath10k_wmi_tlv_event_p2p_noa(ar, skb);
+ break;
+ case WMI_TLV_TX_PAUSE_EVENTID:
+ ath10k_wmi_tlv_event_tx_pause(ar, skb);
+ break;
+ case WMI_TLV_PDEV_TEMPERATURE_EVENTID:
+ ath10k_wmi_tlv_event_temperature(ar, skb);
+ break;
+ case WMI_TLV_TDLS_PEER_EVENTID:
+ ath10k_wmi_event_tdls_peer(ar, skb);
+ break;
+ case WMI_TLV_MGMT_TX_COMPLETION_EVENTID:
+ ath10k_wmi_event_mgmt_tx_compl(ar, skb);
+ break;
+ default:
+ ath10k_warn(ar, "Unknown eventid: %d\n", id);
+ break;
+ }
+
+out:
+ dev_kfree_skb(skb);
+}
+
+static int ath10k_wmi_tlv_op_pull_scan_ev(struct ath10k *ar,
+ struct sk_buff *skb,
+ struct wmi_scan_ev_arg *arg)
+{
+ const void **tb;
+ const struct wmi_scan_event *ev;
+ int ret;
+
+ tb = ath10k_wmi_tlv_parse_alloc(ar, skb->data, skb->len, GFP_ATOMIC);
+ if (IS_ERR(tb)) {
+ ret = PTR_ERR(tb);
+ ath10k_warn(ar, "failed to parse tlv: %d\n", ret);
+ return ret;
+ }
+
+ ev = tb[WMI_TLV_TAG_STRUCT_SCAN_EVENT];
+ if (!ev) {
+ kfree(tb);
+ return -EPROTO;
+ }
+
+ arg->event_type = ev->event_type;
+ arg->reason = ev->reason;
+ arg->channel_freq = ev->channel_freq;
+ arg->scan_req_id = ev->scan_req_id;
+ arg->scan_id = ev->scan_id;
+ arg->vdev_id = ev->vdev_id;
+
+ kfree(tb);
+ return 0;
+}
+
+static int
+ath10k_wmi_tlv_op_pull_mgmt_tx_compl_ev(struct ath10k *ar, struct sk_buff *skb,
+ struct wmi_tlv_mgmt_tx_compl_ev_arg *arg)
+{
+ const void **tb;
+ const struct wmi_tlv_mgmt_tx_compl_ev *ev;
+ int ret;
+
+ tb = ath10k_wmi_tlv_parse_alloc(ar, skb->data, skb->len, GFP_ATOMIC);
+ if (IS_ERR(tb)) {
+ ret = PTR_ERR(tb);
+ ath10k_warn(ar, "failed to parse tlv: %d\n", ret);
+ return ret;
+ }
+
+ ev = tb[WMI_TLV_TAG_STRUCT_MGMT_TX_COMPL_EVENT];
+ if (!ev) {
+ kfree(tb);
+ return -EPROTO;
+ }
+
+ arg->desc_id = ev->desc_id;
+ arg->status = ev->status;
+ arg->pdev_id = ev->pdev_id;
+
+ kfree(tb);
+ return 0;
+}
+
+static int ath10k_wmi_tlv_op_pull_mgmt_rx_ev(struct ath10k *ar,
+ struct sk_buff *skb,
+ struct wmi_mgmt_rx_ev_arg *arg)
+{
+ const void **tb;
+ const struct wmi_tlv_mgmt_rx_ev *ev;
+ const u8 *frame;
+ u32 msdu_len;
+ int ret;
+
+ tb = ath10k_wmi_tlv_parse_alloc(ar, skb->data, skb->len, GFP_ATOMIC);
+ if (IS_ERR(tb)) {
+ ret = PTR_ERR(tb);
+ ath10k_warn(ar, "failed to parse tlv: %d\n", ret);
+ return ret;
+ }
+
+ ev = tb[WMI_TLV_TAG_STRUCT_MGMT_RX_HDR];
+ frame = tb[WMI_TLV_TAG_ARRAY_BYTE];
+
+ if (!ev || !frame) {
+ kfree(tb);
+ return -EPROTO;
+ }
+
+ arg->channel = ev->channel;
+ arg->buf_len = ev->buf_len;
+ arg->status = ev->status;
+ arg->snr = ev->snr;
+ arg->phy_mode = ev->phy_mode;
+ arg->rate = ev->rate;
+
+ msdu_len = __le32_to_cpu(arg->buf_len);
+
+ if (skb->len < (frame - skb->data) + msdu_len) {
+ kfree(tb);
+ return -EPROTO;
+ }
+
+ /* Shift the sk_buff so that skb->data points at the management
+ * frame payload and skb->len covers exactly msdu_len bytes.
+ */
+ skb_trim(skb, 0);
+ skb_put(skb, frame - skb->data);
+ skb_pull(skb, frame - skb->data);
+ skb_put(skb, msdu_len);
+
+ kfree(tb);
+ return 0;
+}
+
+static int ath10k_wmi_tlv_op_pull_ch_info_ev(struct ath10k *ar,
+ struct sk_buff *skb,
+ struct wmi_ch_info_ev_arg *arg)
+{
+ const void **tb;
+ const struct wmi_chan_info_event *ev;
+ int ret;
+
+ tb = ath10k_wmi_tlv_parse_alloc(ar, skb->data, skb->len, GFP_ATOMIC);
+ if (IS_ERR(tb)) {
+ ret = PTR_ERR(tb);
+ ath10k_warn(ar, "failed to parse tlv: %d\n", ret);
+ return ret;
+ }
+
+ ev = tb[WMI_TLV_TAG_STRUCT_CHAN_INFO_EVENT];
+ if (!ev) {
+ kfree(tb);
+ return -EPROTO;
+ }
+
+ arg->err_code = ev->err_code;
+ arg->freq = ev->freq;
+ arg->cmd_flags = ev->cmd_flags;
+ arg->noise_floor = ev->noise_floor;
+ arg->rx_clear_count = ev->rx_clear_count;
+ arg->cycle_count = ev->cycle_count;
+
+ kfree(tb);
+ return 0;
+}
+
+static int
+ath10k_wmi_tlv_op_pull_vdev_start_ev(struct ath10k *ar, struct sk_buff *skb,
+ struct wmi_vdev_start_ev_arg *arg)
+{
+ const void **tb;
+ const struct wmi_vdev_start_response_event *ev;
+ int ret;
+
+ tb = ath10k_wmi_tlv_parse_alloc(ar, skb->data, skb->len, GFP_ATOMIC);
+ if (IS_ERR(tb)) {
+ ret = PTR_ERR(tb);
+ ath10k_warn(ar, "failed to parse tlv: %d\n", ret);
+ return ret;
+ }
+
+ ev = tb[WMI_TLV_TAG_STRUCT_VDEV_START_RESPONSE_EVENT];
+ if (!ev) {
+ kfree(tb);
+ return -EPROTO;
+ }
+
+ skb_pull(skb, sizeof(*ev));
+ arg->vdev_id = ev->vdev_id;
+ arg->req_id = ev->req_id;
+ arg->resp_type = ev->resp_type;
+ arg->status = ev->status;
+
+ kfree(tb);
+ return 0;
+}
+
+static int ath10k_wmi_tlv_op_pull_peer_kick_ev(struct ath10k *ar,
+ struct sk_buff *skb,
+ struct wmi_peer_kick_ev_arg *arg)
+{
+ const void **tb;
+ const struct wmi_peer_sta_kickout_event *ev;
+ int ret;
+
+ tb = ath10k_wmi_tlv_parse_alloc(ar, skb->data, skb->len, GFP_ATOMIC);
+ if (IS_ERR(tb)) {
+ ret = PTR_ERR(tb);
+ ath10k_warn(ar, "failed to parse tlv: %d\n", ret);
+ return ret;
+ }
+
+ ev = tb[WMI_TLV_TAG_STRUCT_PEER_STA_KICKOUT_EVENT];
+ if (!ev) {
+ kfree(tb);
+ return -EPROTO;
+ }
+
+ arg->mac_addr = ev->peer_macaddr.addr;
+
+ kfree(tb);
+ return 0;
+}
+
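+/* A host SWBA event is a fixed structure followed by two TLV arrays:
+ * first the per-vdev TIM info structures, then the per-vdev P2P NoA
+ * info structures. The tim_done/noa_done flags track which of the two
+ * (otherwise identically tagged) arrays is being parsed.
+ */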
+struct wmi_tlv_swba_parse {
+ const struct wmi_host_swba_event *ev;
+ bool tim_done;
+ bool noa_done;
+ size_t n_tim;
+ size_t n_noa;
+ struct wmi_swba_ev_arg *arg;
+};
+
+static int ath10k_wmi_tlv_swba_tim_parse(struct ath10k *ar, u16 tag, u16 len,
+ const void *ptr, void *data)
+{
+ struct wmi_tlv_swba_parse *swba = data;
+ struct wmi_tim_info_arg *tim_info_arg;
+ const struct wmi_tim_info *tim_info_ev = ptr;
+
+ if (tag != WMI_TLV_TAG_STRUCT_TIM_INFO)
+ return -EPROTO;
+
+ if (swba->n_tim >= ARRAY_SIZE(swba->arg->tim_info))
+ return -ENOBUFS;
+
+ if (__le32_to_cpu(tim_info_ev->tim_len) >
+ sizeof(tim_info_ev->tim_bitmap)) {
+ ath10k_warn(ar, "refusing to parse invalid swba structure\n");
+ return -EPROTO;
+ }
+
+ tim_info_arg = &swba->arg->tim_info[swba->n_tim];
+ tim_info_arg->tim_len = tim_info_ev->tim_len;
+ tim_info_arg->tim_mcast = tim_info_ev->tim_mcast;
+ tim_info_arg->tim_bitmap = tim_info_ev->tim_bitmap;
+ tim_info_arg->tim_changed = tim_info_ev->tim_changed;
+ tim_info_arg->tim_num_ps_pending = tim_info_ev->tim_num_ps_pending;
+
+ swba->n_tim++;
+
+ return 0;
+}
+
+static int ath10k_wmi_tlv_swba_noa_parse(struct ath10k *ar, u16 tag, u16 len,
+ const void *ptr, void *data)
+{
+ struct wmi_tlv_swba_parse *swba = data;
+
+ if (tag != WMI_TLV_TAG_STRUCT_P2P_NOA_INFO)
+ return -EPROTO;
+
+ if (swba->n_noa >= ARRAY_SIZE(swba->arg->noa_info))
+ return -ENOBUFS;
+
+ swba->arg->noa_info[swba->n_noa++] = ptr;
+ return 0;
+}
+
+static int ath10k_wmi_tlv_swba_parse(struct ath10k *ar, u16 tag, u16 len,
+ const void *ptr, void *data)
+{
+ struct wmi_tlv_swba_parse *swba = data;
+ int ret;
+
+ switch (tag) {
+ case WMI_TLV_TAG_STRUCT_HOST_SWBA_EVENT:
+ swba->ev = ptr;
+ break;
+ case WMI_TLV_TAG_ARRAY_STRUCT:
+ if (!swba->tim_done) {
+ swba->tim_done = true;
+ ret = ath10k_wmi_tlv_iter(ar, ptr, len,
+ ath10k_wmi_tlv_swba_tim_parse,
+ swba);
+ if (ret)
+ return ret;
+ } else if (!swba->noa_done) {
+ swba->noa_done = true;
+ ret = ath10k_wmi_tlv_iter(ar, ptr, len,
+ ath10k_wmi_tlv_swba_noa_parse,
+ swba);
+ if (ret)
+ return ret;
+ }
+ break;
+ default:
+ break;
+ }
+ return 0;
+}
+
+static int ath10k_wmi_tlv_op_pull_swba_ev(struct ath10k *ar,
+ struct sk_buff *skb,
+ struct wmi_swba_ev_arg *arg)
+{
+ struct wmi_tlv_swba_parse swba = { .arg = arg };
+ u32 map;
+ size_t n_vdevs;
+ int ret;
+
+ ret = ath10k_wmi_tlv_iter(ar, skb->data, skb->len,
+ ath10k_wmi_tlv_swba_parse, &swba);
+ if (ret) {
+ ath10k_warn(ar, "failed to parse tlv: %d\n", ret);
+ return ret;
+ }
+
+ if (!swba.ev)
+ return -EPROTO;
+
+ arg->vdev_map = swba.ev->vdev_map;
+
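+ /* Count the vdevs in the map; firmware must have provided exactly
+ * one TIM info and one NoA info per beaconing vdev.
+ */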
+ for (map = __le32_to_cpu(arg->vdev_map), n_vdevs = 0; map; map >>= 1)
+ if (map & BIT(0))
+ n_vdevs++;
+
+ if (n_vdevs != swba.n_tim ||
+ n_vdevs != swba.n_noa)
+ return -EPROTO;
+
+ return 0;
+}
+
+static int ath10k_wmi_tlv_op_pull_phyerr_ev_hdr(struct ath10k *ar,
+ struct sk_buff *skb,
+ struct wmi_phyerr_hdr_arg *arg)
+{
+ const void **tb;
+ const struct wmi_tlv_phyerr_ev *ev;
+ const void *phyerrs;
+ int ret;
+
+ tb = ath10k_wmi_tlv_parse_alloc(ar, skb->data, skb->len, GFP_ATOMIC);
+ if (IS_ERR(tb)) {
+ ret = PTR_ERR(tb);
+ ath10k_warn(ar, "failed to parse tlv: %d\n", ret);
+ return ret;
+ }
+
+ ev = tb[WMI_TLV_TAG_STRUCT_COMB_PHYERR_RX_HDR];
+ phyerrs = tb[WMI_TLV_TAG_ARRAY_BYTE];
+
+ if (!ev || !phyerrs) {
+ kfree(tb);
+ return -EPROTO;
+ }
+
+ arg->num_phyerrs = __le32_to_cpu(ev->num_phyerrs);
+ arg->tsf_l32 = __le32_to_cpu(ev->tsf_l32);
+ arg->tsf_u32 = __le32_to_cpu(ev->tsf_u32);
+ arg->buf_len = __le32_to_cpu(ev->buf_len);
+ arg->phyerrs = phyerrs;
+
+ kfree(tb);
+ return 0;
+}
+
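+/* Expected firmware ABI for the WMI TLV interface. The namespace words
+ * appear to spell "QCA_ML" when read as little endian ASCII, though
+ * that is an observation, not anything the firmware interface
+ * documents.
+ */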
+#define WMI_TLV_ABI_VER_NS0 0x5F414351
+#define WMI_TLV_ABI_VER_NS1 0x00004C4D
+#define WMI_TLV_ABI_VER_NS2 0x00000000
+#define WMI_TLV_ABI_VER_NS3 0x00000000
+
+#define WMI_TLV_ABI_VER0_MAJOR 1
+#define WMI_TLV_ABI_VER0_MINOR 0
+#define WMI_TLV_ABI_VER0 ((((WMI_TLV_ABI_VER0_MAJOR) << 24) & 0xFF000000) | \
+ (((WMI_TLV_ABI_VER0_MINOR) << 0) & 0x00FFFFFF))
+#define WMI_TLV_ABI_VER1 53
+
+static int
+ath10k_wmi_tlv_parse_mem_reqs(struct ath10k *ar, u16 tag, u16 len,
+ const void *ptr, void *data)
+{
+ struct wmi_svc_rdy_ev_arg *arg = data;
+ int i;
+
+ if (tag != WMI_TLV_TAG_STRUCT_WLAN_HOST_MEM_REQ)
+ return -EPROTO;
+
+ for (i = 0; i < ARRAY_SIZE(arg->mem_reqs); i++) {
+ if (!arg->mem_reqs[i]) {
+ arg->mem_reqs[i] = ptr;
+ return 0;
+ }
+ }
+
+ return -ENOMEM;
+}
+
+struct wmi_tlv_svc_rdy_parse {
+ const struct hal_reg_capabilities *reg;
+ const struct wmi_tlv_svc_rdy_ev *ev;
+ const __le32 *svc_bmap;
+ const struct wlan_host_mem_req *mem_reqs;
+ bool svc_bmap_done;
+ bool dbs_hw_mode_done;
+};
+
+static int ath10k_wmi_tlv_svc_rdy_parse(struct ath10k *ar, u16 tag, u16 len,
+ const void *ptr, void *data)
+{
+ struct wmi_tlv_svc_rdy_parse *svc_rdy = data;
+
+ switch (tag) {
+ case WMI_TLV_TAG_STRUCT_SERVICE_READY_EVENT:
+ svc_rdy->ev = ptr;
+ break;
+ case WMI_TLV_TAG_STRUCT_HAL_REG_CAPABILITIES:
+ svc_rdy->reg = ptr;
+ break;
+ case WMI_TLV_TAG_ARRAY_STRUCT:
+ svc_rdy->mem_reqs = ptr;
+ break;
+ case WMI_TLV_TAG_ARRAY_UINT32:
+ if (!svc_rdy->svc_bmap_done) {
+ svc_rdy->svc_bmap_done = true;
+ svc_rdy->svc_bmap = ptr;
+ } else if (!svc_rdy->dbs_hw_mode_done) {
+ svc_rdy->dbs_hw_mode_done = true;
+ }
+ break;
+ default:
+ break;
+ }
+ return 0;
+}
+
+static int ath10k_wmi_tlv_op_pull_svc_rdy_ev(struct ath10k *ar,
+ struct sk_buff *skb,
+ struct wmi_svc_rdy_ev_arg *arg)
+{
+ const struct hal_reg_capabilities *reg;
+ const struct wmi_tlv_svc_rdy_ev *ev;
+ const __le32 *svc_bmap;
+ const struct wlan_host_mem_req *mem_reqs;
+ struct wmi_tlv_svc_rdy_parse svc_rdy = { };
+ int ret;
+
+ ret = ath10k_wmi_tlv_iter(ar, skb->data, skb->len,
+ ath10k_wmi_tlv_svc_rdy_parse, &svc_rdy);
+ if (ret) {
+ ath10k_warn(ar, "failed to parse tlv: %d\n", ret);
+ return ret;
+ }
+
+ ev = svc_rdy.ev;
+ reg = svc_rdy.reg;
+ svc_bmap = svc_rdy.svc_bmap;
+ mem_reqs = svc_rdy.mem_reqs;
+
+ if (!ev || !reg || !svc_bmap || !mem_reqs)
+ return -EPROTO;
+
+ /* This is an internal ABI compatibility check for WMI TLV, so it is
+ * done here instead of in the generic WMI code.
+ */
+ ath10k_dbg(ar, ATH10K_DBG_WMI,
+ "wmi tlv abi 0x%08x ?= 0x%08x, 0x%08x ?= 0x%08x, 0x%08x ?= 0x%08x, 0x%08x ?= 0x%08x, 0x%08x ?= 0x%08x\n",
+ __le32_to_cpu(ev->abi.abi_ver0), WMI_TLV_ABI_VER0,
+ __le32_to_cpu(ev->abi.abi_ver_ns0), WMI_TLV_ABI_VER_NS0,
+ __le32_to_cpu(ev->abi.abi_ver_ns1), WMI_TLV_ABI_VER_NS1,
+ __le32_to_cpu(ev->abi.abi_ver_ns2), WMI_TLV_ABI_VER_NS2,
+ __le32_to_cpu(ev->abi.abi_ver_ns3), WMI_TLV_ABI_VER_NS3);
+
+ if (__le32_to_cpu(ev->abi.abi_ver0) != WMI_TLV_ABI_VER0 ||
+ __le32_to_cpu(ev->abi.abi_ver_ns0) != WMI_TLV_ABI_VER_NS0 ||
+ __le32_to_cpu(ev->abi.abi_ver_ns1) != WMI_TLV_ABI_VER_NS1 ||
+ __le32_to_cpu(ev->abi.abi_ver_ns2) != WMI_TLV_ABI_VER_NS2 ||
+ __le32_to_cpu(ev->abi.abi_ver_ns3) != WMI_TLV_ABI_VER_NS3) {
+ return -ENOTSUPP;
+ }
+
+ arg->min_tx_power = ev->hw_min_tx_power;
+ arg->max_tx_power = ev->hw_max_tx_power;
+ arg->ht_cap = ev->ht_cap_info;
+ arg->vht_cap = ev->vht_cap_info;
+ arg->sw_ver0 = ev->abi.abi_ver0;
+ arg->sw_ver1 = ev->abi.abi_ver1;
+ arg->fw_build = ev->fw_build_vers;
+ arg->phy_capab = ev->phy_capability;
+ arg->num_rf_chains = ev->num_rf_chains;
+ arg->eeprom_rd = reg->eeprom_rd;
+ arg->low_5ghz_chan = reg->low_5ghz_chan;
+ arg->high_5ghz_chan = reg->high_5ghz_chan;
+ arg->num_mem_reqs = ev->num_mem_reqs;
+ arg->service_map = svc_bmap;
+ arg->service_map_len = ath10k_wmi_tlv_len(svc_bmap);
+
+ ret = ath10k_wmi_tlv_iter(ar, mem_reqs, ath10k_wmi_tlv_len(mem_reqs),
+ ath10k_wmi_tlv_parse_mem_reqs, arg);
+ if (ret) {
+ ath10k_warn(ar, "failed to parse mem_reqs tlv: %d\n", ret);
+ return ret;
+ }
+
+ return 0;
+}
+
+static int ath10k_wmi_tlv_op_pull_rdy_ev(struct ath10k *ar,
+ struct sk_buff *skb,
+ struct wmi_rdy_ev_arg *arg)
+{
+ const void **tb;
+ const struct wmi_tlv_rdy_ev *ev;
+ int ret;
+
+ tb = ath10k_wmi_tlv_parse_alloc(ar, skb->data, skb->len, GFP_ATOMIC);
+ if (IS_ERR(tb)) {
+ ret = PTR_ERR(tb);
+ ath10k_warn(ar, "failed to parse tlv: %d\n", ret);
+ return ret;
+ }
+
+ ev = tb[WMI_TLV_TAG_STRUCT_READY_EVENT];
+ if (!ev) {
+ kfree(tb);
+ return -EPROTO;
+ }
+
+ arg->sw_version = ev->abi.abi_ver0;
+ arg->abi_version = ev->abi.abi_ver1;
+ arg->status = ev->status;
+ arg->mac_addr = ev->mac_addr.addr;
+
+ kfree(tb);
+ return 0;
+}
+
+static int ath10k_wmi_tlv_svc_avail_parse(struct ath10k *ar, u16 tag, u16 len,
+ const void *ptr, void *data)
+{
+ struct wmi_svc_avail_ev_arg *arg = data;
+
+ switch (tag) {
+ case WMI_TLV_TAG_STRUCT_SERVICE_AVAILABLE_EVENT:
+ arg->service_map_ext_valid = true;
+ arg->service_map_ext_len = *(__le32 *)ptr;
+ arg->service_map_ext = ptr + sizeof(__le32);
+ return 0;
+ default:
+ break;
+ }
+
+ return 0;
+}
+
+static int ath10k_wmi_tlv_op_pull_svc_avail(struct ath10k *ar,
+ struct sk_buff *skb,
+ struct wmi_svc_avail_ev_arg *arg)
+{
+ int ret;
+
+ ret = ath10k_wmi_tlv_iter(ar, skb->data, skb->len,
+ ath10k_wmi_tlv_svc_avail_parse, arg);
+
+ if (ret) {
+ ath10k_warn(ar, "failed to parse svc_avail tlv: %d\n", ret);
+ return ret;
+ }
+
+ return 0;
+}
+
+static void ath10k_wmi_tlv_pull_vdev_stats(const struct wmi_tlv_vdev_stats *src,
+ struct ath10k_fw_stats_vdev *dst)
+{
+ int i;
+
+ dst->vdev_id = __le32_to_cpu(src->vdev_id);
+ dst->beacon_snr = __le32_to_cpu(src->beacon_snr);
+ dst->data_snr = __le32_to_cpu(src->data_snr);
+ dst->num_rx_frames = __le32_to_cpu(src->num_rx_frames);
+ dst->num_rts_fail = __le32_to_cpu(src->num_rts_fail);
+ dst->num_rts_success = __le32_to_cpu(src->num_rts_success);
+ dst->num_rx_err = __le32_to_cpu(src->num_rx_err);
+ dst->num_rx_discard = __le32_to_cpu(src->num_rx_discard);
+ dst->num_tx_not_acked = __le32_to_cpu(src->num_tx_not_acked);
+
+ for (i = 0; i < ARRAY_SIZE(src->num_tx_frames); i++)
+ dst->num_tx_frames[i] =
+ __le32_to_cpu(src->num_tx_frames[i]);
+
+ for (i = 0; i < ARRAY_SIZE(src->num_tx_frames_retries); i++)
+ dst->num_tx_frames_retries[i] =
+ __le32_to_cpu(src->num_tx_frames_retries[i]);
+
+ for (i = 0; i < ARRAY_SIZE(src->num_tx_frames_failures); i++)
+ dst->num_tx_frames_failures[i] =
+ __le32_to_cpu(src->num_tx_frames_failures[i]);
+
+ for (i = 0; i < ARRAY_SIZE(src->tx_rate_history); i++)
+ dst->tx_rate_history[i] =
+ __le32_to_cpu(src->tx_rate_history[i]);
+
+ for (i = 0; i < ARRAY_SIZE(src->beacon_rssi_history); i++)
+ dst->beacon_rssi_history[i] =
+ __le32_to_cpu(src->beacon_rssi_history[i]);
+}
+
+static int ath10k_wmi_tlv_op_pull_fw_stats(struct ath10k *ar,
+ struct sk_buff *skb,
+ struct ath10k_fw_stats *stats)
+{
+ const void **tb;
+ const struct wmi_tlv_stats_ev *ev;
+ const void *data;
+ u32 num_pdev_stats;
+ u32 num_vdev_stats;
+ u32 num_peer_stats;
+ u32 num_bcnflt_stats;
+ u32 num_chan_stats;
+ size_t data_len;
+ int ret;
+ int i;
+
+ tb = ath10k_wmi_tlv_parse_alloc(ar, skb->data, skb->len, GFP_ATOMIC);
+ if (IS_ERR(tb)) {
+ ret = PTR_ERR(tb);
+ ath10k_warn(ar, "failed to parse tlv: %d\n", ret);
+ return ret;
+ }
+
+ ev = tb[WMI_TLV_TAG_STRUCT_STATS_EVENT];
+ data = tb[WMI_TLV_TAG_ARRAY_BYTE];
+
+ if (!ev || !data) {
+ kfree(tb);
+ return -EPROTO;
+ }
+
+ data_len = ath10k_wmi_tlv_len(data);
+ num_pdev_stats = __le32_to_cpu(ev->num_pdev_stats);
+ num_vdev_stats = __le32_to_cpu(ev->num_vdev_stats);
+ num_peer_stats = __le32_to_cpu(ev->num_peer_stats);
+ num_bcnflt_stats = __le32_to_cpu(ev->num_bcnflt_stats);
+ num_chan_stats = __le32_to_cpu(ev->num_chan_stats);
+
+ ath10k_dbg(ar, ATH10K_DBG_WMI,
+ "wmi tlv stats update pdev %i vdev %i peer %i bcnflt %i chan %i\n",
+ num_pdev_stats, num_vdev_stats, num_peer_stats,
+ num_bcnflt_stats, num_chan_stats);
+
+ for (i = 0; i < num_pdev_stats; i++) {
+ const struct wmi_pdev_stats *src;
+ struct ath10k_fw_stats_pdev *dst;
+
+ src = data;
+ if (data_len < sizeof(*src)) {
+ kfree(tb);
+ return -EPROTO;
+ }
+
+ data += sizeof(*src);
+ data_len -= sizeof(*src);
+
+ dst = kzalloc(sizeof(*dst), GFP_ATOMIC);
+ if (!dst)
+ continue;
+
+ ath10k_wmi_pull_pdev_stats_base(&src->base, dst);
+ ath10k_wmi_pull_pdev_stats_tx(&src->tx, dst);
+ ath10k_wmi_pull_pdev_stats_rx(&src->rx, dst);
+ list_add_tail(&dst->list, &stats->pdevs);
+ }
+
+ for (i = 0; i < num_vdev_stats; i++) {
+ const struct wmi_tlv_vdev_stats *src;
+ struct ath10k_fw_stats_vdev *dst;
+
+ src = data;
+ if (data_len < sizeof(*src)) {
+ kfree(tb);
+ return -EPROTO;
+ }
+
+ data += sizeof(*src);
+ data_len -= sizeof(*src);
+
+ dst = kzalloc(sizeof(*dst), GFP_ATOMIC);
+ if (!dst)
+ continue;
+
+ ath10k_wmi_tlv_pull_vdev_stats(src, dst);
+ list_add_tail(&dst->list, &stats->vdevs);
+ }
+
+ for (i = 0; i < num_peer_stats; i++) {
+ const struct wmi_10x_peer_stats *src;
+ struct ath10k_fw_stats_peer *dst;
+
+ src = data;
+ if (data_len < sizeof(*src)) {
+ kfree(tb);
+ return -EPROTO;
+ }
+
+ data += sizeof(*src);
+ data_len -= sizeof(*src);
+
+ dst = kzalloc(sizeof(*dst), GFP_ATOMIC);
+ if (!dst)
+ continue;
+
+ ath10k_wmi_pull_peer_stats(&src->old, dst);
+ dst->peer_rx_rate = __le32_to_cpu(src->peer_rx_rate);
+ list_add_tail(&dst->list, &stats->peers);
+ }
+
+ kfree(tb);
+ return 0;
+}
+
+static int ath10k_wmi_tlv_op_pull_roam_ev(struct ath10k *ar,
+ struct sk_buff *skb,
+ struct wmi_roam_ev_arg *arg)
+{
+ const void **tb;
+ const struct wmi_tlv_roam_ev *ev;
+ int ret;
+
+ tb = ath10k_wmi_tlv_parse_alloc(ar, skb->data, skb->len, GFP_ATOMIC);
+ if (IS_ERR(tb)) {
+ ret = PTR_ERR(tb);
+ ath10k_warn(ar, "failed to parse tlv: %d\n", ret);
+ return ret;
+ }
+
+ ev = tb[WMI_TLV_TAG_STRUCT_ROAM_EVENT];
+ if (!ev) {
+ kfree(tb);
+ return -EPROTO;
+ }
+
+ arg->vdev_id = ev->vdev_id;
+ arg->reason = ev->reason;
+ arg->rssi = ev->rssi;
+
+ kfree(tb);
+ return 0;
+}
+
+static int
+ath10k_wmi_tlv_op_pull_wow_ev(struct ath10k *ar, struct sk_buff *skb,
+ struct wmi_wow_ev_arg *arg)
+{
+ const void **tb;
+ const struct wmi_tlv_wow_event_info *ev;
+ int ret;
+
+ tb = ath10k_wmi_tlv_parse_alloc(ar, skb->data, skb->len, GFP_ATOMIC);
+ if (IS_ERR(tb)) {
+ ret = PTR_ERR(tb);
+ ath10k_warn(ar, "failed to parse tlv: %d\n", ret);
+ return ret;
+ }
+
+ ev = tb[WMI_TLV_TAG_STRUCT_WOW_EVENT_INFO];
+ if (!ev) {
+ kfree(tb);
+ return -EPROTO;
+ }
+
+ arg->vdev_id = __le32_to_cpu(ev->vdev_id);
+ arg->flag = __le32_to_cpu(ev->flag);
+ arg->wake_reason = __le32_to_cpu(ev->wake_reason);
+ arg->data_len = __le32_to_cpu(ev->data_len);
+
+ kfree(tb);
+ return 0;
+}
+
+static int ath10k_wmi_tlv_op_pull_echo_ev(struct ath10k *ar,
+ struct sk_buff *skb,
+ struct wmi_echo_ev_arg *arg)
+{
+ const void **tb;
+ const struct wmi_echo_event *ev;
+ int ret;
+
+ tb = ath10k_wmi_tlv_parse_alloc(ar, skb->data, skb->len, GFP_ATOMIC);
+ if (IS_ERR(tb)) {
+ ret = PTR_ERR(tb);
+ ath10k_warn(ar, "failed to parse tlv: %d\n", ret);
+ return ret;
+ }
+
+ ev = tb[WMI_TLV_TAG_STRUCT_ECHO_EVENT];
+ if (!ev) {
+ kfree(tb);
+ return -EPROTO;
+ }
+
+ arg->value = ev->value;
+
+ kfree(tb);
+ return 0;
+}
+
+static struct sk_buff *
+ath10k_wmi_tlv_op_gen_pdev_suspend(struct ath10k *ar, u32 opt)
+{
+ struct wmi_tlv_pdev_suspend *cmd;
+ struct wmi_tlv *tlv;
+ struct sk_buff *skb;
+
+ skb = ath10k_wmi_alloc_skb(ar, sizeof(*tlv) + sizeof(*cmd));
+ if (!skb)
+ return ERR_PTR(-ENOMEM);
+
+ tlv = (void *)skb->data;
+ tlv->tag = __cpu_to_le16(WMI_TLV_TAG_STRUCT_PDEV_SUSPEND_CMD);
+ tlv->len = __cpu_to_le16(sizeof(*cmd));
+ cmd = (void *)tlv->value;
+ cmd->opt = __cpu_to_le32(opt);
+
+ ath10k_dbg(ar, ATH10K_DBG_WMI, "wmi tlv pdev suspend\n");
+ return skb;
+}
+
+static struct sk_buff *
+ath10k_wmi_tlv_op_gen_pdev_resume(struct ath10k *ar)
+{
+ struct wmi_tlv_resume_cmd *cmd;
+ struct wmi_tlv *tlv;
+ struct sk_buff *skb;
+
+ skb = ath10k_wmi_alloc_skb(ar, sizeof(*tlv) + sizeof(*cmd));
+ if (!skb)
+ return ERR_PTR(-ENOMEM);
+
+ tlv = (void *)skb->data;
+ tlv->tag = __cpu_to_le16(WMI_TLV_TAG_STRUCT_PDEV_RESUME_CMD);
+ tlv->len = __cpu_to_le16(sizeof(*cmd));
+ cmd = (void *)tlv->value;
+ cmd->reserved = __cpu_to_le32(0);
+
+ ath10k_dbg(ar, ATH10K_DBG_WMI, "wmi tlv pdev resume\n");
+ return skb;
+}
+
+static struct sk_buff *
+ath10k_wmi_tlv_op_gen_pdev_set_rd(struct ath10k *ar,
+ u16 rd, u16 rd2g, u16 rd5g,
+ u16 ctl2g, u16 ctl5g,
+ enum wmi_dfs_region dfs_reg)
+{
+ struct wmi_tlv_pdev_set_rd_cmd *cmd;
+ struct wmi_tlv *tlv;
+ struct sk_buff *skb;
+
+ skb = ath10k_wmi_alloc_skb(ar, sizeof(*tlv) + sizeof(*cmd));
+ if (!skb)
+ return ERR_PTR(-ENOMEM);
+
+ tlv = (void *)skb->data;
+ tlv->tag = __cpu_to_le16(WMI_TLV_TAG_STRUCT_PDEV_SET_REGDOMAIN_CMD);
+ tlv->len = __cpu_to_le16(sizeof(*cmd));
+ cmd = (void *)tlv->value;
+ cmd->regd = __cpu_to_le32(rd);
+ cmd->regd_2ghz = __cpu_to_le32(rd2g);
+ cmd->regd_5ghz = __cpu_to_le32(rd5g);
+ cmd->conform_limit_2ghz = __cpu_to_le32(ctl2g);
+ cmd->conform_limit_5ghz = __cpu_to_le32(ctl5g);
+
+ ath10k_dbg(ar, ATH10K_DBG_WMI, "wmi tlv pdev set rd\n");
+ return skb;
+}
+
+static enum wmi_txbf_conf ath10k_wmi_tlv_txbf_conf_scheme(struct ath10k *ar)
+{
+ return WMI_TXBF_CONF_AFTER_ASSOC;
+}
+
+static struct sk_buff *
+ath10k_wmi_tlv_op_gen_pdev_set_param(struct ath10k *ar, u32 param_id,
+ u32 param_value)
+{
+ struct wmi_tlv_pdev_set_param_cmd *cmd;
+ struct wmi_tlv *tlv;
+ struct sk_buff *skb;
+
+ skb = ath10k_wmi_alloc_skb(ar, sizeof(*tlv) + sizeof(*cmd));
+ if (!skb)
+ return ERR_PTR(-ENOMEM);
+
+ tlv = (void *)skb->data;
+ tlv->tag = __cpu_to_le16(WMI_TLV_TAG_STRUCT_PDEV_SET_PARAM_CMD);
+ tlv->len = __cpu_to_le16(sizeof(*cmd));
+ cmd = (void *)tlv->value;
+ cmd->param_id = __cpu_to_le32(param_id);
+ cmd->param_value = __cpu_to_le32(param_value);
+
+ ath10k_dbg(ar, ATH10K_DBG_WMI, "wmi tlv pdev set param\n");
+ return skb;
+}
+
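+/* The init command is three TLVs back to back: the INIT_CMD header
+ * carrying the host ABI version, the resource configuration and an
+ * ARRAY_STRUCT of host memory chunk descriptors.
+ */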
+static struct sk_buff *ath10k_wmi_tlv_op_gen_init(struct ath10k *ar)
+{
+ struct sk_buff *skb;
+ struct wmi_tlv *tlv;
+ struct wmi_tlv_init_cmd *cmd;
+ struct wmi_tlv_resource_config *cfg;
+ struct wmi_host_mem_chunks *chunks;
+ size_t len, chunks_len;
+ void *ptr;
+
+ chunks_len = ar->wmi.num_mem_chunks * sizeof(struct host_memory_chunk);
+ len = (sizeof(*tlv) + sizeof(*cmd)) +
+ (sizeof(*tlv) + sizeof(*cfg)) +
+ (sizeof(*tlv) + chunks_len);
+
+ skb = ath10k_wmi_alloc_skb(ar, len);
+ if (!skb)
+ return ERR_PTR(-ENOMEM);
+
+ ptr = skb->data;
+
+ tlv = ptr;
+ tlv->tag = __cpu_to_le16(WMI_TLV_TAG_STRUCT_INIT_CMD);
+ tlv->len = __cpu_to_le16(sizeof(*cmd));
+ cmd = (void *)tlv->value;
+ ptr += sizeof(*tlv);
+ ptr += sizeof(*cmd);
+
+ tlv = ptr;
+ tlv->tag = __cpu_to_le16(WMI_TLV_TAG_STRUCT_RESOURCE_CONFIG);
+ tlv->len = __cpu_to_le16(sizeof(*cfg));
+ cfg = (void *)tlv->value;
+ ptr += sizeof(*tlv);
+ ptr += sizeof(*cfg);
+
+ tlv = ptr;
+ tlv->tag = __cpu_to_le16(WMI_TLV_TAG_ARRAY_STRUCT);
+ tlv->len = __cpu_to_le16(chunks_len);
+ chunks = (void *)tlv->value;
+
+ ptr += sizeof(*tlv);
+ ptr += chunks_len;
+
+ cmd->abi.abi_ver0 = __cpu_to_le32(WMI_TLV_ABI_VER0);
+ cmd->abi.abi_ver1 = __cpu_to_le32(WMI_TLV_ABI_VER1);
+ cmd->abi.abi_ver_ns0 = __cpu_to_le32(WMI_TLV_ABI_VER_NS0);
+ cmd->abi.abi_ver_ns1 = __cpu_to_le32(WMI_TLV_ABI_VER_NS1);
+ cmd->abi.abi_ver_ns2 = __cpu_to_le32(WMI_TLV_ABI_VER_NS2);
+ cmd->abi.abi_ver_ns3 = __cpu_to_le32(WMI_TLV_ABI_VER_NS3);
+ cmd->num_host_mem_chunks = __cpu_to_le32(ar->wmi.num_mem_chunks);
+
+ cfg->num_vdevs = __cpu_to_le32(TARGET_TLV_NUM_VDEVS);
+
+ cfg->num_peers = __cpu_to_le32(ar->hw_params.num_peers);
+ cfg->ast_skid_limit = __cpu_to_le32(ar->hw_params.ast_skid_limit);
+ cfg->num_wds_entries = __cpu_to_le32(ar->hw_params.num_wds_entries);
+
+ if (test_bit(WMI_SERVICE_RX_FULL_REORDER, ar->wmi.svc_map)) {
+ cfg->num_offload_peers = __cpu_to_le32(TARGET_TLV_NUM_VDEVS);
+ cfg->num_offload_reorder_bufs = __cpu_to_le32(TARGET_TLV_NUM_VDEVS);
+ } else {
+ cfg->num_offload_peers = __cpu_to_le32(0);
+ cfg->num_offload_reorder_bufs = __cpu_to_le32(0);
+ }
+
+ cfg->num_peer_keys = __cpu_to_le32(2);
+ cfg->num_tids = __cpu_to_le32(TARGET_TLV_NUM_TIDS);
+ cfg->tx_chain_mask = __cpu_to_le32(0x7);
+ cfg->rx_chain_mask = __cpu_to_le32(0x7);
+ cfg->rx_timeout_pri[0] = __cpu_to_le32(0x64);
+ cfg->rx_timeout_pri[1] = __cpu_to_le32(0x64);
+ cfg->rx_timeout_pri[2] = __cpu_to_le32(0x64);
+ cfg->rx_timeout_pri[3] = __cpu_to_le32(0x28);
+ cfg->rx_decap_mode = __cpu_to_le32(ar->wmi.rx_decap_mode);
+ cfg->scan_max_pending_reqs = __cpu_to_le32(4);
+ cfg->bmiss_offload_max_vdev = __cpu_to_le32(TARGET_TLV_NUM_VDEVS);
+ cfg->roam_offload_max_vdev = __cpu_to_le32(TARGET_TLV_NUM_VDEVS);
+ cfg->roam_offload_max_ap_profiles = __cpu_to_le32(8);
+ cfg->num_mcast_groups = __cpu_to_le32(0);
+ cfg->num_mcast_table_elems = __cpu_to_le32(0);
+ cfg->mcast2ucast_mode = __cpu_to_le32(0);
+ cfg->tx_dbg_log_size = __cpu_to_le32(0x400);
+ cfg->dma_burst_size = __cpu_to_le32(0);
+ cfg->mac_aggr_delim = __cpu_to_le32(0);
+ cfg->rx_skip_defrag_timeout_dup_detection_check = __cpu_to_le32(0);
+ cfg->vow_config = __cpu_to_le32(0);
+ cfg->gtk_offload_max_vdev = __cpu_to_le32(2);
+ cfg->num_msdu_desc = __cpu_to_le32(TARGET_TLV_NUM_MSDU_DESC);
+ cfg->max_frag_entries = __cpu_to_le32(2);
+ cfg->num_tdls_vdevs = __cpu_to_le32(TARGET_TLV_NUM_TDLS_VDEVS);
+ cfg->num_tdls_conn_table_entries = __cpu_to_le32(0x20);
+ cfg->beacon_tx_offload_max_vdev = __cpu_to_le32(2);
+ cfg->num_multicast_filter_entries = __cpu_to_le32(5);
+ cfg->num_wow_filters = __cpu_to_le32(ar->wow.max_num_patterns);
+ cfg->num_keep_alive_pattern = __cpu_to_le32(6);
+ cfg->keep_alive_pattern_size = __cpu_to_le32(0);
+ cfg->max_tdls_concurrent_sleep_sta = __cpu_to_le32(1);
+ cfg->max_tdls_concurrent_buffer_sta = __cpu_to_le32(1);
+ cfg->wmi_send_separate = __cpu_to_le32(0);
+ cfg->num_ocb_vdevs = __cpu_to_le32(0);
+ cfg->num_ocb_channels = __cpu_to_le32(0);
+ cfg->num_ocb_schedules = __cpu_to_le32(0);
+ cfg->host_capab = __cpu_to_le32(0);
+
+ ath10k_wmi_put_host_mem_chunks(ar, chunks);
+
+ ath10k_dbg(ar, ATH10K_DBG_WMI, "wmi tlv init\n");
+ return skb;
+}
+
+static struct sk_buff *
+ath10k_wmi_tlv_op_gen_start_scan(struct ath10k *ar,
+ const struct wmi_start_scan_arg *arg)
+{
+ struct wmi_tlv_start_scan_cmd *cmd;
+ struct wmi_tlv *tlv;
+ struct sk_buff *skb;
+ size_t len, chan_len, ssid_len, bssid_len, ie_len;
+ __le32 *chans;
+ struct wmi_ssid *ssids;
+ struct wmi_mac_addr *addrs;
+ void *ptr;
+ int i, ret;
+
+ ret = ath10k_wmi_start_scan_verify(arg);
+ if (ret)
+ return ERR_PTR(ret);
+
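+ /* The scan command carries four variable length TLV arrays after the
+ * fixed command structure: the channel list, SSID list, BSSID list
+ * and the scan IE blob.
+ */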
+ chan_len = arg->n_channels * sizeof(__le32);
+ ssid_len = arg->n_ssids * sizeof(struct wmi_ssid);
+ bssid_len = arg->n_bssids * sizeof(struct wmi_mac_addr);
+ ie_len = roundup(arg->ie_len, 4);
+ len = (sizeof(*tlv) + sizeof(*cmd)) +
+ sizeof(*tlv) + chan_len +
+ sizeof(*tlv) + ssid_len +
+ sizeof(*tlv) + bssid_len +
+ sizeof(*tlv) + ie_len;
+
+ skb = ath10k_wmi_alloc_skb(ar, len);
+ if (!skb)
+ return ERR_PTR(-ENOMEM);
+
+ ptr = (void *)skb->data;
+ tlv = ptr;
+ tlv->tag = __cpu_to_le16(WMI_TLV_TAG_STRUCT_START_SCAN_CMD);
+ tlv->len = __cpu_to_le16(sizeof(*cmd));
+ cmd = (void *)tlv->value;
+
+ ath10k_wmi_put_start_scan_common(&cmd->common, arg);
+ cmd->burst_duration_ms = __cpu_to_le32(arg->burst_duration_ms);
+ cmd->num_channels = __cpu_to_le32(arg->n_channels);
+ cmd->num_ssids = __cpu_to_le32(arg->n_ssids);
+ cmd->num_bssids = __cpu_to_le32(arg->n_bssids);
+ cmd->ie_len = __cpu_to_le32(arg->ie_len);
+ cmd->num_probes = __cpu_to_le32(3);
+ ether_addr_copy(cmd->mac_addr.addr, arg->mac_addr.addr);
+ ether_addr_copy(cmd->mac_mask.addr, arg->mac_mask.addr);
+
+ /* FIXME: There are some scan flag inconsistencies across firmwares,
+ * e.g. WMI-TLV inverts the logic behind the following flag.
+ */
+ cmd->common.scan_ctrl_flags ^= __cpu_to_le32(WMI_SCAN_FILTER_PROBE_REQ);
+
+ ptr += sizeof(*tlv);
+ ptr += sizeof(*cmd);
+
+ tlv = ptr;
+ tlv->tag = __cpu_to_le16(WMI_TLV_TAG_ARRAY_UINT32);
+ tlv->len = __cpu_to_le16(chan_len);
+ chans = (void *)tlv->value;
+ for (i = 0; i < arg->n_channels; i++)
+ chans[i] = __cpu_to_le32(arg->channels[i]);
+
+ ptr += sizeof(*tlv);
+ ptr += chan_len;
+
+ tlv = ptr;
+ tlv->tag = __cpu_to_le16(WMI_TLV_TAG_ARRAY_FIXED_STRUCT);
+ tlv->len = __cpu_to_le16(ssid_len);
+ ssids = (void *)tlv->value;
+ for (i = 0; i < arg->n_ssids; i++) {
+ ssids[i].ssid_len = __cpu_to_le32(arg->ssids[i].len);
+ memcpy(ssids[i].ssid, arg->ssids[i].ssid, arg->ssids[i].len);
+ }
+
+ ptr += sizeof(*tlv);
+ ptr += ssid_len;
+
+ tlv = ptr;
+ tlv->tag = __cpu_to_le16(WMI_TLV_TAG_ARRAY_FIXED_STRUCT);
+ tlv->len = __cpu_to_le16(bssid_len);
+ addrs = (void *)tlv->value;
+ for (i = 0; i < arg->n_bssids; i++)
+ ether_addr_copy(addrs[i].addr, arg->bssids[i].bssid);
+
+ ptr += sizeof(*tlv);
+ ptr += bssid_len;
+
+ tlv = ptr;
+ tlv->tag = __cpu_to_le16(WMI_TLV_TAG_ARRAY_BYTE);
+ tlv->len = __cpu_to_le16(ie_len);
+ memcpy(tlv->value, arg->ie, arg->ie_len);
+
+ ptr += sizeof(*tlv);
+ ptr += ie_len;
+
+ ath10k_dbg(ar, ATH10K_DBG_WMI, "wmi tlv start scan\n");
+ return skb;
+}
+
+static struct sk_buff *
+ath10k_wmi_tlv_op_gen_stop_scan(struct ath10k *ar,
+ const struct wmi_stop_scan_arg *arg)
+{
+ struct wmi_stop_scan_cmd *cmd;
+ struct wmi_tlv *tlv;
+ struct sk_buff *skb;
+ u32 scan_id;
+ u32 req_id;
+
+ if (arg->req_id > 0xFFF)
+ return ERR_PTR(-EINVAL);
+ if (arg->req_type == WMI_SCAN_STOP_ONE && arg->u.scan_id > 0xFFF)
+ return ERR_PTR(-EINVAL);
+
+ skb = ath10k_wmi_alloc_skb(ar, sizeof(*tlv) + sizeof(*cmd));
+ if (!skb)
+ return ERR_PTR(-ENOMEM);
+
+ scan_id = arg->u.scan_id;
+ scan_id |= WMI_HOST_SCAN_REQ_ID_PREFIX;
+
+ req_id = arg->req_id;
+ req_id |= WMI_HOST_SCAN_REQUESTOR_ID_PREFIX;
+
+ tlv = (void *)skb->data;
+ tlv->tag = __cpu_to_le16(WMI_TLV_TAG_STRUCT_STOP_SCAN_CMD);
+ tlv->len = __cpu_to_le16(sizeof(*cmd));
+ cmd = (void *)tlv->value;
+ cmd->req_type = __cpu_to_le32(arg->req_type);
+ cmd->vdev_id = __cpu_to_le32(arg->u.vdev_id);
+ cmd->scan_id = __cpu_to_le32(scan_id);
+ cmd->scan_req_id = __cpu_to_le32(req_id);
+
+ ath10k_dbg(ar, ATH10K_DBG_WMI, "wmi tlv stop scan\n");
+ return skb;
+}
+
+static struct sk_buff *
+ath10k_wmi_tlv_op_gen_vdev_create(struct ath10k *ar,
+ u32 vdev_id,
+ enum wmi_vdev_type vdev_type,
+ enum wmi_vdev_subtype vdev_subtype,
+ const u8 mac_addr[ETH_ALEN])
+{
+ struct wmi_vdev_create_cmd *cmd;
+ struct wmi_tlv *tlv;
+ struct sk_buff *skb;
+
+ skb = ath10k_wmi_alloc_skb(ar, sizeof(*tlv) + sizeof(*cmd));
+ if (!skb)
+ return ERR_PTR(-ENOMEM);
+
+ tlv = (void *)skb->data;
+ tlv->tag = __cpu_to_le16(WMI_TLV_TAG_STRUCT_VDEV_CREATE_CMD);
+ tlv->len = __cpu_to_le16(sizeof(*cmd));
+ cmd = (void *)tlv->value;
+ cmd->vdev_id = __cpu_to_le32(vdev_id);
+ cmd->vdev_type = __cpu_to_le32(vdev_type);
+ cmd->vdev_subtype = __cpu_to_le32(vdev_subtype);
+ ether_addr_copy(cmd->vdev_macaddr.addr, mac_addr);
+
+ ath10k_dbg(ar, ATH10K_DBG_WMI, "wmi tlv vdev create\n");
+ return skb;
+}
+
+static struct sk_buff *
+ath10k_wmi_tlv_op_gen_vdev_delete(struct ath10k *ar, u32 vdev_id)
+{
+ struct wmi_vdev_delete_cmd *cmd;
+ struct wmi_tlv *tlv;
+ struct sk_buff *skb;
+
+ skb = ath10k_wmi_alloc_skb(ar, sizeof(*tlv) + sizeof(*cmd));
+ if (!skb)
+ return ERR_PTR(-ENOMEM);
+
+ tlv = (void *)skb->data;
+ tlv->tag = __cpu_to_le16(WMI_TLV_TAG_STRUCT_VDEV_DELETE_CMD);
+ tlv->len = __cpu_to_le16(sizeof(*cmd));
+ cmd = (void *)tlv->value;
+ cmd->vdev_id = __cpu_to_le32(vdev_id);
+
+ ath10k_dbg(ar, ATH10K_DBG_WMI, "wmi tlv vdev delete\n");
+ return skb;
+}
+
+static struct sk_buff *
+ath10k_wmi_tlv_op_gen_vdev_start(struct ath10k *ar,
+ const struct wmi_vdev_start_request_arg *arg,
+ bool restart)
+{
+ struct wmi_tlv_vdev_start_cmd *cmd;
+ struct wmi_channel *ch;
+ struct wmi_tlv *tlv;
+ struct sk_buff *skb;
+ size_t len;
+ void *ptr;
+ u32 flags = 0;
+
+ if (WARN_ON(arg->hidden_ssid && !arg->ssid))
+ return ERR_PTR(-EINVAL);
+ if (WARN_ON(arg->ssid_len > sizeof(cmd->ssid.ssid)))
+ return ERR_PTR(-EINVAL);
+
+ len = (sizeof(*tlv) + sizeof(*cmd)) +
+ (sizeof(*tlv) + sizeof(*ch)) +
+ (sizeof(*tlv) + 0);
+ skb = ath10k_wmi_alloc_skb(ar, len);
+ if (!skb)
+ return ERR_PTR(-ENOMEM);
+
+ if (arg->hidden_ssid)
+ flags |= WMI_VDEV_START_HIDDEN_SSID;
+ if (arg->pmf_enabled)
+ flags |= WMI_VDEV_START_PMF_ENABLED;
+
+ ptr = (void *)skb->data;
+
+ tlv = ptr;
+ tlv->tag = __cpu_to_le16(WMI_TLV_TAG_STRUCT_VDEV_START_REQUEST_CMD);
+ tlv->len = __cpu_to_le16(sizeof(*cmd));
+ cmd = (void *)tlv->value;
+ cmd->vdev_id = __cpu_to_le32(arg->vdev_id);
+ cmd->bcn_intval = __cpu_to_le32(arg->bcn_intval);
+ cmd->dtim_period = __cpu_to_le32(arg->dtim_period);
+ cmd->flags = __cpu_to_le32(flags);
+ cmd->bcn_tx_rate = __cpu_to_le32(arg->bcn_tx_rate);
+ cmd->bcn_tx_power = __cpu_to_le32(arg->bcn_tx_power);
+ cmd->disable_hw_ack = __cpu_to_le32(arg->disable_hw_ack);
+
+ if (arg->ssid) {
+ cmd->ssid.ssid_len = __cpu_to_le32(arg->ssid_len);
+ memcpy(cmd->ssid.ssid, arg->ssid, arg->ssid_len);
+ }
+
+ ptr += sizeof(*tlv);
+ ptr += sizeof(*cmd);
+
+ tlv = ptr;
+ tlv->tag = __cpu_to_le16(WMI_TLV_TAG_STRUCT_CHANNEL);
+ tlv->len = __cpu_to_le16(sizeof(*ch));
+ ch = (void *)tlv->value;
+ ath10k_wmi_put_wmi_channel(ch, &arg->channel);
+
+ ptr += sizeof(*tlv);
+ ptr += sizeof(*ch);
+
+ tlv = ptr;
+ tlv->tag = __cpu_to_le16(WMI_TLV_TAG_ARRAY_STRUCT);
+ tlv->len = 0;
+
+ /* Note: This is a nested TLV containing:
+ * [wmi_tlv][wmi_p2p_noa_descriptor][wmi_tlv]..
+ */
+
+ ptr += sizeof(*tlv);
+ ptr += 0;
+
+ ath10k_dbg(ar, ATH10K_DBG_WMI, "wmi tlv vdev start\n");
+ return skb;
+}
+
+static struct sk_buff *
+ath10k_wmi_tlv_op_gen_vdev_stop(struct ath10k *ar, u32 vdev_id)
+{
+ struct wmi_vdev_stop_cmd *cmd;
+ struct wmi_tlv *tlv;
+ struct sk_buff *skb;
+
+ skb = ath10k_wmi_alloc_skb(ar, sizeof(*tlv) + sizeof(*cmd));
+ if (!skb)
+ return ERR_PTR(-ENOMEM);
+
+ tlv = (void *)skb->data;
+ tlv->tag = __cpu_to_le16(WMI_TLV_TAG_STRUCT_VDEV_STOP_CMD);
+ tlv->len = __cpu_to_le16(sizeof(*cmd));
+ cmd = (void *)tlv->value;
+ cmd->vdev_id = __cpu_to_le32(vdev_id);
+
+ ath10k_dbg(ar, ATH10K_DBG_WMI, "wmi tlv vdev stop\n");
+ return skb;
+}
+
+static struct sk_buff *
+ath10k_wmi_tlv_op_gen_vdev_up(struct ath10k *ar, u32 vdev_id, u32 aid,
+ const u8 *bssid)
+{
+ struct wmi_vdev_up_cmd *cmd;
+ struct wmi_tlv *tlv;
+ struct sk_buff *skb;
+
+ skb = ath10k_wmi_alloc_skb(ar, sizeof(*tlv) + sizeof(*cmd));
+ if (!skb)
+ return ERR_PTR(-ENOMEM);
+
+ tlv = (void *)skb->data;
+ tlv->tag = __cpu_to_le16(WMI_TLV_TAG_STRUCT_VDEV_UP_CMD);
+ tlv->len = __cpu_to_le16(sizeof(*cmd));
+ cmd = (void *)tlv->value;
+ cmd->vdev_id = __cpu_to_le32(vdev_id);
+ cmd->vdev_assoc_id = __cpu_to_le32(aid);
+ ether_addr_copy(cmd->vdev_bssid.addr, bssid);
+
+ ath10k_dbg(ar, ATH10K_DBG_WMI, "wmi tlv vdev up\n");
+ return skb;
+}
+
+static struct sk_buff *
+ath10k_wmi_tlv_op_gen_vdev_down(struct ath10k *ar, u32 vdev_id)
+{
+ struct wmi_vdev_down_cmd *cmd;
+ struct wmi_tlv *tlv;
+ struct sk_buff *skb;
+
+ skb = ath10k_wmi_alloc_skb(ar, sizeof(*tlv) + sizeof(*cmd));
+ if (!skb)
+ return ERR_PTR(-ENOMEM);
+
+ tlv = (void *)skb->data;
+ tlv->tag = __cpu_to_le16(WMI_TLV_TAG_STRUCT_VDEV_DOWN_CMD);
+ tlv->len = __cpu_to_le16(sizeof(*cmd));
+ cmd = (void *)tlv->value;
+ cmd->vdev_id = __cpu_to_le32(vdev_id);
+
+ ath10k_dbg(ar, ATH10K_DBG_WMI, "wmi tlv vdev down\n");
+ return skb;
+}
+
+static struct sk_buff *
+ath10k_wmi_tlv_op_gen_vdev_set_param(struct ath10k *ar, u32 vdev_id,
+ u32 param_id, u32 param_value)
+{
+ struct wmi_vdev_set_param_cmd *cmd;
+ struct wmi_tlv *tlv;
+ struct sk_buff *skb;
+
+ skb = ath10k_wmi_alloc_skb(ar, sizeof(*tlv) + sizeof(*cmd));
+ if (!skb)
+ return ERR_PTR(-ENOMEM);
+
+ tlv = (void *)skb->data;
+ tlv->tag = __cpu_to_le16(WMI_TLV_TAG_STRUCT_VDEV_SET_PARAM_CMD);
+ tlv->len = __cpu_to_le16(sizeof(*cmd));
+ cmd = (void *)tlv->value;
+ cmd->vdev_id = __cpu_to_le32(vdev_id);
+ cmd->param_id = __cpu_to_le32(param_id);
+ cmd->param_value = __cpu_to_le32(param_value);
+
+ ath10k_dbg(ar, ATH10K_DBG_WMI, "wmi tlv vdev set param\n");
+ return skb;
+}
+
+static struct sk_buff *
+ath10k_wmi_tlv_op_gen_vdev_install_key(struct ath10k *ar,
+ const struct wmi_vdev_install_key_arg *arg)
+{
+ struct wmi_vdev_install_key_cmd *cmd;
+ struct wmi_tlv *tlv;
+ struct sk_buff *skb;
+ size_t len;
+ void *ptr;
+
+ if (arg->key_cipher == WMI_CIPHER_NONE && arg->key_data != NULL)
+ return ERR_PTR(-EINVAL);
+ if (arg->key_cipher != WMI_CIPHER_NONE && arg->key_data == NULL)
+ return ERR_PTR(-EINVAL);
+
+ len = sizeof(*tlv) + sizeof(*cmd) +
+ sizeof(*tlv) + roundup(arg->key_len, sizeof(__le32));
+ skb = ath10k_wmi_alloc_skb(ar, len);
+ if (!skb)
+ return ERR_PTR(-ENOMEM);
+
+ ptr = (void *)skb->data;
+ tlv = ptr;
+ tlv->tag = __cpu_to_le16(WMI_TLV_TAG_STRUCT_VDEV_INSTALL_KEY_CMD);
+ tlv->len = __cpu_to_le16(sizeof(*cmd));
+ cmd = (void *)tlv->value;
+ cmd->vdev_id = __cpu_to_le32(arg->vdev_id);
+ cmd->key_idx = __cpu_to_le32(arg->key_idx);
+ cmd->key_flags = __cpu_to_le32(arg->key_flags);
+ cmd->key_cipher = __cpu_to_le32(arg->key_cipher);
+ cmd->key_len = __cpu_to_le32(arg->key_len);
+ cmd->key_txmic_len = __cpu_to_le32(arg->key_txmic_len);
+ cmd->key_rxmic_len = __cpu_to_le32(arg->key_rxmic_len);
+
+ if (arg->macaddr)
+ ether_addr_copy(cmd->peer_macaddr.addr, arg->macaddr);
+
+ ptr += sizeof(*tlv);
+ ptr += sizeof(*cmd);
+
+ tlv = ptr;
+ tlv->tag = __cpu_to_le16(WMI_TLV_TAG_ARRAY_BYTE);
+ tlv->len = __cpu_to_le16(roundup(arg->key_len, sizeof(__le32)));
+ if (arg->key_data)
+ memcpy(tlv->value, arg->key_data, arg->key_len);
+
+ ptr += sizeof(*tlv);
+ ptr += roundup(arg->key_len, sizeof(__le32));
+
+ ath10k_dbg(ar, ATH10K_DBG_WMI, "wmi tlv vdev install key\n");
+ return skb;
+}
+
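+/* Append one per-AC auto trigger parameter TLV at ptr and return the
+ * first byte past it so several ACs can be chained back to back.
+ */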
+static void *ath10k_wmi_tlv_put_uapsd_ac(struct ath10k *ar, void *ptr,
+ const struct wmi_sta_uapsd_auto_trig_arg *arg)
+{
+ struct wmi_sta_uapsd_auto_trig_param *ac;
+ struct wmi_tlv *tlv;
+
+ tlv = ptr;
+ tlv->tag = __cpu_to_le16(WMI_TLV_TAG_STRUCT_STA_UAPSD_AUTO_TRIG_PARAM);
+ tlv->len = __cpu_to_le16(sizeof(*ac));
+ ac = (void *)tlv->value;
+
+ ac->wmm_ac = __cpu_to_le32(arg->wmm_ac);
+ ac->user_priority = __cpu_to_le32(arg->user_priority);
+ ac->service_interval = __cpu_to_le32(arg->service_interval);
+ ac->suspend_interval = __cpu_to_le32(arg->suspend_interval);
+ ac->delay_interval = __cpu_to_le32(arg->delay_interval);
+
+ ath10k_dbg(ar, ATH10K_DBG_WMI,
+ "wmi tlv vdev sta uapsd auto trigger ac %d prio %d svc int %d susp int %d delay int %d\n",
+ arg->wmm_ac, arg->user_priority, arg->service_interval,
+ arg->suspend_interval, arg->delay_interval);
+
+ return ptr + sizeof(*tlv) + sizeof(*ac);
+}
+
+static struct sk_buff *
+ath10k_wmi_tlv_op_gen_vdev_sta_uapsd(struct ath10k *ar, u32 vdev_id,
+ const u8 peer_addr[ETH_ALEN],
+ const struct wmi_sta_uapsd_auto_trig_arg *args,
+ u32 num_ac)
+{
+ struct wmi_sta_uapsd_auto_trig_cmd_fixed_param *cmd;
+ struct wmi_sta_uapsd_auto_trig_param *ac;
+ struct wmi_tlv *tlv;
+ struct sk_buff *skb;
+ size_t len;
+ size_t ac_tlv_len;
+ void *ptr;
+ int i;
+
+ ac_tlv_len = num_ac * (sizeof(*tlv) + sizeof(*ac));
+ len = sizeof(*tlv) + sizeof(*cmd) +
+ sizeof(*tlv) + ac_tlv_len;
+ skb = ath10k_wmi_alloc_skb(ar, len);
+ if (!skb)
+ return ERR_PTR(-ENOMEM);
+
+ ptr = (void *)skb->data;
+ tlv = ptr;
+ tlv->tag = __cpu_to_le16(WMI_TLV_TAG_STRUCT_STA_UAPSD_AUTO_TRIG_CMD);
+ tlv->len = __cpu_to_le16(sizeof(*cmd));
+ cmd = (void *)tlv->value;
+ cmd->vdev_id = __cpu_to_le32(vdev_id);
+ cmd->num_ac = __cpu_to_le32(num_ac);
+ ether_addr_copy(cmd->peer_macaddr.addr, peer_addr);
+
+ ptr += sizeof(*tlv);
+ ptr += sizeof(*cmd);
+
+ tlv = ptr;
+ tlv->tag = __cpu_to_le16(WMI_TLV_TAG_ARRAY_STRUCT);
+ tlv->len = __cpu_to_le16(ac_tlv_len);
+ ac = (void *)tlv->value;
+
+ ptr += sizeof(*tlv);
+ for (i = 0; i < num_ac; i++)
+ ptr = ath10k_wmi_tlv_put_uapsd_ac(ar, ptr, &args[i]);
+
+ ath10k_dbg(ar, ATH10K_DBG_WMI, "wmi tlv vdev sta uapsd auto trigger\n");
+ return skb;
+}
+
+static void *ath10k_wmi_tlv_put_wmm(void *ptr,
+ const struct wmi_wmm_params_arg *arg)
+{
+ struct wmi_wmm_params *wmm;
+ struct wmi_tlv *tlv;
+
+ tlv = ptr;
+ tlv->tag = __cpu_to_le16(WMI_TLV_TAG_STRUCT_WMM_PARAMS);
+ tlv->len = __cpu_to_le16(sizeof(*wmm));
+ wmm = (void *)tlv->value;
+ ath10k_wmi_set_wmm_param(wmm, arg);
+
+ return ptr + sizeof(*tlv) + sizeof(*wmm);
+}
+
+static struct sk_buff *
+ath10k_wmi_tlv_op_gen_vdev_wmm_conf(struct ath10k *ar, u32 vdev_id,
+ const struct wmi_wmm_params_all_arg *arg)
+{
+ struct wmi_tlv_vdev_set_wmm_cmd *cmd;
+ struct wmi_tlv *tlv;
+ struct sk_buff *skb;
+ size_t len;
+ void *ptr;
+
+ len = sizeof(*tlv) + sizeof(*cmd);
+ skb = ath10k_wmi_alloc_skb(ar, len);
+ if (!skb)
+ return ERR_PTR(-ENOMEM);
+
+ ptr = (void *)skb->data;
+ tlv = ptr;
+ tlv->tag = __cpu_to_le16(WMI_TLV_TAG_STRUCT_VDEV_SET_WMM_PARAMS_CMD);
+ tlv->len = __cpu_to_le16(sizeof(*cmd));
+ cmd = (void *)tlv->value;
+ cmd->vdev_id = __cpu_to_le32(vdev_id);
+
+ ath10k_wmi_set_wmm_param(&cmd->vdev_wmm_params[0].params, &arg->ac_be);
+ ath10k_wmi_set_wmm_param(&cmd->vdev_wmm_params[1].params, &arg->ac_bk);
+ ath10k_wmi_set_wmm_param(&cmd->vdev_wmm_params[2].params, &arg->ac_vi);
+ ath10k_wmi_set_wmm_param(&cmd->vdev_wmm_params[3].params, &arg->ac_vo);
+
+ ath10k_dbg(ar, ATH10K_DBG_WMI, "wmi tlv vdev wmm conf\n");
+ return skb;
+}
+
+static struct sk_buff *
+ath10k_wmi_tlv_op_gen_sta_keepalive(struct ath10k *ar,
+ const struct wmi_sta_keepalive_arg *arg)
+{
+ struct wmi_tlv_sta_keepalive_cmd *cmd;
+ struct wmi_sta_keepalive_arp_resp *arp;
+ struct sk_buff *skb;
+ struct wmi_tlv *tlv;
+ void *ptr;
+ size_t len;
+
+ len = sizeof(*tlv) + sizeof(*cmd) +
+ sizeof(*tlv) + sizeof(*arp);
+ skb = ath10k_wmi_alloc_skb(ar, len);
+ if (!skb)
+ return ERR_PTR(-ENOMEM);
+
+ ptr = (void *)skb->data;
+ tlv = ptr;
+ tlv->tag = __cpu_to_le16(WMI_TLV_TAG_STRUCT_STA_KEEPALIVE_CMD);
+ tlv->len = __cpu_to_le16(sizeof(*cmd));
+ cmd = (void *)tlv->value;
+ cmd->vdev_id = __cpu_to_le32(arg->vdev_id);
+ cmd->enabled = __cpu_to_le32(arg->enabled);
+ cmd->method = __cpu_to_le32(arg->method);
+ cmd->interval = __cpu_to_le32(arg->interval);
+
+ ptr += sizeof(*tlv);
+ ptr += sizeof(*cmd);
+
+ tlv = ptr;
+ tlv->tag = __cpu_to_le16(WMI_TLV_TAG_STRUCT_STA_KEEPALVE_ARP_RESPONSE);
+ tlv->len = __cpu_to_le16(sizeof(*arp));
+ arp = (void *)tlv->value;
+
+ arp->src_ip4_addr = arg->src_ip4_addr;
+ arp->dest_ip4_addr = arg->dest_ip4_addr;
+ ether_addr_copy(arp->dest_mac_addr.addr, arg->dest_mac_addr);
+
+ ath10k_dbg(ar, ATH10K_DBG_WMI, "wmi tlv sta keepalive vdev %d enabled %d method %d interval %d\n",
+ arg->vdev_id, arg->enabled, arg->method, arg->interval);
+ return skb;
+}
+
+static struct sk_buff *
+ath10k_wmi_tlv_op_gen_peer_create(struct ath10k *ar, u32 vdev_id,
+ const u8 peer_addr[ETH_ALEN],
+ enum wmi_peer_type peer_type)
+{
+ struct wmi_tlv_peer_create_cmd *cmd;
+ struct wmi_tlv *tlv;
+ struct sk_buff *skb;
+
+ skb = ath10k_wmi_alloc_skb(ar, sizeof(*tlv) + sizeof(*cmd));
+ if (!skb)
+ return ERR_PTR(-ENOMEM);
+
+ tlv = (void *)skb->data;
+ tlv->tag = __cpu_to_le16(WMI_TLV_TAG_STRUCT_PEER_CREATE_CMD);
+ tlv->len = __cpu_to_le16(sizeof(*cmd));
+ cmd = (void *)tlv->value;
+ cmd->vdev_id = __cpu_to_le32(vdev_id);
+ cmd->peer_type = __cpu_to_le32(peer_type);
+ ether_addr_copy(cmd->peer_addr.addr, peer_addr);
+
+ ath10k_dbg(ar, ATH10K_DBG_WMI, "wmi tlv peer create\n");
+ return skb;
+}
+
+static struct sk_buff *
+ath10k_wmi_tlv_op_gen_peer_delete(struct ath10k *ar, u32 vdev_id,
+ const u8 peer_addr[ETH_ALEN])
+{
+ struct wmi_peer_delete_cmd *cmd;
+ struct wmi_tlv *tlv;
+ struct sk_buff *skb;
+
+ skb = ath10k_wmi_alloc_skb(ar, sizeof(*tlv) + sizeof(*cmd));
+ if (!skb)
+ return ERR_PTR(-ENOMEM);
+
+ tlv = (void *)skb->data;
+ tlv->tag = __cpu_to_le16(WMI_TLV_TAG_STRUCT_PEER_DELETE_CMD);
+ tlv->len = __cpu_to_le16(sizeof(*cmd));
+ cmd = (void *)tlv->value;
+ cmd->vdev_id = __cpu_to_le32(vdev_id);
+ ether_addr_copy(cmd->peer_macaddr.addr, peer_addr);
+
+ ath10k_dbg(ar, ATH10K_DBG_WMI, "wmi tlv peer delete\n");
+ return skb;
+}
+
+static struct sk_buff *
+ath10k_wmi_tlv_op_gen_peer_flush(struct ath10k *ar, u32 vdev_id,
+ const u8 peer_addr[ETH_ALEN], u32 tid_bitmap)
+{
+ struct wmi_peer_flush_tids_cmd *cmd;
+ struct wmi_tlv *tlv;
+ struct sk_buff *skb;
+
+ skb = ath10k_wmi_alloc_skb(ar, sizeof(*tlv) + sizeof(*cmd));
+ if (!skb)
+ return ERR_PTR(-ENOMEM);
+
+ tlv = (void *)skb->data;
+ tlv->tag = __cpu_to_le16(WMI_TLV_TAG_STRUCT_PEER_FLUSH_TIDS_CMD);
+ tlv->len = __cpu_to_le16(sizeof(*cmd));
+ cmd = (void *)tlv->value;
+ cmd->vdev_id = __cpu_to_le32(vdev_id);
+ cmd->peer_tid_bitmap = __cpu_to_le32(tid_bitmap);
+ ether_addr_copy(cmd->peer_macaddr.addr, peer_addr);
+
+ ath10k_dbg(ar, ATH10K_DBG_WMI, "wmi tlv peer flush\n");
+ return skb;
+}
+
+static struct sk_buff *
+ath10k_wmi_tlv_op_gen_peer_set_param(struct ath10k *ar, u32 vdev_id,
+ const u8 *peer_addr,
+ enum wmi_peer_param param_id,
+ u32 param_value)
+{
+ struct wmi_peer_set_param_cmd *cmd;
+ struct wmi_tlv *tlv;
+ struct sk_buff *skb;
+
+ skb = ath10k_wmi_alloc_skb(ar, sizeof(*tlv) + sizeof(*cmd));
+ if (!skb)
+ return ERR_PTR(-ENOMEM);
+
+ tlv = (void *)skb->data;
+ tlv->tag = __cpu_to_le16(WMI_TLV_TAG_STRUCT_PEER_SET_PARAM_CMD);
+ tlv->len = __cpu_to_le16(sizeof(*cmd));
+ cmd = (void *)tlv->value;
+ cmd->vdev_id = __cpu_to_le32(vdev_id);
+ cmd->param_id = __cpu_to_le32(param_id);
+ cmd->param_value = __cpu_to_le32(param_value);
+ ether_addr_copy(cmd->peer_macaddr.addr, peer_addr);
+
+ ath10k_dbg(ar, ATH10K_DBG_WMI, "wmi tlv peer set param\n");
+ return skb;
+}
+
+static struct sk_buff *
+ath10k_wmi_tlv_op_gen_peer_assoc(struct ath10k *ar,
+ const struct wmi_peer_assoc_complete_arg *arg)
+{
+ struct wmi_tlv_peer_assoc_cmd *cmd;
+ struct wmi_vht_rate_set *vht_rate;
+ struct wmi_tlv *tlv;
+ struct sk_buff *skb;
+ size_t len, legacy_rate_len, ht_rate_len;
+ void *ptr;
+
+ if (arg->peer_mpdu_density > 16)
+ return ERR_PTR(-EINVAL);
+ if (arg->peer_legacy_rates.num_rates > MAX_SUPPORTED_RATES)
+ return ERR_PTR(-EINVAL);
+ if (arg->peer_ht_rates.num_rates > MAX_SUPPORTED_RATES)
+ return ERR_PTR(-EINVAL);
+
+ legacy_rate_len = roundup(arg->peer_legacy_rates.num_rates,
+ sizeof(__le32));
+ ht_rate_len = roundup(arg->peer_ht_rates.num_rates, sizeof(__le32));
+ len = (sizeof(*tlv) + sizeof(*cmd)) +
+ (sizeof(*tlv) + legacy_rate_len) +
+ (sizeof(*tlv) + ht_rate_len) +
+ (sizeof(*tlv) + sizeof(*vht_rate));
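+ /* Resulting buffer layout:
+ * [tlv][peer_assoc_cmd] [tlv][legacy rates] [tlv][ht rates]
+ * [tlv][vht_rate_set]
+ * The rate arrays are byte blobs rounded up to 4-byte TLV alignment.
+ */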
+ skb = ath10k_wmi_alloc_skb(ar, len);
+ if (!skb)
+ return ERR_PTR(-ENOMEM);
+
+ ptr = (void *)skb->data;
+ tlv = ptr;
+ tlv->tag = __cpu_to_le16(WMI_TLV_TAG_STRUCT_PEER_ASSOC_COMPLETE_CMD);
+ tlv->len = __cpu_to_le16(sizeof(*cmd));
+ cmd = (void *)tlv->value;
+
+ cmd->vdev_id = __cpu_to_le32(arg->vdev_id);
+ cmd->new_assoc = __cpu_to_le32(arg->peer_reassoc ? 0 : 1);
+ cmd->assoc_id = __cpu_to_le32(arg->peer_aid);
+ cmd->flags = __cpu_to_le32(arg->peer_flags);
+ cmd->caps = __cpu_to_le32(arg->peer_caps);
+ cmd->listen_intval = __cpu_to_le32(arg->peer_listen_intval);
+ cmd->ht_caps = __cpu_to_le32(arg->peer_ht_caps);
+ cmd->max_mpdu = __cpu_to_le32(arg->peer_max_mpdu);
+ cmd->mpdu_density = __cpu_to_le32(arg->peer_mpdu_density);
+ cmd->rate_caps = __cpu_to_le32(arg->peer_rate_caps);
+ cmd->nss = __cpu_to_le32(arg->peer_num_spatial_streams);
+ cmd->vht_caps = __cpu_to_le32(arg->peer_vht_caps);
+ cmd->phy_mode = __cpu_to_le32(arg->peer_phymode);
+ cmd->num_legacy_rates = __cpu_to_le32(arg->peer_legacy_rates.num_rates);
+ cmd->num_ht_rates = __cpu_to_le32(arg->peer_ht_rates.num_rates);
+ ether_addr_copy(cmd->mac_addr.addr, arg->addr);
+
+ ptr += sizeof(*tlv);
+ ptr += sizeof(*cmd);
+
+ tlv = ptr;
+ tlv->tag = __cpu_to_le16(WMI_TLV_TAG_ARRAY_BYTE);
+ tlv->len = __cpu_to_le16(legacy_rate_len);
+ memcpy(tlv->value, arg->peer_legacy_rates.rates,
+ arg->peer_legacy_rates.num_rates);
+
+ ptr += sizeof(*tlv);
+ ptr += legacy_rate_len;
+
+ tlv = ptr;
+ tlv->tag = __cpu_to_le16(WMI_TLV_TAG_ARRAY_BYTE);
+ tlv->len = __cpu_to_le16(ht_rate_len);
+ memcpy(tlv->value, arg->peer_ht_rates.rates,
+ arg->peer_ht_rates.num_rates);
+
+ ptr += sizeof(*tlv);
+ ptr += ht_rate_len;
+
+ tlv = ptr;
+ tlv->tag = __cpu_to_le16(WMI_TLV_TAG_STRUCT_VHT_RATE_SET);
+ tlv->len = __cpu_to_le16(sizeof(*vht_rate));
+ vht_rate = (void *)tlv->value;
+
+ vht_rate->rx_max_rate = __cpu_to_le32(arg->peer_vht_rates.rx_max_rate);
+ vht_rate->rx_mcs_set = __cpu_to_le32(arg->peer_vht_rates.rx_mcs_set);
+ vht_rate->tx_max_rate = __cpu_to_le32(arg->peer_vht_rates.tx_max_rate);
+ vht_rate->tx_mcs_set = __cpu_to_le32(arg->peer_vht_rates.tx_mcs_set);
+
+ ptr += sizeof(*tlv);
+ ptr += sizeof(*vht_rate);
+
+ ath10k_dbg(ar, ATH10K_DBG_WMI, "wmi tlv peer assoc\n");
+ return skb;
+}
+
+static struct sk_buff *
+ath10k_wmi_tlv_op_gen_set_psmode(struct ath10k *ar, u32 vdev_id,
+ enum wmi_sta_ps_mode psmode)
+{
+ struct wmi_sta_powersave_mode_cmd *cmd;
+ struct wmi_tlv *tlv;
+ struct sk_buff *skb;
+
+ skb = ath10k_wmi_alloc_skb(ar, sizeof(*tlv) + sizeof(*cmd));
+ if (!skb)
+ return ERR_PTR(-ENOMEM);
+
+ tlv = (void *)skb->data;
+ tlv->tag = __cpu_to_le16(WMI_TLV_TAG_STRUCT_STA_POWERSAVE_MODE_CMD);
+ tlv->len = __cpu_to_le16(sizeof(*cmd));
+ cmd = (void *)tlv->value;
+ cmd->vdev_id = __cpu_to_le32(vdev_id);
+ cmd->sta_ps_mode = __cpu_to_le32(psmode);
+
+ ath10k_dbg(ar, ATH10K_DBG_WMI, "wmi tlv set psmode\n");
+ return skb;
+}
+
+static struct sk_buff *
+ath10k_wmi_tlv_op_gen_set_sta_ps(struct ath10k *ar, u32 vdev_id,
+ enum wmi_sta_powersave_param param_id,
+ u32 param_value)
+{
+ struct wmi_sta_powersave_param_cmd *cmd;
+ struct wmi_tlv *tlv;
+ struct sk_buff *skb;
+
+ skb = ath10k_wmi_alloc_skb(ar, sizeof(*tlv) + sizeof(*cmd));
+ if (!skb)
+ return ERR_PTR(-ENOMEM);
+
+ tlv = (void *)skb->data;
+ tlv->tag = __cpu_to_le16(WMI_TLV_TAG_STRUCT_STA_POWERSAVE_PARAM_CMD);
+ tlv->len = __cpu_to_le16(sizeof(*cmd));
+ cmd = (void *)tlv->value;
+ cmd->vdev_id = __cpu_to_le32(vdev_id);
+ cmd->param_id = __cpu_to_le32(param_id);
+ cmd->param_value = __cpu_to_le32(param_value);
+
+ ath10k_dbg(ar, ATH10K_DBG_WMI, "wmi tlv set sta ps\n");
+ return skb;
+}
+
+static struct sk_buff *
+ath10k_wmi_tlv_op_gen_set_ap_ps(struct ath10k *ar, u32 vdev_id, const u8 *mac,
+ enum wmi_ap_ps_peer_param param_id, u32 value)
+{
+ struct wmi_ap_ps_peer_cmd *cmd;
+ struct wmi_tlv *tlv;
+ struct sk_buff *skb;
+
+ if (!mac)
+ return ERR_PTR(-EINVAL);
+
+ skb = ath10k_wmi_alloc_skb(ar, sizeof(*tlv) + sizeof(*cmd));
+ if (!skb)
+ return ERR_PTR(-ENOMEM);
+
+ tlv = (void *)skb->data;
+ tlv->tag = __cpu_to_le16(WMI_TLV_TAG_STRUCT_AP_PS_PEER_CMD);
+ tlv->len = __cpu_to_le16(sizeof(*cmd));
+ cmd = (void *)tlv->value;
+ cmd->vdev_id = __cpu_to_le32(vdev_id);
+ cmd->param_id = __cpu_to_le32(param_id);
+ cmd->param_value = __cpu_to_le32(value);
+ ether_addr_copy(cmd->peer_macaddr.addr, mac);
+
+ ath10k_dbg(ar, ATH10K_DBG_WMI, "wmi tlv ap ps param\n");
+ return skb;
+}
+
+static struct sk_buff *
+ath10k_wmi_tlv_op_gen_scan_chan_list(struct ath10k *ar,
+ const struct wmi_scan_chan_list_arg *arg)
+{
+ struct wmi_tlv_scan_chan_list_cmd *cmd;
+ struct wmi_channel *ci;
+ struct wmi_channel_arg *ch;
+ struct wmi_tlv *tlv;
+ struct sk_buff *skb;
+ size_t chans_len, len;
+ int i;
+ void *ptr, *chans;
+
+ chans_len = arg->n_channels * (sizeof(*tlv) + sizeof(*ci));
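+ /* Each channel is wrapped in its own wmi_channel TLV inside the
+ * outer ARRAY_STRUCT TLV.
+ */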
+ len = (sizeof(*tlv) + sizeof(*cmd)) +
+ (sizeof(*tlv) + chans_len);
+
+ skb = ath10k_wmi_alloc_skb(ar, len);
+ if (!skb)
+ return ERR_PTR(-ENOMEM);
+
+ ptr = (void *)skb->data;
+ tlv = ptr;
+ tlv->tag = __cpu_to_le16(WMI_TLV_TAG_STRUCT_SCAN_CHAN_LIST_CMD);
+ tlv->len = __cpu_to_le16(sizeof(*cmd));
+ cmd = (void *)tlv->value;
+ cmd->num_scan_chans = __cpu_to_le32(arg->n_channels);
+
+ ptr += sizeof(*tlv);
+ ptr += sizeof(*cmd);
+
+ tlv = ptr;
+ tlv->tag = __cpu_to_le16(WMI_TLV_TAG_ARRAY_STRUCT);
+ tlv->len = __cpu_to_le16(chans_len);
+ chans = (void *)tlv->value;
+
+ for (i = 0; i < arg->n_channels; i++) {
+ ch = &arg->channels[i];
+
+ tlv = chans;
+ tlv->tag = __cpu_to_le16(WMI_TLV_TAG_STRUCT_CHANNEL);
+ tlv->len = __cpu_to_le16(sizeof(*ci));
+ ci = (void *)tlv->value;
+
+ ath10k_wmi_put_wmi_channel(ci, ch);
+
+ chans += sizeof(*tlv);
+ chans += sizeof(*ci);
+ }
+
+ ptr += sizeof(*tlv);
+ ptr += chans_len;
+
+ ath10k_dbg(ar, ATH10K_DBG_WMI, "wmi tlv scan chan list\n");
+ return skb;
+}
+
+static struct sk_buff *
+ath10k_wmi_tlv_op_gen_scan_prob_req_oui(struct ath10k *ar, u32 prob_req_oui)
+{
+ struct wmi_scan_prob_req_oui_cmd *cmd;
+ struct wmi_tlv *tlv;
+ struct sk_buff *skb;
+
+ skb = ath10k_wmi_alloc_skb(ar, sizeof(*tlv) + sizeof(*cmd));
+ if (!skb)
+ return ERR_PTR(-ENOMEM);
+
+ tlv = (void *)skb->data;
+ tlv->tag = __cpu_to_le16(WMI_TLV_TAG_STRUCT_SCAN_PROB_REQ_OUI_CMD);
+ tlv->len = __cpu_to_le16(sizeof(*cmd));
+ cmd = (void *)tlv->value;
+ cmd->prob_req_oui = __cpu_to_le32(prob_req_oui);
+
+ ath10k_dbg(ar, ATH10K_DBG_WMI, "wmi tlv scan prob req oui\n");
+ return skb;
+}
+
+static struct sk_buff *
+ath10k_wmi_tlv_op_gen_beacon_dma(struct ath10k *ar, u32 vdev_id,
+ const void *bcn, size_t bcn_len,
+ u32 bcn_paddr, bool dtim_zero,
+ bool deliver_cab)
+{
+ struct wmi_bcn_tx_ref_cmd *cmd;
+ struct wmi_tlv *tlv;
+ struct sk_buff *skb;
+ struct ieee80211_hdr *hdr;
+ u16 fc;
+
+ skb = ath10k_wmi_alloc_skb(ar, sizeof(*tlv) + sizeof(*cmd));
+ if (!skb)
+ return ERR_PTR(-ENOMEM);
+
+ hdr = (struct ieee80211_hdr *)bcn;
+ fc = le16_to_cpu(hdr->frame_control);
+
+ tlv = (void *)skb->data;
+ tlv->tag = __cpu_to_le16(WMI_TLV_TAG_STRUCT_BCN_SEND_FROM_HOST_CMD);
+ tlv->len = __cpu_to_le16(sizeof(*cmd));
+ cmd = (void *)tlv->value;
+ cmd->vdev_id = __cpu_to_le32(vdev_id);
+ cmd->data_len = __cpu_to_le32(bcn_len);
+ cmd->data_ptr = __cpu_to_le32(bcn_paddr);
+ cmd->msdu_id = 0;
+ cmd->frame_control = __cpu_to_le32(fc);
+ cmd->flags = 0;
+
+ if (dtim_zero)
+ cmd->flags |= __cpu_to_le32(WMI_BCN_TX_REF_FLAG_DTIM_ZERO);
+
+ if (deliver_cab)
+ cmd->flags |= __cpu_to_le32(WMI_BCN_TX_REF_FLAG_DELIVER_CAB);
+
+ ath10k_dbg(ar, ATH10K_DBG_WMI, "wmi tlv beacon dma\n");
+ return skb;
+}
+
+static struct sk_buff *
+ath10k_wmi_tlv_op_gen_pdev_set_wmm(struct ath10k *ar,
+ const struct wmi_wmm_params_all_arg *arg)
+{
+ struct wmi_tlv_pdev_set_wmm_cmd *cmd;
+ struct wmi_wmm_params *wmm;
+ struct wmi_tlv *tlv;
+ struct sk_buff *skb;
+ size_t len;
+ void *ptr;
+
+ len = (sizeof(*tlv) + sizeof(*cmd)) +
+ (4 * (sizeof(*tlv) + sizeof(*wmm)));
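+ /* One wmi_wmm_params TLV follows for each access category,
+ * in BE, BK, VI, VO order (see below).
+ */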
+ skb = ath10k_wmi_alloc_skb(ar, len);
+ if (!skb)
+ return ERR_PTR(-ENOMEM);
+
+ ptr = (void *)skb->data;
+
+ tlv = ptr;
+ tlv->tag = __cpu_to_le16(WMI_TLV_TAG_STRUCT_PDEV_SET_WMM_PARAMS_CMD);
+ tlv->len = __cpu_to_le16(sizeof(*cmd));
+ cmd = (void *)tlv->value;
+
+ /* nothing to set here */
+
+ ptr += sizeof(*tlv);
+ ptr += sizeof(*cmd);
+
+ ptr = ath10k_wmi_tlv_put_wmm(ptr, &arg->ac_be);
+ ptr = ath10k_wmi_tlv_put_wmm(ptr, &arg->ac_bk);
+ ptr = ath10k_wmi_tlv_put_wmm(ptr, &arg->ac_vi);
+ ptr = ath10k_wmi_tlv_put_wmm(ptr, &arg->ac_vo);
+
+ ath10k_dbg(ar, ATH10K_DBG_WMI, "wmi tlv pdev set wmm\n");
+ return skb;
+}
+
+static struct sk_buff *
+ath10k_wmi_tlv_op_gen_request_stats(struct ath10k *ar, u32 stats_mask)
+{
+ struct wmi_request_stats_cmd *cmd;
+ struct wmi_tlv *tlv;
+ struct sk_buff *skb;
+
+ skb = ath10k_wmi_alloc_skb(ar, sizeof(*tlv) + sizeof(*cmd));
+ if (!skb)
+ return ERR_PTR(-ENOMEM);
+
+ tlv = (void *)skb->data;
+ tlv->tag = __cpu_to_le16(WMI_TLV_TAG_STRUCT_REQUEST_STATS_CMD);
+ tlv->len = __cpu_to_le16(sizeof(*cmd));
+ cmd = (void *)tlv->value;
+ cmd->stats_id = __cpu_to_le32(stats_mask);
+
+ ath10k_dbg(ar, ATH10K_DBG_WMI, "wmi tlv request stats\n");
+ return skb;
+}
+
+static int
+ath10k_wmi_tlv_op_cleanup_mgmt_tx_send(struct ath10k *ar,
+ struct sk_buff *msdu)
+{
+ struct ath10k_skb_cb *cb = ATH10K_SKB_CB(msdu);
+ struct ath10k_wmi *wmi = &ar->wmi;
+
+ idr_remove(&wmi->mgmt_pending_tx, cb->msdu_id);
+
+ return 0;
+}
+
+static int
+ath10k_wmi_mgmt_tx_alloc_msdu_id(struct ath10k *ar, struct sk_buff *skb,
+ dma_addr_t paddr)
+{
+ struct ath10k_wmi *wmi = &ar->wmi;
+ struct ath10k_mgmt_tx_pkt_addr *pkt_addr;
+ int ret;
+
+ pkt_addr = kmalloc(sizeof(*pkt_addr), GFP_ATOMIC);
+ if (!pkt_addr)
+ return -ENOMEM;
+
+ pkt_addr->vaddr = skb;
+ pkt_addr->paddr = paddr;
+
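+ /* Allocate a unique id under data_lock; it is later passed to the
+ * firmware as the WMI desc_id for this frame.
+ */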
+ spin_lock_bh(&ar->data_lock);
+ ret = idr_alloc(&wmi->mgmt_pending_tx, pkt_addr, 0,
+ wmi->mgmt_max_num_pending_tx, GFP_ATOMIC);
+ spin_unlock_bh(&ar->data_lock);
+
+ ath10k_dbg(ar, ATH10K_DBG_WMI, "wmi mgmt tx alloc msdu_id ret %d\n", ret);
+ return ret;
+}
+
+static struct sk_buff *
+ath10k_wmi_tlv_op_gen_mgmt_tx_send(struct ath10k *ar, struct sk_buff *msdu,
+ dma_addr_t paddr)
+{
+ struct ath10k_skb_cb *cb = ATH10K_SKB_CB(msdu);
+ struct wmi_tlv_mgmt_tx_cmd *cmd;
+ struct ieee80211_hdr *hdr;
+ struct ath10k_vif *arvif;
+ u32 buf_len = msdu->len;
+ struct wmi_tlv *tlv;
+ struct sk_buff *skb;
+ int len, desc_id;
+ u32 vdev_id;
+ void *ptr;
+
+ if (!cb->vif)
+ return ERR_PTR(-EINVAL);
+
+ hdr = (struct ieee80211_hdr *)msdu->data;
+ arvif = (void *)cb->vif->drv_priv;
+ vdev_id = arvif->vdev_id;
+
+ if (WARN_ON_ONCE(!ieee80211_is_mgmt(hdr->frame_control)))
+ return ERR_PTR(-EINVAL);
+
+ len = sizeof(*cmd) + 2 * sizeof(*tlv);
+
+ if ((ieee80211_is_action(hdr->frame_control) ||
+ ieee80211_is_deauth(hdr->frame_control) ||
+ ieee80211_is_disassoc(hdr->frame_control)) &&
+ ieee80211_has_protected(hdr->frame_control)) {
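+ /* Reserve tailroom for the CCMP MIC that the firmware appends
+ * when it encrypts protected robust management frames.
+ */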
+ skb_put(msdu, IEEE80211_CCMP_MIC_LEN);
+ buf_len += IEEE80211_CCMP_MIC_LEN;
+ }
+
+ buf_len = min_t(u32, buf_len, WMI_TLV_MGMT_TX_FRAME_MAX_LEN);
+ buf_len = round_up(buf_len, 4);
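+ /* Only buf_len bytes are copied inline below; the firmware can
+ * fetch the full frame_len bytes via the DMA address in cmd->paddr.
+ */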
+
+ len += buf_len;
+ len = round_up(len, 4);
+ skb = ath10k_wmi_alloc_skb(ar, len);
+ if (!skb)
+ return ERR_PTR(-ENOMEM);
+
+ desc_id = ath10k_wmi_mgmt_tx_alloc_msdu_id(ar, msdu, paddr);
+ if (desc_id < 0)
+ goto err_free_skb;
+
+ cb->msdu_id = desc_id;
+
+ ptr = (void *)skb->data;
+ tlv = ptr;
+ tlv->tag = __cpu_to_le16(WMI_TLV_TAG_STRUCT_MGMT_TX_CMD);
+ tlv->len = __cpu_to_le16(sizeof(*cmd));
+ cmd = (void *)tlv->value;
+ cmd->vdev_id = __cpu_to_le32(vdev_id);
+ cmd->desc_id = __cpu_to_le32(desc_id);
+ cmd->chanfreq = 0;
+ cmd->buf_len = __cpu_to_le32(buf_len);
+ cmd->frame_len = __cpu_to_le32(msdu->len);
+ cmd->paddr = __cpu_to_le64(paddr);
+
+ ptr += sizeof(*tlv);
+ ptr += sizeof(*cmd);
+
+ tlv = ptr;
+ tlv->tag = __cpu_to_le16(WMI_TLV_TAG_ARRAY_BYTE);
+ tlv->len = __cpu_to_le16(buf_len);
+
+ ptr += sizeof(*tlv);
+ memcpy(ptr, msdu->data, buf_len);
+
+ return skb;
+
+err_free_skb:
+ dev_kfree_skb(skb);
+ return ERR_PTR(desc_id);
+}
+
+static struct sk_buff *
+ath10k_wmi_tlv_op_gen_force_fw_hang(struct ath10k *ar,
+ enum wmi_force_fw_hang_type type,
+ u32 delay_ms)
+{
+ struct wmi_force_fw_hang_cmd *cmd;
+ struct wmi_tlv *tlv;
+ struct sk_buff *skb;
+
+ skb = ath10k_wmi_alloc_skb(ar, sizeof(*tlv) + sizeof(*cmd));
+ if (!skb)
+ return ERR_PTR(-ENOMEM);
+
+ tlv = (void *)skb->data;
+ tlv->tag = __cpu_to_le16(WMI_TLV_TAG_STRUCT_FORCE_FW_HANG_CMD);
+ tlv->len = __cpu_to_le16(sizeof(*cmd));
+ cmd = (void *)tlv->value;
+ cmd->type = __cpu_to_le32(type);
+ cmd->delay_ms = __cpu_to_le32(delay_ms);
+
+ ath10k_dbg(ar, ATH10K_DBG_WMI, "wmi tlv force fw hang\n");
+ return skb;
+}
+
+static struct sk_buff *
+ath10k_wmi_tlv_op_gen_dbglog_cfg(struct ath10k *ar, u64 module_enable,
+ u32 log_level)
+{
+ struct wmi_tlv_dbglog_cmd *cmd;
+ struct wmi_tlv *tlv;
+ struct sk_buff *skb;
+ size_t len, bmap_len;
+ u32 value;
+ void *ptr;
+
+ if (module_enable) {
+ value = WMI_TLV_DBGLOG_LOG_LEVEL_VALUE(
+ module_enable,
+ WMI_TLV_DBGLOG_LOG_LEVEL_VERBOSE);
+ } else {
+ value = WMI_TLV_DBGLOG_LOG_LEVEL_VALUE(
+ WMI_TLV_DBGLOG_ALL_MODULES,
+ WMI_TLV_DBGLOG_LOG_LEVEL_WARN);
+ }
+
+ bmap_len = 0;
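+ /* No module bitmap is sent; an empty UINT32 array TLV is emitted
+ * in its place.
+ */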
+ len = sizeof(*tlv) + sizeof(*cmd) + sizeof(*tlv) + bmap_len;
+ skb = ath10k_wmi_alloc_skb(ar, len);
+ if (!skb)
+ return ERR_PTR(-ENOMEM);
+
+ ptr = (void *)skb->data;
+
+ tlv = ptr;
+ tlv->tag = __cpu_to_le16(WMI_TLV_TAG_STRUCT_DEBUG_LOG_CONFIG_CMD);
+ tlv->len = __cpu_to_le16(sizeof(*cmd));
+ cmd = (void *)tlv->value;
+ cmd->param = __cpu_to_le32(WMI_TLV_DBGLOG_PARAM_LOG_LEVEL);
+ cmd->value = __cpu_to_le32(value);
+
+ ptr += sizeof(*tlv);
+ ptr += sizeof(*cmd);
+
+ tlv = ptr;
+ tlv->tag = __cpu_to_le16(WMI_TLV_TAG_ARRAY_UINT32);
+ tlv->len = __cpu_to_le16(bmap_len);
+
+ /* nothing to do here */
+
+ ptr += sizeof(*tlv);
+ ptr += bmap_len;
+
+ ath10k_dbg(ar, ATH10K_DBG_WMI, "wmi tlv dbglog value 0x%08x\n", value);
+ return skb;
+}
+
+static struct sk_buff *
+ath10k_wmi_tlv_op_gen_pktlog_enable(struct ath10k *ar, u32 filter)
+{
+ struct wmi_tlv_pktlog_enable *cmd;
+ struct wmi_tlv *tlv;
+ struct sk_buff *skb;
+ void *ptr;
+ size_t len;
+
+ len = sizeof(*tlv) + sizeof(*cmd);
+ skb = ath10k_wmi_alloc_skb(ar, len);
+ if (!skb)
+ return ERR_PTR(-ENOMEM);
+
+ ptr = (void *)skb->data;
+ tlv = ptr;
+ tlv->tag = __cpu_to_le16(WMI_TLV_TAG_STRUCT_PDEV_PKTLOG_ENABLE_CMD);
+ tlv->len = __cpu_to_le16(sizeof(*cmd));
+ cmd = (void *)tlv->value;
+ cmd->filter = __cpu_to_le32(filter);
+
+ ptr += sizeof(*tlv);
+ ptr += sizeof(*cmd);
+
+ ath10k_dbg(ar, ATH10K_DBG_WMI, "wmi tlv pktlog enable filter 0x%08x\n",
+ filter);
+ return skb;
+}
+
+static struct sk_buff *
+ath10k_wmi_tlv_op_gen_pdev_get_temperature(struct ath10k *ar)
+{
+ struct wmi_tlv_pdev_get_temp_cmd *cmd;
+ struct wmi_tlv *tlv;
+ struct sk_buff *skb;
+
+ skb = ath10k_wmi_alloc_skb(ar, sizeof(*tlv) + sizeof(*cmd));
+ if (!skb)
+ return ERR_PTR(-ENOMEM);
+
+ tlv = (void *)skb->data;
+ tlv->tag = __cpu_to_le16(WMI_TLV_TAG_STRUCT_PDEV_GET_TEMPERATURE_CMD);
+ tlv->len = __cpu_to_le16(sizeof(*cmd));
+ cmd = (void *)tlv->value;
+ ath10k_dbg(ar, ATH10K_DBG_WMI, "wmi pdev get temperature tlv\n");
+ return skb;
+}
+
+static struct sk_buff *
+ath10k_wmi_tlv_op_gen_pktlog_disable(struct ath10k *ar)
+{
+ struct wmi_tlv_pktlog_disable *cmd;
+ struct wmi_tlv *tlv;
+ struct sk_buff *skb;
+ void *ptr;
+ size_t len;
+
+ len = sizeof(*tlv) + sizeof(*cmd);
+ skb = ath10k_wmi_alloc_skb(ar, len);
+ if (!skb)
+ return ERR_PTR(-ENOMEM);
+
+ ptr = (void *)skb->data;
+ tlv = ptr;
+ tlv->tag = __cpu_to_le16(WMI_TLV_TAG_STRUCT_PDEV_PKTLOG_DISABLE_CMD);
+ tlv->len = __cpu_to_le16(sizeof(*cmd));
+ cmd = (void *)tlv->value;
+
+ ptr += sizeof(*tlv);
+ ptr += sizeof(*cmd);
+
+ ath10k_dbg(ar, ATH10K_DBG_WMI, "wmi tlv pktlog disable\n");
+ return skb;
+}
+
+static struct sk_buff *
+ath10k_wmi_tlv_op_gen_bcn_tmpl(struct ath10k *ar, u32 vdev_id,
+ u32 tim_ie_offset, struct sk_buff *bcn,
+ u32 prb_caps, u32 prb_erp, void *prb_ies,
+ size_t prb_ies_len)
+{
+ struct wmi_tlv_bcn_tmpl_cmd *cmd;
+ struct wmi_tlv_bcn_prb_info *info;
+ struct wmi_tlv *tlv;
+ struct sk_buff *skb;
+ void *ptr;
+ size_t len;
+
+ if (WARN_ON(prb_ies_len > 0 && !prb_ies))
+ return ERR_PTR(-EINVAL);
+
+ len = sizeof(*tlv) + sizeof(*cmd) +
+ sizeof(*tlv) + sizeof(*info) + prb_ies_len +
+ sizeof(*tlv) + roundup(bcn->len, 4);
+ skb = ath10k_wmi_alloc_skb(ar, len);
+ if (!skb)
+ return ERR_PTR(-ENOMEM);
+
+ ptr = (void *)skb->data;
+ tlv = ptr;
+ tlv->tag = __cpu_to_le16(WMI_TLV_TAG_STRUCT_BCN_TMPL_CMD);
+ tlv->len = __cpu_to_le16(sizeof(*cmd));
+ cmd = (void *)tlv->value;
+ cmd->vdev_id = __cpu_to_le32(vdev_id);
+ cmd->tim_ie_offset = __cpu_to_le32(tim_ie_offset);
+ cmd->buf_len = __cpu_to_le32(bcn->len);
+
+ ptr += sizeof(*tlv);
+ ptr += sizeof(*cmd);
+
+ /* FIXME: prb_ies_len should probably be aligned to a 4-byte boundary,
+ * but then it would be impossible to pass the original IE length.
+ * This chunk is not used yet, so if setting the probe response template
+ * yields beaconing problems or crashes the firmware, look here.
+ */
+ tlv = ptr;
+ tlv->tag = __cpu_to_le16(WMI_TLV_TAG_STRUCT_BCN_PRB_INFO);
+ tlv->len = __cpu_to_le16(sizeof(*info) + prb_ies_len);
+ info = (void *)tlv->value;
+ info->caps = __cpu_to_le32(prb_caps);
+ info->erp = __cpu_to_le32(prb_erp);
+ memcpy(info->ies, prb_ies, prb_ies_len);
+
+ ptr += sizeof(*tlv);
+ ptr += sizeof(*info);
+ ptr += prb_ies_len;
+
+ tlv = ptr;
+ tlv->tag = __cpu_to_le16(WMI_TLV_TAG_ARRAY_BYTE);
+ tlv->len = __cpu_to_le16(roundup(bcn->len, 4));
+ memcpy(tlv->value, bcn->data, bcn->len);
+
+ /* FIXME: Adjust TSF? */
+
+ ath10k_dbg(ar, ATH10K_DBG_WMI, "wmi tlv bcn tmpl vdev_id %i\n",
+ vdev_id);
+ return skb;
+}
+
+static struct sk_buff *
+ath10k_wmi_tlv_op_gen_prb_tmpl(struct ath10k *ar, u32 vdev_id,
+ struct sk_buff *prb)
+{
+ struct wmi_tlv_prb_tmpl_cmd *cmd;
+ struct wmi_tlv_bcn_prb_info *info;
+ struct wmi_tlv *tlv;
+ struct sk_buff *skb;
+ void *ptr;
+ size_t len;
+
+ len = sizeof(*tlv) + sizeof(*cmd) +
+ sizeof(*tlv) + sizeof(*info) +
+ sizeof(*tlv) + roundup(prb->len, 4);
+ skb = ath10k_wmi_alloc_skb(ar, len);
+ if (!skb)
+ return ERR_PTR(-ENOMEM);
+
+ ptr = (void *)skb->data;
+ tlv = ptr;
+ tlv->tag = __cpu_to_le16(WMI_TLV_TAG_STRUCT_PRB_TMPL_CMD);
+ tlv->len = __cpu_to_le16(sizeof(*cmd));
+ cmd = (void *)tlv->value;
+ cmd->vdev_id = __cpu_to_le32(vdev_id);
+ cmd->buf_len = __cpu_to_le32(prb->len);
+
+ ptr += sizeof(*tlv);
+ ptr += sizeof(*cmd);
+
+ tlv = ptr;
+ tlv->tag = __cpu_to_le16(WMI_TLV_TAG_STRUCT_BCN_PRB_INFO);
+ tlv->len = __cpu_to_le16(sizeof(*info));
+ info = (void *)tlv->value;
+ info->caps = 0;
+ info->erp = 0;
+
+ ptr += sizeof(*tlv);
+ ptr += sizeof(*info);
+
+ tlv = ptr;
+ tlv->tag = __cpu_to_le16(WMI_TLV_TAG_ARRAY_BYTE);
+ tlv->len = __cpu_to_le16(roundup(prb->len, 4));
+ memcpy(tlv->value, prb->data, prb->len);
+
+ ath10k_dbg(ar, ATH10K_DBG_WMI, "wmi tlv prb tmpl vdev_id %i\n",
+ vdev_id);
+ return skb;
+}
+
+static struct sk_buff *
+ath10k_wmi_tlv_op_gen_p2p_go_bcn_ie(struct ath10k *ar, u32 vdev_id,
+ const u8 *p2p_ie)
+{
+ struct wmi_tlv_p2p_go_bcn_ie *cmd;
+ struct wmi_tlv *tlv;
+ struct sk_buff *skb;
+ void *ptr;
+ size_t len;
+
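+ /* p2p_ie[1] is the IE payload length; +2 covers the element ID and
+ * length octets, rounded up to TLV alignment.
+ */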
+ len = sizeof(*tlv) + sizeof(*cmd) +
+ sizeof(*tlv) + roundup(p2p_ie[1] + 2, 4);
+ skb = ath10k_wmi_alloc_skb(ar, len);
+ if (!skb)
+ return ERR_PTR(-ENOMEM);
+
+ ptr = (void *)skb->data;
+ tlv = ptr;
+ tlv->tag = __cpu_to_le16(WMI_TLV_TAG_STRUCT_P2P_GO_SET_BEACON_IE);
+ tlv->len = __cpu_to_le16(sizeof(*cmd));
+ cmd = (void *)tlv->value;
+ cmd->vdev_id = __cpu_to_le32(vdev_id);
+ cmd->ie_len = __cpu_to_le32(p2p_ie[1] + 2);
+
+ ptr += sizeof(*tlv);
+ ptr += sizeof(*cmd);
+
+ tlv = ptr;
+ tlv->tag = __cpu_to_le16(WMI_TLV_TAG_ARRAY_BYTE);
+ tlv->len = __cpu_to_le16(roundup(p2p_ie[1] + 2, 4));
+ memcpy(tlv->value, p2p_ie, p2p_ie[1] + 2);
+
+ ptr += sizeof(*tlv);
+ ptr += roundup(p2p_ie[1] + 2, 4);
+
+ ath10k_dbg(ar, ATH10K_DBG_WMI, "wmi tlv p2p go bcn ie for vdev %i\n",
+ vdev_id);
+ return skb;
+}
+
+static struct sk_buff *
+ath10k_wmi_tlv_op_gen_update_fw_tdls_state(struct ath10k *ar, u32 vdev_id,
+ enum wmi_tdls_state state)
+{
+ struct wmi_tdls_set_state_cmd *cmd;
+ struct wmi_tlv *tlv;
+ struct sk_buff *skb;
+ void *ptr;
+ size_t len;
+ /* Set to options from wmi_tlv_tdls_options; none of them are
+ * enabled by default.
+ */
+ u32 options = 0;
+
+ if (test_bit(WMI_SERVICE_TDLS_UAPSD_BUFFER_STA, ar->wmi.svc_map))
+ options |= WMI_TLV_TDLS_BUFFER_STA_EN;
+
+ /* WMI_TDLS_ENABLE_ACTIVE_EXTERNAL_CONTROL means the firmware will
+ * handle the TDLS link inactivity detection logic.
+ */
+ if (state == WMI_TDLS_ENABLE_ACTIVE)
+ state = WMI_TDLS_ENABLE_ACTIVE_EXTERNAL_CONTROL;
+
+ len = sizeof(*tlv) + sizeof(*cmd);
+ skb = ath10k_wmi_alloc_skb(ar, len);
+ if (!skb)
+ return ERR_PTR(-ENOMEM);
+
+ ptr = (void *)skb->data;
+ tlv = ptr;
+ tlv->tag = __cpu_to_le16(WMI_TLV_TAG_STRUCT_TDLS_SET_STATE_CMD);
+ tlv->len = __cpu_to_le16(sizeof(*cmd));
+
+ cmd = (void *)tlv->value;
+ cmd->vdev_id = __cpu_to_le32(vdev_id);
+ cmd->state = __cpu_to_le32(state);
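+ /* The remaining fields are fixed driver defaults for TDLS link
+ * monitoring and peer U-APSD.
+ */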
+ cmd->notification_interval_ms = __cpu_to_le32(5000);
+ cmd->tx_discovery_threshold = __cpu_to_le32(100);
+ cmd->tx_teardown_threshold = __cpu_to_le32(5);
+ cmd->rssi_teardown_threshold = __cpu_to_le32(-75);
+ cmd->rssi_delta = __cpu_to_le32(-20);
+ cmd->tdls_options = __cpu_to_le32(options);
+ cmd->tdls_peer_traffic_ind_window = __cpu_to_le32(2);
+ cmd->tdls_peer_traffic_response_timeout_ms = __cpu_to_le32(5000);
+ cmd->tdls_puapsd_mask = __cpu_to_le32(0xf);
+ cmd->tdls_puapsd_inactivity_time_ms = __cpu_to_le32(0);
+ cmd->tdls_puapsd_rx_frame_threshold = __cpu_to_le32(10);
+
+ ptr += sizeof(*tlv);
+ ptr += sizeof(*cmd);
+
+ ath10k_dbg(ar, ATH10K_DBG_WMI, "wmi tlv update fw tdls state %d for vdev %i\n",
+ state, vdev_id);
+ return skb;
+}
+
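+/* Map WMM IE U-APSD queue flags and the maximum service period (SP)
+ * onto the WMI TLV peer QoS bitmap.
+ */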
+static u32 ath10k_wmi_tlv_prepare_peer_qos(u8 uapsd_queues, u8 sp)
+{
+ u32 peer_qos = 0;
+
+ if (uapsd_queues & IEEE80211_WMM_IE_STA_QOSINFO_AC_VO)
+ peer_qos |= WMI_TLV_TDLS_PEER_QOS_AC_VO;
+ if (uapsd_queues & IEEE80211_WMM_IE_STA_QOSINFO_AC_VI)
+ peer_qos |= WMI_TLV_TDLS_PEER_QOS_AC_VI;
+ if (uapsd_queues & IEEE80211_WMM_IE_STA_QOSINFO_AC_BK)
+ peer_qos |= WMI_TLV_TDLS_PEER_QOS_AC_BK;
+ if (uapsd_queues & IEEE80211_WMM_IE_STA_QOSINFO_AC_BE)
+ peer_qos |= WMI_TLV_TDLS_PEER_QOS_AC_BE;
+
+ peer_qos |= SM(sp, WMI_TLV_TDLS_PEER_SP);
+
+ return peer_qos;
+}
+
+static struct sk_buff *
+ath10k_wmi_tlv_op_gen_tdls_peer_update(struct ath10k *ar,
+ const struct wmi_tdls_peer_update_cmd_arg *arg,
+ const struct wmi_tdls_peer_capab_arg *cap,
+ const struct wmi_channel_arg *chan_arg)
+{
+ struct wmi_tdls_peer_update_cmd *cmd;
+ struct wmi_tdls_peer_capab *peer_cap;
+ struct wmi_channel *chan;
+ struct wmi_tlv *tlv;
+ struct sk_buff *skb;
+ u32 peer_qos;
+ void *ptr;
+ int len;
+ int i;
+
+ len = sizeof(*tlv) + sizeof(*cmd) +
+ sizeof(*tlv) + sizeof(*peer_cap) +
+ sizeof(*tlv) + cap->peer_chan_len * sizeof(*chan);
+
+ skb = ath10k_wmi_alloc_skb(ar, len);
+ if (!skb)
+ return ERR_PTR(-ENOMEM);
+
+ ptr = (void *)skb->data;
+ tlv = ptr;
+ tlv->tag = __cpu_to_le16(WMI_TLV_TAG_STRUCT_TDLS_PEER_UPDATE_CMD);
+ tlv->len = __cpu_to_le16(sizeof(*cmd));
+
+ cmd = (void *)tlv->value;
+ cmd->vdev_id = __cpu_to_le32(arg->vdev_id);
+ ether_addr_copy(cmd->peer_macaddr.addr, arg->addr);
+ cmd->peer_state = __cpu_to_le32(arg->peer_state);
+
+ ptr += sizeof(*tlv);
+ ptr += sizeof(*cmd);
+
+ tlv = ptr;
+ tlv->tag = __cpu_to_le16(WMI_TLV_TAG_STRUCT_TDLS_PEER_CAPABILITIES);
+ tlv->len = __cpu_to_le16(sizeof(*peer_cap));
+ peer_cap = (void *)tlv->value;
+ peer_qos = ath10k_wmi_tlv_prepare_peer_qos(cap->peer_uapsd_queues,
+ cap->peer_max_sp);
+ peer_cap->peer_qos = __cpu_to_le32(peer_qos);
+ peer_cap->buff_sta_support = __cpu_to_le32(cap->buff_sta_support);
+ peer_cap->off_chan_support = __cpu_to_le32(cap->off_chan_support);
+ peer_cap->peer_curr_operclass = __cpu_to_le32(cap->peer_curr_operclass);
+ peer_cap->self_curr_operclass = __cpu_to_le32(cap->self_curr_operclass);
+ peer_cap->peer_chan_len = __cpu_to_le32(cap->peer_chan_len);
+ peer_cap->peer_operclass_len = __cpu_to_le32(cap->peer_operclass_len);
+
+ for (i = 0; i < WMI_TDLS_MAX_SUPP_OPER_CLASSES; i++)
+ peer_cap->peer_operclass[i] = cap->peer_operclass[i];
+
+ peer_cap->is_peer_responder = __cpu_to_le32(cap->is_peer_responder);
+ peer_cap->pref_offchan_num = __cpu_to_le32(cap->pref_offchan_num);
+ peer_cap->pref_offchan_bw = __cpu_to_le32(cap->pref_offchan_bw);
+
+ ptr += sizeof(*tlv);
+ ptr += sizeof(*peer_cap);
+
+ tlv = ptr;
+ tlv->tag = __cpu_to_le16(WMI_TLV_TAG_ARRAY_STRUCT);
+ tlv->len = __cpu_to_le16(cap->peer_chan_len * sizeof(*chan));
+
+ ptr += sizeof(*tlv);
+
+ for (i = 0; i < cap->peer_chan_len; i++) {
+ tlv = ptr;
+ tlv->tag = __cpu_to_le16(WMI_TLV_TAG_STRUCT_CHANNEL);
+ tlv->len = __cpu_to_le16(sizeof(*chan));
+ chan = (void *)tlv->value;
+ ath10k_wmi_put_wmi_channel(chan, &chan_arg[i]);
+
+ ptr += sizeof(*tlv);
+ ptr += sizeof(*chan);
+ }
+
+ ath10k_dbg(ar, ATH10K_DBG_WMI,
+ "wmi tlv tdls peer update vdev %i state %d n_chans %u\n",
+ arg->vdev_id, arg->peer_state, cap->peer_chan_len);
+ return skb;
+}
+
+static struct sk_buff *
+ath10k_wmi_tlv_op_gen_pdev_set_quiet_mode(struct ath10k *ar, u32 period,
+ u32 duration, u32 next_offset,
+ u32 enabled)
+{
+ struct wmi_tlv_set_quiet_cmd *cmd;
+ struct wmi_tlv *tlv;
+ struct sk_buff *skb;
+
+ skb = ath10k_wmi_alloc_skb(ar, sizeof(*tlv) + sizeof(*cmd));
+ if (!skb)
+ return ERR_PTR(-ENOMEM);
+
+ tlv = (void *)skb->data;
+ tlv->tag = __cpu_to_le16(WMI_TLV_TAG_STRUCT_PDEV_SET_QUIET_CMD);
+ tlv->len = __cpu_to_le16(sizeof(*cmd));
+ cmd = (void *)tlv->value;
+
+ /* vdev_id is not in use, set to 0 */
+ cmd->vdev_id = __cpu_to_le32(0);
+ cmd->period = __cpu_to_le32(period);
+ cmd->duration = __cpu_to_le32(duration);
+ cmd->next_start = __cpu_to_le32(next_offset);
+ cmd->enabled = __cpu_to_le32(enabled);
+
+ ath10k_dbg(ar, ATH10K_DBG_WMI,
+ "wmi tlv quiet param: period %u duration %u enabled %d\n",
+ period, duration, enabled);
+ return skb;
+}
+
+static struct sk_buff *
+ath10k_wmi_tlv_op_gen_wow_enable(struct ath10k *ar)
+{
+ struct wmi_tlv_wow_enable_cmd *cmd;
+ struct wmi_tlv *tlv;
+ struct sk_buff *skb;
+ size_t len;
+
+ len = sizeof(*tlv) + sizeof(*cmd);
+ skb = ath10k_wmi_alloc_skb(ar, len);
+ if (!skb)
+ return ERR_PTR(-ENOMEM);
+
+ tlv = (struct wmi_tlv *)skb->data;
+ tlv->tag = __cpu_to_le16(WMI_TLV_TAG_STRUCT_WOW_ENABLE_CMD);
+ tlv->len = __cpu_to_le16(sizeof(*cmd));
+ cmd = (void *)tlv->value;
+
+ cmd->enable = __cpu_to_le32(1);
+
+ ath10k_dbg(ar, ATH10K_DBG_WMI, "wmi tlv wow enable\n");
+ return skb;
+}
+
+static struct sk_buff *
+ath10k_wmi_tlv_op_gen_wow_add_wakeup_event(struct ath10k *ar,
+ u32 vdev_id,
+ enum wmi_wow_wakeup_event event,
+ u32 enable)
+{
+ struct wmi_tlv_wow_add_del_event_cmd *cmd;
+ struct wmi_tlv *tlv;
+ struct sk_buff *skb;
+ size_t len;
+
+ len = sizeof(*tlv) + sizeof(*cmd);
+ skb = ath10k_wmi_alloc_skb(ar, len);
+ if (!skb)
+ return ERR_PTR(-ENOMEM);
+
+ tlv = (struct wmi_tlv *)skb->data;
+ tlv->tag = __cpu_to_le16(WMI_TLV_TAG_STRUCT_WOW_ADD_DEL_EVT_CMD);
+ tlv->len = __cpu_to_le16(sizeof(*cmd));
+ cmd = (void *)tlv->value;
+
+ cmd->vdev_id = __cpu_to_le32(vdev_id);
+ cmd->is_add = __cpu_to_le32(enable);
+ cmd->event_bitmap = __cpu_to_le32(1 << event);
+
+ ath10k_dbg(ar, ATH10K_DBG_WMI, "wmi tlv wow add wakeup event %s enable %d vdev_id %d\n",
+ wow_wakeup_event(event), enable, vdev_id);
+ return skb;
+}
+
+static struct sk_buff *
+ath10k_wmi_tlv_gen_wow_host_wakeup_ind(struct ath10k *ar)
+{
+ struct wmi_tlv_wow_host_wakeup_ind *cmd;
+ struct wmi_tlv *tlv;
+ struct sk_buff *skb;
+ size_t len;
+
+ len = sizeof(*tlv) + sizeof(*cmd);
+ skb = ath10k_wmi_alloc_skb(ar, len);
+ if (!skb)
+ return ERR_PTR(-ENOMEM);
+
+ tlv = (struct wmi_tlv *)skb->data;
+ tlv->tag = __cpu_to_le16(WMI_TLV_TAG_STRUCT_WOW_HOSTWAKEUP_FROM_SLEEP_CMD);
+ tlv->len = __cpu_to_le16(sizeof(*cmd));
+ cmd = (void *)tlv->value;
+
+ ath10k_dbg(ar, ATH10K_DBG_WMI, "wmi tlv wow host wakeup ind\n");
+ return skb;
+}
+
+static struct sk_buff *
+ath10k_wmi_tlv_op_gen_wow_add_pattern(struct ath10k *ar, u32 vdev_id,
+ u32 pattern_id, const u8 *pattern,
+ const u8 *bitmask, int pattern_len,
+ int pattern_offset)
+{
+ struct wmi_tlv_wow_add_pattern_cmd *cmd;
+ struct wmi_tlv_wow_bitmap_pattern *bitmap;
+ struct wmi_tlv *tlv;
+ struct sk_buff *skb;
+ void *ptr;
+ size_t len;
+
+ len = sizeof(*tlv) + sizeof(*cmd) +
+ sizeof(*tlv) + /* array struct */
+ sizeof(*tlv) + sizeof(*bitmap) + /* bitmap */
+ sizeof(*tlv) + /* empty ipv4 sync */
+ sizeof(*tlv) + /* empty ipv6 sync */
+ sizeof(*tlv) + /* empty magic */
+ sizeof(*tlv) + /* empty info timeout */
+ sizeof(*tlv) + sizeof(u32); /* ratelimit interval */
+
+ skb = ath10k_wmi_alloc_skb(ar, len);
+ if (!skb)
+ return ERR_PTR(-ENOMEM);
+
+ /* cmd */
+ ptr = (void *)skb->data;
+ tlv = ptr;
+ tlv->tag = __cpu_to_le16(WMI_TLV_TAG_STRUCT_WOW_ADD_PATTERN_CMD);
+ tlv->len = __cpu_to_le16(sizeof(*cmd));
+ cmd = (void *)tlv->value;
+
+ cmd->vdev_id = __cpu_to_le32(vdev_id);
+ cmd->pattern_id = __cpu_to_le32(pattern_id);
+ cmd->pattern_type = __cpu_to_le32(WOW_BITMAP_PATTERN);
+
+ ptr += sizeof(*tlv);
+ ptr += sizeof(*cmd);
+
+ /* bitmap */
+ tlv = ptr;
+ tlv->tag = __cpu_to_le16(WMI_TLV_TAG_ARRAY_STRUCT);
+ tlv->len = __cpu_to_le16(sizeof(*tlv) + sizeof(*bitmap));
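+ /* The array TLV length covers the nested pattern TLV including
+ * its header.
+ */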
+
+ ptr += sizeof(*tlv);
+
+ tlv = ptr;
+ tlv->tag = __cpu_to_le16(WMI_TLV_TAG_STRUCT_WOW_BITMAP_PATTERN_T);
+ tlv->len = __cpu_to_le16(sizeof(*bitmap));
+ bitmap = (void *)tlv->value;
+
+ memcpy(bitmap->patternbuf, pattern, pattern_len);
+ memcpy(bitmap->bitmaskbuf, bitmask, pattern_len);
+ bitmap->pattern_offset = __cpu_to_le32(pattern_offset);
+ bitmap->pattern_len = __cpu_to_le32(pattern_len);
+ bitmap->bitmask_len = __cpu_to_le32(pattern_len);
+ bitmap->pattern_id = __cpu_to_le32(pattern_id);
+
+ ptr += sizeof(*tlv);
+ ptr += sizeof(*bitmap);
+
+ /* ipv4 sync */
+ tlv = ptr;
+ tlv->tag = __cpu_to_le16(WMI_TLV_TAG_ARRAY_STRUCT);
+ tlv->len = __cpu_to_le16(0);
+
+ ptr += sizeof(*tlv);
+
+ /* ipv6 sync */
+ tlv = ptr;
+ tlv->tag = __cpu_to_le16(WMI_TLV_TAG_ARRAY_STRUCT);
+ tlv->len = __cpu_to_le16(0);
+
+ ptr += sizeof(*tlv);
+
+ /* magic */
+ tlv = ptr;
+ tlv->tag = __cpu_to_le16(WMI_TLV_TAG_ARRAY_STRUCT);
+ tlv->len = __cpu_to_le16(0);
+
+ ptr += sizeof(*tlv);
+
+ /* pattern info timeout */
+ tlv = ptr;
+ tlv->tag = __cpu_to_le16(WMI_TLV_TAG_ARRAY_UINT32);
+ tlv->len = __cpu_to_le16(0);
+
+ ptr += sizeof(*tlv);
+
+ /* ratelimit interval */
+ tlv = ptr;
+ tlv->tag = __cpu_to_le16(WMI_TLV_TAG_ARRAY_UINT32);
+ tlv->len = __cpu_to_le16(sizeof(u32));
+
+ ath10k_dbg(ar, ATH10K_DBG_WMI, "wmi tlv wow add pattern vdev_id %d pattern_id %d, pattern_offset %d\n",
+ vdev_id, pattern_id, pattern_offset);
+ return skb;
+}
+
+static struct sk_buff *
+ath10k_wmi_tlv_op_gen_wow_del_pattern(struct ath10k *ar, u32 vdev_id,
+ u32 pattern_id)
+{
+ struct wmi_tlv_wow_del_pattern_cmd *cmd;
+ struct wmi_tlv *tlv;
+ struct sk_buff *skb;
+ size_t len;
+
+ len = sizeof(*tlv) + sizeof(*cmd);
+ skb = ath10k_wmi_alloc_skb(ar, len);
+ if (!skb)
+ return ERR_PTR(-ENOMEM);
+
+ tlv = (struct wmi_tlv *)skb->data;
+ tlv->tag = __cpu_to_le16(WMI_TLV_TAG_STRUCT_WOW_DEL_PATTERN_CMD);
+ tlv->len = __cpu_to_le16(sizeof(*cmd));
+ cmd = (void *)tlv->value;
+
+ cmd->vdev_id = __cpu_to_le32(vdev_id);
+ cmd->pattern_id = __cpu_to_le32(pattern_id);
+ cmd->pattern_type = __cpu_to_le32(WOW_BITMAP_PATTERN);
+
+ ath10k_dbg(ar, ATH10K_DBG_WMI, "wmi tlv wow del pattern vdev_id %d pattern_id %d\n",
+ vdev_id, pattern_id);
+ return skb;
+}
+
+static struct sk_buff *
+ath10k_wmi_tlv_op_gen_adaptive_qcs(struct ath10k *ar, bool enable)
+{
+ struct wmi_tlv_adaptive_qcs *cmd;
+ struct wmi_tlv *tlv;
+ struct sk_buff *skb;
+ void *ptr;
+ size_t len;
+
+ len = sizeof(*tlv) + sizeof(*cmd);
+ skb = ath10k_wmi_alloc_skb(ar, len);
+ if (!skb)
+ return ERR_PTR(-ENOMEM);
+
+ ptr = (void *)skb->data;
+ tlv = ptr;
+ tlv->tag = __cpu_to_le16(WMI_TLV_TAG_STRUCT_RESMGR_ADAPTIVE_OCS_CMD);
+ tlv->len = __cpu_to_le16(sizeof(*cmd));
+ cmd = (void *)tlv->value;
+ cmd->enable = __cpu_to_le32(enable ? 1 : 0);
+
+ ptr += sizeof(*tlv);
+ ptr += sizeof(*cmd);
+
+ ath10k_dbg(ar, ATH10K_DBG_WMI, "wmi tlv adaptive qcs %d\n", enable);
+ return skb;
+}
+
+static struct sk_buff *
+ath10k_wmi_tlv_op_gen_echo(struct ath10k *ar, u32 value)
+{
+ struct wmi_echo_cmd *cmd;
+ struct wmi_tlv *tlv;
+ struct sk_buff *skb;
+ void *ptr;
+ size_t len;
+
+ len = sizeof(*tlv) + sizeof(*cmd);
+ skb = ath10k_wmi_alloc_skb(ar, len);
+ if (!skb)
+ return ERR_PTR(-ENOMEM);
+
+ ptr = (void *)skb->data;
+ tlv = ptr;
+ tlv->tag = __cpu_to_le16(WMI_TLV_TAG_STRUCT_ECHO_CMD);
+ tlv->len = __cpu_to_le16(sizeof(*cmd));
+ cmd = (void *)tlv->value;
+ cmd->value = __cpu_to_le32(value);
+
+ ptr += sizeof(*tlv);
+ ptr += sizeof(*cmd);
+
+ ath10k_dbg(ar, ATH10K_DBG_WMI, "wmi tlv echo value 0x%08x\n", value);
+ return skb;
+}
+
+static struct sk_buff *
+ath10k_wmi_tlv_op_gen_vdev_spectral_conf(struct ath10k *ar,
+ const struct wmi_vdev_spectral_conf_arg *arg)
+{
+ struct wmi_vdev_spectral_conf_cmd *cmd;
+ struct sk_buff *skb;
+ struct wmi_tlv *tlv;
+ void *ptr;
+ size_t len;
+
+ len = sizeof(*tlv) + sizeof(*cmd);
+ skb = ath10k_wmi_alloc_skb(ar, len);
+ if (!skb)
+ return ERR_PTR(-ENOMEM);
+
+ ptr = (void *)skb->data;
+ tlv = ptr;
+ tlv->tag = __cpu_to_le16(WMI_TLV_TAG_STRUCT_VDEV_SPECTRAL_CONFIGURE_CMD);
+ tlv->len = __cpu_to_le16(sizeof(*cmd));
+ cmd = (void *)tlv->value;
+ cmd->vdev_id = __cpu_to_le32(arg->vdev_id);
+ cmd->scan_count = __cpu_to_le32(arg->scan_count);
+ cmd->scan_period = __cpu_to_le32(arg->scan_period);
+ cmd->scan_priority = __cpu_to_le32(arg->scan_priority);
+ cmd->scan_fft_size = __cpu_to_le32(arg->scan_fft_size);
+ cmd->scan_gc_ena = __cpu_to_le32(arg->scan_gc_ena);
+ cmd->scan_restart_ena = __cpu_to_le32(arg->scan_restart_ena);
+ cmd->scan_noise_floor_ref = __cpu_to_le32(arg->scan_noise_floor_ref);
+ cmd->scan_init_delay = __cpu_to_le32(arg->scan_init_delay);
+ cmd->scan_nb_tone_thr = __cpu_to_le32(arg->scan_nb_tone_thr);
+ cmd->scan_str_bin_thr = __cpu_to_le32(arg->scan_str_bin_thr);
+ cmd->scan_wb_rpt_mode = __cpu_to_le32(arg->scan_wb_rpt_mode);
+ cmd->scan_rssi_rpt_mode = __cpu_to_le32(arg->scan_rssi_rpt_mode);
+ cmd->scan_rssi_thr = __cpu_to_le32(arg->scan_rssi_thr);
+ cmd->scan_pwr_format = __cpu_to_le32(arg->scan_pwr_format);
+ cmd->scan_rpt_mode = __cpu_to_le32(arg->scan_rpt_mode);
+ cmd->scan_bin_scale = __cpu_to_le32(arg->scan_bin_scale);
+ cmd->scan_dbm_adj = __cpu_to_le32(arg->scan_dbm_adj);
+ cmd->scan_chn_mask = __cpu_to_le32(arg->scan_chn_mask);
+
+ return skb;
+}
+
+static struct sk_buff *
+ath10k_wmi_tlv_op_gen_vdev_spectral_enable(struct ath10k *ar, u32 vdev_id,
+ u32 trigger, u32 enable)
+{
+ struct wmi_vdev_spectral_enable_cmd *cmd;
+ struct sk_buff *skb;
+ struct wmi_tlv *tlv;
+ void *ptr;
+ size_t len;
+
+ len = sizeof(*tlv) + sizeof(*cmd);
+ skb = ath10k_wmi_alloc_skb(ar, len);
+ if (!skb)
+ return ERR_PTR(-ENOMEM);
+
+ ptr = (void *)skb->data;
+ tlv = ptr;
+ tlv->tag = __cpu_to_le16(WMI_TLV_TAG_STRUCT_VDEV_SPECTRAL_ENABLE_CMD);
+ tlv->len = __cpu_to_le16(sizeof(*cmd));
+ cmd = (void *)tlv->value;
+ cmd->vdev_id = __cpu_to_le32(vdev_id);
+ cmd->trigger_cmd = __cpu_to_le32(trigger);
+ cmd->enable_cmd = __cpu_to_le32(enable);
+
+ return skb;
+}
+
+/****************/
+/* TLV mappings */
+/****************/
+
+static struct wmi_cmd_map wmi_tlv_cmd_map = {
+ .init_cmdid = WMI_TLV_INIT_CMDID,
+ .start_scan_cmdid = WMI_TLV_START_SCAN_CMDID,
+ .stop_scan_cmdid = WMI_TLV_STOP_SCAN_CMDID,
+ .scan_chan_list_cmdid = WMI_TLV_SCAN_CHAN_LIST_CMDID,
+ .scan_sch_prio_tbl_cmdid = WMI_TLV_SCAN_SCH_PRIO_TBL_CMDID,
+ .scan_prob_req_oui_cmdid = WMI_TLV_SCAN_PROB_REQ_OUI_CMDID,
+ .pdev_set_regdomain_cmdid = WMI_TLV_PDEV_SET_REGDOMAIN_CMDID,
+ .pdev_set_channel_cmdid = WMI_TLV_PDEV_SET_CHANNEL_CMDID,
+ .pdev_set_param_cmdid = WMI_TLV_PDEV_SET_PARAM_CMDID,
+ .pdev_pktlog_enable_cmdid = WMI_TLV_PDEV_PKTLOG_ENABLE_CMDID,
+ .pdev_pktlog_disable_cmdid = WMI_TLV_PDEV_PKTLOG_DISABLE_CMDID,
+ .pdev_set_wmm_params_cmdid = WMI_TLV_PDEV_SET_WMM_PARAMS_CMDID,
+ .pdev_set_ht_cap_ie_cmdid = WMI_TLV_PDEV_SET_HT_CAP_IE_CMDID,
+ .pdev_set_vht_cap_ie_cmdid = WMI_TLV_PDEV_SET_VHT_CAP_IE_CMDID,
+ .pdev_set_dscp_tid_map_cmdid = WMI_TLV_PDEV_SET_DSCP_TID_MAP_CMDID,
+ .pdev_set_quiet_mode_cmdid = WMI_TLV_PDEV_SET_QUIET_MODE_CMDID,
+ .pdev_green_ap_ps_enable_cmdid = WMI_TLV_PDEV_GREEN_AP_PS_ENABLE_CMDID,
+ .pdev_get_tpc_config_cmdid = WMI_TLV_PDEV_GET_TPC_CONFIG_CMDID,
+ .pdev_set_base_macaddr_cmdid = WMI_TLV_PDEV_SET_BASE_MACADDR_CMDID,
+ .vdev_create_cmdid = WMI_TLV_VDEV_CREATE_CMDID,
+ .vdev_delete_cmdid = WMI_TLV_VDEV_DELETE_CMDID,
+ .vdev_start_request_cmdid = WMI_TLV_VDEV_START_REQUEST_CMDID,
+ .vdev_restart_request_cmdid = WMI_TLV_VDEV_RESTART_REQUEST_CMDID,
+ .vdev_up_cmdid = WMI_TLV_VDEV_UP_CMDID,
+ .vdev_stop_cmdid = WMI_TLV_VDEV_STOP_CMDID,
+ .vdev_down_cmdid = WMI_TLV_VDEV_DOWN_CMDID,
+ .vdev_set_param_cmdid = WMI_TLV_VDEV_SET_PARAM_CMDID,
+ .vdev_install_key_cmdid = WMI_TLV_VDEV_INSTALL_KEY_CMDID,
+ .peer_create_cmdid = WMI_TLV_PEER_CREATE_CMDID,
+ .peer_delete_cmdid = WMI_TLV_PEER_DELETE_CMDID,
+ .peer_flush_tids_cmdid = WMI_TLV_PEER_FLUSH_TIDS_CMDID,
+ .peer_set_param_cmdid = WMI_TLV_PEER_SET_PARAM_CMDID,
+ .peer_assoc_cmdid = WMI_TLV_PEER_ASSOC_CMDID,
+ .peer_add_wds_entry_cmdid = WMI_TLV_PEER_ADD_WDS_ENTRY_CMDID,
+ .peer_remove_wds_entry_cmdid = WMI_TLV_PEER_REMOVE_WDS_ENTRY_CMDID,
+ .peer_mcast_group_cmdid = WMI_TLV_PEER_MCAST_GROUP_CMDID,
+ .bcn_tx_cmdid = WMI_TLV_BCN_TX_CMDID,
+ .pdev_send_bcn_cmdid = WMI_TLV_PDEV_SEND_BCN_CMDID,
+ .bcn_tmpl_cmdid = WMI_TLV_BCN_TMPL_CMDID,
+ .bcn_filter_rx_cmdid = WMI_TLV_BCN_FILTER_RX_CMDID,
+ .prb_req_filter_rx_cmdid = WMI_TLV_PRB_REQ_FILTER_RX_CMDID,
+ .mgmt_tx_cmdid = WMI_TLV_MGMT_TX_CMDID,
+ .mgmt_tx_send_cmdid = WMI_TLV_MGMT_TX_SEND_CMD,
+ .prb_tmpl_cmdid = WMI_TLV_PRB_TMPL_CMDID,
+ .addba_clear_resp_cmdid = WMI_TLV_ADDBA_CLEAR_RESP_CMDID,
+ .addba_send_cmdid = WMI_TLV_ADDBA_SEND_CMDID,
+ .addba_status_cmdid = WMI_TLV_ADDBA_STATUS_CMDID,
+ .delba_send_cmdid = WMI_TLV_DELBA_SEND_CMDID,
+ .addba_set_resp_cmdid = WMI_TLV_ADDBA_SET_RESP_CMDID,
+ .send_singleamsdu_cmdid = WMI_TLV_SEND_SINGLEAMSDU_CMDID,
+ .sta_powersave_mode_cmdid = WMI_TLV_STA_POWERSAVE_MODE_CMDID,
+ .sta_powersave_param_cmdid = WMI_TLV_STA_POWERSAVE_PARAM_CMDID,
+ .sta_mimo_ps_mode_cmdid = WMI_TLV_STA_MIMO_PS_MODE_CMDID,
+ .pdev_dfs_enable_cmdid = WMI_TLV_PDEV_DFS_ENABLE_CMDID,
+ .pdev_dfs_disable_cmdid = WMI_TLV_PDEV_DFS_DISABLE_CMDID,
+ .roam_scan_mode = WMI_TLV_ROAM_SCAN_MODE,
+ .roam_scan_rssi_threshold = WMI_TLV_ROAM_SCAN_RSSI_THRESHOLD,
+ .roam_scan_period = WMI_TLV_ROAM_SCAN_PERIOD,
+ .roam_scan_rssi_change_threshold =
+ WMI_TLV_ROAM_SCAN_RSSI_CHANGE_THRESHOLD,
+ .roam_ap_profile = WMI_TLV_ROAM_AP_PROFILE,
+ .ofl_scan_add_ap_profile = WMI_TLV_ROAM_AP_PROFILE,
+ .ofl_scan_remove_ap_profile = WMI_TLV_OFL_SCAN_REMOVE_AP_PROFILE,
+ .ofl_scan_period = WMI_TLV_OFL_SCAN_PERIOD,
+ .p2p_dev_set_device_info = WMI_TLV_P2P_DEV_SET_DEVICE_INFO,
+ .p2p_dev_set_discoverability = WMI_TLV_P2P_DEV_SET_DISCOVERABILITY,
+ .p2p_go_set_beacon_ie = WMI_TLV_P2P_GO_SET_BEACON_IE,
+ .p2p_go_set_probe_resp_ie = WMI_TLV_P2P_GO_SET_PROBE_RESP_IE,
+ .p2p_set_vendor_ie_data_cmdid = WMI_TLV_P2P_SET_VENDOR_IE_DATA_CMDID,
+ .ap_ps_peer_param_cmdid = WMI_TLV_AP_PS_PEER_PARAM_CMDID,
+ .ap_ps_peer_uapsd_coex_cmdid = WMI_TLV_AP_PS_PEER_UAPSD_COEX_CMDID,
+ .peer_rate_retry_sched_cmdid = WMI_TLV_PEER_RATE_RETRY_SCHED_CMDID,
+ .wlan_profile_trigger_cmdid = WMI_TLV_WLAN_PROFILE_TRIGGER_CMDID,
+ .wlan_profile_set_hist_intvl_cmdid =
+ WMI_TLV_WLAN_PROFILE_SET_HIST_INTVL_CMDID,
+ .wlan_profile_get_profile_data_cmdid =
+ WMI_TLV_WLAN_PROFILE_GET_PROFILE_DATA_CMDID,
+ .wlan_profile_enable_profile_id_cmdid =
+ WMI_TLV_WLAN_PROFILE_ENABLE_PROFILE_ID_CMDID,
+ .wlan_profile_list_profile_id_cmdid =
+ WMI_TLV_WLAN_PROFILE_LIST_PROFILE_ID_CMDID,
+ .pdev_suspend_cmdid = WMI_TLV_PDEV_SUSPEND_CMDID,
+ .pdev_resume_cmdid = WMI_TLV_PDEV_RESUME_CMDID,
+ .add_bcn_filter_cmdid = WMI_TLV_ADD_BCN_FILTER_CMDID,
+ .rmv_bcn_filter_cmdid = WMI_TLV_RMV_BCN_FILTER_CMDID,
+ .wow_add_wake_pattern_cmdid = WMI_TLV_WOW_ADD_WAKE_PATTERN_CMDID,
+ .wow_del_wake_pattern_cmdid = WMI_TLV_WOW_DEL_WAKE_PATTERN_CMDID,
+ .wow_enable_disable_wake_event_cmdid =
+ WMI_TLV_WOW_ENABLE_DISABLE_WAKE_EVENT_CMDID,
+ .wow_enable_cmdid = WMI_TLV_WOW_ENABLE_CMDID,
+ .wow_hostwakeup_from_sleep_cmdid =
+ WMI_TLV_WOW_HOSTWAKEUP_FROM_SLEEP_CMDID,
+ .rtt_measreq_cmdid = WMI_TLV_RTT_MEASREQ_CMDID,
+ .rtt_tsf_cmdid = WMI_TLV_RTT_TSF_CMDID,
+ .vdev_spectral_scan_configure_cmdid = WMI_TLV_SPECTRAL_SCAN_CONF_CMDID,
+ .vdev_spectral_scan_enable_cmdid = WMI_TLV_SPECTRAL_SCAN_ENABLE_CMDID,
+ .request_stats_cmdid = WMI_TLV_REQUEST_STATS_CMDID,
+ .set_arp_ns_offload_cmdid = WMI_TLV_SET_ARP_NS_OFFLOAD_CMDID,
+ .network_list_offload_config_cmdid =
+ WMI_TLV_NETWORK_LIST_OFFLOAD_CONFIG_CMDID,
+ .gtk_offload_cmdid = WMI_TLV_GTK_OFFLOAD_CMDID,
+ .csa_offload_enable_cmdid = WMI_TLV_CSA_OFFLOAD_ENABLE_CMDID,
+ .csa_offload_chanswitch_cmdid = WMI_TLV_CSA_OFFLOAD_CHANSWITCH_CMDID,
+ .chatter_set_mode_cmdid = WMI_TLV_CHATTER_SET_MODE_CMDID,
+ .peer_tid_addba_cmdid = WMI_TLV_PEER_TID_ADDBA_CMDID,
+ .peer_tid_delba_cmdid = WMI_TLV_PEER_TID_DELBA_CMDID,
+ .sta_dtim_ps_method_cmdid = WMI_TLV_STA_DTIM_PS_METHOD_CMDID,
+ .sta_uapsd_auto_trig_cmdid = WMI_TLV_STA_UAPSD_AUTO_TRIG_CMDID,
+ .sta_keepalive_cmd = WMI_TLV_STA_KEEPALIVE_CMDID,
+ .echo_cmdid = WMI_TLV_ECHO_CMDID,
+ .pdev_utf_cmdid = WMI_TLV_PDEV_UTF_CMDID,
+ .dbglog_cfg_cmdid = WMI_TLV_DBGLOG_CFG_CMDID,
+ .pdev_qvit_cmdid = WMI_TLV_PDEV_QVIT_CMDID,
+ .pdev_ftm_intg_cmdid = WMI_TLV_PDEV_FTM_INTG_CMDID,
+ .vdev_set_keepalive_cmdid = WMI_TLV_VDEV_SET_KEEPALIVE_CMDID,
+ .vdev_get_keepalive_cmdid = WMI_TLV_VDEV_GET_KEEPALIVE_CMDID,
+ .force_fw_hang_cmdid = WMI_TLV_FORCE_FW_HANG_CMDID,
+ .gpio_config_cmdid = WMI_TLV_GPIO_CONFIG_CMDID,
+ .gpio_output_cmdid = WMI_TLV_GPIO_OUTPUT_CMDID,
+ .pdev_get_temperature_cmdid = WMI_TLV_PDEV_GET_TEMPERATURE_CMDID,
+ .vdev_set_wmm_params_cmdid = WMI_TLV_VDEV_SET_WMM_PARAMS_CMDID,
+ .tdls_set_state_cmdid = WMI_TLV_TDLS_SET_STATE_CMDID,
+ .tdls_peer_update_cmdid = WMI_TLV_TDLS_PEER_UPDATE_CMDID,
+ .adaptive_qcs_cmdid = WMI_TLV_RESMGR_ADAPTIVE_OCS_CMDID,
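+ /* Commands below have no TLV counterpart and are unsupported. */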
+ .scan_update_request_cmdid = WMI_CMD_UNSUPPORTED,
+ .vdev_standby_response_cmdid = WMI_CMD_UNSUPPORTED,
+ .vdev_resume_response_cmdid = WMI_CMD_UNSUPPORTED,
+ .wlan_peer_caching_add_peer_cmdid = WMI_CMD_UNSUPPORTED,
+ .wlan_peer_caching_evict_peer_cmdid = WMI_CMD_UNSUPPORTED,
+ .wlan_peer_caching_restore_peer_cmdid = WMI_CMD_UNSUPPORTED,
+ .wlan_peer_caching_print_all_peers_info_cmdid = WMI_CMD_UNSUPPORTED,
+ .peer_update_wds_entry_cmdid = WMI_CMD_UNSUPPORTED,
+ .peer_add_proxy_sta_entry_cmdid = WMI_CMD_UNSUPPORTED,
+ .rtt_keepalive_cmdid = WMI_CMD_UNSUPPORTED,
+ .oem_req_cmdid = WMI_CMD_UNSUPPORTED,
+ .nan_cmdid = WMI_CMD_UNSUPPORTED,
+ .vdev_ratemask_cmdid = WMI_CMD_UNSUPPORTED,
+ .qboost_cfg_cmdid = WMI_CMD_UNSUPPORTED,
+ .pdev_smart_ant_enable_cmdid = WMI_CMD_UNSUPPORTED,
+ .pdev_smart_ant_set_rx_antenna_cmdid = WMI_CMD_UNSUPPORTED,
+ .peer_smart_ant_set_tx_antenna_cmdid = WMI_CMD_UNSUPPORTED,
+ .peer_smart_ant_set_train_info_cmdid = WMI_CMD_UNSUPPORTED,
+ .peer_smart_ant_set_node_config_ops_cmdid = WMI_CMD_UNSUPPORTED,
+ .pdev_set_antenna_switch_table_cmdid = WMI_CMD_UNSUPPORTED,
+ .pdev_set_ctl_table_cmdid = WMI_CMD_UNSUPPORTED,
+ .pdev_set_mimogain_table_cmdid = WMI_CMD_UNSUPPORTED,
+ .pdev_ratepwr_table_cmdid = WMI_CMD_UNSUPPORTED,
+ .pdev_ratepwr_chainmsk_table_cmdid = WMI_CMD_UNSUPPORTED,
+ .pdev_fips_cmdid = WMI_CMD_UNSUPPORTED,
+ .tt_set_conf_cmdid = WMI_CMD_UNSUPPORTED,
+ .fwtest_cmdid = WMI_CMD_UNSUPPORTED,
+ .vdev_atf_request_cmdid = WMI_CMD_UNSUPPORTED,
+ .peer_atf_request_cmdid = WMI_CMD_UNSUPPORTED,
+ .pdev_get_ani_cck_config_cmdid = WMI_CMD_UNSUPPORTED,
+ .pdev_get_ani_ofdm_config_cmdid = WMI_CMD_UNSUPPORTED,
+ .pdev_reserve_ast_entry_cmdid = WMI_CMD_UNSUPPORTED,
+};
+
+static struct wmi_pdev_param_map wmi_tlv_pdev_param_map = {
+ .tx_chain_mask = WMI_TLV_PDEV_PARAM_TX_CHAIN_MASK,
+ .rx_chain_mask = WMI_TLV_PDEV_PARAM_RX_CHAIN_MASK,
+ .txpower_limit2g = WMI_TLV_PDEV_PARAM_TXPOWER_LIMIT2G,
+ .txpower_limit5g = WMI_TLV_PDEV_PARAM_TXPOWER_LIMIT5G,
+ .txpower_scale = WMI_TLV_PDEV_PARAM_TXPOWER_SCALE,
+ .beacon_gen_mode = WMI_TLV_PDEV_PARAM_BEACON_GEN_MODE,
+ .beacon_tx_mode = WMI_TLV_PDEV_PARAM_BEACON_TX_MODE,
+ .resmgr_offchan_mode = WMI_TLV_PDEV_PARAM_RESMGR_OFFCHAN_MODE,
+ .protection_mode = WMI_TLV_PDEV_PARAM_PROTECTION_MODE,
+ .dynamic_bw = WMI_TLV_PDEV_PARAM_DYNAMIC_BW,
+ .non_agg_sw_retry_th = WMI_TLV_PDEV_PARAM_NON_AGG_SW_RETRY_TH,
+ .agg_sw_retry_th = WMI_TLV_PDEV_PARAM_AGG_SW_RETRY_TH,
+ .sta_kickout_th = WMI_TLV_PDEV_PARAM_STA_KICKOUT_TH,
+ .ac_aggrsize_scaling = WMI_TLV_PDEV_PARAM_AC_AGGRSIZE_SCALING,
+ .ltr_enable = WMI_TLV_PDEV_PARAM_LTR_ENABLE,
+ .ltr_ac_latency_be = WMI_TLV_PDEV_PARAM_LTR_AC_LATENCY_BE,
+ .ltr_ac_latency_bk = WMI_TLV_PDEV_PARAM_LTR_AC_LATENCY_BK,
+ .ltr_ac_latency_vi = WMI_TLV_PDEV_PARAM_LTR_AC_LATENCY_VI,
+ .ltr_ac_latency_vo = WMI_TLV_PDEV_PARAM_LTR_AC_LATENCY_VO,
+ .ltr_ac_latency_timeout = WMI_TLV_PDEV_PARAM_LTR_AC_LATENCY_TIMEOUT,
+ .ltr_sleep_override = WMI_TLV_PDEV_PARAM_LTR_SLEEP_OVERRIDE,
+ .ltr_rx_override = WMI_TLV_PDEV_PARAM_LTR_RX_OVERRIDE,
+ .ltr_tx_activity_timeout = WMI_TLV_PDEV_PARAM_LTR_TX_ACTIVITY_TIMEOUT,
+ .l1ss_enable = WMI_TLV_PDEV_PARAM_L1SS_ENABLE,
+ .dsleep_enable = WMI_TLV_PDEV_PARAM_DSLEEP_ENABLE,
+ .pcielp_txbuf_flush = WMI_TLV_PDEV_PARAM_PCIELP_TXBUF_FLUSH,
+ .pcielp_txbuf_watermark = WMI_TLV_PDEV_PARAM_PCIELP_TXBUF_TMO_EN,
+ .pcielp_txbuf_tmo_en = WMI_TLV_PDEV_PARAM_PCIELP_TXBUF_TMO_EN,
+ .pcielp_txbuf_tmo_value = WMI_TLV_PDEV_PARAM_PCIELP_TXBUF_TMO_VALUE,
+ .pdev_stats_update_period = WMI_TLV_PDEV_PARAM_PDEV_STATS_UPDATE_PERIOD,
+ .vdev_stats_update_period = WMI_TLV_PDEV_PARAM_VDEV_STATS_UPDATE_PERIOD,
+ .peer_stats_update_period = WMI_TLV_PDEV_PARAM_PEER_STATS_UPDATE_PERIOD,
+ .bcnflt_stats_update_period =
+ WMI_TLV_PDEV_PARAM_BCNFLT_STATS_UPDATE_PERIOD,
+ .pmf_qos = WMI_TLV_PDEV_PARAM_PMF_QOS,
+ .arp_ac_override = WMI_TLV_PDEV_PARAM_ARP_AC_OVERRIDE,
+ .dcs = WMI_TLV_PDEV_PARAM_DCS,
+ .ani_enable = WMI_TLV_PDEV_PARAM_ANI_ENABLE,
+ .ani_poll_period = WMI_TLV_PDEV_PARAM_ANI_POLL_PERIOD,
+ .ani_listen_period = WMI_TLV_PDEV_PARAM_ANI_LISTEN_PERIOD,
+ .ani_ofdm_level = WMI_TLV_PDEV_PARAM_ANI_OFDM_LEVEL,
+ .ani_cck_level = WMI_TLV_PDEV_PARAM_ANI_CCK_LEVEL,
+ .dyntxchain = WMI_TLV_PDEV_PARAM_DYNTXCHAIN,
+ .proxy_sta = WMI_TLV_PDEV_PARAM_PROXY_STA,
+ .idle_ps_config = WMI_TLV_PDEV_PARAM_IDLE_PS_CONFIG,
+ .power_gating_sleep = WMI_TLV_PDEV_PARAM_POWER_GATING_SLEEP,
+ .fast_channel_reset = WMI_TLV_PDEV_PARAM_UNSUPPORTED,
+ .burst_dur = WMI_TLV_PDEV_PARAM_BURST_DUR,
+ .burst_enable = WMI_TLV_PDEV_PARAM_BURST_ENABLE,
+ .cal_period = WMI_PDEV_PARAM_UNSUPPORTED,
+ .aggr_burst = WMI_PDEV_PARAM_UNSUPPORTED,
+ .rx_decap_mode = WMI_PDEV_PARAM_UNSUPPORTED,
+ .smart_antenna_default_antenna = WMI_PDEV_PARAM_UNSUPPORTED,
+ .igmpmld_override = WMI_PDEV_PARAM_UNSUPPORTED,
+ .igmpmld_tid = WMI_PDEV_PARAM_UNSUPPORTED,
+ .antenna_gain = WMI_PDEV_PARAM_UNSUPPORTED,
+ .rx_filter = WMI_PDEV_PARAM_UNSUPPORTED,
+ .set_mcast_to_ucast_tid = WMI_PDEV_PARAM_UNSUPPORTED,
+ .proxy_sta_mode = WMI_PDEV_PARAM_UNSUPPORTED,
+ .set_mcast2ucast_mode = WMI_PDEV_PARAM_UNSUPPORTED,
+ .set_mcast2ucast_buffer = WMI_PDEV_PARAM_UNSUPPORTED,
+ .remove_mcast2ucast_buffer = WMI_PDEV_PARAM_UNSUPPORTED,
+ .peer_sta_ps_statechg_enable = WMI_PDEV_PARAM_UNSUPPORTED,
+ .igmpmld_ac_override = WMI_PDEV_PARAM_UNSUPPORTED,
+ .block_interbss = WMI_PDEV_PARAM_UNSUPPORTED,
+ .set_disable_reset_cmdid = WMI_PDEV_PARAM_UNSUPPORTED,
+ .set_msdu_ttl_cmdid = WMI_PDEV_PARAM_UNSUPPORTED,
+ .set_ppdu_duration_cmdid = WMI_PDEV_PARAM_UNSUPPORTED,
+ .txbf_sound_period_cmdid = WMI_PDEV_PARAM_UNSUPPORTED,
+ .set_promisc_mode_cmdid = WMI_PDEV_PARAM_UNSUPPORTED,
+ .set_burst_mode_cmdid = WMI_PDEV_PARAM_UNSUPPORTED,
+ .en_stats = WMI_PDEV_PARAM_UNSUPPORTED,
+ .mu_group_policy = WMI_PDEV_PARAM_UNSUPPORTED,
+ .noise_detection = WMI_PDEV_PARAM_UNSUPPORTED,
+ .noise_threshold = WMI_PDEV_PARAM_UNSUPPORTED,
+ .dpd_enable = WMI_PDEV_PARAM_UNSUPPORTED,
+ .set_mcast_bcast_echo = WMI_PDEV_PARAM_UNSUPPORTED,
+ .atf_strict_sch = WMI_PDEV_PARAM_UNSUPPORTED,
+ .atf_sched_duration = WMI_PDEV_PARAM_UNSUPPORTED,
+ .ant_plzn = WMI_PDEV_PARAM_UNSUPPORTED,
+ .mgmt_retry_limit = WMI_PDEV_PARAM_UNSUPPORTED,
+ .sensitivity_level = WMI_PDEV_PARAM_UNSUPPORTED,
+ .signed_txpower_2g = WMI_PDEV_PARAM_UNSUPPORTED,
+ .signed_txpower_5g = WMI_PDEV_PARAM_UNSUPPORTED,
+ .enable_per_tid_amsdu = WMI_PDEV_PARAM_UNSUPPORTED,
+ .enable_per_tid_ampdu = WMI_PDEV_PARAM_UNSUPPORTED,
+ .cca_threshold = WMI_PDEV_PARAM_UNSUPPORTED,
+ .rts_fixed_rate = WMI_PDEV_PARAM_UNSUPPORTED,
+ .pdev_reset = WMI_PDEV_PARAM_UNSUPPORTED,
+ .wapi_mbssid_offset = WMI_PDEV_PARAM_UNSUPPORTED,
+ .arp_srcaddr = WMI_PDEV_PARAM_UNSUPPORTED,
+ .arp_dstaddr = WMI_PDEV_PARAM_UNSUPPORTED,
+};
+
+static struct wmi_vdev_param_map wmi_tlv_vdev_param_map = {
+ .rts_threshold = WMI_TLV_VDEV_PARAM_RTS_THRESHOLD,
+ .fragmentation_threshold = WMI_TLV_VDEV_PARAM_FRAGMENTATION_THRESHOLD,
+ .beacon_interval = WMI_TLV_VDEV_PARAM_BEACON_INTERVAL,
+ .listen_interval = WMI_TLV_VDEV_PARAM_LISTEN_INTERVAL,
+ .multicast_rate = WMI_TLV_VDEV_PARAM_MULTICAST_RATE,
+ .mgmt_tx_rate = WMI_TLV_VDEV_PARAM_MGMT_TX_RATE,
+ .slot_time = WMI_TLV_VDEV_PARAM_SLOT_TIME,
+ .preamble = WMI_TLV_VDEV_PARAM_PREAMBLE,
+ .swba_time = WMI_TLV_VDEV_PARAM_SWBA_TIME,
+ .wmi_vdev_stats_update_period = WMI_TLV_VDEV_STATS_UPDATE_PERIOD,
+ .wmi_vdev_pwrsave_ageout_time = WMI_TLV_VDEV_PWRSAVE_AGEOUT_TIME,
+ .wmi_vdev_host_swba_interval = WMI_TLV_VDEV_HOST_SWBA_INTERVAL,
+ .dtim_period = WMI_TLV_VDEV_PARAM_DTIM_PERIOD,
+ .wmi_vdev_oc_scheduler_air_time_limit =
+ WMI_TLV_VDEV_OC_SCHEDULER_AIR_TIME_LIMIT,
+ .wds = WMI_TLV_VDEV_PARAM_WDS,
+ .atim_window = WMI_TLV_VDEV_PARAM_ATIM_WINDOW,
+ .bmiss_count_max = WMI_TLV_VDEV_PARAM_BMISS_COUNT_MAX,
+ .bmiss_first_bcnt = WMI_TLV_VDEV_PARAM_BMISS_FIRST_BCNT,
+ .bmiss_final_bcnt = WMI_TLV_VDEV_PARAM_BMISS_FINAL_BCNT,
+ .feature_wmm = WMI_TLV_VDEV_PARAM_FEATURE_WMM,
+ .chwidth = WMI_TLV_VDEV_PARAM_CHWIDTH,
+ .chextoffset = WMI_TLV_VDEV_PARAM_CHEXTOFFSET,
+ .disable_htprotection = WMI_TLV_VDEV_PARAM_DISABLE_HTPROTECTION,
+ .sta_quickkickout = WMI_TLV_VDEV_PARAM_STA_QUICKKICKOUT,
+ .mgmt_rate = WMI_TLV_VDEV_PARAM_MGMT_RATE,
+ .protection_mode = WMI_TLV_VDEV_PARAM_PROTECTION_MODE,
+ .fixed_rate = WMI_TLV_VDEV_PARAM_FIXED_RATE,
+ .sgi = WMI_TLV_VDEV_PARAM_SGI,
+ .ldpc = WMI_TLV_VDEV_PARAM_LDPC,
+ .tx_stbc = WMI_TLV_VDEV_PARAM_TX_STBC,
+ .rx_stbc = WMI_TLV_VDEV_PARAM_RX_STBC,
+ .intra_bss_fwd = WMI_TLV_VDEV_PARAM_INTRA_BSS_FWD,
+ .def_keyid = WMI_TLV_VDEV_PARAM_DEF_KEYID,
+ .nss = WMI_TLV_VDEV_PARAM_NSS,
+ .bcast_data_rate = WMI_TLV_VDEV_PARAM_BCAST_DATA_RATE,
+ .mcast_data_rate = WMI_TLV_VDEV_PARAM_MCAST_DATA_RATE,
+ .mcast_indicate = WMI_TLV_VDEV_PARAM_MCAST_INDICATE,
+ .dhcp_indicate = WMI_TLV_VDEV_PARAM_DHCP_INDICATE,
+ .unknown_dest_indicate = WMI_TLV_VDEV_PARAM_UNKNOWN_DEST_INDICATE,
+ .ap_keepalive_min_idle_inactive_time_secs =
+ WMI_TLV_VDEV_PARAM_AP_KEEPALIVE_MIN_IDLE_INACTIVE_TIME_SECS,
+ .ap_keepalive_max_idle_inactive_time_secs =
+ WMI_TLV_VDEV_PARAM_AP_KEEPALIVE_MAX_IDLE_INACTIVE_TIME_SECS,
+ .ap_keepalive_max_unresponsive_time_secs =
+ WMI_TLV_VDEV_PARAM_AP_KEEPALIVE_MAX_UNRESPONSIVE_TIME_SECS,
+ .ap_enable_nawds = WMI_TLV_VDEV_PARAM_AP_ENABLE_NAWDS,
+ .mcast2ucast_set = WMI_TLV_VDEV_PARAM_UNSUPPORTED,
+ .enable_rtscts = WMI_TLV_VDEV_PARAM_ENABLE_RTSCTS,
+ .txbf = WMI_TLV_VDEV_PARAM_TXBF,
+ .packet_powersave = WMI_TLV_VDEV_PARAM_PACKET_POWERSAVE,
+ .drop_unencry = WMI_TLV_VDEV_PARAM_DROP_UNENCRY,
+ .tx_encap_type = WMI_TLV_VDEV_PARAM_TX_ENCAP_TYPE,
+ .ap_detect_out_of_sync_sleeping_sta_time_secs =
+ WMI_TLV_VDEV_PARAM_UNSUPPORTED,
+ .rc_num_retries = WMI_VDEV_PARAM_UNSUPPORTED,
+ .cabq_maxdur = WMI_VDEV_PARAM_UNSUPPORTED,
+ .mfptest_set = WMI_VDEV_PARAM_UNSUPPORTED,
+ .rts_fixed_rate = WMI_VDEV_PARAM_UNSUPPORTED,
+ .vht_sgimask = WMI_VDEV_PARAM_UNSUPPORTED,
+ .vht80_ratemask = WMI_VDEV_PARAM_UNSUPPORTED,
+ .early_rx_adjust_enable = WMI_VDEV_PARAM_UNSUPPORTED,
+ .early_rx_tgt_bmiss_num = WMI_VDEV_PARAM_UNSUPPORTED,
+ .early_rx_bmiss_sample_cycle = WMI_VDEV_PARAM_UNSUPPORTED,
+ .early_rx_slop_step = WMI_VDEV_PARAM_UNSUPPORTED,
+ .early_rx_init_slop = WMI_VDEV_PARAM_UNSUPPORTED,
+ .early_rx_adjust_pause = WMI_VDEV_PARAM_UNSUPPORTED,
+ .proxy_sta = WMI_VDEV_PARAM_UNSUPPORTED,
+ .meru_vc = WMI_VDEV_PARAM_UNSUPPORTED,
+ .rx_decap_type = WMI_VDEV_PARAM_UNSUPPORTED,
+ .bw_nss_ratemask = WMI_VDEV_PARAM_UNSUPPORTED,
+};
+
+static const struct wmi_ops wmi_tlv_ops = {
+ .rx = ath10k_wmi_tlv_op_rx,
+ .map_svc = wmi_tlv_svc_map,
+ .map_svc_ext = wmi_tlv_svc_map_ext,
+
+ .pull_scan = ath10k_wmi_tlv_op_pull_scan_ev,
+ .pull_mgmt_rx = ath10k_wmi_tlv_op_pull_mgmt_rx_ev,
+ .pull_mgmt_tx_compl = ath10k_wmi_tlv_op_pull_mgmt_tx_compl_ev,
+ .pull_ch_info = ath10k_wmi_tlv_op_pull_ch_info_ev,
+ .pull_vdev_start = ath10k_wmi_tlv_op_pull_vdev_start_ev,
+ .pull_peer_kick = ath10k_wmi_tlv_op_pull_peer_kick_ev,
+ .pull_swba = ath10k_wmi_tlv_op_pull_swba_ev,
+ .pull_phyerr_hdr = ath10k_wmi_tlv_op_pull_phyerr_ev_hdr,
+ .pull_phyerr = ath10k_wmi_op_pull_phyerr_ev,
+ .pull_svc_rdy = ath10k_wmi_tlv_op_pull_svc_rdy_ev,
+ .pull_rdy = ath10k_wmi_tlv_op_pull_rdy_ev,
+ .pull_svc_avail = ath10k_wmi_tlv_op_pull_svc_avail,
+ .pull_fw_stats = ath10k_wmi_tlv_op_pull_fw_stats,
+ .pull_roam_ev = ath10k_wmi_tlv_op_pull_roam_ev,
+ .pull_wow_event = ath10k_wmi_tlv_op_pull_wow_ev,
+ .pull_echo_ev = ath10k_wmi_tlv_op_pull_echo_ev,
+ .get_txbf_conf_scheme = ath10k_wmi_tlv_txbf_conf_scheme,
+
+ .gen_pdev_suspend = ath10k_wmi_tlv_op_gen_pdev_suspend,
+ .gen_pdev_resume = ath10k_wmi_tlv_op_gen_pdev_resume,
+ .gen_pdev_set_rd = ath10k_wmi_tlv_op_gen_pdev_set_rd,
+ .gen_pdev_set_param = ath10k_wmi_tlv_op_gen_pdev_set_param,
+ .gen_init = ath10k_wmi_tlv_op_gen_init,
+ .gen_start_scan = ath10k_wmi_tlv_op_gen_start_scan,
+ .gen_stop_scan = ath10k_wmi_tlv_op_gen_stop_scan,
+ .gen_vdev_create = ath10k_wmi_tlv_op_gen_vdev_create,
+ .gen_vdev_delete = ath10k_wmi_tlv_op_gen_vdev_delete,
+ .gen_vdev_start = ath10k_wmi_tlv_op_gen_vdev_start,
+ .gen_vdev_stop = ath10k_wmi_tlv_op_gen_vdev_stop,
+ .gen_vdev_up = ath10k_wmi_tlv_op_gen_vdev_up,
+ .gen_vdev_down = ath10k_wmi_tlv_op_gen_vdev_down,
+ .gen_vdev_set_param = ath10k_wmi_tlv_op_gen_vdev_set_param,
+ .gen_vdev_install_key = ath10k_wmi_tlv_op_gen_vdev_install_key,
+ .gen_vdev_wmm_conf = ath10k_wmi_tlv_op_gen_vdev_wmm_conf,
+ .gen_peer_create = ath10k_wmi_tlv_op_gen_peer_create,
+ .gen_peer_delete = ath10k_wmi_tlv_op_gen_peer_delete,
+ .gen_peer_flush = ath10k_wmi_tlv_op_gen_peer_flush,
+ .gen_peer_set_param = ath10k_wmi_tlv_op_gen_peer_set_param,
+ .gen_peer_assoc = ath10k_wmi_tlv_op_gen_peer_assoc,
+ .gen_set_psmode = ath10k_wmi_tlv_op_gen_set_psmode,
+ .gen_set_sta_ps = ath10k_wmi_tlv_op_gen_set_sta_ps,
+ .gen_set_ap_ps = ath10k_wmi_tlv_op_gen_set_ap_ps,
+ .gen_scan_chan_list = ath10k_wmi_tlv_op_gen_scan_chan_list,
+ .gen_scan_prob_req_oui = ath10k_wmi_tlv_op_gen_scan_prob_req_oui,
+ .gen_beacon_dma = ath10k_wmi_tlv_op_gen_beacon_dma,
+ .gen_pdev_set_wmm = ath10k_wmi_tlv_op_gen_pdev_set_wmm,
+ .gen_request_stats = ath10k_wmi_tlv_op_gen_request_stats,
+ .gen_force_fw_hang = ath10k_wmi_tlv_op_gen_force_fw_hang,
+ /* .gen_mgmt_tx = not implemented; HTT is used */
+ .gen_mgmt_tx_send = ath10k_wmi_tlv_op_gen_mgmt_tx_send,
+ .cleanup_mgmt_tx_send = ath10k_wmi_tlv_op_cleanup_mgmt_tx_send,
+ .gen_dbglog_cfg = ath10k_wmi_tlv_op_gen_dbglog_cfg,
+ .gen_pktlog_enable = ath10k_wmi_tlv_op_gen_pktlog_enable,
+ .gen_pktlog_disable = ath10k_wmi_tlv_op_gen_pktlog_disable,
+ .gen_pdev_set_quiet_mode = ath10k_wmi_tlv_op_gen_pdev_set_quiet_mode,
+ .gen_pdev_get_temperature = ath10k_wmi_tlv_op_gen_pdev_get_temperature,
+ /* .gen_addba_clear_resp not implemented */
+ /* .gen_addba_send not implemented */
+ /* .gen_addba_set_resp not implemented */
+ /* .gen_delba_send not implemented */
+ .gen_bcn_tmpl = ath10k_wmi_tlv_op_gen_bcn_tmpl,
+ .gen_prb_tmpl = ath10k_wmi_tlv_op_gen_prb_tmpl,
+ .gen_p2p_go_bcn_ie = ath10k_wmi_tlv_op_gen_p2p_go_bcn_ie,
+ .gen_vdev_sta_uapsd = ath10k_wmi_tlv_op_gen_vdev_sta_uapsd,
+ .gen_sta_keepalive = ath10k_wmi_tlv_op_gen_sta_keepalive,
+ .gen_wow_enable = ath10k_wmi_tlv_op_gen_wow_enable,
+ .gen_wow_add_wakeup_event = ath10k_wmi_tlv_op_gen_wow_add_wakeup_event,
+ .gen_wow_host_wakeup_ind = ath10k_wmi_tlv_gen_wow_host_wakeup_ind,
+ .gen_wow_add_pattern = ath10k_wmi_tlv_op_gen_wow_add_pattern,
+ .gen_wow_del_pattern = ath10k_wmi_tlv_op_gen_wow_del_pattern,
+ .gen_update_fw_tdls_state = ath10k_wmi_tlv_op_gen_update_fw_tdls_state,
+ .gen_tdls_peer_update = ath10k_wmi_tlv_op_gen_tdls_peer_update,
+ .gen_adaptive_qcs = ath10k_wmi_tlv_op_gen_adaptive_qcs,
+ .fw_stats_fill = ath10k_wmi_main_op_fw_stats_fill,
+ .get_vdev_subtype = ath10k_wmi_op_get_vdev_subtype,
+ .gen_echo = ath10k_wmi_tlv_op_gen_echo,
+ .gen_vdev_spectral_conf = ath10k_wmi_tlv_op_gen_vdev_spectral_conf,
+ .gen_vdev_spectral_enable = ath10k_wmi_tlv_op_gen_vdev_spectral_enable,
+};
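+
+/*
+ * Illustrative note, not part of the upstream patch: callers do not
+ * invoke these handlers directly.  The static inline wrappers in
+ * wmi-ops.h dispatch through ar->wmi.ops and return -EOPNOTSUPP when a
+ * hook is left unset; a minimal sketch of that pattern:
+ *
+ *     static inline int ath10k_wmi_vdev_delete(struct ath10k *ar,
+ *                                              u32 vdev_id)
+ *     {
+ *             struct sk_buff *skb;
+ *
+ *             if (!ar->wmi.ops->gen_vdev_delete)
+ *                     return -EOPNOTSUPP;
+ *
+ *             skb = ar->wmi.ops->gen_vdev_delete(ar, vdev_id);
+ *             if (IS_ERR(skb))
+ *                     return PTR_ERR(skb);
+ *
+ *             return ath10k_wmi_cmd_send(ar, skb,
+ *                                        ar->wmi.cmd->vdev_delete_cmdid);
+ *     }
+ */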
+
+static const struct wmi_peer_flags_map wmi_tlv_peer_flags_map = {
+ .auth = WMI_TLV_PEER_AUTH,
+ .qos = WMI_TLV_PEER_QOS,
+ .need_ptk_4_way = WMI_TLV_PEER_NEED_PTK_4_WAY,
+ .need_gtk_2_way = WMI_TLV_PEER_NEED_GTK_2_WAY,
+ .apsd = WMI_TLV_PEER_APSD,
+ .ht = WMI_TLV_PEER_HT,
+ .bw40 = WMI_TLV_PEER_40MHZ,
+ .stbc = WMI_TLV_PEER_STBC,
+ .ldbc = WMI_TLV_PEER_LDPC,
+ .dyn_mimops = WMI_TLV_PEER_DYN_MIMOPS,
+ .static_mimops = WMI_TLV_PEER_STATIC_MIMOPS,
+ .spatial_mux = WMI_TLV_PEER_SPATIAL_MUX,
+ .vht = WMI_TLV_PEER_VHT,
+ .bw80 = WMI_TLV_PEER_80MHZ,
+ .pmf = WMI_TLV_PEER_PMF,
+ .bw160 = WMI_TLV_PEER_160MHZ,
+};
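+
+/*
+ * Illustrative note, not part of the upstream patch: peer-assoc code is
+ * expected to go through this map rather than hard-coding the TLV bit
+ * values, e.g. (a sketch, assuming an arg struct carrying peer_flags):
+ *
+ *     arg->peer_flags |= ar->wmi.peer_flags->qos;
+ */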
+
+/************/
+/* TLV init */
+/************/
+
+void ath10k_wmi_tlv_attach(struct ath10k *ar)
+{
+ ar->wmi.cmd = &wmi_tlv_cmd_map;
+ ar->wmi.vdev_param = &wmi_tlv_vdev_param_map;
+ ar->wmi.pdev_param = &wmi_tlv_pdev_param_map;
+ ar->wmi.ops = &wmi_tlv_ops;
+ ar->wmi.peer_flags = &wmi_tlv_peer_flags_map;
+}
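+
+/*
+ * Illustrative note, not part of the upstream patch: this attach hook is
+ * selected once at WMI setup time from the firmware's advertised WMI op
+ * version, roughly:
+ *
+ *     case ATH10K_FW_WMI_OP_VERSION_TLV:
+ *             ath10k_wmi_tlv_attach(ar);
+ *             break;
+ */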
diff --git a/drivers/net/wireless/ath/ath10k/wmi-tlv.h b/drivers/net/wireless/ath/ath10k/wmi-tlv.h
new file mode 100644
index 000000000..4f0c20c90
--- /dev/null
+++ b/drivers/net/wireless/ath/ath10k/wmi-tlv.h
@@ -0,0 +1,2157 @@
+/*
+ * Copyright (c) 2005-2011 Atheros Communications Inc.
+ * Copyright (c) 2011-2017 Qualcomm Atheros, Inc.
+ * Copyright (c) 2018, The Linux Foundation. All rights reserved.
+ *
+ * Permission to use, copy, modify, and/or distribute this software for any
+ * purpose with or without fee is hereby granted, provided that the above
+ * copyright notice and this permission notice appear in all copies.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
+ * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
+ * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+ * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+ * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
+ * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+ */
+#ifndef _WMI_TLV_H
+#define _WMI_TLV_H
+
+#define WMI_TLV_CMD(grp_id) (((grp_id) << 12) | 0x1)
+#define WMI_TLV_EV(grp_id) (((grp_id) << 12) | 0x1)
+#define WMI_TLV_CMD_UNSUPPORTED 0
+#define WMI_TLV_PDEV_PARAM_UNSUPPORTED 0
+#define WMI_TLV_VDEV_PARAM_UNSUPPORTED 0
+#define WMI_TLV_MGMT_TX_FRAME_MAX_LEN 64
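+
+/*
+ * Illustrative note, not part of the upstream patch: both macros place
+ * the group id in bits 12 and up and start numbering within the group
+ * at 1, so consecutive enum entries simply count up from there, e.g.:
+ *
+ *     WMI_TLV_CMD(WMI_TLV_GRP_SCAN) = (0x3 << 12) | 0x1 = 0x3001
+ *
+ * which makes WMI_TLV_START_SCAN_CMDID 0x3001 and, by enum
+ * auto-increment, WMI_TLV_STOP_SCAN_CMDID 0x3002.
+ */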
+
+enum wmi_tlv_grp_id {
+ WMI_TLV_GRP_START = 0x3,
+ WMI_TLV_GRP_SCAN = WMI_TLV_GRP_START,
+ WMI_TLV_GRP_PDEV,
+ WMI_TLV_GRP_VDEV,
+ WMI_TLV_GRP_PEER,
+ WMI_TLV_GRP_MGMT,
+ WMI_TLV_GRP_BA_NEG,
+ WMI_TLV_GRP_STA_PS,
+ WMI_TLV_GRP_DFS,
+ WMI_TLV_GRP_ROAM,
+ WMI_TLV_GRP_OFL_SCAN,
+ WMI_TLV_GRP_P2P,
+ WMI_TLV_GRP_AP_PS,
+ WMI_TLV_GRP_RATECTL,
+ WMI_TLV_GRP_PROFILE,
+ WMI_TLV_GRP_SUSPEND,
+ WMI_TLV_GRP_BCN_FILTER,
+ WMI_TLV_GRP_WOW,
+ WMI_TLV_GRP_RTT,
+ WMI_TLV_GRP_SPECTRAL,
+ WMI_TLV_GRP_STATS,
+ WMI_TLV_GRP_ARP_NS_OFL,
+ WMI_TLV_GRP_NLO_OFL,
+ WMI_TLV_GRP_GTK_OFL,
+ WMI_TLV_GRP_CSA_OFL,
+ WMI_TLV_GRP_CHATTER,
+ WMI_TLV_GRP_TID_ADDBA,
+ WMI_TLV_GRP_MISC,
+ WMI_TLV_GRP_GPIO,
+ WMI_TLV_GRP_FWTEST,
+ WMI_TLV_GRP_TDLS,
+ WMI_TLV_GRP_RESMGR,
+ WMI_TLV_GRP_STA_SMPS,
+ WMI_TLV_GRP_WLAN_HB,
+ WMI_TLV_GRP_RMC,
+ WMI_TLV_GRP_MHF_OFL,
+ WMI_TLV_GRP_LOCATION_SCAN,
+ WMI_TLV_GRP_OEM,
+ WMI_TLV_GRP_NAN,
+ WMI_TLV_GRP_COEX,
+ WMI_TLV_GRP_OBSS_OFL,
+ WMI_TLV_GRP_LPI,
+ WMI_TLV_GRP_EXTSCAN,
+ WMI_TLV_GRP_DHCP_OFL,
+ WMI_TLV_GRP_IPA,
+ WMI_TLV_GRP_MDNS_OFL,
+ WMI_TLV_GRP_SAP_OFL,
+};
+
+enum wmi_tlv_cmd_id {
+ WMI_TLV_INIT_CMDID = 0x1,
+ WMI_TLV_START_SCAN_CMDID = WMI_TLV_CMD(WMI_TLV_GRP_SCAN),
+ WMI_TLV_STOP_SCAN_CMDID,
+ WMI_TLV_SCAN_CHAN_LIST_CMDID,
+ WMI_TLV_SCAN_SCH_PRIO_TBL_CMDID,
+ WMI_TLV_SCAN_UPDATE_REQUEST_CMDID,
+ WMI_TLV_SCAN_PROB_REQ_OUI_CMDID,
+ WMI_TLV_PDEV_SET_REGDOMAIN_CMDID = WMI_TLV_CMD(WMI_TLV_GRP_PDEV),
+ WMI_TLV_PDEV_SET_CHANNEL_CMDID,
+ WMI_TLV_PDEV_SET_PARAM_CMDID,
+ WMI_TLV_PDEV_PKTLOG_ENABLE_CMDID,
+ WMI_TLV_PDEV_PKTLOG_DISABLE_CMDID,
+ WMI_TLV_PDEV_SET_WMM_PARAMS_CMDID,
+ WMI_TLV_PDEV_SET_HT_CAP_IE_CMDID,
+ WMI_TLV_PDEV_SET_VHT_CAP_IE_CMDID,
+ WMI_TLV_PDEV_SET_DSCP_TID_MAP_CMDID,
+ WMI_TLV_PDEV_SET_QUIET_MODE_CMDID,
+ WMI_TLV_PDEV_GREEN_AP_PS_ENABLE_CMDID,
+ WMI_TLV_PDEV_GET_TPC_CONFIG_CMDID,
+ WMI_TLV_PDEV_SET_BASE_MACADDR_CMDID,
+ WMI_TLV_PDEV_DUMP_CMDID,
+ WMI_TLV_PDEV_SET_LED_CONFIG_CMDID,
+ WMI_TLV_PDEV_GET_TEMPERATURE_CMDID,
+ WMI_TLV_PDEV_SET_LED_FLASHING_CMDID,
+ WMI_TLV_VDEV_CREATE_CMDID = WMI_TLV_CMD(WMI_TLV_GRP_VDEV),
+ WMI_TLV_VDEV_DELETE_CMDID,
+ WMI_TLV_VDEV_START_REQUEST_CMDID,
+ WMI_TLV_VDEV_RESTART_REQUEST_CMDID,
+ WMI_TLV_VDEV_UP_CMDID,
+ WMI_TLV_VDEV_STOP_CMDID,
+ WMI_TLV_VDEV_DOWN_CMDID,
+ WMI_TLV_VDEV_SET_PARAM_CMDID,
+ WMI_TLV_VDEV_INSTALL_KEY_CMDID,
+ WMI_TLV_VDEV_WNM_SLEEPMODE_CMDID,
+ WMI_TLV_VDEV_WMM_ADDTS_CMDID,
+ WMI_TLV_VDEV_WMM_DELTS_CMDID,
+ WMI_TLV_VDEV_SET_WMM_PARAMS_CMDID,
+ WMI_TLV_VDEV_SET_GTX_PARAMS_CMDID,
+ WMI_TLV_VDEV_IPSEC_NATKEEPALIVE_FILTER_CMDID,
+ WMI_TLV_VDEV_PLMREQ_START_CMDID,
+ WMI_TLV_VDEV_PLMREQ_STOP_CMDID,
+ WMI_TLV_PEER_CREATE_CMDID = WMI_TLV_CMD(WMI_TLV_GRP_PEER),
+ WMI_TLV_PEER_DELETE_CMDID,
+ WMI_TLV_PEER_FLUSH_TIDS_CMDID,
+ WMI_TLV_PEER_SET_PARAM_CMDID,
+ WMI_TLV_PEER_ASSOC_CMDID,
+ WMI_TLV_PEER_ADD_WDS_ENTRY_CMDID,
+ WMI_TLV_PEER_REMOVE_WDS_ENTRY_CMDID,
+ WMI_TLV_PEER_MCAST_GROUP_CMDID,
+ WMI_TLV_PEER_INFO_REQ_CMDID,
+ WMI_TLV_PEER_GET_ESTIMATED_LINKSPEED_CMDID,
+ WMI_TLV_BCN_TX_CMDID = WMI_TLV_CMD(WMI_TLV_GRP_MGMT),
+ WMI_TLV_PDEV_SEND_BCN_CMDID,
+ WMI_TLV_BCN_TMPL_CMDID,
+ WMI_TLV_BCN_FILTER_RX_CMDID,
+ WMI_TLV_PRB_REQ_FILTER_RX_CMDID,
+ WMI_TLV_MGMT_TX_CMDID,
+ WMI_TLV_PRB_TMPL_CMDID,
+ WMI_TLV_MGMT_TX_SEND_CMD,
+ WMI_TLV_ADDBA_CLEAR_RESP_CMDID = WMI_TLV_CMD(WMI_TLV_GRP_BA_NEG),
+ WMI_TLV_ADDBA_SEND_CMDID,
+ WMI_TLV_ADDBA_STATUS_CMDID,
+ WMI_TLV_DELBA_SEND_CMDID,
+ WMI_TLV_ADDBA_SET_RESP_CMDID,
+ WMI_TLV_SEND_SINGLEAMSDU_CMDID,
+ WMI_TLV_STA_POWERSAVE_MODE_CMDID = WMI_TLV_CMD(WMI_TLV_GRP_STA_PS),
+ WMI_TLV_STA_POWERSAVE_PARAM_CMDID,
+ WMI_TLV_STA_MIMO_PS_MODE_CMDID,
+ WMI_TLV_PDEV_DFS_ENABLE_CMDID = WMI_TLV_CMD(WMI_TLV_GRP_DFS),
+ WMI_TLV_PDEV_DFS_DISABLE_CMDID,
+ WMI_TLV_DFS_PHYERR_FILTER_ENA_CMDID,
+ WMI_TLV_DFS_PHYERR_FILTER_DIS_CMDID,
+ WMI_TLV_ROAM_SCAN_MODE = WMI_TLV_CMD(WMI_TLV_GRP_ROAM),
+ WMI_TLV_ROAM_SCAN_RSSI_THRESHOLD,
+ WMI_TLV_ROAM_SCAN_PERIOD,
+ WMI_TLV_ROAM_SCAN_RSSI_CHANGE_THRESHOLD,
+ WMI_TLV_ROAM_AP_PROFILE,
+ WMI_TLV_ROAM_CHAN_LIST,
+ WMI_TLV_ROAM_SCAN_CMD,
+ WMI_TLV_ROAM_SYNCH_COMPLETE,
+ WMI_TLV_ROAM_SET_RIC_REQUEST_CMDID,
+ WMI_TLV_ROAM_INVOKE_CMDID,
+ WMI_TLV_OFL_SCAN_ADD_AP_PROFILE = WMI_TLV_CMD(WMI_TLV_GRP_OFL_SCAN),
+ WMI_TLV_OFL_SCAN_REMOVE_AP_PROFILE,
+ WMI_TLV_OFL_SCAN_PERIOD,
+ WMI_TLV_P2P_DEV_SET_DEVICE_INFO = WMI_TLV_CMD(WMI_TLV_GRP_P2P),
+ WMI_TLV_P2P_DEV_SET_DISCOVERABILITY,
+ WMI_TLV_P2P_GO_SET_BEACON_IE,
+ WMI_TLV_P2P_GO_SET_PROBE_RESP_IE,
+ WMI_TLV_P2P_SET_VENDOR_IE_DATA_CMDID,
+ WMI_TLV_P2P_DISC_OFFLOAD_CONFIG_CMDID,
+ WMI_TLV_P2P_DISC_OFFLOAD_APPIE_CMDID,
+ WMI_TLV_P2P_DISC_OFFLOAD_PATTERN_CMDID,
+ WMI_TLV_P2P_SET_OPPPS_PARAM_CMDID,
+ WMI_TLV_AP_PS_PEER_PARAM_CMDID = WMI_TLV_CMD(WMI_TLV_GRP_AP_PS),
+ WMI_TLV_AP_PS_PEER_UAPSD_COEX_CMDID,
+ WMI_TLV_PEER_RATE_RETRY_SCHED_CMDID = WMI_TLV_CMD(WMI_TLV_GRP_RATECTL),
+ WMI_TLV_WLAN_PROFILE_TRIGGER_CMDID = WMI_TLV_CMD(WMI_TLV_GRP_PROFILE),
+ WMI_TLV_WLAN_PROFILE_SET_HIST_INTVL_CMDID,
+ WMI_TLV_WLAN_PROFILE_GET_PROFILE_DATA_CMDID,
+ WMI_TLV_WLAN_PROFILE_ENABLE_PROFILE_ID_CMDID,
+ WMI_TLV_WLAN_PROFILE_LIST_PROFILE_ID_CMDID,
+ WMI_TLV_PDEV_SUSPEND_CMDID = WMI_TLV_CMD(WMI_TLV_GRP_SUSPEND),
+ WMI_TLV_PDEV_RESUME_CMDID,
+ WMI_TLV_ADD_BCN_FILTER_CMDID = WMI_TLV_CMD(WMI_TLV_GRP_BCN_FILTER),
+ WMI_TLV_RMV_BCN_FILTER_CMDID,
+ WMI_TLV_WOW_ADD_WAKE_PATTERN_CMDID = WMI_TLV_CMD(WMI_TLV_GRP_WOW),
+ WMI_TLV_WOW_DEL_WAKE_PATTERN_CMDID,
+ WMI_TLV_WOW_ENABLE_DISABLE_WAKE_EVENT_CMDID,
+ WMI_TLV_WOW_ENABLE_CMDID,
+ WMI_TLV_WOW_HOSTWAKEUP_FROM_SLEEP_CMDID,
+ WMI_TLV_WOW_ACER_IOAC_ADD_KEEPALIVE_CMDID,
+ WMI_TLV_WOW_ACER_IOAC_DEL_KEEPALIVE_CMDID,
+ WMI_TLV_WOW_ACER_IOAC_ADD_WAKE_PATTERN_CMDID,
+ WMI_TLV_WOW_ACER_IOAC_DEL_WAKE_PATTERN_CMDID,
+ WMI_TLV_D0_WOW_ENABLE_DISABLE_CMDID,
+ WMI_TLV_EXTWOW_ENABLE_CMDID,
+ WMI_TLV_EXTWOW_SET_APP_TYPE1_PARAMS_CMDID,
+ WMI_TLV_EXTWOW_SET_APP_TYPE2_PARAMS_CMDID,
+ WMI_TLV_RTT_MEASREQ_CMDID = WMI_TLV_CMD(WMI_TLV_GRP_RTT),
+ WMI_TLV_RTT_TSF_CMDID,
+ WMI_TLV_SPECTRAL_SCAN_CONF_CMDID = WMI_TLV_CMD(WMI_TLV_GRP_SPECTRAL),
+ WMI_TLV_SPECTRAL_SCAN_ENABLE_CMDID,
+ WMI_TLV_REQUEST_STATS_CMDID = WMI_TLV_CMD(WMI_TLV_GRP_STATS),
+ WMI_TLV_MCC_SCHED_TRAFFIC_STATS_CMDID,
+ WMI_TLV_REQUEST_STATS_EXT_CMDID,
+ WMI_TLV_REQUEST_LINK_STATS_CMDID,
+ WMI_TLV_START_LINK_STATS_CMDID,
+ WMI_TLV_CLEAR_LINK_STATS_CMDID,
+ WMI_TLV_SET_ARP_NS_OFFLOAD_CMDID = WMI_TLV_CMD(WMI_TLV_GRP_ARP_NS_OFL),
+ WMI_TLV_ADD_PROACTIVE_ARP_RSP_PATTERN_CMDID,
+ WMI_TLV_DEL_PROACTIVE_ARP_RSP_PATTERN_CMDID,
+ WMI_TLV_NETWORK_LIST_OFFLOAD_CONFIG_CMDID =
+ WMI_TLV_CMD(WMI_TLV_GRP_NLO_OFL),
+ WMI_TLV_APFIND_CMDID,
+ WMI_TLV_GTK_OFFLOAD_CMDID = WMI_TLV_CMD(WMI_TLV_GRP_GTK_OFL),
+ WMI_TLV_CSA_OFFLOAD_ENABLE_CMDID = WMI_TLV_CMD(WMI_TLV_GRP_CSA_OFL),
+ WMI_TLV_CSA_OFFLOAD_CHANSWITCH_CMDID,
+ WMI_TLV_CHATTER_SET_MODE_CMDID = WMI_TLV_CMD(WMI_TLV_GRP_CHATTER),
+ WMI_TLV_CHATTER_ADD_COALESCING_FILTER_CMDID,
+ WMI_TLV_CHATTER_DELETE_COALESCING_FILTER_CMDID,
+ WMI_TLV_CHATTER_COALESCING_QUERY_CMDID,
+ WMI_TLV_PEER_TID_ADDBA_CMDID = WMI_TLV_CMD(WMI_TLV_GRP_TID_ADDBA),
+ WMI_TLV_PEER_TID_DELBA_CMDID,
+ WMI_TLV_STA_DTIM_PS_METHOD_CMDID,
+ WMI_TLV_STA_UAPSD_AUTO_TRIG_CMDID,
+ WMI_TLV_STA_KEEPALIVE_CMDID,
+ WMI_TLV_BA_REQ_SSN_CMDID,
+ WMI_TLV_ECHO_CMDID = WMI_TLV_CMD(WMI_TLV_GRP_MISC),
+ WMI_TLV_PDEV_UTF_CMDID,
+ WMI_TLV_DBGLOG_CFG_CMDID,
+ WMI_TLV_PDEV_QVIT_CMDID,
+ WMI_TLV_PDEV_FTM_INTG_CMDID,
+ WMI_TLV_VDEV_SET_KEEPALIVE_CMDID,
+ WMI_TLV_VDEV_GET_KEEPALIVE_CMDID,
+ WMI_TLV_FORCE_FW_HANG_CMDID,
+ WMI_TLV_SET_MCASTBCAST_FILTER_CMDID,
+ WMI_TLV_THERMAL_MGMT_CMDID,
+ WMI_TLV_HOST_AUTO_SHUTDOWN_CFG_CMDID,
+ WMI_TLV_TPC_CHAINMASK_CONFIG_CMDID,
+ WMI_TLV_SET_ANTENNA_DIVERSITY_CMDID,
+ WMI_TLV_GPIO_CONFIG_CMDID = WMI_TLV_CMD(WMI_TLV_GRP_GPIO),
+ WMI_TLV_GPIO_OUTPUT_CMDID,
+ WMI_TLV_TXBF_CMDID,
+ WMI_TLV_FWTEST_VDEV_MCC_SET_TBTT_MODE_CMDID =
+ WMI_TLV_CMD(WMI_TLV_GRP_FWTEST),
+ WMI_TLV_FWTEST_P2P_SET_NOA_PARAM_CMDID,
+ WMI_TLV_UNIT_TEST_CMDID,
+ WMI_TLV_TDLS_SET_STATE_CMDID = WMI_TLV_CMD(WMI_TLV_GRP_TDLS),
+ WMI_TLV_TDLS_PEER_UPDATE_CMDID,
+ WMI_TLV_TDLS_SET_OFFCHAN_MODE_CMDID,
+ WMI_TLV_RESMGR_ADAPTIVE_OCS_CMDID = WMI_TLV_CMD(WMI_TLV_GRP_RESMGR),
+ WMI_TLV_RESMGR_SET_CHAN_TIME_QUOTA_CMDID,
+ WMI_TLV_RESMGR_SET_CHAN_LATENCY_CMDID,
+ WMI_TLV_STA_SMPS_FORCE_MODE_CMDID = WMI_TLV_CMD(WMI_TLV_GRP_STA_SMPS),
+ WMI_TLV_STA_SMPS_PARAM_CMDID,
+ WMI_TLV_HB_SET_ENABLE_CMDID = WMI_TLV_CMD(WMI_TLV_GRP_WLAN_HB),
+ WMI_TLV_HB_SET_TCP_PARAMS_CMDID,
+ WMI_TLV_HB_SET_TCP_PKT_FILTER_CMDID,
+ WMI_TLV_HB_SET_UDP_PARAMS_CMDID,
+ WMI_TLV_HB_SET_UDP_PKT_FILTER_CMDID,
+ WMI_TLV_RMC_SET_MODE_CMDID = WMI_TLV_CMD(WMI_TLV_GRP_RMC),
+ WMI_TLV_RMC_SET_ACTION_PERIOD_CMDID,
+ WMI_TLV_RMC_CONFIG_CMDID,
+ WMI_TLV_MHF_OFFLOAD_SET_MODE_CMDID = WMI_TLV_CMD(WMI_TLV_GRP_MHF_OFL),
+ WMI_TLV_MHF_OFFLOAD_PLUMB_ROUTING_TBL_CMDID,
+ WMI_TLV_BATCH_SCAN_ENABLE_CMDID =
+ WMI_TLV_CMD(WMI_TLV_GRP_LOCATION_SCAN),
+ WMI_TLV_BATCH_SCAN_DISABLE_CMDID,
+ WMI_TLV_BATCH_SCAN_TRIGGER_RESULT_CMDID,
+ WMI_TLV_OEM_REQ_CMDID = WMI_TLV_CMD(WMI_TLV_GRP_OEM),
+ WMI_TLV_NAN_CMDID = WMI_TLV_CMD(WMI_TLV_GRP_NAN),
+ WMI_TLV_MODEM_POWER_STATE_CMDID = WMI_TLV_CMD(WMI_TLV_GRP_COEX),
+ WMI_TLV_CHAN_AVOID_UPDATE_CMDID,
+ WMI_TLV_OBSS_SCAN_ENABLE_CMDID = WMI_TLV_CMD(WMI_TLV_GRP_OBSS_OFL),
+ WMI_TLV_OBSS_SCAN_DISABLE_CMDID,
+ WMI_TLV_LPI_MGMT_SNOOPING_CONFIG_CMDID = WMI_TLV_CMD(WMI_TLV_GRP_LPI),
+ WMI_TLV_LPI_START_SCAN_CMDID,
+ WMI_TLV_LPI_STOP_SCAN_CMDID,
+ WMI_TLV_EXTSCAN_START_CMDID = WMI_TLV_CMD(WMI_TLV_GRP_EXTSCAN),
+ WMI_TLV_EXTSCAN_STOP_CMDID,
+ WMI_TLV_EXTSCAN_CONFIGURE_WLAN_CHANGE_MONITOR_CMDID,
+ WMI_TLV_EXTSCAN_CONFIGURE_HOTLIST_MONITOR_CMDID,
+ WMI_TLV_EXTSCAN_GET_CACHED_RESULTS_CMDID,
+ WMI_TLV_EXTSCAN_GET_WLAN_CHANGE_RESULTS_CMDID,
+ WMI_TLV_EXTSCAN_SET_CAPABILITIES_CMDID,
+ WMI_TLV_EXTSCAN_GET_CAPABILITIES_CMDID,
+ WMI_TLV_SET_DHCP_SERVER_OFFLOAD_CMDID =
+ WMI_TLV_CMD(WMI_TLV_GRP_DHCP_OFL),
+ WMI_TLV_IPA_OFFLOAD_ENABLE_DISABLE_CMDID = WMI_TLV_CMD(WMI_TLV_GRP_IPA),
+ WMI_TLV_MDNS_OFFLOAD_ENABLE_CMDID = WMI_TLV_CMD(WMI_TLV_GRP_MDNS_OFL),
+ WMI_TLV_MDNS_SET_FQDN_CMDID,
+ WMI_TLV_MDNS_SET_RESPONSE_CMDID,
+ WMI_TLV_MDNS_GET_STATS_CMDID,
+ WMI_TLV_SAP_OFL_ENABLE_CMDID = WMI_TLV_CMD(WMI_TLV_GRP_SAP_OFL),
+};
+
+enum wmi_tlv_event_id {
+ WMI_TLV_SERVICE_READY_EVENTID = 0x1,
+ WMI_TLV_READY_EVENTID,
+ WMI_TLV_SERVICE_AVAILABLE_EVENTID,
+ WMI_TLV_SCAN_EVENTID = WMI_TLV_EV(WMI_TLV_GRP_SCAN),
+ WMI_TLV_PDEV_TPC_CONFIG_EVENTID = WMI_TLV_EV(WMI_TLV_GRP_PDEV),
+ WMI_TLV_CHAN_INFO_EVENTID,
+ WMI_TLV_PHYERR_EVENTID,
+ WMI_TLV_PDEV_DUMP_EVENTID,
+ WMI_TLV_TX_PAUSE_EVENTID,
+ WMI_TLV_DFS_RADAR_EVENTID,
+ WMI_TLV_PDEV_L1SS_TRACK_EVENTID,
+ WMI_TLV_PDEV_TEMPERATURE_EVENTID,
+ WMI_TLV_VDEV_START_RESP_EVENTID = WMI_TLV_EV(WMI_TLV_GRP_VDEV),
+ WMI_TLV_VDEV_STOPPED_EVENTID,
+ WMI_TLV_VDEV_INSTALL_KEY_COMPLETE_EVENTID,
+ WMI_TLV_VDEV_MCC_BCN_INTERVAL_CHANGE_REQ_EVENTID,
+ WMI_TLV_PEER_STA_KICKOUT_EVENTID = WMI_TLV_EV(WMI_TLV_GRP_PEER),
+ WMI_TLV_PEER_INFO_EVENTID,
+ WMI_TLV_PEER_TX_FAIL_CNT_THR_EVENTID,
+ WMI_TLV_PEER_ESTIMATED_LINKSPEED_EVENTID,
+ WMI_TLV_PEER_STATE_EVENTID,
+ WMI_TLV_MGMT_RX_EVENTID = WMI_TLV_EV(WMI_TLV_GRP_MGMT),
+ WMI_TLV_HOST_SWBA_EVENTID,
+ WMI_TLV_TBTTOFFSET_UPDATE_EVENTID,
+ WMI_TLV_OFFLOAD_BCN_TX_STATUS_EVENTID,
+ WMI_TLV_OFFLOAD_PROB_RESP_TX_STATUS_EVENTID,
+ WMI_TLV_MGMT_TX_COMPLETION_EVENTID,
+ WMI_TLV_TX_DELBA_COMPLETE_EVENTID = WMI_TLV_EV(WMI_TLV_GRP_BA_NEG),
+ WMI_TLV_TX_ADDBA_COMPLETE_EVENTID,
+ WMI_TLV_BA_RSP_SSN_EVENTID,
+ WMI_TLV_AGGR_STATE_TRIG_EVENTID,
+ WMI_TLV_ROAM_EVENTID = WMI_TLV_EV(WMI_TLV_GRP_ROAM),
+ WMI_TLV_PROFILE_MATCH,
+ WMI_TLV_ROAM_SYNCH_EVENTID,
+ WMI_TLV_P2P_DISC_EVENTID = WMI_TLV_EV(WMI_TLV_GRP_P2P),
+ WMI_TLV_P2P_NOA_EVENTID,
+ WMI_TLV_PDEV_RESUME_EVENTID = WMI_TLV_EV(WMI_TLV_GRP_SUSPEND),
+ WMI_TLV_WOW_WAKEUP_HOST_EVENTID = WMI_TLV_EV(WMI_TLV_GRP_WOW),
+ WMI_TLV_D0_WOW_DISABLE_ACK_EVENTID,
+ WMI_TLV_RTT_MEASUREMENT_REPORT_EVENTID = WMI_TLV_EV(WMI_TLV_GRP_RTT),
+ WMI_TLV_TSF_MEASUREMENT_REPORT_EVENTID,
+ WMI_TLV_RTT_ERROR_REPORT_EVENTID,
+ WMI_TLV_STATS_EXT_EVENTID = WMI_TLV_EV(WMI_TLV_GRP_STATS),
+ WMI_TLV_IFACE_LINK_STATS_EVENTID,
+ WMI_TLV_PEER_LINK_STATS_EVENTID,
+ WMI_TLV_RADIO_LINK_STATS_EVENTID,
+ WMI_TLV_NLO_MATCH_EVENTID = WMI_TLV_EV(WMI_TLV_GRP_NLO_OFL),
+ WMI_TLV_NLO_SCAN_COMPLETE_EVENTID,
+ WMI_TLV_APFIND_EVENTID,
+ WMI_TLV_GTK_OFFLOAD_STATUS_EVENTID = WMI_TLV_EV(WMI_TLV_GRP_GTK_OFL),
+ WMI_TLV_GTK_REKEY_FAIL_EVENTID,
+ WMI_TLV_CSA_HANDLING_EVENTID = WMI_TLV_EV(WMI_TLV_GRP_CSA_OFL),
+ WMI_TLV_CHATTER_PC_QUERY_EVENTID = WMI_TLV_EV(WMI_TLV_GRP_CHATTER),
+ WMI_TLV_ECHO_EVENTID = WMI_TLV_EV(WMI_TLV_GRP_MISC),
+ WMI_TLV_PDEV_UTF_EVENTID,
+ WMI_TLV_DEBUG_MESG_EVENTID,
+ WMI_TLV_UPDATE_STATS_EVENTID,
+ WMI_TLV_DEBUG_PRINT_EVENTID,
+ WMI_TLV_DCS_INTERFERENCE_EVENTID,
+ WMI_TLV_PDEV_QVIT_EVENTID,
+ WMI_TLV_WLAN_PROFILE_DATA_EVENTID,
+ WMI_TLV_PDEV_FTM_INTG_EVENTID,
+ WMI_TLV_WLAN_FREQ_AVOID_EVENTID,
+ WMI_TLV_VDEV_GET_KEEPALIVE_EVENTID,
+ WMI_TLV_THERMAL_MGMT_EVENTID,
+ WMI_TLV_DIAG_DATA_CONTAINER_EVENTID,
+ WMI_TLV_HOST_AUTO_SHUTDOWN_EVENTID,
+ WMI_TLV_UPDATE_WHAL_MIB_STATS_EVENTID,
+ WMI_TLV_UPDATE_VDEV_RATE_STATS_EVENTID,
+ WMI_TLV_DIAG_EVENTID,
+ WMI_TLV_GPIO_INPUT_EVENTID = WMI_TLV_EV(WMI_TLV_GRP_GPIO),
+ WMI_TLV_UPLOADH_EVENTID,
+ WMI_TLV_CAPTUREH_EVENTID,
+ WMI_TLV_RFKILL_STATE_CHANGE_EVENTID,
+ WMI_TLV_TDLS_PEER_EVENTID = WMI_TLV_EV(WMI_TLV_GRP_TDLS),
+ WMI_TLV_BATCH_SCAN_ENABLED_EVENTID =
+ WMI_TLV_EV(WMI_TLV_GRP_LOCATION_SCAN),
+ WMI_TLV_BATCH_SCAN_RESULT_EVENTID,
+ WMI_TLV_OEM_CAPABILITY_EVENTID = WMI_TLV_EV(WMI_TLV_GRP_OEM),
+ WMI_TLV_OEM_MEASUREMENT_REPORT_EVENTID,
+ WMI_TLV_OEM_ERROR_REPORT_EVENTID,
+ WMI_TLV_NAN_EVENTID = WMI_TLV_EV(WMI_TLV_GRP_NAN),
+ WMI_TLV_LPI_RESULT_EVENTID = WMI_TLV_EV(WMI_TLV_GRP_LPI),
+ WMI_TLV_LPI_STATUS_EVENTID,
+ WMI_TLV_LPI_HANDOFF_EVENTID,
+ WMI_TLV_EXTSCAN_START_STOP_EVENTID = WMI_TLV_EV(WMI_TLV_GRP_EXTSCAN),
+ WMI_TLV_EXTSCAN_OPERATION_EVENTID,
+ WMI_TLV_EXTSCAN_TABLE_USAGE_EVENTID,
+ WMI_TLV_EXTSCAN_CACHED_RESULTS_EVENTID,
+ WMI_TLV_EXTSCAN_WLAN_CHANGE_RESULTS_EVENTID,
+ WMI_TLV_EXTSCAN_HOTLIST_MATCH_EVENTID,
+ WMI_TLV_EXTSCAN_CAPABILITIES_EVENTID,
+ WMI_TLV_MDNS_STATS_EVENTID = WMI_TLV_EV(WMI_TLV_GRP_MDNS_OFL),
+ WMI_TLV_SAP_OFL_ADD_STA_EVENTID = WMI_TLV_EV(WMI_TLV_GRP_SAP_OFL),
+ WMI_TLV_SAP_OFL_DEL_STA_EVENTID,
+};
+
+enum wmi_tlv_pdev_param {
+ WMI_TLV_PDEV_PARAM_TX_CHAIN_MASK = 0x1,
+ WMI_TLV_PDEV_PARAM_RX_CHAIN_MASK,
+ WMI_TLV_PDEV_PARAM_TXPOWER_LIMIT2G,
+ WMI_TLV_PDEV_PARAM_TXPOWER_LIMIT5G,
+ WMI_TLV_PDEV_PARAM_TXPOWER_SCALE,
+ WMI_TLV_PDEV_PARAM_BEACON_GEN_MODE,
+ WMI_TLV_PDEV_PARAM_BEACON_TX_MODE,
+ WMI_TLV_PDEV_PARAM_RESMGR_OFFCHAN_MODE,
+ WMI_TLV_PDEV_PARAM_PROTECTION_MODE,
+ WMI_TLV_PDEV_PARAM_DYNAMIC_BW,
+ WMI_TLV_PDEV_PARAM_NON_AGG_SW_RETRY_TH,
+ WMI_TLV_PDEV_PARAM_AGG_SW_RETRY_TH,
+ WMI_TLV_PDEV_PARAM_STA_KICKOUT_TH,
+ WMI_TLV_PDEV_PARAM_AC_AGGRSIZE_SCALING,
+ WMI_TLV_PDEV_PARAM_LTR_ENABLE,
+ WMI_TLV_PDEV_PARAM_LTR_AC_LATENCY_BE,
+ WMI_TLV_PDEV_PARAM_LTR_AC_LATENCY_BK,
+ WMI_TLV_PDEV_PARAM_LTR_AC_LATENCY_VI,
+ WMI_TLV_PDEV_PARAM_LTR_AC_LATENCY_VO,
+ WMI_TLV_PDEV_PARAM_LTR_AC_LATENCY_TIMEOUT,
+ WMI_TLV_PDEV_PARAM_LTR_SLEEP_OVERRIDE,
+ WMI_TLV_PDEV_PARAM_LTR_RX_OVERRIDE,
+ WMI_TLV_PDEV_PARAM_LTR_TX_ACTIVITY_TIMEOUT,
+ WMI_TLV_PDEV_PARAM_L1SS_ENABLE,
+ WMI_TLV_PDEV_PARAM_DSLEEP_ENABLE,
+ WMI_TLV_PDEV_PARAM_PCIELP_TXBUF_FLUSH,
+ WMI_TLV_PDEV_PARAM_PCIELP_TXBUF_WATERMARK,
+ WMI_TLV_PDEV_PARAM_PCIELP_TXBUF_TMO_EN,
+ WMI_TLV_PDEV_PARAM_PCIELP_TXBUF_TMO_VALUE,
+ WMI_TLV_PDEV_PARAM_PDEV_STATS_UPDATE_PERIOD,
+ WMI_TLV_PDEV_PARAM_VDEV_STATS_UPDATE_PERIOD,
+ WMI_TLV_PDEV_PARAM_PEER_STATS_UPDATE_PERIOD,
+ WMI_TLV_PDEV_PARAM_BCNFLT_STATS_UPDATE_PERIOD,
+ WMI_TLV_PDEV_PARAM_PMF_QOS,
+ WMI_TLV_PDEV_PARAM_ARP_AC_OVERRIDE,
+ WMI_TLV_PDEV_PARAM_DCS,
+ WMI_TLV_PDEV_PARAM_ANI_ENABLE,
+ WMI_TLV_PDEV_PARAM_ANI_POLL_PERIOD,
+ WMI_TLV_PDEV_PARAM_ANI_LISTEN_PERIOD,
+ WMI_TLV_PDEV_PARAM_ANI_OFDM_LEVEL,
+ WMI_TLV_PDEV_PARAM_ANI_CCK_LEVEL,
+ WMI_TLV_PDEV_PARAM_DYNTXCHAIN,
+ WMI_TLV_PDEV_PARAM_PROXY_STA,
+ WMI_TLV_PDEV_PARAM_IDLE_PS_CONFIG,
+ WMI_TLV_PDEV_PARAM_POWER_GATING_SLEEP,
+ WMI_TLV_PDEV_PARAM_RFKILL_ENABLE,
+ WMI_TLV_PDEV_PARAM_BURST_DUR,
+ WMI_TLV_PDEV_PARAM_BURST_ENABLE,
+ WMI_TLV_PDEV_PARAM_HW_RFKILL_CONFIG,
+ WMI_TLV_PDEV_PARAM_LOW_POWER_RF_ENABLE,
+ WMI_TLV_PDEV_PARAM_L1SS_TRACK,
+ WMI_TLV_PDEV_PARAM_HYST_EN,
+ WMI_TLV_PDEV_PARAM_POWER_COLLAPSE_ENABLE,
+ WMI_TLV_PDEV_PARAM_LED_SYS_STATE,
+ WMI_TLV_PDEV_PARAM_LED_ENABLE,
+ WMI_TLV_PDEV_PARAM_AUDIO_OVER_WLAN_LATENCY,
+ WMI_TLV_PDEV_PARAM_AUDIO_OVER_WLAN_ENABLE,
+ WMI_TLV_PDEV_PARAM_WHAL_MIB_STATS_UPDATE_ENABLE,
+ WMI_TLV_PDEV_PARAM_VDEV_RATE_STATS_UPDATE_PERIOD,
+ WMI_TLV_PDEV_PARAM_TXPOWER_REASON_NONE,
+ WMI_TLV_PDEV_PARAM_TXPOWER_REASON_SAR,
+ WMI_TLV_PDEV_PARAM_TXPOWER_REASON_MAX,
+};
+
+enum wmi_tlv_vdev_param {
+ WMI_TLV_VDEV_PARAM_RTS_THRESHOLD = 0x1,
+ WMI_TLV_VDEV_PARAM_FRAGMENTATION_THRESHOLD,
+ WMI_TLV_VDEV_PARAM_BEACON_INTERVAL,
+ WMI_TLV_VDEV_PARAM_LISTEN_INTERVAL,
+ WMI_TLV_VDEV_PARAM_MULTICAST_RATE,
+ WMI_TLV_VDEV_PARAM_MGMT_TX_RATE,
+ WMI_TLV_VDEV_PARAM_SLOT_TIME,
+ WMI_TLV_VDEV_PARAM_PREAMBLE,
+ WMI_TLV_VDEV_PARAM_SWBA_TIME,
+ WMI_TLV_VDEV_STATS_UPDATE_PERIOD,
+ WMI_TLV_VDEV_PWRSAVE_AGEOUT_TIME,
+ WMI_TLV_VDEV_HOST_SWBA_INTERVAL,
+ WMI_TLV_VDEV_PARAM_DTIM_PERIOD,
+ WMI_TLV_VDEV_OC_SCHEDULER_AIR_TIME_LIMIT,
+ WMI_TLV_VDEV_PARAM_WDS,
+ WMI_TLV_VDEV_PARAM_ATIM_WINDOW,
+ WMI_TLV_VDEV_PARAM_BMISS_COUNT_MAX,
+ WMI_TLV_VDEV_PARAM_BMISS_FIRST_BCNT,
+ WMI_TLV_VDEV_PARAM_BMISS_FINAL_BCNT,
+ WMI_TLV_VDEV_PARAM_FEATURE_WMM,
+ WMI_TLV_VDEV_PARAM_CHWIDTH,
+ WMI_TLV_VDEV_PARAM_CHEXTOFFSET,
+ WMI_TLV_VDEV_PARAM_DISABLE_HTPROTECTION,
+ WMI_TLV_VDEV_PARAM_STA_QUICKKICKOUT,
+ WMI_TLV_VDEV_PARAM_MGMT_RATE,
+ WMI_TLV_VDEV_PARAM_PROTECTION_MODE,
+ WMI_TLV_VDEV_PARAM_FIXED_RATE,
+ WMI_TLV_VDEV_PARAM_SGI,
+ WMI_TLV_VDEV_PARAM_LDPC,
+ WMI_TLV_VDEV_PARAM_TX_STBC,
+ WMI_TLV_VDEV_PARAM_RX_STBC,
+ WMI_TLV_VDEV_PARAM_INTRA_BSS_FWD,
+ WMI_TLV_VDEV_PARAM_DEF_KEYID,
+ WMI_TLV_VDEV_PARAM_NSS,
+ WMI_TLV_VDEV_PARAM_BCAST_DATA_RATE,
+ WMI_TLV_VDEV_PARAM_MCAST_DATA_RATE,
+ WMI_TLV_VDEV_PARAM_MCAST_INDICATE,
+ WMI_TLV_VDEV_PARAM_DHCP_INDICATE,
+ WMI_TLV_VDEV_PARAM_UNKNOWN_DEST_INDICATE,
+ WMI_TLV_VDEV_PARAM_AP_KEEPALIVE_MIN_IDLE_INACTIVE_TIME_SECS,
+ WMI_TLV_VDEV_PARAM_AP_KEEPALIVE_MAX_IDLE_INACTIVE_TIME_SECS,
+ WMI_TLV_VDEV_PARAM_AP_KEEPALIVE_MAX_UNRESPONSIVE_TIME_SECS,
+ WMI_TLV_VDEV_PARAM_AP_ENABLE_NAWDS,
+ WMI_TLV_VDEV_PARAM_ENABLE_RTSCTS,
+ WMI_TLV_VDEV_PARAM_TXBF,
+ WMI_TLV_VDEV_PARAM_PACKET_POWERSAVE,
+ WMI_TLV_VDEV_PARAM_DROP_UNENCRY,
+ WMI_TLV_VDEV_PARAM_TX_ENCAP_TYPE,
+ WMI_TLV_VDEV_PARAM_AP_DETECT_OUT_OF_SYNC_SLEEPING_STA_TIME_SECS,
+ WMI_TLV_VDEV_PARAM_EARLY_RX_ADJUST_ENABLE,
+ WMI_TLV_VDEV_PARAM_EARLY_RX_TGT_BMISS_NUM,
+ WMI_TLV_VDEV_PARAM_EARLY_RX_BMISS_SAMPLE_CYCLE,
+ WMI_TLV_VDEV_PARAM_EARLY_RX_SLOP_STEP,
+ WMI_TLV_VDEV_PARAM_EARLY_RX_INIT_SLOP,
+ WMI_TLV_VDEV_PARAM_EARLY_RX_ADJUST_PAUSE,
+ WMI_TLV_VDEV_PARAM_TX_PWRLIMIT,
+ WMI_TLV_VDEV_PARAM_SNR_NUM_FOR_CAL,
+ WMI_TLV_VDEV_PARAM_ROAM_FW_OFFLOAD,
+ WMI_TLV_VDEV_PARAM_ENABLE_RMC,
+ WMI_TLV_VDEV_PARAM_IBSS_MAX_BCN_LOST_MS,
+ WMI_TLV_VDEV_PARAM_MAX_RATE,
+ WMI_TLV_VDEV_PARAM_EARLY_RX_DRIFT_SAMPLE,
+ WMI_TLV_VDEV_PARAM_SET_IBSS_TX_FAIL_CNT_THR,
+ WMI_TLV_VDEV_PARAM_EBT_RESYNC_TIMEOUT,
+ WMI_TLV_VDEV_PARAM_AGGR_TRIG_EVENT_ENABLE,
+ WMI_TLV_VDEV_PARAM_IS_IBSS_POWER_SAVE_ALLOWED,
+ WMI_TLV_VDEV_PARAM_IS_POWER_COLLAPSE_ALLOWED,
+ WMI_TLV_VDEV_PARAM_IS_AWAKE_ON_TXRX_ENABLED,
+ WMI_TLV_VDEV_PARAM_INACTIVITY_CNT,
+ WMI_TLV_VDEV_PARAM_TXSP_END_INACTIVITY_TIME_MS,
+ WMI_TLV_VDEV_PARAM_DTIM_POLICY,
+ WMI_TLV_VDEV_PARAM_IBSS_PS_WARMUP_TIME_SECS,
+ WMI_TLV_VDEV_PARAM_IBSS_PS_1RX_CHAIN_IN_ATIM_WINDOW_ENABLE,
+};
+
+enum wmi_tlv_peer_flags {
+ WMI_TLV_PEER_AUTH = 0x00000001,
+ WMI_TLV_PEER_QOS = 0x00000002,
+ WMI_TLV_PEER_NEED_PTK_4_WAY = 0x00000004,
+ WMI_TLV_PEER_NEED_GTK_2_WAY = 0x00000010,
+ WMI_TLV_PEER_APSD = 0x00000800,
+ WMI_TLV_PEER_HT = 0x00001000,
+ WMI_TLV_PEER_40MHZ = 0x00002000,
+ WMI_TLV_PEER_STBC = 0x00008000,
+ WMI_TLV_PEER_LDPC = 0x00010000,
+ WMI_TLV_PEER_DYN_MIMOPS = 0x00020000,
+ WMI_TLV_PEER_STATIC_MIMOPS = 0x00040000,
+ WMI_TLV_PEER_SPATIAL_MUX = 0x00200000,
+ WMI_TLV_PEER_VHT = 0x02000000,
+ WMI_TLV_PEER_80MHZ = 0x04000000,
+ WMI_TLV_PEER_PMF = 0x08000000,
+ WMI_TLV_PEER_160MHZ = 0x20000000,
+};
+
+enum wmi_tlv_tag {
+ WMI_TLV_TAG_LAST_RESERVED = 15,
+
+ WMI_TLV_TAG_FIRST_ARRAY_ENUM,
+ WMI_TLV_TAG_ARRAY_UINT32 = WMI_TLV_TAG_FIRST_ARRAY_ENUM,
+ WMI_TLV_TAG_ARRAY_BYTE,
+ WMI_TLV_TAG_ARRAY_STRUCT,
+ WMI_TLV_TAG_ARRAY_FIXED_STRUCT,
+ WMI_TLV_TAG_LAST_ARRAY_ENUM = 31,
+
+ WMI_TLV_TAG_STRUCT_SERVICE_READY_EVENT,
+ WMI_TLV_TAG_STRUCT_HAL_REG_CAPABILITIES,
+ WMI_TLV_TAG_STRUCT_WLAN_HOST_MEM_REQ,
+ WMI_TLV_TAG_STRUCT_READY_EVENT,
+ WMI_TLV_TAG_STRUCT_SCAN_EVENT,
+ WMI_TLV_TAG_STRUCT_PDEV_TPC_CONFIG_EVENT,
+ WMI_TLV_TAG_STRUCT_CHAN_INFO_EVENT,
+ WMI_TLV_TAG_STRUCT_COMB_PHYERR_RX_HDR,
+ WMI_TLV_TAG_STRUCT_VDEV_START_RESPONSE_EVENT,
+ WMI_TLV_TAG_STRUCT_VDEV_STOPPED_EVENT,
+ WMI_TLV_TAG_STRUCT_VDEV_INSTALL_KEY_COMPLETE_EVENT,
+ WMI_TLV_TAG_STRUCT_PEER_STA_KICKOUT_EVENT,
+ WMI_TLV_TAG_STRUCT_MGMT_RX_HDR,
+ WMI_TLV_TAG_STRUCT_TBTT_OFFSET_EVENT,
+ WMI_TLV_TAG_STRUCT_TX_DELBA_COMPLETE_EVENT,
+ WMI_TLV_TAG_STRUCT_TX_ADDBA_COMPLETE_EVENT,
+ WMI_TLV_TAG_STRUCT_ROAM_EVENT,
+ WMI_TLV_TAG_STRUCT_WOW_EVENT_INFO,
+ WMI_TLV_TAG_STRUCT_WOW_EVENT_INFO_SECTION_BITMAP,
+ WMI_TLV_TAG_STRUCT_RTT_EVENT_HEADER,
+ WMI_TLV_TAG_STRUCT_RTT_ERROR_REPORT_EVENT,
+ WMI_TLV_TAG_STRUCT_RTT_MEAS_EVENT,
+ WMI_TLV_TAG_STRUCT_ECHO_EVENT,
+ WMI_TLV_TAG_STRUCT_FTM_INTG_EVENT,
+ WMI_TLV_TAG_STRUCT_VDEV_GET_KEEPALIVE_EVENT,
+ WMI_TLV_TAG_STRUCT_GPIO_INPUT_EVENT,
+ WMI_TLV_TAG_STRUCT_CSA_EVENT,
+ WMI_TLV_TAG_STRUCT_GTK_OFFLOAD_STATUS_EVENT,
+ WMI_TLV_TAG_STRUCT_IGTK_INFO,
+ WMI_TLV_TAG_STRUCT_DCS_INTERFERENCE_EVENT,
+ WMI_TLV_TAG_STRUCT_ATH_DCS_CW_INT,
+ WMI_TLV_TAG_STRUCT_ATH_DCS_WLAN_INT_STAT,
+ WMI_TLV_TAG_STRUCT_WLAN_PROFILE_CTX_T,
+ WMI_TLV_TAG_STRUCT_WLAN_PROFILE_T,
+ WMI_TLV_TAG_STRUCT_PDEV_QVIT_EVENT,
+ WMI_TLV_TAG_STRUCT_HOST_SWBA_EVENT,
+ WMI_TLV_TAG_STRUCT_TIM_INFO,
+ WMI_TLV_TAG_STRUCT_P2P_NOA_INFO,
+ WMI_TLV_TAG_STRUCT_STATS_EVENT,
+ WMI_TLV_TAG_STRUCT_AVOID_FREQ_RANGES_EVENT,
+ WMI_TLV_TAG_STRUCT_AVOID_FREQ_RANGE_DESC,
+ WMI_TLV_TAG_STRUCT_GTK_REKEY_FAIL_EVENT,
+ WMI_TLV_TAG_STRUCT_INIT_CMD,
+ WMI_TLV_TAG_STRUCT_RESOURCE_CONFIG,
+ WMI_TLV_TAG_STRUCT_WLAN_HOST_MEMORY_CHUNK,
+ WMI_TLV_TAG_STRUCT_START_SCAN_CMD,
+ WMI_TLV_TAG_STRUCT_STOP_SCAN_CMD,
+ WMI_TLV_TAG_STRUCT_SCAN_CHAN_LIST_CMD,
+ WMI_TLV_TAG_STRUCT_CHANNEL,
+ WMI_TLV_TAG_STRUCT_PDEV_SET_REGDOMAIN_CMD,
+ WMI_TLV_TAG_STRUCT_PDEV_SET_PARAM_CMD,
+ WMI_TLV_TAG_STRUCT_PDEV_SET_WMM_PARAMS_CMD,
+ WMI_TLV_TAG_STRUCT_WMM_PARAMS,
+ WMI_TLV_TAG_STRUCT_PDEV_SET_QUIET_CMD,
+ WMI_TLV_TAG_STRUCT_VDEV_CREATE_CMD,
+ WMI_TLV_TAG_STRUCT_VDEV_DELETE_CMD,
+ WMI_TLV_TAG_STRUCT_VDEV_START_REQUEST_CMD,
+ WMI_TLV_TAG_STRUCT_P2P_NOA_DESCRIPTOR,
+ WMI_TLV_TAG_STRUCT_P2P_GO_SET_BEACON_IE,
+ WMI_TLV_TAG_STRUCT_GTK_OFFLOAD_CMD,
+ WMI_TLV_TAG_STRUCT_VDEV_UP_CMD,
+ WMI_TLV_TAG_STRUCT_VDEV_STOP_CMD,
+ WMI_TLV_TAG_STRUCT_VDEV_DOWN_CMD,
+ WMI_TLV_TAG_STRUCT_VDEV_SET_PARAM_CMD,
+ WMI_TLV_TAG_STRUCT_VDEV_INSTALL_KEY_CMD,
+ WMI_TLV_TAG_STRUCT_PEER_CREATE_CMD,
+ WMI_TLV_TAG_STRUCT_PEER_DELETE_CMD,
+ WMI_TLV_TAG_STRUCT_PEER_FLUSH_TIDS_CMD,
+ WMI_TLV_TAG_STRUCT_PEER_SET_PARAM_CMD,
+ WMI_TLV_TAG_STRUCT_PEER_ASSOC_COMPLETE_CMD,
+ WMI_TLV_TAG_STRUCT_VHT_RATE_SET,
+ WMI_TLV_TAG_STRUCT_BCN_TMPL_CMD,
+ WMI_TLV_TAG_STRUCT_PRB_TMPL_CMD,
+ WMI_TLV_TAG_STRUCT_BCN_PRB_INFO,
+ WMI_TLV_TAG_STRUCT_PEER_TID_ADDBA_CMD,
+ WMI_TLV_TAG_STRUCT_PEER_TID_DELBA_CMD,
+ WMI_TLV_TAG_STRUCT_STA_POWERSAVE_MODE_CMD,
+ WMI_TLV_TAG_STRUCT_STA_POWERSAVE_PARAM_CMD,
+ WMI_TLV_TAG_STRUCT_STA_DTIM_PS_METHOD_CMD,
+ WMI_TLV_TAG_STRUCT_ROAM_SCAN_MODE,
+ WMI_TLV_TAG_STRUCT_ROAM_SCAN_RSSI_THRESHOLD,
+ WMI_TLV_TAG_STRUCT_ROAM_SCAN_PERIOD,
+ WMI_TLV_TAG_STRUCT_ROAM_SCAN_RSSI_CHANGE_THRESHOLD,
+ WMI_TLV_TAG_STRUCT_PDEV_SUSPEND_CMD,
+ WMI_TLV_TAG_STRUCT_PDEV_RESUME_CMD,
+ WMI_TLV_TAG_STRUCT_ADD_BCN_FILTER_CMD,
+ WMI_TLV_TAG_STRUCT_RMV_BCN_FILTER_CMD,
+ WMI_TLV_TAG_STRUCT_WOW_ENABLE_CMD,
+ WMI_TLV_TAG_STRUCT_WOW_HOSTWAKEUP_FROM_SLEEP_CMD,
+ WMI_TLV_TAG_STRUCT_STA_UAPSD_AUTO_TRIG_CMD,
+ WMI_TLV_TAG_STRUCT_STA_UAPSD_AUTO_TRIG_PARAM,
+ WMI_TLV_TAG_STRUCT_SET_ARP_NS_OFFLOAD_CMD,
+ WMI_TLV_TAG_STRUCT_ARP_OFFLOAD_TUPLE,
+ WMI_TLV_TAG_STRUCT_NS_OFFLOAD_TUPLE,
+ WMI_TLV_TAG_STRUCT_FTM_INTG_CMD,
+ WMI_TLV_TAG_STRUCT_STA_KEEPALIVE_CMD,
+ WMI_TLV_TAG_STRUCT_STA_KEEPALVE_ARP_RESPONSE,
+ WMI_TLV_TAG_STRUCT_P2P_SET_VENDOR_IE_DATA_CMD,
+ WMI_TLV_TAG_STRUCT_AP_PS_PEER_CMD,
+ WMI_TLV_TAG_STRUCT_PEER_RATE_RETRY_SCHED_CMD,
+ WMI_TLV_TAG_STRUCT_WLAN_PROFILE_TRIGGER_CMD,
+ WMI_TLV_TAG_STRUCT_WLAN_PROFILE_SET_HIST_INTVL_CMD,
+ WMI_TLV_TAG_STRUCT_WLAN_PROFILE_GET_PROF_DATA_CMD,
+ WMI_TLV_TAG_STRUCT_WLAN_PROFILE_ENABLE_PROFILE_ID_CMD,
+ WMI_TLV_TAG_STRUCT_WOW_DEL_PATTERN_CMD,
+ WMI_TLV_TAG_STRUCT_WOW_ADD_DEL_EVT_CMD,
+ WMI_TLV_TAG_STRUCT_RTT_MEASREQ_HEAD,
+ WMI_TLV_TAG_STRUCT_RTT_MEASREQ_BODY,
+ WMI_TLV_TAG_STRUCT_RTT_TSF_CMD,
+ WMI_TLV_TAG_STRUCT_VDEV_SPECTRAL_CONFIGURE_CMD,
+ WMI_TLV_TAG_STRUCT_VDEV_SPECTRAL_ENABLE_CMD,
+ WMI_TLV_TAG_STRUCT_REQUEST_STATS_CMD,
+ WMI_TLV_TAG_STRUCT_NLO_CONFIG_CMD,
+ WMI_TLV_TAG_STRUCT_NLO_CONFIGURED_PARAMETERS,
+ WMI_TLV_TAG_STRUCT_CSA_OFFLOAD_ENABLE_CMD,
+ WMI_TLV_TAG_STRUCT_CSA_OFFLOAD_CHANSWITCH_CMD,
+ WMI_TLV_TAG_STRUCT_CHATTER_SET_MODE_CMD,
+ WMI_TLV_TAG_STRUCT_ECHO_CMD,
+ WMI_TLV_TAG_STRUCT_VDEV_SET_KEEPALIVE_CMD,
+ WMI_TLV_TAG_STRUCT_VDEV_GET_KEEPALIVE_CMD,
+ WMI_TLV_TAG_STRUCT_FORCE_FW_HANG_CMD,
+ WMI_TLV_TAG_STRUCT_GPIO_CONFIG_CMD,
+ WMI_TLV_TAG_STRUCT_GPIO_OUTPUT_CMD,
+ WMI_TLV_TAG_STRUCT_PEER_ADD_WDS_ENTRY_CMD,
+ WMI_TLV_TAG_STRUCT_PEER_REMOVE_WDS_ENTRY_CMD,
+ WMI_TLV_TAG_STRUCT_BCN_TX_HDR,
+ WMI_TLV_TAG_STRUCT_BCN_SEND_FROM_HOST_CMD,
+ WMI_TLV_TAG_STRUCT_MGMT_TX_HDR,
+ WMI_TLV_TAG_STRUCT_ADDBA_CLEAR_RESP_CMD,
+ WMI_TLV_TAG_STRUCT_ADDBA_SEND_CMD,
+ WMI_TLV_TAG_STRUCT_DELBA_SEND_CMD,
+ WMI_TLV_TAG_STRUCT_ADDBA_SETRESPONSE_CMD,
+ WMI_TLV_TAG_STRUCT_SEND_SINGLEAMSDU_CMD,
+ WMI_TLV_TAG_STRUCT_PDEV_PKTLOG_ENABLE_CMD,
+ WMI_TLV_TAG_STRUCT_PDEV_PKTLOG_DISABLE_CMD,
+ WMI_TLV_TAG_STRUCT_PDEV_SET_HT_IE_CMD,
+ WMI_TLV_TAG_STRUCT_PDEV_SET_VHT_IE_CMD,
+ WMI_TLV_TAG_STRUCT_PDEV_SET_DSCP_TID_MAP_CMD,
+ WMI_TLV_TAG_STRUCT_PDEV_GREEN_AP_PS_ENABLE_CMD,
+ WMI_TLV_TAG_STRUCT_PDEV_GET_TPC_CONFIG_CMD,
+ WMI_TLV_TAG_STRUCT_PDEV_SET_BASE_MACADDR_CMD,
+ WMI_TLV_TAG_STRUCT_PEER_MCAST_GROUP_CMD,
+ WMI_TLV_TAG_STRUCT_ROAM_AP_PROFILE,
+ WMI_TLV_TAG_STRUCT_AP_PROFILE,
+ WMI_TLV_TAG_STRUCT_SCAN_SCH_PRIORITY_TABLE_CMD,
+ WMI_TLV_TAG_STRUCT_PDEV_DFS_ENABLE_CMD,
+ WMI_TLV_TAG_STRUCT_PDEV_DFS_DISABLE_CMD,
+ WMI_TLV_TAG_STRUCT_WOW_ADD_PATTERN_CMD,
+ WMI_TLV_TAG_STRUCT_WOW_BITMAP_PATTERN_T,
+ WMI_TLV_TAG_STRUCT_WOW_IPV4_SYNC_PATTERN_T,
+ WMI_TLV_TAG_STRUCT_WOW_IPV6_SYNC_PATTERN_T,
+ WMI_TLV_TAG_STRUCT_WOW_MAGIC_PATTERN_CMD,
+ WMI_TLV_TAG_STRUCT_SCAN_UPDATE_REQUEST_CMD,
+ WMI_TLV_TAG_STRUCT_CHATTER_PKT_COALESCING_FILTER,
+ WMI_TLV_TAG_STRUCT_CHATTER_COALESCING_ADD_FILTER_CMD,
+ WMI_TLV_TAG_STRUCT_CHATTER_COALESCING_DELETE_FILTER_CMD,
+ WMI_TLV_TAG_STRUCT_CHATTER_COALESCING_QUERY_CMD,
+ WMI_TLV_TAG_STRUCT_TXBF_CMD,
+ WMI_TLV_TAG_STRUCT_DEBUG_LOG_CONFIG_CMD,
+ WMI_TLV_TAG_STRUCT_NLO_EVENT,
+ WMI_TLV_TAG_STRUCT_CHATTER_QUERY_REPLY_EVENT,
+ WMI_TLV_TAG_STRUCT_UPLOAD_H_HDR,
+ WMI_TLV_TAG_STRUCT_CAPTURE_H_EVENT_HDR,
+ WMI_TLV_TAG_STRUCT_VDEV_WNM_SLEEPMODE_CMD,
+ WMI_TLV_TAG_STRUCT_VDEV_IPSEC_NATKEEPALIVE_FILTER_CMD,
+ WMI_TLV_TAG_STRUCT_VDEV_WMM_ADDTS_CMD,
+ WMI_TLV_TAG_STRUCT_VDEV_WMM_DELTS_CMD,
+ WMI_TLV_TAG_STRUCT_VDEV_SET_WMM_PARAMS_CMD,
+ WMI_TLV_TAG_STRUCT_TDLS_SET_STATE_CMD,
+ WMI_TLV_TAG_STRUCT_TDLS_PEER_UPDATE_CMD,
+ WMI_TLV_TAG_STRUCT_TDLS_PEER_EVENT,
+ WMI_TLV_TAG_STRUCT_TDLS_PEER_CAPABILITIES,
+ WMI_TLV_TAG_STRUCT_VDEV_MCC_SET_TBTT_MODE_CMD,
+ WMI_TLV_TAG_STRUCT_ROAM_CHAN_LIST,
+ WMI_TLV_TAG_STRUCT_VDEV_MCC_BCN_INTVL_CHANGE_EVENT,
+ WMI_TLV_TAG_STRUCT_RESMGR_ADAPTIVE_OCS_CMD,
+ WMI_TLV_TAG_STRUCT_RESMGR_SET_CHAN_TIME_QUOTA_CMD,
+ WMI_TLV_TAG_STRUCT_RESMGR_SET_CHAN_LATENCY_CMD,
+ WMI_TLV_TAG_STRUCT_BA_REQ_SSN_CMD,
+ WMI_TLV_TAG_STRUCT_BA_RSP_SSN_EVENT,
+ WMI_TLV_TAG_STRUCT_STA_SMPS_FORCE_MODE_CMD,
+ WMI_TLV_TAG_STRUCT_SET_MCASTBCAST_FILTER_CMD,
+ WMI_TLV_TAG_STRUCT_P2P_SET_OPPPS_CMD,
+ WMI_TLV_TAG_STRUCT_P2P_SET_NOA_CMD,
+ WMI_TLV_TAG_STRUCT_BA_REQ_SSN_CMD_SUB_STRUCT_PARAM,
+ WMI_TLV_TAG_STRUCT_BA_REQ_SSN_EVENT_SUB_STRUCT_PARAM,
+ WMI_TLV_TAG_STRUCT_STA_SMPS_PARAM_CMD,
+ WMI_TLV_TAG_STRUCT_VDEV_SET_GTX_PARAMS_CMD,
+ WMI_TLV_TAG_STRUCT_MCC_SCHED_TRAFFIC_STATS_CMD,
+ WMI_TLV_TAG_STRUCT_MCC_SCHED_STA_TRAFFIC_STATS,
+ WMI_TLV_TAG_STRUCT_OFFLOAD_BCN_TX_STATUS_EVENT,
+ WMI_TLV_TAG_STRUCT_P2P_NOA_EVENT,
+ WMI_TLV_TAG_STRUCT_HB_SET_ENABLE_CMD,
+ WMI_TLV_TAG_STRUCT_HB_SET_TCP_PARAMS_CMD,
+ WMI_TLV_TAG_STRUCT_HB_SET_TCP_PKT_FILTER_CMD,
+ WMI_TLV_TAG_STRUCT_HB_SET_UDP_PARAMS_CMD,
+ WMI_TLV_TAG_STRUCT_HB_SET_UDP_PKT_FILTER_CMD,
+ WMI_TLV_TAG_STRUCT_HB_IND_EVENT,
+ WMI_TLV_TAG_STRUCT_TX_PAUSE_EVENT,
+ WMI_TLV_TAG_STRUCT_RFKILL_EVENT,
+ WMI_TLV_TAG_STRUCT_DFS_RADAR_EVENT,
+ WMI_TLV_TAG_STRUCT_DFS_PHYERR_FILTER_ENA_CMD,
+ WMI_TLV_TAG_STRUCT_DFS_PHYERR_FILTER_DIS_CMD,
+ WMI_TLV_TAG_STRUCT_BATCH_SCAN_RESULT_SCAN_LIST,
+ WMI_TLV_TAG_STRUCT_BATCH_SCAN_RESULT_NETWORK_INFO,
+ WMI_TLV_TAG_STRUCT_BATCH_SCAN_ENABLE_CMD,
+ WMI_TLV_TAG_STRUCT_BATCH_SCAN_DISABLE_CMD,
+ WMI_TLV_TAG_STRUCT_BATCH_SCAN_TRIGGER_RESULT_CMD,
+ WMI_TLV_TAG_STRUCT_BATCH_SCAN_ENABLED_EVENT,
+ WMI_TLV_TAG_STRUCT_BATCH_SCAN_RESULT_EVENT,
+ WMI_TLV_TAG_STRUCT_VDEV_PLMREQ_START_CMD,
+ WMI_TLV_TAG_STRUCT_VDEV_PLMREQ_STOP_CMD,
+ WMI_TLV_TAG_STRUCT_THERMAL_MGMT_CMD,
+ WMI_TLV_TAG_STRUCT_THERMAL_MGMT_EVENT,
+ WMI_TLV_TAG_STRUCT_PEER_INFO_REQ_CMD,
+ WMI_TLV_TAG_STRUCT_PEER_INFO_EVENT,
+ WMI_TLV_TAG_STRUCT_PEER_INFO,
+ WMI_TLV_TAG_STRUCT_PEER_TX_FAIL_CNT_THR_EVENT,
+ WMI_TLV_TAG_STRUCT_RMC_SET_MODE_CMD,
+ WMI_TLV_TAG_STRUCT_RMC_SET_ACTION_PERIOD_CMD,
+ WMI_TLV_TAG_STRUCT_RMC_CONFIG_CMD,
+ WMI_TLV_TAG_STRUCT_MHF_OFFLOAD_SET_MODE_CMD,
+ WMI_TLV_TAG_STRUCT_MHF_OFFLOAD_PLUMB_ROUTING_TABLE_CMD,
+ WMI_TLV_TAG_STRUCT_ADD_PROACTIVE_ARP_RSP_PATTERN_CMD,
+ WMI_TLV_TAG_STRUCT_DEL_PROACTIVE_ARP_RSP_PATTERN_CMD,
+ WMI_TLV_TAG_STRUCT_NAN_CMD_PARAM,
+ WMI_TLV_TAG_STRUCT_NAN_EVENT_HDR,
+ WMI_TLV_TAG_STRUCT_PDEV_L1SS_TRACK_EVENT,
+ WMI_TLV_TAG_STRUCT_DIAG_DATA_CONTAINER_EVENT,
+ WMI_TLV_TAG_STRUCT_MODEM_POWER_STATE_CMD_PARAM,
+ WMI_TLV_TAG_STRUCT_PEER_GET_ESTIMATED_LINKSPEED_CMD,
+ WMI_TLV_TAG_STRUCT_PEER_ESTIMATED_LINKSPEED_EVENT,
+ WMI_TLV_TAG_STRUCT_AGGR_STATE_TRIG_EVENT,
+ WMI_TLV_TAG_STRUCT_MHF_OFFLOAD_ROUTING_TABLE_ENTRY,
+ WMI_TLV_TAG_STRUCT_ROAM_SCAN_CMD,
+ WMI_TLV_TAG_STRUCT_REQ_STATS_EXT_CMD,
+ WMI_TLV_TAG_STRUCT_STATS_EXT_EVENT,
+ WMI_TLV_TAG_STRUCT_OBSS_SCAN_ENABLE_CMD,
+ WMI_TLV_TAG_STRUCT_OBSS_SCAN_DISABLE_CMD,
+ WMI_TLV_TAG_STRUCT_OFFLOAD_PRB_RSP_TX_STATUS_EVENT,
+ WMI_TLV_TAG_STRUCT_PDEV_SET_LED_CONFIG_CMD,
+ WMI_TLV_TAG_STRUCT_HOST_AUTO_SHUTDOWN_CFG_CMD,
+ WMI_TLV_TAG_STRUCT_HOST_AUTO_SHUTDOWN_EVENT,
+ WMI_TLV_TAG_STRUCT_UPDATE_WHAL_MIB_STATS_EVENT,
+ WMI_TLV_TAG_STRUCT_CHAN_AVOID_UPDATE_CMD_PARAM,
+ WMI_TLV_TAG_STRUCT_WOW_ACER_IOAC_PKT_PATTERN_T,
+ WMI_TLV_TAG_STRUCT_WOW_ACER_IOAC_TMR_PATTERN_T,
+ WMI_TLV_TAG_STRUCT_WOW_IOAC_ADD_KEEPALIVE_CMD,
+ WMI_TLV_TAG_STRUCT_WOW_IOAC_DEL_KEEPALIVE_CMD,
+ WMI_TLV_TAG_STRUCT_WOW_IOAC_KEEPALIVE_T,
+ WMI_TLV_TAG_STRUCT_WOW_ACER_IOAC_ADD_PATTERN_CMD,
+ WMI_TLV_TAG_STRUCT_WOW_ACER_IOAC_DEL_PATTERN_CMD,
+ WMI_TLV_TAG_STRUCT_START_LINK_STATS_CMD,
+ WMI_TLV_TAG_STRUCT_CLEAR_LINK_STATS_CMD,
+ WMI_TLV_TAG_STRUCT_REQUEST_LINK_STATS_CMD,
+ WMI_TLV_TAG_STRUCT_IFACE_LINK_STATS_EVENT,
+ WMI_TLV_TAG_STRUCT_RADIO_LINK_STATS_EVENT,
+ WMI_TLV_TAG_STRUCT_PEER_STATS_EVENT,
+ WMI_TLV_TAG_STRUCT_CHANNEL_STATS,
+ WMI_TLV_TAG_STRUCT_RADIO_LINK_STATS,
+ WMI_TLV_TAG_STRUCT_RATE_STATS,
+ WMI_TLV_TAG_STRUCT_PEER_LINK_STATS,
+ WMI_TLV_TAG_STRUCT_WMM_AC_STATS,
+ WMI_TLV_TAG_STRUCT_IFACE_LINK_STATS,
+ WMI_TLV_TAG_STRUCT_LPI_MGMT_SNOOPING_CONFIG_CMD,
+ WMI_TLV_TAG_STRUCT_LPI_START_SCAN_CMD,
+ WMI_TLV_TAG_STRUCT_LPI_STOP_SCAN_CMD,
+ WMI_TLV_TAG_STRUCT_LPI_RESULT_EVENT,
+ WMI_TLV_TAG_STRUCT_PEER_STATE_EVENT,
+ WMI_TLV_TAG_STRUCT_EXTSCAN_BUCKET_CMD,
+ WMI_TLV_TAG_STRUCT_EXTSCAN_BUCKET_CHANNEL_EVENT,
+ WMI_TLV_TAG_STRUCT_EXTSCAN_START_CMD,
+ WMI_TLV_TAG_STRUCT_EXTSCAN_STOP_CMD,
+ WMI_TLV_TAG_STRUCT_EXTSCAN_CONFIGURE_WLAN_CHANGE_MONITOR_CMD,
+ WMI_TLV_TAG_STRUCT_EXTSCAN_WLAN_CHANGE_BSSID_PARAM_CMD,
+ WMI_TLV_TAG_STRUCT_EXTSCAN_CONFIGURE_HOTLIST_MONITOR_CMD,
+ WMI_TLV_TAG_STRUCT_EXTSCAN_GET_CACHED_RESULTS_CMD,
+ WMI_TLV_TAG_STRUCT_EXTSCAN_GET_WLAN_CHANGE_RESULTS_CMD,
+ WMI_TLV_TAG_STRUCT_EXTSCAN_SET_CAPABILITIES_CMD,
+ WMI_TLV_TAG_STRUCT_EXTSCAN_GET_CAPABILITIES_CMD,
+ WMI_TLV_TAG_STRUCT_EXTSCAN_OPERATION_EVENT,
+ WMI_TLV_TAG_STRUCT_EXTSCAN_START_STOP_EVENT,
+ WMI_TLV_TAG_STRUCT_EXTSCAN_TABLE_USAGE_EVENT,
+ WMI_TLV_TAG_STRUCT_EXTSCAN_WLAN_DESCRIPTOR_EVENT,
+ WMI_TLV_TAG_STRUCT_EXTSCAN_RSSI_INFO_EVENT,
+ WMI_TLV_TAG_STRUCT_EXTSCAN_CACHED_RESULTS_EVENT,
+ WMI_TLV_TAG_STRUCT_EXTSCAN_WLAN_CHANGE_RESULTS_EVENT,
+ WMI_TLV_TAG_STRUCT_EXTSCAN_WLAN_CHANGE_RESULT_BSSID_EVENT,
+ WMI_TLV_TAG_STRUCT_EXTSCAN_HOTLIST_MATCH_EVENT,
+ WMI_TLV_TAG_STRUCT_EXTSCAN_CAPABILITIES_EVENT,
+ WMI_TLV_TAG_STRUCT_EXTSCAN_CACHE_CAPABILITIES_EVENT,
+ WMI_TLV_TAG_STRUCT_EXTSCAN_WLAN_CHANGE_MONITOR_CAPABILITIES_EVENT,
+ WMI_TLV_TAG_STRUCT_EXTSCAN_HOTLIST_MONITOR_CAPABILITIES_EVENT,
+ WMI_TLV_TAG_STRUCT_D0_WOW_ENABLE_DISABLE_CMD,
+ WMI_TLV_TAG_STRUCT_D0_WOW_DISABLE_ACK_EVENT,
+ WMI_TLV_TAG_STRUCT_UNIT_TEST_CMD,
+ WMI_TLV_TAG_STRUCT_ROAM_OFFLOAD_TLV_PARAM,
+ WMI_TLV_TAG_STRUCT_ROAM_11I_OFFLOAD_TLV_PARAM,
+ WMI_TLV_TAG_STRUCT_ROAM_11R_OFFLOAD_TLV_PARAM,
+ WMI_TLV_TAG_STRUCT_ROAM_ESE_OFFLOAD_TLV_PARAM,
+ WMI_TLV_TAG_STRUCT_ROAM_SYNCH_EVENT,
+ WMI_TLV_TAG_STRUCT_ROAM_SYNCH_COMPLETE,
+ WMI_TLV_TAG_STRUCT_EXTWOW_ENABLE_CMD,
+ WMI_TLV_TAG_STRUCT_EXTWOW_SET_APP_TYPE1_PARAMS_CMD,
+ WMI_TLV_TAG_STRUCT_EXTWOW_SET_APP_TYPE2_PARAMS_CMD,
+ WMI_TLV_TAG_STRUCT_LPI_STATUS_EVENT,
+ WMI_TLV_TAG_STRUCT_LPI_HANDOFF_EVENT,
+ WMI_TLV_TAG_STRUCT_VDEV_RATE_STATS_EVENT,
+ WMI_TLV_TAG_STRUCT_VDEV_RATE_HT_INFO,
+ WMI_TLV_TAG_STRUCT_RIC_REQUEST,
+ WMI_TLV_TAG_STRUCT_PDEV_GET_TEMPERATURE_CMD,
+ WMI_TLV_TAG_STRUCT_PDEV_TEMPERATURE_EVENT,
+ WMI_TLV_TAG_STRUCT_SET_DHCP_SERVER_OFFLOAD_CMD,
+ WMI_TLV_TAG_STRUCT_TPC_CHAINMASK_CONFIG_CMD,
+ WMI_TLV_TAG_STRUCT_RIC_TSPEC,
+ WMI_TLV_TAG_STRUCT_TPC_CHAINMASK_CONFIG,
+ WMI_TLV_TAG_STRUCT_IPA_OFFLOAD_CMD,
+ WMI_TLV_TAG_STRUCT_SCAN_PROB_REQ_OUI_CMD,
+ WMI_TLV_TAG_STRUCT_KEY_MATERIAL,
+ WMI_TLV_TAG_STRUCT_TDLS_SET_OFFCHAN_MODE_CMD,
+ WMI_TLV_TAG_STRUCT_SET_LED_FLASHING_CMD,
+ WMI_TLV_TAG_STRUCT_MDNS_OFFLOAD_CMD,
+ WMI_TLV_TAG_STRUCT_MDNS_SET_FQDN_CMD,
+ WMI_TLV_TAG_STRUCT_MDNS_SET_RESP_CMD,
+ WMI_TLV_TAG_STRUCT_MDNS_GET_STATS_CMD,
+ WMI_TLV_TAG_STRUCT_MDNS_STATS_EVENT,
+ WMI_TLV_TAG_STRUCT_ROAM_INVOKE_CMD,
+ WMI_TLV_TAG_STRUCT_PDEV_RESUME_EVENT,
+ WMI_TLV_TAG_STRUCT_PDEV_SET_ANTENNA_DIVERSITY_CMD,
+ WMI_TLV_TAG_STRUCT_SAP_OFL_ENABLE_CMD,
+ WMI_TLV_TAG_STRUCT_SAP_OFL_ADD_STA_EVENT,
+ WMI_TLV_TAG_STRUCT_SAP_OFL_DEL_STA_EVENT,
+ WMI_TLV_TAG_STRUCT_APFIND_CMD_PARAM,
+ WMI_TLV_TAG_STRUCT_APFIND_EVENT_HDR,
+ WMI_TLV_TAG_STRUCT_OCB_SET_SCHED_CMD,
+ WMI_TLV_TAG_STRUCT_OCB_SET_SCHED_EVENT,
+ WMI_TLV_TAG_STRUCT_OCB_SET_CONFIG_CMD,
+ WMI_TLV_TAG_STRUCT_OCB_SET_CONFIG_RESP_EVENT,
+ WMI_TLV_TAG_STRUCT_OCB_SET_UTC_TIME_CMD,
+ WMI_TLV_TAG_STRUCT_OCB_START_TIMING_ADVERT_CMD,
+ WMI_TLV_TAG_STRUCT_OCB_STOP_TIMING_ADVERT_CMD,
+ WMI_TLV_TAG_STRUCT_OCB_GET_TSF_TIMER_CMD,
+ WMI_TLV_TAG_STRUCT_OCB_GET_TSF_TIMER_RESP_EVENT,
+ WMI_TLV_TAG_STRUCT_DCC_GET_STATS_CMD,
+ WMI_TLV_TAG_STRUCT_DCC_CHANNEL_STATS_REQUEST,
+ WMI_TLV_TAG_STRUCT_DCC_GET_STATS_RESP_EVENT,
+ WMI_TLV_TAG_STRUCT_DCC_CLEAR_STATS_CMD,
+ WMI_TLV_TAG_STRUCT_DCC_UPDATE_NDL_CMD,
+ WMI_TLV_TAG_STRUCT_DCC_UPDATE_NDL_RESP_EVENT,
+ WMI_TLV_TAG_STRUCT_DCC_STATS_EVENT,
+ WMI_TLV_TAG_STRUCT_OCB_CHANNEL,
+ WMI_TLV_TAG_STRUCT_OCB_SCHEDULE_ELEMENT,
+ WMI_TLV_TAG_STRUCT_DCC_NDL_STATS_PER_CHANNEL,
+ WMI_TLV_TAG_STRUCT_DCC_NDL_CHAN,
+ WMI_TLV_TAG_STRUCT_QOS_PARAMETER,
+ WMI_TLV_TAG_STRUCT_DCC_NDL_ACTIVE_STATE_CONFIG,
+ WMI_TLV_TAG_STRUCT_ROAM_SCAN_EXTENDED_THRESHOLD_PARAM,
+ WMI_TLV_TAG_STRUCT_ROAM_FILTER_FIXED_PARAM,
+ WMI_TLV_TAG_STRUCT_PASSPOINT_CONFIG_CMD,
+ WMI_TLV_TAG_STRUCT_PASSPOINT_EVENT_HDR,
+ WMI_TLV_TAG_STRUCT_EXTSCAN_CONFIGURE_HOTLIST_SSID_MONITOR_CMD,
+ WMI_TLV_TAG_STRUCT_EXTSCAN_HOTLIST_SSID_MATCH_EVENT,
+ WMI_TLV_TAG_STRUCT_VDEV_TSF_TSTAMP_ACTION_CMD,
+ WMI_TLV_TAG_STRUCT_VDEV_TSF_REPORT_EVENT,
+ WMI_TLV_TAG_STRUCT_GET_FW_MEM_DUMP,
+ WMI_TLV_TAG_STRUCT_UPDATE_FW_MEM_DUMP,
+ WMI_TLV_TAG_STRUCT_FW_MEM_DUMP_PARAMS,
+ WMI_TLV_TAG_STRUCT_DEBUG_MESG_FLUSH,
+ WMI_TLV_TAG_STRUCT_DEBUG_MESG_FLUSH_COMPLETE,
+ WMI_TLV_TAG_STRUCT_PEER_SET_RATE_REPORT_CONDITION,
+ WMI_TLV_TAG_STRUCT_ROAM_SUBNET_CHANGE_CONFIG,
+ WMI_TLV_TAG_STRUCT_VDEV_SET_IE_CMD,
+ WMI_TLV_TAG_STRUCT_RSSI_BREACH_MONITOR_CONFIG,
+ WMI_TLV_TAG_STRUCT_RSSI_BREACH_EVENT,
+ WMI_TLV_TAG_STRUCT_EVENT_INITIAL_WAKEUP,
+ WMI_TLV_TAG_STRUCT_SOC_SET_PCL_CMD,
+ WMI_TLV_TAG_STRUCT_SOC_SET_HW_MODE_CMD,
+ WMI_TLV_TAG_STRUCT_SOC_SET_HW_MODE_RESPONSE_EVENT,
+ WMI_TLV_TAG_STRUCT_SOC_HW_MODE_TRANSITION_EVENT,
+ WMI_TLV_TAG_STRUCT_VDEV_TXRX_STREAMS,
+ WMI_TLV_TAG_STRUCT_SOC_SET_HW_MODE_RESPONSE_VDEV_MAC_ENTRY,
+ WMI_TLV_TAG_STRUCT_SOC_SET_DUAL_MAC_CONFIG_CMD,
+ WMI_TLV_TAG_STRUCT_SOC_SET_DUAL_MAC_CONFIG_RESPONSE_EVENT,
+ WMI_TLV_TAG_STRUCT_IOAC_SOCK_PATTERN_T,
+ WMI_TLV_TAG_STRUCT_WOW_ENABLE_ICMPV6_NA_FLT_CMD,
+ WMI_TLV_TAG_STRUCT_DIAG_EVENT_LOG_CONFIG,
+ WMI_TLV_TAG_STRUCT_DIAG_EVENT_LOG_SUPPORTED_EVENT,
+ WMI_TLV_TAG_STRUCT_PACKET_FILTER_CONFIG,
+ WMI_TLV_TAG_STRUCT_PACKET_FILTER_ENABLE,
+ WMI_TLV_TAG_STRUCT_SAP_SET_BLACKLIST_PARAM_CMD,
+ WMI_TLV_TAG_STRUCT_MGMT_TX_CMD,
+ WMI_TLV_TAG_STRUCT_MGMT_TX_COMPL_EVENT,
+ WMI_TLV_TAG_STRUCT_SOC_SET_ANTENNA_MODE_CMD,
+ WMI_TLV_TAG_STRUCT_WOW_UDP_SVC_OFLD_CMD,
+ WMI_TLV_TAG_STRUCT_LRO_INFO_CMD,
+ WMI_TLV_TAG_STRUCT_ROAM_EARLYSTOP_RSSI_THRES_PARAM,
+ WMI_TLV_TAG_STRUCT_SERVICE_READY_EXT_EVENT,
+ WMI_TLV_TAG_STRUCT_MAWC_SENSOR_REPORT_IND_CMD,
+ WMI_TLV_TAG_STRUCT_MAWC_ENABLE_SENSOR_EVENT,
+ WMI_TLV_TAG_STRUCT_ROAM_CONFIGURE_MAWC_CMD,
+ WMI_TLV_TAG_STRUCT_NLO_CONFIGURE_MAWC_CMD,
+ WMI_TLV_TAG_STRUCT_EXTSCAN_CONFIGURE_MAWC_CMD,
+ WMI_TLV_TAG_STRUCT_PEER_ASSOC_CONF_EVENT,
+ WMI_TLV_TAG_STRUCT_WOW_HOSTWAKEUP_GPIO_PIN_PATTERN_CONFIG_CMD,
+ WMI_TLV_TAG_STRUCT_AP_PS_EGAP_PARAM_CMD,
+ WMI_TLV_TAG_STRUCT_AP_PS_EGAP_INFO_EVENT,
+ WMI_TLV_TAG_STRUCT_PMF_OFFLOAD_SET_SA_QUERY_CMD,
+ WMI_TLV_TAG_STRUCT_TRANSFER_DATA_TO_FLASH_CMD,
+ WMI_TLV_TAG_STRUCT_TRANSFER_DATA_TO_FLASH_COMPLETE_EVENT,
+ WMI_TLV_TAG_STRUCT_SCPC_EVENT,
+ WMI_TLV_TAG_STRUCT_AP_PS_EGAP_INFO_CHAINMASK_LIST,
+ WMI_TLV_TAG_STRUCT_STA_SMPS_FORCE_MODE_COMPLETE_EVENT,
+ WMI_TLV_TAG_STRUCT_BPF_GET_CAPABILITY_CMD,
+ WMI_TLV_TAG_STRUCT_BPF_CAPABILITY_INFO_EVT,
+ WMI_TLV_TAG_STRUCT_BPF_GET_VDEV_STATS_CMD,
+ WMI_TLV_TAG_STRUCT_BPF_VDEV_STATS_INFO_EVT,
+ WMI_TLV_TAG_STRUCT_BPF_SET_VDEV_INSTRUCTIONS_CMD,
+ WMI_TLV_TAG_STRUCT_BPF_DEL_VDEV_INSTRUCTIONS_CMD,
+ WMI_TLV_TAG_STRUCT_VDEV_DELETE_RESP_EVENT,
+ WMI_TLV_TAG_STRUCT_PEER_DELETE_RESP_EVENT,
+ WMI_TLV_TAG_STRUCT_ROAM_DENSE_THRES_PARAM,
+ WMI_TLV_TAG_STRUCT_ENLO_CANDIDATE_SCORE_PARAM,
+ WMI_TLV_TAG_STRUCT_PEER_UPDATE_WDS_ENTRY_CMD,
+ WMI_TLV_TAG_STRUCT_VDEV_CONFIG_RATEMASK,
+ WMI_TLV_TAG_STRUCT_PDEV_FIPS_CMD,
+ WMI_TLV_TAG_STRUCT_PDEV_SMART_ANT_ENABLE_CMD,
+ WMI_TLV_TAG_STRUCT_PDEV_SMART_ANT_SET_RX_ANTENNA_CMD,
+ WMI_TLV_TAG_STRUCT_PEER_SMART_ANT_SET_TX_ANTENNA_CMD,
+ WMI_TLV_TAG_STRUCT_PEER_SMART_ANT_SET_TRAIN_ANTENNA_CMD,
+ WMI_TLV_TAG_STRUCT_PEER_SMART_ANT_SET_NODE_CONFIG_OPS_CMD,
+ WMI_TLV_TAG_STRUCT_PDEV_SET_ANT_SWITCH_TBL_CMD,
+ WMI_TLV_TAG_STRUCT_PDEV_SET_CTL_TABLE_CMD,
+ WMI_TLV_TAG_STRUCT_PDEV_SET_MIMOGAIN_TABLE_CMD,
+ WMI_TLV_TAG_STRUCT_FWTEST_SET_PARAM_CMD,
+ WMI_TLV_TAG_STRUCT_PEER_ATF_REQUEST,
+ WMI_TLV_TAG_STRUCT_VDEV_ATF_REQUEST,
+ WMI_TLV_TAG_STRUCT_PDEV_GET_ANI_CCK_CONFIG_CMD,
+ WMI_TLV_TAG_STRUCT_PDEV_GET_ANI_OFDM_CONFIG_CMD,
+ WMI_TLV_TAG_STRUCT_INST_RSSI_STATS_RESP,
+ WMI_TLV_TAG_STRUCT_MED_UTIL_REPORT_EVENT,
+ WMI_TLV_TAG_STRUCT_PEER_STA_PS_STATECHANGE_EVENT,
+ WMI_TLV_TAG_STRUCT_WDS_ADDR_EVENT,
+ WMI_TLV_TAG_STRUCT_PEER_RATECODE_LIST_EVENT,
+ WMI_TLV_TAG_STRUCT_PDEV_NFCAL_POWER_ALL_CHANNELS_EVENT,
+ WMI_TLV_TAG_STRUCT_PDEV_TPC_EVENT,
+ WMI_TLV_TAG_STRUCT_ANI_OFDM_EVENT,
+ WMI_TLV_TAG_STRUCT_ANI_CCK_EVENT,
+ WMI_TLV_TAG_STRUCT_PDEV_CHANNEL_HOPPING_EVENT,
+ WMI_TLV_TAG_STRUCT_PDEV_FIPS_EVENT,
+ WMI_TLV_TAG_STRUCT_ATF_PEER_INFO,
+ WMI_TLV_TAG_STRUCT_PDEV_GET_TPC_CMD,
+ WMI_TLV_TAG_STRUCT_VDEV_FILTER_NRP_CONFIG_CMD,
+ WMI_TLV_TAG_STRUCT_QBOOST_CFG_CMD,
+ WMI_TLV_TAG_STRUCT_PDEV_SMART_ANT_GPIO_HANDLE,
+ WMI_TLV_TAG_STRUCT_PEER_SMART_ANT_SET_TX_ANTENNA_SERIES,
+ WMI_TLV_TAG_STRUCT_PEER_SMART_ANT_SET_TRAIN_ANTENNA_PARAM,
+ WMI_TLV_TAG_STRUCT_PDEV_SET_ANT_CTRL_CHAIN,
+ WMI_TLV_TAG_STRUCT_PEER_CCK_OFDM_RATE_INFO,
+ WMI_TLV_TAG_STRUCT_PEER_MCS_RATE_INFO,
+ WMI_TLV_TAG_STRUCT_PDEV_NFCAL_POWER_ALL_CHANNELS_NFDBR,
+ WMI_TLV_TAG_STRUCT_PDEV_NFCAL_POWER_ALL_CHANNELS_NFDBM,
+ WMI_TLV_TAG_STRUCT_PDEV_NFCAL_POWER_ALL_CHANNELS_FREQNUM,
+ WMI_TLV_TAG_STRUCT_MU_REPORT_TOTAL_MU,
+ WMI_TLV_TAG_STRUCT_VDEV_SET_DSCP_TID_MAP_CMD,
+ WMI_TLV_TAG_STRUCT_ROAM_SET_MBO,
+ WMI_TLV_TAG_STRUCT_MIB_STATS_ENABLE_CMD,
+ WMI_TLV_TAG_STRUCT_NAN_DISC_IFACE_CREATED_EVENT,
+ WMI_TLV_TAG_STRUCT_NAN_DISC_IFACE_DELETED_EVENT,
+ WMI_TLV_TAG_STRUCT_NAN_STARTED_CLUSTER_EVENT,
+ WMI_TLV_TAG_STRUCT_NAN_JOINED_CLUSTER_EVENT,
+ WMI_TLV_TAG_STRUCT_NDI_GET_CAP_REQ,
+ WMI_TLV_TAG_STRUCT_NDP_INITIATOR_REQ,
+ WMI_TLV_TAG_STRUCT_NDP_RESPONDER_REQ,
+ WMI_TLV_TAG_STRUCT_NDP_END_REQ,
+ WMI_TLV_TAG_STRUCT_NDI_CAP_RSP_EVENT,
+ WMI_TLV_TAG_STRUCT_NDP_INITIATOR_RSP_EVENT,
+ WMI_TLV_TAG_STRUCT_NDP_RESPONDER_RSP_EVENT,
+ WMI_TLV_TAG_STRUCT_NDP_END_RSP_EVENT,
+ WMI_TLV_TAG_STRUCT_NDP_INDICATION_EVENT,
+ WMI_TLV_TAG_STRUCT_NDP_CONFIRM_EVENT,
+ WMI_TLV_TAG_STRUCT_NDP_END_INDICATION_EVENT,
+ WMI_TLV_TAG_STRUCT_VDEV_SET_QUIET_CMD,
+ WMI_TLV_TAG_STRUCT_PDEV_SET_PCL_CMD,
+ WMI_TLV_TAG_STRUCT_PDEV_SET_HW_MODE_CMD,
+ WMI_TLV_TAG_STRUCT_PDEV_SET_MAC_CONFIG_CMD,
+ WMI_TLV_TAG_STRUCT_PDEV_SET_ANTENNA_MODE_CMD,
+ WMI_TLV_TAG_STRUCT_PDEV_SET_HW_MODE_RESPONSE_EVENT,
+ WMI_TLV_TAG_STRUCT_PDEV_HW_MODE_TRANSITION_EVENT,
+ WMI_TLV_TAG_STRUCT_PDEV_SET_HW_MODE_RESPONSE_VDEV_MAC_ENTRY,
+ WMI_TLV_TAG_STRUCT_PDEV_SET_MAC_CONFIG_RESPONSE_EVENT,
+ WMI_TLV_TAG_STRUCT_COEX_CONFIG_CMD,
+ WMI_TLV_TAG_STRUCT_CONFIG_ENHANCED_MCAST_FILTER,
+ WMI_TLV_TAG_STRUCT_CHAN_AVOID_RPT_ALLOW_CMD,
+ WMI_TLV_TAG_STRUCT_SET_PERIODIC_CHANNEL_STATS_CONFIG,
+ WMI_TLV_TAG_STRUCT_VDEV_SET_CUSTOM_AGGR_SIZE_CMD,
+ WMI_TLV_TAG_STRUCT_PDEV_WAL_POWER_DEBUG_CMD,
+ WMI_TLV_TAG_STRUCT_MAC_PHY_CAPABILITIES,
+ WMI_TLV_TAG_STRUCT_HW_MODE_CAPABILITIES,
+ WMI_TLV_TAG_STRUCT_SOC_MAC_PHY_HW_MODE_CAPS,
+ WMI_TLV_TAG_STRUCT_HAL_REG_CAPABILITIES_EXT,
+ WMI_TLV_TAG_STRUCT_SOC_HAL_REG_CAPABILITIES,
+ WMI_TLV_TAG_STRUCT_VDEV_WISA_CMD,
+ WMI_TLV_TAG_STRUCT_TX_POWER_LEVEL_STATS_EVT,
+ WMI_TLV_TAG_STRUCT_SCAN_ADAPTIVE_DWELL_PARAMETERS_TLV,
+ WMI_TLV_TAG_STRUCT_SCAN_ADAPTIVE_DWELL_CONFIG,
+ WMI_TLV_TAG_STRUCT_WOW_SET_ACTION_WAKE_UP_CMD,
+ WMI_TLV_TAG_STRUCT_NDP_END_RSP_PER_NDI,
+ WMI_TLV_TAG_STRUCT_PEER_BWF_REQUEST,
+ WMI_TLV_TAG_STRUCT_BWF_PEER_INFO,
+ WMI_TLV_TAG_STRUCT_DBGLOG_TIME_STAMP_SYNC_CMD,
+ WMI_TLV_TAG_STRUCT_RMC_SET_LEADER_CMD,
+ WMI_TLV_TAG_STRUCT_RMC_MANUAL_LEADER_EVENT,
+ WMI_TLV_TAG_STRUCT_PER_CHAIN_RSSI_STATS,
+ WMI_TLV_TAG_STRUCT_RSSI_STATS,
+ WMI_TLV_TAG_STRUCT_P2P_LO_START_CMD,
+ WMI_TLV_TAG_STRUCT_P2P_LO_STOP_CMD,
+ WMI_TLV_TAG_STRUCT_P2P_LO_STOPPED_EVENT,
+ WMI_TLV_TAG_STRUCT_PEER_REORDER_QUEUE_SETUP_CMD,
+ WMI_TLV_TAG_STRUCT_PEER_REORDER_QUEUE_REMOVE_CMD,
+ WMI_TLV_TAG_STRUCT_SET_MULTIPLE_MCAST_FILTER_CMD,
+ WMI_TLV_TAG_STRUCT_MGMT_TX_COMPL_BUNDLE_EVENT,
+ WMI_TLV_TAG_STRUCT_READ_DATA_FROM_FLASH_CMD,
+ WMI_TLV_TAG_STRUCT_READ_DATA_FROM_FLASH_EVENT,
+ WMI_TLV_TAG_STRUCT_PDEV_SET_REORDER_TIMEOUT_VAL_CMD,
+ WMI_TLV_TAG_STRUCT_PEER_SET_RX_BLOCKSIZE_CMD,
+ WMI_TLV_TAG_STRUCT_PDEV_SET_WAKEUP_CONFIG_CMDID,
+ WMI_TLV_TAG_STRUCT_TLV_BUF_LEN_PARAM,
+ WMI_TLV_TAG_STRUCT_SERVICE_AVAILABLE_EVENT,
+ WMI_TLV_TAG_STRUCT_PEER_ANTDIV_INFO_REQ_CMD,
+ WMI_TLV_TAG_STRUCT_PEER_ANTDIV_INFO_EVENT,
+ WMI_TLV_TAG_STRUCT_PEER_ANTDIV_INFO,
+ WMI_TLV_TAG_STRUCT_PDEV_GET_ANTDIV_STATUS_CMD,
+ WMI_TLV_TAG_STRUCT_PDEV_ANTDIV_STATUS_EVENT,
+ WMI_TLV_TAG_STRUCT_MNT_FILTER_CMD,
+ WMI_TLV_TAG_STRUCT_GET_CHIP_POWER_STATS_CMD,
+ WMI_TLV_TAG_STRUCT_PDEV_CHIP_POWER_STATS_EVENT,
+ WMI_TLV_TAG_STRUCT_COEX_GET_ANTENNA_ISOLATION_CMD,
+ WMI_TLV_TAG_STRUCT_COEX_REPORT_ISOLATION_EVENT,
+ WMI_TLV_TAG_STRUCT_CHAN_CCA_STATS,
+ WMI_TLV_TAG_STRUCT_PEER_SIGNAL_STATS,
+ WMI_TLV_TAG_STRUCT_TX_STATS,
+ WMI_TLV_TAG_STRUCT_PEER_AC_TX_STATS,
+ WMI_TLV_TAG_STRUCT_RX_STATS,
+ WMI_TLV_TAG_STRUCT_PEER_AC_RX_STATS,
+ WMI_TLV_TAG_STRUCT_REPORT_STATS_EVENT,
+ WMI_TLV_TAG_STRUCT_CHAN_CCA_STATS_THRESH,
+ WMI_TLV_TAG_STRUCT_PEER_SIGNAL_STATS_THRESH,
+ WMI_TLV_TAG_STRUCT_TX_STATS_THRESH,
+ WMI_TLV_TAG_STRUCT_RX_STATS_THRESH,
+ WMI_TLV_TAG_STRUCT_PDEV_SET_STATS_THRESHOLD_CMD,
+ WMI_TLV_TAG_STRUCT_REQUEST_WLAN_STATS_CMD,
+ WMI_TLV_TAG_STRUCT_RX_AGGR_FAILURE_EVENT,
+ WMI_TLV_TAG_STRUCT_RX_AGGR_FAILURE_INFO,
+ WMI_TLV_TAG_STRUCT_VDEV_ENCRYPT_DECRYPT_DATA_REQ_CMD,
+ WMI_TLV_TAG_STRUCT_VDEV_ENCRYPT_DECRYPT_DATA_RESP_EVENT,
+ WMI_TLV_TAG_STRUCT_PDEV_BAND_TO_MAC,
+ WMI_TLV_TAG_STRUCT_TBTT_OFFSET_INFO,
+ WMI_TLV_TAG_STRUCT_TBTT_OFFSET_EXT_EVENT,
+ WMI_TLV_TAG_STRUCT_SAR_LIMITS_CMD,
+ WMI_TLV_TAG_STRUCT_SAR_LIMIT_CMD_ROW,
+ WMI_TLV_TAG_STRUCT_PDEV_DFS_PHYERR_OFFLOAD_ENABLE_CMD,
+ WMI_TLV_TAG_STRUCT_PDEV_DFS_PHYERR_OFFLOAD_DISABLE_CMD,
+ WMI_TLV_TAG_STRUCT_VDEV_ADFS_CH_CFG_CMD,
+ WMI_TLV_TAG_STRUCT_VDEV_ADFS_OCAC_ABORT_CMD,
+ WMI_TLV_TAG_STRUCT_PDEV_DFS_RADAR_DETECTION_EVENT,
+ WMI_TLV_TAG_STRUCT_VDEV_ADFS_OCAC_COMPLETE_EVENT,
+ WMI_TLV_TAG_STRUCT_VDEV_DFS_CAC_COMPLETE_EVENT,
+ WMI_TLV_TAG_STRUCT_VENDOR_OUI,
+ WMI_TLV_TAG_STRUCT_REQUEST_RCPI_CMD,
+ WMI_TLV_TAG_STRUCT_UPDATE_RCPI_EVENT,
+ WMI_TLV_TAG_STRUCT_REQUEST_PEER_STATS_INFO_CMD,
+ WMI_TLV_TAG_STRUCT_PEER_STATS_INFO,
+ WMI_TLV_TAG_STRUCT_PEER_STATS_INFO_EVENT,
+ WMI_TLV_TAG_STRUCT_PKGID_EVENT,
+ WMI_TLV_TAG_STRUCT_CONNECTED_NLO_RSSI_PARAMS,
+ WMI_TLV_TAG_STRUCT_SET_CURRENT_COUNTRY_CMD,
+ WMI_TLV_TAG_STRUCT_REGULATORY_RULE_STRUCT,
+ WMI_TLV_TAG_STRUCT_REG_CHAN_LIST_CC_EVENT,
+ WMI_TLV_TAG_STRUCT_11D_SCAN_START_CMD,
+ WMI_TLV_TAG_STRUCT_11D_SCAN_STOP_CMD,
+ WMI_TLV_TAG_STRUCT_11D_NEW_COUNTRY_EVENT,
+ WMI_TLV_TAG_STRUCT_REQUEST_RADIO_CHAN_STATS_CMD,
+ WMI_TLV_TAG_STRUCT_RADIO_CHAN_STATS,
+ WMI_TLV_TAG_STRUCT_RADIO_CHAN_STATS_EVENT,
+ WMI_TLV_TAG_STRUCT_ROAM_PER_CONFIG,
+ WMI_TLV_TAG_STRUCT_VDEV_ADD_MAC_ADDR_TO_RX_FILTER_CMD,
+ WMI_TLV_TAG_STRUCT_VDEV_ADD_MAC_ADDR_TO_RX_FILTER_STATUS_EVENT,
+ WMI_TLV_TAG_STRUCT_BPF_SET_VDEV_ACTIVE_MODE_CMD,
+ WMI_TLV_TAG_STRUCT_HW_DATA_FILTER_CMD,
+ WMI_TLV_TAG_STRUCT_CONNECTED_NLO_BSS_BAND_RSSI_PREF,
+ WMI_TLV_TAG_STRUCT_PEER_OPER_MODE_CHANGE_EVENT,
+ WMI_TLV_TAG_STRUCT_CHIP_POWER_SAVE_FAILURE_DETECTED,
+ WMI_TLV_TAG_STRUCT_PDEV_MULTIPLE_VDEV_RESTART_REQUEST_CMD,
+ WMI_TLV_TAG_STRUCT_PDEV_CSA_SWITCH_COUNT_STATUS_EVENT,
+ WMI_TLV_TAG_STRUCT_PDEV_UPDATE_PKT_ROUTING_CMD,
+ WMI_TLV_TAG_STRUCT_PDEV_CHECK_CAL_VERSION_CMD,
+ WMI_TLV_TAG_STRUCT_PDEV_CHECK_CAL_VERSION_EVENT,
+ WMI_TLV_TAG_STRUCT_PDEV_SET_DIVERSITY_GAIN_CMD,
+ WMI_TLV_TAG_STRUCT_MAC_PHY_CHAINMASK_COMBO,
+ WMI_TLV_TAG_STRUCT_MAC_PHY_CHAINMASK_CAPABILITY,
+ WMI_TLV_TAG_STRUCT_VDEV_SET_ARP_STATS_CMD,
+ WMI_TLV_TAG_STRUCT_VDEV_GET_ARP_STATS_CMD,
+ WMI_TLV_TAG_STRUCT_VDEV_GET_ARP_STATS_EVENT,
+ WMI_TLV_TAG_STRUCT_IFACE_OFFLOAD_STATS,
+ WMI_TLV_TAG_STRUCT_REQUEST_STATS_CMD_SUB_STRUCT_PARAM,
+ WMI_TLV_TAG_STRUCT_RSSI_CTL_EXT,
+ WMI_TLV_TAG_STRUCT_SINGLE_PHYERR_EXT_RX_HDR,
+ WMI_TLV_TAG_STRUCT_COEX_BT_ACTIVITY_EVENT,
+ WMI_TLV_TAG_STRUCT_VDEV_GET_TX_POWER_CMD,
+ WMI_TLV_TAG_STRUCT_VDEV_TX_POWER_EVENT,
+ WMI_TLV_TAG_STRUCT_OFFCHAN_DATA_TX_COMPL_EVENT,
+ WMI_TLV_TAG_STRUCT_OFFCHAN_DATA_TX_SEND_CMD,
+ WMI_TLV_TAG_STRUCT_TX_SEND_PARAMS,
+ WMI_TLV_TAG_STRUCT_HE_RATE_SET,
+ WMI_TLV_TAG_STRUCT_CONGESTION_STATS,
+ WMI_TLV_TAG_STRUCT_SET_INIT_COUNTRY_CMD,
+ WMI_TLV_TAG_STRUCT_SCAN_DBS_DUTY_CYCLE,
+ WMI_TLV_TAG_STRUCT_SCAN_DBS_DUTY_CYCLE_PARAM_TLV,
+ WMI_TLV_TAG_STRUCT_PDEV_DIV_GET_RSSI_ANTID,
+ WMI_TLV_TAG_STRUCT_THERM_THROT_CONFIG_REQUEST,
+ WMI_TLV_TAG_STRUCT_THERM_THROT_LEVEL_CONFIG_INFO,
+ WMI_TLV_TAG_STRUCT_THERM_THROT_STATS_EVENT,
+ WMI_TLV_TAG_STRUCT_THERM_THROT_LEVEL_STATS_INFO,
+ WMI_TLV_TAG_STRUCT_PDEV_DIV_RSSI_ANTID_EVENT,
+ WMI_TLV_TAG_STRUCT_OEM_DMA_RING_CAPABILITIES,
+ WMI_TLV_TAG_STRUCT_OEM_DMA_RING_CFG_REQ,
+ WMI_TLV_TAG_STRUCT_OEM_DMA_RING_CFG_RSP,
+ WMI_TLV_TAG_STRUCT_OEM_INDIRECT_DATA,
+ WMI_TLV_TAG_STRUCT_OEM_DMA_BUF_RELEASE,
+ WMI_TLV_TAG_STRUCT_OEM_DMA_BUF_RELEASE_ENTRY,
+ WMI_TLV_TAG_STRUCT_PDEV_BSS_CHAN_INFO_REQUEST,
+ WMI_TLV_TAG_STRUCT_PDEV_BSS_CHAN_INFO_EVENT,
+ WMI_TLV_TAG_STRUCT_ROAM_LCA_DISALLOW_CONFIG_TLV_PARAM,
+ WMI_TLV_TAG_STRUCT_VDEV_LIMIT_OFFCHAN_CMD,
+ WMI_TLV_TAG_STRUCT_ROAM_RSSI_REJECTION_OCE_CONFIG_PARAM,
+ WMI_TLV_TAG_STRUCT_UNIT_TEST_EVENT,
+ WMI_TLV_TAG_STRUCT_ROAM_FILS_OFFLOAD_TLV_PARAM,
+ WMI_TLV_TAG_STRUCT_PDEV_UPDATE_PMK_CACHE_CMD,
+ WMI_TLV_TAG_STRUCT_PMK_CACHE,
+ WMI_TLV_TAG_STRUCT_PDEV_UPDATE_FILS_HLP_PKT_CMD,
+ WMI_TLV_TAG_STRUCT_ROAM_FILS_SYNCH_TLV_PARAM,
+ WMI_TLV_TAG_STRUCT_GTK_OFFLOAD_EXTENDED_TLV_PARAM,
+ WMI_TLV_TAG_STRUCT_ROAM_BG_SCAN_ROAMING_PARAM,
+ WMI_TLV_TAG_STRUCT_OIC_PING_OFFLOAD_PARAMS_CMD,
+ WMI_TLV_TAG_STRUCT_OIC_PING_OFFLOAD_SET_ENABLE_CMD,
+ WMI_TLV_TAG_STRUCT_OIC_PING_HANDOFF_EVENT,
+ WMI_TLV_TAG_STRUCT_DHCP_LEASE_RENEW_OFFLOAD_CMD,
+ WMI_TLV_TAG_STRUCT_DHCP_LEASE_RENEW_EVENT,
+ WMI_TLV_TAG_STRUCT_BTM_CONFIG,
+ WMI_TLV_TAG_STRUCT_DEBUG_MESG_FW_DATA_STALL_PARAM,
+ WMI_TLV_TAG_STRUCT_WLM_CONFIG_CMD,
+ WMI_TLV_TAG_STRUCT_PDEV_UPDATE_CTLTABLE_REQUEST,
+ WMI_TLV_TAG_STRUCT_PDEV_UPDATE_CTLTABLE_EVENT,
+ WMI_TLV_TAG_STRUCT_ROAM_CND_SCORING_PARAM,
+ WMI_TLV_TAG_STRUCT_PDEV_CONFIG_VENDOR_OUI_ACTION,
+ WMI_TLV_TAG_STRUCT_VENDOR_OUI_EXT,
+ WMI_TLV_TAG_STRUCT_ROAM_SYNCH_FRAME_EVENT,
+ WMI_TLV_TAG_STRUCT_FD_SEND_FROM_HOST_CMD,
+ WMI_TLV_TAG_STRUCT_ENABLE_FILS_CMD,
+ WMI_TLV_TAG_STRUCT_HOST_SWFDA_EVENT,
+
+ WMI_TLV_TAG_MAX
+};
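+
+/*
+ * Illustrative note, not part of the upstream patch: each value above
+ * names the payload type of one TLV element.  Every element starts with
+ * a small header carrying a 16-bit length and one of these 16-bit tags,
+ * so a parser walks a message roughly as:
+ *
+ *     tag = __le16_to_cpu(tlv->tag);
+ *     len = __le16_to_cpu(tlv->len);
+ *     ptr += sizeof(*tlv) + len;
+ */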
+
+enum wmi_tlv_service {
+ WMI_TLV_SERVICE_BEACON_OFFLOAD = 0,
+ WMI_TLV_SERVICE_SCAN_OFFLOAD,
+ WMI_TLV_SERVICE_ROAM_SCAN_OFFLOAD,
+ WMI_TLV_SERVICE_BCN_MISS_OFFLOAD,
+ WMI_TLV_SERVICE_STA_PWRSAVE,
+ WMI_TLV_SERVICE_STA_ADVANCED_PWRSAVE,
+ WMI_TLV_SERVICE_AP_UAPSD,
+ WMI_TLV_SERVICE_AP_DFS,
+ WMI_TLV_SERVICE_11AC,
+ WMI_TLV_SERVICE_BLOCKACK,
+ WMI_TLV_SERVICE_PHYERR,
+ WMI_TLV_SERVICE_BCN_FILTER,
+ WMI_TLV_SERVICE_RTT,
+ WMI_TLV_SERVICE_WOW,
+ WMI_TLV_SERVICE_RATECTRL_CACHE,
+ WMI_TLV_SERVICE_IRAM_TIDS,
+ WMI_TLV_SERVICE_ARPNS_OFFLOAD,
+ WMI_TLV_SERVICE_NLO,
+ WMI_TLV_SERVICE_GTK_OFFLOAD,
+ WMI_TLV_SERVICE_SCAN_SCH,
+ WMI_TLV_SERVICE_CSA_OFFLOAD,
+ WMI_TLV_SERVICE_CHATTER,
+ WMI_TLV_SERVICE_COEX_FREQAVOID,
+ WMI_TLV_SERVICE_PACKET_POWER_SAVE,
+ WMI_TLV_SERVICE_FORCE_FW_HANG,
+ WMI_TLV_SERVICE_GPIO,
+ WMI_TLV_SERVICE_STA_DTIM_PS_MODULATED_DTIM,
+ WMI_TLV_SERVICE_STA_UAPSD_BASIC_AUTO_TRIG,
+ WMI_TLV_SERVICE_STA_UAPSD_VAR_AUTO_TRIG,
+ WMI_TLV_SERVICE_STA_KEEP_ALIVE,
+ WMI_TLV_SERVICE_TX_ENCAP,
+ WMI_TLV_SERVICE_AP_PS_DETECT_OUT_OF_SYNC,
+ WMI_TLV_SERVICE_EARLY_RX,
+ WMI_TLV_SERVICE_STA_SMPS,
+ WMI_TLV_SERVICE_FWTEST,
+ WMI_TLV_SERVICE_STA_WMMAC,
+ WMI_TLV_SERVICE_TDLS,
+ WMI_TLV_SERVICE_BURST,
+ WMI_TLV_SERVICE_MCC_BCN_INTERVAL_CHANGE,
+ WMI_TLV_SERVICE_ADAPTIVE_OCS,
+ WMI_TLV_SERVICE_BA_SSN_SUPPORT,
+ WMI_TLV_SERVICE_FILTER_IPSEC_NATKEEPALIVE,
+ WMI_TLV_SERVICE_WLAN_HB,
+ WMI_TLV_SERVICE_LTE_ANT_SHARE_SUPPORT,
+ WMI_TLV_SERVICE_BATCH_SCAN,
+ WMI_TLV_SERVICE_QPOWER,
+ WMI_TLV_SERVICE_PLMREQ,
+ WMI_TLV_SERVICE_THERMAL_MGMT,
+ WMI_TLV_SERVICE_RMC,
+ WMI_TLV_SERVICE_MHF_OFFLOAD,
+ WMI_TLV_SERVICE_COEX_SAR,
+ WMI_TLV_SERVICE_BCN_TXRATE_OVERRIDE,
+ WMI_TLV_SERVICE_NAN,
+ WMI_TLV_SERVICE_L1SS_STAT,
+ WMI_TLV_SERVICE_ESTIMATE_LINKSPEED,
+ WMI_TLV_SERVICE_OBSS_SCAN,
+ WMI_TLV_SERVICE_TDLS_OFFCHAN,
+ WMI_TLV_SERVICE_TDLS_UAPSD_BUFFER_STA,
+ WMI_TLV_SERVICE_TDLS_UAPSD_SLEEP_STA,
+ WMI_TLV_SERVICE_IBSS_PWRSAVE,
+ WMI_TLV_SERVICE_LPASS,
+ WMI_TLV_SERVICE_EXTSCAN,
+ WMI_TLV_SERVICE_D0WOW,
+ WMI_TLV_SERVICE_HSOFFLOAD,
+ WMI_TLV_SERVICE_ROAM_HO_OFFLOAD,
+ WMI_TLV_SERVICE_RX_FULL_REORDER,
+ WMI_TLV_SERVICE_DHCP_OFFLOAD,
+ WMI_TLV_SERVICE_STA_RX_IPA_OFFLOAD_SUPPORT,
+ WMI_TLV_SERVICE_MDNS_OFFLOAD,
+ WMI_TLV_SERVICE_SAP_AUTH_OFFLOAD,
+ WMI_TLV_SERVICE_DUAL_BAND_SIMULTANEOUS_SUPPORT,
+ WMI_TLV_SERVICE_OCB,
+ WMI_TLV_SERVICE_AP_ARPNS_OFFLOAD,
+ WMI_TLV_SERVICE_PER_BAND_CHAINMASK_SUPPORT,
+ WMI_TLV_SERVICE_PACKET_FILTER_OFFLOAD,
+ WMI_TLV_SERVICE_MGMT_TX_HTT,
+ WMI_TLV_SERVICE_MGMT_TX_WMI,
+ WMI_TLV_SERVICE_EXT_MSG,
+ WMI_TLV_SERVICE_MAWC,
+ WMI_TLV_SERVICE_PEER_ASSOC_CONF,
+ WMI_TLV_SERVICE_EGAP,
+ WMI_TLV_SERVICE_STA_PMF_OFFLOAD,
+ WMI_TLV_SERVICE_UNIFIED_WOW_CAPABILITY,
+ WMI_TLV_SERVICE_ENHANCED_PROXY_STA,
+ WMI_TLV_SERVICE_ATF,
+ WMI_TLV_SERVICE_COEX_GPIO,
+ WMI_TLV_SERVICE_AUX_SPECTRAL_INTF,
+ WMI_TLV_SERVICE_AUX_CHAN_LOAD_INTF,
+ WMI_TLV_SERVICE_BSS_CHANNEL_INFO_64,
+ WMI_TLV_SERVICE_ENTERPRISE_MESH,
+ WMI_TLV_SERVICE_RESTRT_CHNL_SUPPORT,
+ WMI_TLV_SERVICE_BPF_OFFLOAD,
+ WMI_TLV_SERVICE_SYNC_DELETE_CMDS,
+ WMI_TLV_SERVICE_SMART_ANTENNA_SW_SUPPORT,
+ WMI_TLV_SERVICE_SMART_ANTENNA_HW_SUPPORT,
+ WMI_TLV_SERVICE_RATECTRL_LIMIT_MAX_MIN_RATES,
+ WMI_TLV_SERVICE_NAN_DATA,
+ WMI_TLV_SERVICE_NAN_RTT,
+ WMI_TLV_SERVICE_11AX,
+ WMI_TLV_SERVICE_DEPRECATED_REPLACE,
+ WMI_TLV_SERVICE_TDLS_CONN_TRACKER_IN_HOST_MODE,
+ WMI_TLV_SERVICE_ENHANCED_MCAST_FILTER,
+ WMI_TLV_SERVICE_PERIODIC_CHAN_STAT_SUPPORT,
+ WMI_TLV_SERVICE_MESH_11S,
+ WMI_TLV_SERVICE_HALF_RATE_QUARTER_RATE_SUPPORT,
+ WMI_TLV_SERVICE_VDEV_RX_FILTER,
+ WMI_TLV_SERVICE_P2P_LISTEN_OFFLOAD_SUPPORT,
+ WMI_TLV_SERVICE_MARK_FIRST_WAKEUP_PACKET,
+ WMI_TLV_SERVICE_MULTIPLE_MCAST_FILTER_SET,
+ WMI_TLV_SERVICE_HOST_MANAGED_RX_REORDER,
+ WMI_TLV_SERVICE_FLASH_RDWR_SUPPORT,
+ WMI_TLV_SERVICE_WLAN_STATS_REPORT,
+ WMI_TLV_SERVICE_TX_MSDU_ID_NEW_PARTITION_SUPPORT,
+ WMI_TLV_SERVICE_DFS_PHYERR_OFFLOAD,
+ WMI_TLV_SERVICE_RCPI_SUPPORT,
+ WMI_TLV_SERVICE_FW_MEM_DUMP_SUPPORT,
+ WMI_TLV_SERVICE_PEER_STATS_INFO,
+ WMI_TLV_SERVICE_REGULATORY_DB,
+ WMI_TLV_SERVICE_11D_OFFLOAD,
+ WMI_TLV_SERVICE_HW_DATA_FILTERING,
+ WMI_TLV_SERVICE_MULTIPLE_VDEV_RESTART,
+ WMI_TLV_SERVICE_PKT_ROUTING,
+ WMI_TLV_SERVICE_CHECK_CAL_VERSION,
+ WMI_TLV_SERVICE_OFFCHAN_TX_WMI,
+ WMI_TLV_SERVICE_8SS_TX_BFEE,
+ WMI_TLV_SERVICE_EXTENDED_NSS_SUPPORT,
+ WMI_TLV_SERVICE_ACK_TIMEOUT,
+ WMI_TLV_SERVICE_PDEV_BSS_CHANNEL_INFO_64,
+ WMI_TLV_MAX_SERVICE = 128,
+
+/* NOTE:
+ * The service flags above are delivered in the wmi_service_bitmap field
+ * of the WMI_TLV_SERVICE_READY_EVENT message.
+ * The service flags below are instead delivered in a separate
+ * WMI_TLV_SERVICE_AVAILABLE_EVENT message, which immediately precedes
+ * the WMI_TLV_SERVICE_READY_EVENT message.
+ */
+
+ WMI_TLV_SERVICE_CHAN_LOAD_INFO = 128,
+ WMI_TLV_SERVICE_TX_PPDU_INFO_STATS_SUPPORT,
+ WMI_TLV_SERVICE_VDEV_LIMIT_OFFCHAN_SUPPORT,
+ WMI_TLV_SERVICE_FILS_SUPPORT,
+ WMI_TLV_SERVICE_WLAN_OIC_PING_OFFLOAD,
+ WMI_TLV_SERVICE_WLAN_DHCP_RENEW,
+ WMI_TLV_SERVICE_MAWC_SUPPORT,
+ WMI_TLV_SERVICE_VDEV_LATENCY_CONFIG,
+ WMI_TLV_SERVICE_PDEV_UPDATE_CTLTABLE_SUPPORT,
+ WMI_TLV_SERVICE_PKTLOG_SUPPORT_OVER_HTT,
+ WMI_TLV_SERVICE_VDEV_MULTI_GROUP_KEY_SUPPORT,
+ WMI_TLV_SERVICE_SCAN_PHYMODE_SUPPORT,
+ WMI_TLV_SERVICE_THERM_THROT,
+ WMI_TLV_SERVICE_BCN_OFFLOAD_START_STOP_SUPPORT,
+ WMI_TLV_SERVICE_WOW_WAKEUP_BY_TIMER_PATTERN,
+ WMI_TLV_SERVICE_PEER_MAP_UNMAP_V2_SUPPORT = 143,
+ WMI_TLV_SERVICE_OFFCHAN_DATA_TID_SUPPORT = 144,
+ WMI_TLV_SERVICE_RX_PROMISC_ENABLE_SUPPORT = 145,
+ WMI_TLV_SERVICE_SUPPORT_DIRECT_DMA = 146,
+ WMI_TLV_SERVICE_AP_OBSS_DETECTION_OFFLOAD = 147,
+ WMI_TLV_SERVICE_11K_NEIGHBOUR_REPORT_SUPPORT = 148,
+ WMI_TLV_SERVICE_LISTEN_INTERVAL_OFFLOAD_SUPPORT = 149,
+ WMI_TLV_SERVICE_BSS_COLOR_OFFLOAD = 150,
+ WMI_TLV_SERVICE_RUNTIME_DPD_RECAL = 151,
+ WMI_TLV_SERVICE_STA_TWT = 152,
+ WMI_TLV_SERVICE_AP_TWT = 153,
+ WMI_TLV_SERVICE_GMAC_OFFLOAD_SUPPORT = 154,
+ WMI_TLV_SERVICE_SPOOF_MAC_SUPPORT = 155,
+
+ WMI_TLV_MAX_EXT_SERVICE = 256,
+};
+
+#define WMI_TLV_EXT_SERVICE_IS_ENABLED(wmi_svc_bmap, svc_id, len) \
+ ((svc_id) < (WMI_TLV_MAX_EXT_SERVICE) && \
+ (svc_id) >= (len) && \
+ __le32_to_cpu((wmi_svc_bmap)[((svc_id) - (len)) / 32]) & \
+ BIT(((((svc_id) - (len)) % 32) & 0x1f)))
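+
+/* Worked example (editor's note): for WMI_TLV_SERVICE_SPOOF_MAC_SUPPORT
+ * (id 155) with len == WMI_TLV_MAX_SERVICE (128), the macro tests word
+ * (155 - 128) / 32 == 0, bit (155 - 128) % 32 == 27 of the extended
+ * bitmap delivered by the WMI_TLV_SERVICE_AVAILABLE_EVENT message.
+ */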
+
+#define SVCMAP(x, y, len) \
+ do { \
+ if ((WMI_SERVICE_IS_ENABLED((in), (x), (len))) || \
+ (WMI_TLV_EXT_SERVICE_IS_ENABLED((in), (x), (len)))) \
+ __set_bit(y, out); \
+ } while (0)
+
+static inline void
+wmi_tlv_svc_map(const __le32 *in, unsigned long *out, size_t len)
+{
+ SVCMAP(WMI_TLV_SERVICE_BEACON_OFFLOAD,
+ WMI_SERVICE_BEACON_OFFLOAD, len);
+ SVCMAP(WMI_TLV_SERVICE_SCAN_OFFLOAD,
+ WMI_SERVICE_SCAN_OFFLOAD, len);
+ SVCMAP(WMI_TLV_SERVICE_ROAM_SCAN_OFFLOAD,
+ WMI_SERVICE_ROAM_SCAN_OFFLOAD, len);
+ SVCMAP(WMI_TLV_SERVICE_BCN_MISS_OFFLOAD,
+ WMI_SERVICE_BCN_MISS_OFFLOAD, len);
+ SVCMAP(WMI_TLV_SERVICE_STA_PWRSAVE,
+ WMI_SERVICE_STA_PWRSAVE, len);
+ SVCMAP(WMI_TLV_SERVICE_STA_ADVANCED_PWRSAVE,
+ WMI_SERVICE_STA_ADVANCED_PWRSAVE, len);
+ SVCMAP(WMI_TLV_SERVICE_AP_UAPSD,
+ WMI_SERVICE_AP_UAPSD, len);
+ SVCMAP(WMI_TLV_SERVICE_AP_DFS,
+ WMI_SERVICE_AP_DFS, len);
+ SVCMAP(WMI_TLV_SERVICE_11AC,
+ WMI_SERVICE_11AC, len);
+ SVCMAP(WMI_TLV_SERVICE_BLOCKACK,
+ WMI_SERVICE_BLOCKACK, len);
+ SVCMAP(WMI_TLV_SERVICE_PHYERR,
+ WMI_SERVICE_PHYERR, len);
+ SVCMAP(WMI_TLV_SERVICE_BCN_FILTER,
+ WMI_SERVICE_BCN_FILTER, len);
+ SVCMAP(WMI_TLV_SERVICE_RTT,
+ WMI_SERVICE_RTT, len);
+ SVCMAP(WMI_TLV_SERVICE_WOW,
+ WMI_SERVICE_WOW, len);
+ SVCMAP(WMI_TLV_SERVICE_RATECTRL_CACHE,
+ WMI_SERVICE_RATECTRL_CACHE, len);
+ SVCMAP(WMI_TLV_SERVICE_IRAM_TIDS,
+ WMI_SERVICE_IRAM_TIDS, len);
+ SVCMAP(WMI_TLV_SERVICE_ARPNS_OFFLOAD,
+ WMI_SERVICE_ARPNS_OFFLOAD, len);
+ SVCMAP(WMI_TLV_SERVICE_NLO,
+ WMI_SERVICE_NLO, len);
+ SVCMAP(WMI_TLV_SERVICE_GTK_OFFLOAD,
+ WMI_SERVICE_GTK_OFFLOAD, len);
+ SVCMAP(WMI_TLV_SERVICE_SCAN_SCH,
+ WMI_SERVICE_SCAN_SCH, len);
+ SVCMAP(WMI_TLV_SERVICE_CSA_OFFLOAD,
+ WMI_SERVICE_CSA_OFFLOAD, len);
+ SVCMAP(WMI_TLV_SERVICE_CHATTER,
+ WMI_SERVICE_CHATTER, len);
+ SVCMAP(WMI_TLV_SERVICE_COEX_FREQAVOID,
+ WMI_SERVICE_COEX_FREQAVOID, len);
+ SVCMAP(WMI_TLV_SERVICE_PACKET_POWER_SAVE,
+ WMI_SERVICE_PACKET_POWER_SAVE, len);
+ SVCMAP(WMI_TLV_SERVICE_FORCE_FW_HANG,
+ WMI_SERVICE_FORCE_FW_HANG, len);
+ SVCMAP(WMI_TLV_SERVICE_GPIO,
+ WMI_SERVICE_GPIO, len);
+ SVCMAP(WMI_TLV_SERVICE_STA_DTIM_PS_MODULATED_DTIM,
+ WMI_SERVICE_STA_DTIM_PS_MODULATED_DTIM, len);
+ SVCMAP(WMI_TLV_SERVICE_STA_UAPSD_BASIC_AUTO_TRIG,
+ WMI_SERVICE_STA_UAPSD_BASIC_AUTO_TRIG, len);
+ SVCMAP(WMI_TLV_SERVICE_STA_UAPSD_VAR_AUTO_TRIG,
+ WMI_SERVICE_STA_UAPSD_VAR_AUTO_TRIG, len);
+ SVCMAP(WMI_TLV_SERVICE_STA_KEEP_ALIVE,
+ WMI_SERVICE_STA_KEEP_ALIVE, len);
+ SVCMAP(WMI_TLV_SERVICE_TX_ENCAP,
+ WMI_SERVICE_TX_ENCAP, len);
+ SVCMAP(WMI_TLV_SERVICE_AP_PS_DETECT_OUT_OF_SYNC,
+ WMI_SERVICE_AP_PS_DETECT_OUT_OF_SYNC, len);
+ SVCMAP(WMI_TLV_SERVICE_EARLY_RX,
+ WMI_SERVICE_EARLY_RX, len);
+ SVCMAP(WMI_TLV_SERVICE_STA_SMPS,
+ WMI_SERVICE_STA_SMPS, len);
+ SVCMAP(WMI_TLV_SERVICE_FWTEST,
+ WMI_SERVICE_FWTEST, len);
+ SVCMAP(WMI_TLV_SERVICE_STA_WMMAC,
+ WMI_SERVICE_STA_WMMAC, len);
+ SVCMAP(WMI_TLV_SERVICE_TDLS,
+ WMI_SERVICE_TDLS, len);
+ SVCMAP(WMI_TLV_SERVICE_BURST,
+ WMI_SERVICE_BURST, len);
+ SVCMAP(WMI_TLV_SERVICE_MCC_BCN_INTERVAL_CHANGE,
+ WMI_SERVICE_MCC_BCN_INTERVAL_CHANGE, len);
+ SVCMAP(WMI_TLV_SERVICE_ADAPTIVE_OCS,
+ WMI_SERVICE_ADAPTIVE_OCS, len);
+ SVCMAP(WMI_TLV_SERVICE_BA_SSN_SUPPORT,
+ WMI_SERVICE_BA_SSN_SUPPORT, len);
+ SVCMAP(WMI_TLV_SERVICE_FILTER_IPSEC_NATKEEPALIVE,
+ WMI_SERVICE_FILTER_IPSEC_NATKEEPALIVE, len);
+ SVCMAP(WMI_TLV_SERVICE_WLAN_HB,
+ WMI_SERVICE_WLAN_HB, len);
+ SVCMAP(WMI_TLV_SERVICE_LTE_ANT_SHARE_SUPPORT,
+ WMI_SERVICE_LTE_ANT_SHARE_SUPPORT, len);
+ SVCMAP(WMI_TLV_SERVICE_BATCH_SCAN,
+ WMI_SERVICE_BATCH_SCAN, len);
+ SVCMAP(WMI_TLV_SERVICE_QPOWER,
+ WMI_SERVICE_QPOWER, len);
+ SVCMAP(WMI_TLV_SERVICE_PLMREQ,
+ WMI_SERVICE_PLMREQ, len);
+ SVCMAP(WMI_TLV_SERVICE_THERMAL_MGMT,
+ WMI_SERVICE_THERMAL_MGMT, len);
+ SVCMAP(WMI_TLV_SERVICE_RMC,
+ WMI_SERVICE_RMC, len);
+ SVCMAP(WMI_TLV_SERVICE_MHF_OFFLOAD,
+ WMI_SERVICE_MHF_OFFLOAD, len);
+ SVCMAP(WMI_TLV_SERVICE_COEX_SAR,
+ WMI_SERVICE_COEX_SAR, len);
+ SVCMAP(WMI_TLV_SERVICE_BCN_TXRATE_OVERRIDE,
+ WMI_SERVICE_BCN_TXRATE_OVERRIDE, len);
+ SVCMAP(WMI_TLV_SERVICE_NAN,
+ WMI_SERVICE_NAN, len);
+ SVCMAP(WMI_TLV_SERVICE_L1SS_STAT,
+ WMI_SERVICE_L1SS_STAT, len);
+ SVCMAP(WMI_TLV_SERVICE_ESTIMATE_LINKSPEED,
+ WMI_SERVICE_ESTIMATE_LINKSPEED, len);
+ SVCMAP(WMI_TLV_SERVICE_OBSS_SCAN,
+ WMI_SERVICE_OBSS_SCAN, len);
+ SVCMAP(WMI_TLV_SERVICE_TDLS_OFFCHAN,
+ WMI_SERVICE_TDLS_OFFCHAN, len);
+ SVCMAP(WMI_TLV_SERVICE_TDLS_UAPSD_BUFFER_STA,
+ WMI_SERVICE_TDLS_UAPSD_BUFFER_STA, len);
+ SVCMAP(WMI_TLV_SERVICE_TDLS_UAPSD_SLEEP_STA,
+ WMI_SERVICE_TDLS_UAPSD_SLEEP_STA, len);
+ SVCMAP(WMI_TLV_SERVICE_IBSS_PWRSAVE,
+ WMI_SERVICE_IBSS_PWRSAVE, len);
+ SVCMAP(WMI_TLV_SERVICE_LPASS,
+ WMI_SERVICE_LPASS, len);
+ SVCMAP(WMI_TLV_SERVICE_EXTSCAN,
+ WMI_SERVICE_EXTSCAN, len);
+ SVCMAP(WMI_TLV_SERVICE_D0WOW,
+ WMI_SERVICE_D0WOW, len);
+ SVCMAP(WMI_TLV_SERVICE_HSOFFLOAD,
+ WMI_SERVICE_HSOFFLOAD, len);
+ SVCMAP(WMI_TLV_SERVICE_ROAM_HO_OFFLOAD,
+ WMI_SERVICE_ROAM_HO_OFFLOAD, len);
+ SVCMAP(WMI_TLV_SERVICE_RX_FULL_REORDER,
+ WMI_SERVICE_RX_FULL_REORDER, len);
+ SVCMAP(WMI_TLV_SERVICE_DHCP_OFFLOAD,
+ WMI_SERVICE_DHCP_OFFLOAD, len);
+ SVCMAP(WMI_TLV_SERVICE_STA_RX_IPA_OFFLOAD_SUPPORT,
+ WMI_SERVICE_STA_RX_IPA_OFFLOAD_SUPPORT, len);
+ SVCMAP(WMI_TLV_SERVICE_MDNS_OFFLOAD,
+ WMI_SERVICE_MDNS_OFFLOAD, len);
+ SVCMAP(WMI_TLV_SERVICE_SAP_AUTH_OFFLOAD,
+ WMI_SERVICE_SAP_AUTH_OFFLOAD, len);
+ SVCMAP(WMI_TLV_SERVICE_MGMT_TX_WMI,
+ WMI_SERVICE_MGMT_TX_WMI, len);
+}
+
+static inline void
+wmi_tlv_svc_map_ext(const __le32 *in, unsigned long *out, size_t len)
+{
+ SVCMAP(WMI_TLV_SERVICE_SPOOF_MAC_SUPPORT,
+ WMI_SERVICE_SPOOF_MAC_SUPPORT,
+ WMI_TLV_MAX_SERVICE);
+}
+
+#undef SVCMAP
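+
+/* Minimal usage sketch (editor's illustration, not part of the driver):
+ * translate a hypothetical 128-bit firmware service bitmap (four
+ * little-endian words) into the host's generic service bitmap. Assumes
+ * WMI_SERVICE_MAX and the kernel bitmap helpers are visible here.
+ */
+static inline void wmi_tlv_svc_map_example(void)
+{
+	__le32 fw_bitmap[4] = { __cpu_to_le32(BIT(WMI_TLV_SERVICE_11AC)) };
+	DECLARE_BITMAP(host_svc, WMI_SERVICE_MAX) = {0};
+
+	/* len is the number of 32-bit words in the regular bitmap */
+	wmi_tlv_svc_map(fw_bitmap, host_svc, ARRAY_SIZE(fw_bitmap));
+}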
+
+struct wmi_tlv {
+ __le16 len;
+ __le16 tag;
+ u8 value[0];
+} __packed;
+
+struct ath10k_mgmt_tx_pkt_addr {
+ void *vaddr;
+ dma_addr_t paddr;
+};
+
+struct wmi_tlv_mgmt_tx_compl_ev {
+ __le32 desc_id;
+ __le32 status;
+ __le32 pdev_id;
+};
+
+#define WMI_TLV_MGMT_RX_NUM_RSSI 4
+
+struct wmi_tlv_mgmt_rx_ev {
+ __le32 channel;
+ __le32 snr;
+ __le32 rate;
+ __le32 phy_mode;
+ __le32 buf_len;
+ __le32 status;
+ __le32 rssi[WMI_TLV_MGMT_RX_NUM_RSSI];
+} __packed;
+
+struct wmi_tlv_abi_version {
+ __le32 abi_ver0;
+ __le32 abi_ver1;
+ __le32 abi_ver_ns0;
+ __le32 abi_ver_ns1;
+ __le32 abi_ver_ns2;
+ __le32 abi_ver_ns3;
+} __packed;
+
+enum wmi_tlv_hw_bd_id {
+ WMI_TLV_HW_BD_LEGACY = 0,
+ WMI_TLV_HW_BD_QCA6174 = 1,
+ WMI_TLV_HW_BD_QCA2582 = 2,
+};
+
+struct wmi_tlv_hw_bd_info {
+ u8 rev;
+ u8 project_id;
+ u8 custom_id;
+ u8 reference_design_id;
+} __packed;
+
+struct wmi_tlv_svc_rdy_ev {
+ __le32 fw_build_vers;
+ struct wmi_tlv_abi_version abi;
+ __le32 phy_capability;
+ __le32 max_frag_entry;
+ __le32 num_rf_chains;
+ __le32 ht_cap_info;
+ __le32 vht_cap_info;
+ __le32 vht_supp_mcs;
+ __le32 hw_min_tx_power;
+ __le32 hw_max_tx_power;
+ __le32 sys_cap_info;
+ __le32 min_pkt_size_enable;
+ __le32 max_bcn_ie_size;
+ __le32 num_mem_reqs;
+ __le32 max_num_scan_chans;
+ __le32 hw_bd_id; /* 0 means hw_bd_info is invalid */
+ struct wmi_tlv_hw_bd_info hw_bd_info[5];
+} __packed;
+
+struct wmi_tlv_rdy_ev {
+ struct wmi_tlv_abi_version abi;
+ struct wmi_mac_addr mac_addr;
+ __le32 status;
+} __packed;
+
+struct wmi_tlv_resource_config {
+ __le32 num_vdevs;
+ __le32 num_peers;
+ __le32 num_offload_peers;
+ __le32 num_offload_reorder_bufs;
+ __le32 num_peer_keys;
+ __le32 num_tids;
+ __le32 ast_skid_limit;
+ __le32 tx_chain_mask;
+ __le32 rx_chain_mask;
+ __le32 rx_timeout_pri[4];
+ __le32 rx_decap_mode;
+ __le32 scan_max_pending_reqs;
+ __le32 bmiss_offload_max_vdev;
+ __le32 roam_offload_max_vdev;
+ __le32 roam_offload_max_ap_profiles;
+ __le32 num_mcast_groups;
+ __le32 num_mcast_table_elems;
+ __le32 mcast2ucast_mode;
+ __le32 tx_dbg_log_size;
+ __le32 num_wds_entries;
+ __le32 dma_burst_size;
+ __le32 mac_aggr_delim;
+ __le32 rx_skip_defrag_timeout_dup_detection_check;
+ __le32 vow_config;
+ __le32 gtk_offload_max_vdev;
+ __le32 num_msdu_desc;
+ __le32 max_frag_entries;
+ __le32 num_tdls_vdevs;
+ __le32 num_tdls_conn_table_entries;
+ __le32 beacon_tx_offload_max_vdev;
+ __le32 num_multicast_filter_entries;
+ __le32 num_wow_filters;
+ __le32 num_keep_alive_pattern;
+ __le32 keep_alive_pattern_size;
+ __le32 max_tdls_concurrent_sleep_sta;
+ __le32 max_tdls_concurrent_buffer_sta;
+ __le32 wmi_send_separate;
+ __le32 num_ocb_vdevs;
+ __le32 num_ocb_channels;
+ __le32 num_ocb_schedules;
+ __le32 host_capab;
+} __packed;
+
+struct wmi_tlv_init_cmd {
+ struct wmi_tlv_abi_version abi;
+ __le32 num_host_mem_chunks;
+} __packed;
+
+struct wmi_tlv_pdev_get_temp_cmd {
+ __le32 pdev_id; /* not used */
+} __packed;
+
+struct wmi_tlv_pdev_temperature_event {
+ __le32 tlv_hdr;
+ /* temperature value in Celcius degree */
+ __le32 temperature;
+ __le32 pdev_id;
+} __packed;
+
+struct wmi_tlv_pdev_set_param_cmd {
+ __le32 pdev_id; /* not used yet */
+ __le32 param_id;
+ __le32 param_value;
+} __packed;
+
+struct wmi_tlv_pdev_set_rd_cmd {
+ __le32 pdev_id; /* not used yet */
+ __le32 regd;
+ __le32 regd_2ghz;
+ __le32 regd_5ghz;
+ __le32 conform_limit_2ghz;
+ __le32 conform_limit_5ghz;
+} __packed;
+
+struct wmi_tlv_scan_chan_list_cmd {
+ __le32 num_scan_chans;
+} __packed;
+
+struct wmi_scan_prob_req_oui_cmd {
+/* OUI to be used in the Probe Request frame when a random MAC address is
+ * requested as part of the scan parameters. This applies to both firmware
+ * internal scans and host initiated scans. The host can request a random
+ * MAC address with the WMI_SCAN_ADD_SPOOFED_MAC_IN_PROBE_REQ flag.
+ */
+ __le32 prob_req_oui;
+} __packed;
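+
+/* Editor's sketch, not driver code: one plausible way to pack a 3-byte
+ * OUI into prob_req_oui, assuming the firmware expects it in the low
+ * 24 bits with the first OUI octet most significant.
+ */
+static inline __le32 wmi_tlv_pack_prob_req_oui(const u8 *mac_addr)
+{
+	u32 oui = ((u32)mac_addr[0] << 16) |
+		  ((u32)mac_addr[1] << 8) |
+		  mac_addr[2];
+
+	return __cpu_to_le32(oui);
+}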
+
+struct wmi_tlv_start_scan_cmd {
+ struct wmi_start_scan_common common;
+ __le32 burst_duration_ms;
+ __le32 num_channels;
+ __le32 num_bssids;
+ __le32 num_ssids;
+ __le32 ie_len;
+ __le32 num_probes;
+ struct wmi_mac_addr mac_addr;
+ struct wmi_mac_addr mac_mask;
+} __packed;
+
+struct wmi_tlv_vdev_start_cmd {
+ __le32 vdev_id;
+ __le32 requestor_id;
+ __le32 bcn_intval;
+ __le32 dtim_period;
+ __le32 flags;
+ struct wmi_ssid ssid;
+ __le32 bcn_tx_rate;
+ __le32 bcn_tx_power;
+ __le32 num_noa_descr;
+ __le32 disable_hw_ack;
+} __packed;
+
+enum {
+ WMI_TLV_PEER_TYPE_DEFAULT = 0, /* generic / non-BSS / self-peer */
+ WMI_TLV_PEER_TYPE_BSS = 1,
+ WMI_TLV_PEER_TYPE_TDLS = 2,
+ WMI_TLV_PEER_TYPE_HOST_MAX = 127,
+ WMI_TLV_PEER_TYPE_ROAMOFFLOAD_TMP = 128,
+};
+
+struct wmi_tlv_peer_create_cmd {
+ __le32 vdev_id;
+ struct wmi_mac_addr peer_addr;
+ __le32 peer_type;
+} __packed;
+
+struct wmi_tlv_peer_assoc_cmd {
+ struct wmi_mac_addr mac_addr;
+ __le32 vdev_id;
+ __le32 new_assoc;
+ __le32 assoc_id;
+ __le32 flags;
+ __le32 caps;
+ __le32 listen_intval;
+ __le32 ht_caps;
+ __le32 max_mpdu;
+ __le32 mpdu_density;
+ __le32 rate_caps;
+ __le32 nss;
+ __le32 vht_caps;
+ __le32 phy_mode;
+ __le32 ht_info[2];
+ __le32 num_legacy_rates;
+ __le32 num_ht_rates;
+} __packed;
+
+struct wmi_tlv_pdev_suspend {
+ __le32 pdev_id; /* not used yet */
+ __le32 opt;
+} __packed;
+
+struct wmi_tlv_pdev_set_wmm_cmd {
+ __le32 pdev_id; /* not used yet */
+	__le32 dg_type; /* purpose unknown */
+} __packed;
+
+struct wmi_tlv_vdev_wmm_params {
+ __le32 dummy;
+ struct wmi_wmm_params params;
+} __packed;
+
+struct wmi_tlv_vdev_set_wmm_cmd {
+ __le32 vdev_id;
+ struct wmi_tlv_vdev_wmm_params vdev_wmm_params[4];
+} __packed;
+
+struct wmi_tlv_phyerr_ev {
+ __le32 num_phyerrs;
+ __le32 tsf_l32;
+ __le32 tsf_u32;
+ __le32 buf_len;
+} __packed;
+
+enum wmi_tlv_dbglog_param {
+ WMI_TLV_DBGLOG_PARAM_LOG_LEVEL = 1,
+ WMI_TLV_DBGLOG_PARAM_VDEV_ENABLE,
+ WMI_TLV_DBGLOG_PARAM_VDEV_DISABLE,
+ WMI_TLV_DBGLOG_PARAM_VDEV_ENABLE_BITMAP,
+ WMI_TLV_DBGLOG_PARAM_VDEV_DISABLE_BITMAP,
+};
+
+enum wmi_tlv_dbglog_log_level {
+ WMI_TLV_DBGLOG_LOG_LEVEL_VERBOSE = 0,
+ WMI_TLV_DBGLOG_LOG_LEVEL_INFO,
+ WMI_TLV_DBGLOG_LOG_LEVEL_INFO_LVL_1,
+ WMI_TLV_DBGLOG_LOG_LEVEL_INFO_LVL_2,
+ WMI_TLV_DBGLOG_LOG_LEVEL_WARN,
+ WMI_TLV_DBGLOG_LOG_LEVEL_ERR,
+};
+
+#define WMI_TLV_DBGLOG_BITMAP_MAX_IDS 512
+#define WMI_TLV_DBGLOG_BITMAP_MAX_WORDS (WMI_TLV_DBGLOG_BITMAP_MAX_IDS / \
+ sizeof(__le32))
+#define WMI_TLV_DBGLOG_ALL_MODULES 0xffff
+#define WMI_TLV_DBGLOG_LOG_LEVEL_VALUE(module_id, log_level) \
+ (((module_id << 16) & 0xffff0000) | \
+ ((log_level << 0) & 0x000000ff))
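+
+/* Worked example (editor's note): WMI_TLV_DBGLOG_LOG_LEVEL_VALUE with
+ * module_id == WMI_TLV_DBGLOG_ALL_MODULES (0xffff) and log_level ==
+ * WMI_TLV_DBGLOG_LOG_LEVEL_WARN (4) packs to 0xffff0004: the module id
+ * occupies the upper 16 bits and the level the low byte.
+ */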
+
+struct wmi_tlv_dbglog_cmd {
+ __le32 param;
+ __le32 value;
+} __packed;
+
+struct wmi_tlv_resume_cmd {
+ __le32 reserved;
+} __packed;
+
+struct wmi_tlv_req_stats_cmd {
+ __le32 stats_id; /* wmi_stats_id */
+ __le32 vdev_id;
+ struct wmi_mac_addr peer_macaddr;
+} __packed;
+
+struct wmi_tlv_vdev_stats {
+ __le32 vdev_id;
+ __le32 beacon_snr;
+ __le32 data_snr;
+ __le32 num_tx_frames[4]; /* per-AC */
+ __le32 num_rx_frames;
+ __le32 num_tx_frames_retries[4];
+ __le32 num_tx_frames_failures[4];
+ __le32 num_rts_fail;
+ __le32 num_rts_success;
+ __le32 num_rx_err;
+ __le32 num_rx_discard;
+ __le32 num_tx_not_acked;
+ __le32 tx_rate_history[10];
+ __le32 beacon_rssi_history[10];
+} __packed;
+
+struct wmi_tlv_pktlog_enable {
+ __le32 reserved;
+ __le32 filter;
+} __packed;
+
+struct wmi_tlv_pktlog_disable {
+ __le32 reserved;
+} __packed;
+
+enum wmi_tlv_bcn_tx_status {
+ WMI_TLV_BCN_TX_STATUS_OK,
+ WMI_TLV_BCN_TX_STATUS_XRETRY,
+ WMI_TLV_BCN_TX_STATUS_DROP,
+ WMI_TLV_BCN_TX_STATUS_FILTERED,
+};
+
+struct wmi_tlv_bcn_tx_status_ev {
+ __le32 vdev_id;
+ __le32 tx_status;
+} __packed;
+
+struct wmi_tlv_bcn_prb_info {
+ __le32 caps;
+ __le32 erp;
+ u8 ies[0];
+} __packed;
+
+struct wmi_tlv_bcn_tmpl_cmd {
+ __le32 vdev_id;
+ __le32 tim_ie_offset;
+ __le32 buf_len;
+} __packed;
+
+struct wmi_tlv_prb_tmpl_cmd {
+ __le32 vdev_id;
+ __le32 buf_len;
+} __packed;
+
+struct wmi_tlv_p2p_go_bcn_ie {
+ __le32 vdev_id;
+ __le32 ie_len;
+} __packed;
+
+enum wmi_tlv_diag_item_type {
+ WMI_TLV_DIAG_ITEM_TYPE_FW_EVENT,
+ WMI_TLV_DIAG_ITEM_TYPE_FW_LOG,
+ WMI_TLV_DIAG_ITEM_TYPE_FW_DEBUG_MSG,
+};
+
+struct wmi_tlv_diag_item {
+ u8 type;
+ u8 reserved;
+ __le16 len;
+ __le32 timestamp;
+ __le32 code;
+ u8 payload[0];
+} __packed;
+
+struct wmi_tlv_diag_data_ev {
+ __le32 num_items;
+} __packed;
+
+struct wmi_tlv_sta_keepalive_cmd {
+ __le32 vdev_id;
+ __le32 enabled;
+ __le32 method; /* WMI_STA_KEEPALIVE_METHOD_ */
+ __le32 interval; /* in seconds */
+} __packed;
+
+struct wmi_tlv_stats_ev {
+ __le32 stats_id; /* WMI_STAT_ */
+ __le32 num_pdev_stats;
+ __le32 num_vdev_stats;
+ __le32 num_peer_stats;
+ __le32 num_bcnflt_stats;
+ __le32 num_chan_stats;
+} __packed;
+
+struct wmi_tlv_p2p_noa_ev {
+ __le32 vdev_id;
+} __packed;
+
+struct wmi_tlv_roam_ev {
+ __le32 vdev_id;
+ __le32 reason;
+ __le32 rssi;
+} __packed;
+
+struct wmi_tlv_wow_add_del_event_cmd {
+ __le32 vdev_id;
+ __le32 is_add;
+ __le32 event_bitmap;
+} __packed;
+
+/* Command to enable or disable quiet mode on the chip */
+struct wmi_tlv_set_quiet_cmd {
+ __le32 vdev_id;
+
+ /* in TUs */
+ __le32 period;
+
+ /* in TUs */
+ __le32 duration;
+
+ /* offset in TUs */
+ __le32 next_start;
+ __le32 enabled;
+} __packed;
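+
+/* Editor's note: 1 TU (time unit) is 1024 microseconds, so e.g. a
+ * period of 100 TU is roughly 102.4 ms.
+ */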
+
+struct wmi_tlv_wow_enable_cmd {
+ __le32 enable;
+} __packed;
+
+struct wmi_tlv_wow_host_wakeup_ind {
+ __le32 reserved;
+} __packed;
+
+struct wmi_tlv_wow_event_info {
+ __le32 vdev_id;
+ __le32 flag;
+ __le32 wake_reason;
+ __le32 data_len;
+} __packed;
+
+enum wmi_tlv_pattern_type {
+ WOW_PATTERN_MIN = 0,
+ WOW_BITMAP_PATTERN = WOW_PATTERN_MIN,
+ WOW_IPV4_SYNC_PATTERN,
+ WOW_IPV6_SYNC_PATTERN,
+ WOW_WILD_CARD_PATTERN,
+ WOW_TIMER_PATTERN,
+ WOW_MAGIC_PATTERN,
+ WOW_IPV6_RA_PATTERN,
+ WOW_IOAC_PKT_PATTERN,
+ WOW_IOAC_TMR_PATTERN,
+ WOW_PATTERN_MAX
+};
+
+#define WOW_DEFAULT_BITMAP_PATTERN_SIZE 148
+#define WOW_DEFAULT_BITMASK_SIZE 148
+
+struct wmi_tlv_wow_bitmap_pattern {
+ u8 patternbuf[WOW_DEFAULT_BITMAP_PATTERN_SIZE];
+ u8 bitmaskbuf[WOW_DEFAULT_BITMASK_SIZE];
+ __le32 pattern_offset;
+ __le32 pattern_len;
+ __le32 bitmask_len;
+ __le32 pattern_id;
+} __packed;
+
+struct wmi_tlv_wow_add_pattern_cmd {
+ __le32 vdev_id;
+ __le32 pattern_id;
+ __le32 pattern_type;
+} __packed;
+
+struct wmi_tlv_wow_del_pattern_cmd {
+ __le32 vdev_id;
+ __le32 pattern_id;
+ __le32 pattern_type;
+} __packed;
+
+/* TDLS Options */
+enum wmi_tlv_tdls_options {
+ WMI_TLV_TDLS_OFFCHAN_EN = BIT(0),
+ WMI_TLV_TDLS_BUFFER_STA_EN = BIT(1),
+ WMI_TLV_TDLS_SLEEP_STA_EN = BIT(2),
+};
+
+struct wmi_tdls_set_state_cmd {
+ __le32 vdev_id;
+ __le32 state;
+ __le32 notification_interval_ms;
+ __le32 tx_discovery_threshold;
+ __le32 tx_teardown_threshold;
+ __le32 rssi_teardown_threshold;
+ __le32 rssi_delta;
+ __le32 tdls_options;
+ __le32 tdls_peer_traffic_ind_window;
+ __le32 tdls_peer_traffic_response_timeout_ms;
+ __le32 tdls_puapsd_mask;
+ __le32 tdls_puapsd_inactivity_time_ms;
+ __le32 tdls_puapsd_rx_frame_threshold;
+} __packed;
+
+struct wmi_tdls_peer_update_cmd {
+ __le32 vdev_id;
+ struct wmi_mac_addr peer_macaddr;
+ __le32 peer_state;
+} __packed;
+
+enum {
+ WMI_TLV_TDLS_PEER_QOS_AC_VO = BIT(0),
+ WMI_TLV_TDLS_PEER_QOS_AC_VI = BIT(1),
+ WMI_TLV_TDLS_PEER_QOS_AC_BK = BIT(2),
+ WMI_TLV_TDLS_PEER_QOS_AC_BE = BIT(3),
+};
+
+#define WMI_TLV_TDLS_PEER_SP_MASK 0x60
+#define WMI_TLV_TDLS_PEER_SP_LSB 5
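+
+/* Worked example (editor's note): the SP (service period) field is
+ * extracted from peer_qos as (peer_qos & WMI_TLV_TDLS_PEER_SP_MASK) >>
+ * WMI_TLV_TDLS_PEER_SP_LSB, i.e. bits 5-6.
+ */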
+
+struct wmi_tdls_peer_capab {
+ __le32 peer_qos;
+ __le32 buff_sta_support;
+ __le32 off_chan_support;
+ __le32 peer_curr_operclass;
+ __le32 self_curr_operclass;
+ __le32 peer_chan_len;
+ __le32 peer_operclass_len;
+ u8 peer_operclass[WMI_TDLS_MAX_SUPP_OPER_CLASSES];
+ __le32 is_peer_responder;
+ __le32 pref_offchan_num;
+ __le32 pref_offchan_bw;
+} __packed;
+
+struct wmi_tlv_adaptive_qcs {
+ __le32 enable;
+} __packed;
+
+/**
+ * enum wmi_tlv_tx_pause_id - firmware tx queue pause reason types
+ *
+ * @WMI_TLV_TX_PAUSE_ID_MCC: used by the multi-channel firmware scheduler.
+ * Only vdev_map is valid.
+ * @WMI_TLV_TX_PAUSE_ID_AP_PEER_PS: peer in AP mode is asleep.
+ * Only peer_id is valid.
+ * @WMI_TLV_TX_PAUSE_ID_AP_PEER_UAPSD: Only peer_id and tid_map are valid.
+ * @WMI_TLV_TX_PAUSE_ID_P2P_CLI_NOA: Only vdev_map is valid.
+ * @WMI_TLV_TX_PAUSE_ID_P2P_GO_PS: Only vdev_map is valid.
+ * @WMI_TLV_TX_PAUSE_ID_STA_ADD_BA: Only peer_id and tid_map are valid.
+ * @WMI_TLV_TX_PAUSE_ID_AP_PS: When all peers are asleep in AP mode. Only
+ * vdev_map is valid.
+ * @WMI_TLV_TX_PAUSE_ID_IBSS_PS: When all peers are asleep in IBSS mode. Only
+ * vdev_map is valid.
+ * @WMI_TLV_TX_PAUSE_ID_HOST: Host itself requested tx pause.
+ */
+enum wmi_tlv_tx_pause_id {
+ WMI_TLV_TX_PAUSE_ID_MCC = 1,
+ WMI_TLV_TX_PAUSE_ID_AP_PEER_PS = 2,
+ WMI_TLV_TX_PAUSE_ID_AP_PEER_UAPSD = 3,
+ WMI_TLV_TX_PAUSE_ID_P2P_CLI_NOA = 4,
+ WMI_TLV_TX_PAUSE_ID_P2P_GO_PS = 5,
+ WMI_TLV_TX_PAUSE_ID_STA_ADD_BA = 6,
+ WMI_TLV_TX_PAUSE_ID_AP_PS = 7,
+ WMI_TLV_TX_PAUSE_ID_IBSS_PS = 8,
+ WMI_TLV_TX_PAUSE_ID_HOST = 21,
+};
+
+enum wmi_tlv_tx_pause_action {
+ WMI_TLV_TX_PAUSE_ACTION_STOP,
+ WMI_TLV_TX_PAUSE_ACTION_WAKE,
+};
+
+struct wmi_tlv_tx_pause_ev {
+ __le32 pause_id;
+ __le32 action;
+ __le32 vdev_map;
+ __le32 peer_id;
+ __le32 tid_map;
+} __packed;
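+
+/* Illustrative sketch (editor's addition, not driver code): walking the
+ * vdev bitmap of a pause event whose pause_id marks vdev_map as the
+ * valid field (e.g. WMI_TLV_TX_PAUSE_ID_MCC).
+ */
+static inline void
+wmi_tlv_tx_pause_ev_example(const struct wmi_tlv_tx_pause_ev *ev)
+{
+	unsigned long vdev_map = __le32_to_cpu(ev->vdev_map);
+	int vdev_id;
+
+	for_each_set_bit(vdev_id, &vdev_map, 32)
+		/* pause or wake tx queues of vdev_id per ev->action */
+		;
+}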
+
+struct wmi_tlv_tdls_peer_event {
+ struct wmi_mac_addr peer_macaddr;
+ __le32 peer_status;
+ __le32 peer_reason;
+ __le32 vdev_id;
+} __packed;
+
+void ath10k_wmi_tlv_attach(struct ath10k *ar);
+
+struct wmi_tlv_mgmt_tx_cmd {
+ __le32 vdev_id;
+ __le32 desc_id;
+ __le32 chanfreq;
+ __le64 paddr;
+ __le32 frame_len;
+ __le32 buf_len;
+} __packed;
+#endif
diff --git a/drivers/net/wireless/ath/ath10k/wmi.c b/drivers/net/wireless/ath/ath10k/wmi.c
new file mode 100644
index 000000000..41eb57be9
--- /dev/null
+++ b/drivers/net/wireless/ath/ath10k/wmi.c
@@ -0,0 +1,9235 @@
+/*
+ * Copyright (c) 2005-2011 Atheros Communications Inc.
+ * Copyright (c) 2011-2017 Qualcomm Atheros, Inc.
+ * Copyright (c) 2018-2019, The Linux Foundation. All rights reserved.
+ *
+ * Permission to use, copy, modify, and/or distribute this software for any
+ * purpose with or without fee is hereby granted, provided that the above
+ * copyright notice and this permission notice appear in all copies.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
+ * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
+ * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+ * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+ * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
+ * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+ */
+
+#include <linux/skbuff.h>
+#include <linux/ctype.h>
+
+#include "core.h"
+#include "htc.h"
+#include "debug.h"
+#include "wmi.h"
+#include "wmi-tlv.h"
+#include "mac.h"
+#include "testmode.h"
+#include "wmi-ops.h"
+#include "p2p.h"
+#include "hw.h"
+#include "hif.h"
+#include "txrx.h"
+
+#define ATH10K_WMI_BARRIER_ECHO_ID 0xBA991E9
+#define ATH10K_WMI_BARRIER_TIMEOUT_HZ (3 * HZ)
+#define ATH10K_WMI_DFS_CONF_TIMEOUT_HZ (HZ / 6)
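+
+/* Editor's note: ATH10K_WMI_BARRIER_TIMEOUT_HZ is 3 seconds expressed in
+ * jiffies; ATH10K_WMI_DFS_CONF_TIMEOUT_HZ is HZ / 6, roughly 166 ms.
+ */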
+
+/* MAIN WMI command map */
+static struct wmi_cmd_map wmi_cmd_map = {
+ .init_cmdid = WMI_INIT_CMDID,
+ .start_scan_cmdid = WMI_START_SCAN_CMDID,
+ .stop_scan_cmdid = WMI_STOP_SCAN_CMDID,
+ .scan_chan_list_cmdid = WMI_SCAN_CHAN_LIST_CMDID,
+ .scan_sch_prio_tbl_cmdid = WMI_SCAN_SCH_PRIO_TBL_CMDID,
+ .scan_prob_req_oui_cmdid = WMI_CMD_UNSUPPORTED,
+ .pdev_set_regdomain_cmdid = WMI_PDEV_SET_REGDOMAIN_CMDID,
+ .pdev_set_channel_cmdid = WMI_PDEV_SET_CHANNEL_CMDID,
+ .pdev_set_param_cmdid = WMI_PDEV_SET_PARAM_CMDID,
+ .pdev_pktlog_enable_cmdid = WMI_PDEV_PKTLOG_ENABLE_CMDID,
+ .pdev_pktlog_disable_cmdid = WMI_PDEV_PKTLOG_DISABLE_CMDID,
+ .pdev_set_wmm_params_cmdid = WMI_PDEV_SET_WMM_PARAMS_CMDID,
+ .pdev_set_ht_cap_ie_cmdid = WMI_PDEV_SET_HT_CAP_IE_CMDID,
+ .pdev_set_vht_cap_ie_cmdid = WMI_PDEV_SET_VHT_CAP_IE_CMDID,
+ .pdev_set_dscp_tid_map_cmdid = WMI_PDEV_SET_DSCP_TID_MAP_CMDID,
+ .pdev_set_quiet_mode_cmdid = WMI_PDEV_SET_QUIET_MODE_CMDID,
+ .pdev_green_ap_ps_enable_cmdid = WMI_PDEV_GREEN_AP_PS_ENABLE_CMDID,
+ .pdev_get_tpc_config_cmdid = WMI_PDEV_GET_TPC_CONFIG_CMDID,
+ .pdev_set_base_macaddr_cmdid = WMI_PDEV_SET_BASE_MACADDR_CMDID,
+ .vdev_create_cmdid = WMI_VDEV_CREATE_CMDID,
+ .vdev_delete_cmdid = WMI_VDEV_DELETE_CMDID,
+ .vdev_start_request_cmdid = WMI_VDEV_START_REQUEST_CMDID,
+ .vdev_restart_request_cmdid = WMI_VDEV_RESTART_REQUEST_CMDID,
+ .vdev_up_cmdid = WMI_VDEV_UP_CMDID,
+ .vdev_stop_cmdid = WMI_VDEV_STOP_CMDID,
+ .vdev_down_cmdid = WMI_VDEV_DOWN_CMDID,
+ .vdev_set_param_cmdid = WMI_VDEV_SET_PARAM_CMDID,
+ .vdev_install_key_cmdid = WMI_VDEV_INSTALL_KEY_CMDID,
+ .peer_create_cmdid = WMI_PEER_CREATE_CMDID,
+ .peer_delete_cmdid = WMI_PEER_DELETE_CMDID,
+ .peer_flush_tids_cmdid = WMI_PEER_FLUSH_TIDS_CMDID,
+ .peer_set_param_cmdid = WMI_PEER_SET_PARAM_CMDID,
+ .peer_assoc_cmdid = WMI_PEER_ASSOC_CMDID,
+ .peer_add_wds_entry_cmdid = WMI_PEER_ADD_WDS_ENTRY_CMDID,
+ .peer_remove_wds_entry_cmdid = WMI_PEER_REMOVE_WDS_ENTRY_CMDID,
+ .peer_mcast_group_cmdid = WMI_PEER_MCAST_GROUP_CMDID,
+ .bcn_tx_cmdid = WMI_BCN_TX_CMDID,
+ .pdev_send_bcn_cmdid = WMI_PDEV_SEND_BCN_CMDID,
+ .bcn_tmpl_cmdid = WMI_BCN_TMPL_CMDID,
+ .bcn_filter_rx_cmdid = WMI_BCN_FILTER_RX_CMDID,
+ .prb_req_filter_rx_cmdid = WMI_PRB_REQ_FILTER_RX_CMDID,
+ .mgmt_tx_cmdid = WMI_MGMT_TX_CMDID,
+ .prb_tmpl_cmdid = WMI_PRB_TMPL_CMDID,
+ .addba_clear_resp_cmdid = WMI_ADDBA_CLEAR_RESP_CMDID,
+ .addba_send_cmdid = WMI_ADDBA_SEND_CMDID,
+ .addba_status_cmdid = WMI_ADDBA_STATUS_CMDID,
+ .delba_send_cmdid = WMI_DELBA_SEND_CMDID,
+ .addba_set_resp_cmdid = WMI_ADDBA_SET_RESP_CMDID,
+ .send_singleamsdu_cmdid = WMI_SEND_SINGLEAMSDU_CMDID,
+ .sta_powersave_mode_cmdid = WMI_STA_POWERSAVE_MODE_CMDID,
+ .sta_powersave_param_cmdid = WMI_STA_POWERSAVE_PARAM_CMDID,
+ .sta_mimo_ps_mode_cmdid = WMI_STA_MIMO_PS_MODE_CMDID,
+ .pdev_dfs_enable_cmdid = WMI_PDEV_DFS_ENABLE_CMDID,
+ .pdev_dfs_disable_cmdid = WMI_PDEV_DFS_DISABLE_CMDID,
+ .roam_scan_mode = WMI_ROAM_SCAN_MODE,
+ .roam_scan_rssi_threshold = WMI_ROAM_SCAN_RSSI_THRESHOLD,
+ .roam_scan_period = WMI_ROAM_SCAN_PERIOD,
+ .roam_scan_rssi_change_threshold = WMI_ROAM_SCAN_RSSI_CHANGE_THRESHOLD,
+ .roam_ap_profile = WMI_ROAM_AP_PROFILE,
+	.ofl_scan_add_ap_profile = WMI_OFL_SCAN_ADD_AP_PROFILE,
+ .ofl_scan_remove_ap_profile = WMI_OFL_SCAN_REMOVE_AP_PROFILE,
+ .ofl_scan_period = WMI_OFL_SCAN_PERIOD,
+ .p2p_dev_set_device_info = WMI_P2P_DEV_SET_DEVICE_INFO,
+ .p2p_dev_set_discoverability = WMI_P2P_DEV_SET_DISCOVERABILITY,
+ .p2p_go_set_beacon_ie = WMI_P2P_GO_SET_BEACON_IE,
+ .p2p_go_set_probe_resp_ie = WMI_P2P_GO_SET_PROBE_RESP_IE,
+ .p2p_set_vendor_ie_data_cmdid = WMI_P2P_SET_VENDOR_IE_DATA_CMDID,
+ .ap_ps_peer_param_cmdid = WMI_AP_PS_PEER_PARAM_CMDID,
+ .ap_ps_peer_uapsd_coex_cmdid = WMI_AP_PS_PEER_UAPSD_COEX_CMDID,
+ .peer_rate_retry_sched_cmdid = WMI_PEER_RATE_RETRY_SCHED_CMDID,
+ .wlan_profile_trigger_cmdid = WMI_WLAN_PROFILE_TRIGGER_CMDID,
+ .wlan_profile_set_hist_intvl_cmdid =
+ WMI_WLAN_PROFILE_SET_HIST_INTVL_CMDID,
+ .wlan_profile_get_profile_data_cmdid =
+ WMI_WLAN_PROFILE_GET_PROFILE_DATA_CMDID,
+ .wlan_profile_enable_profile_id_cmdid =
+ WMI_WLAN_PROFILE_ENABLE_PROFILE_ID_CMDID,
+ .wlan_profile_list_profile_id_cmdid =
+ WMI_WLAN_PROFILE_LIST_PROFILE_ID_CMDID,
+ .pdev_suspend_cmdid = WMI_PDEV_SUSPEND_CMDID,
+ .pdev_resume_cmdid = WMI_PDEV_RESUME_CMDID,
+ .add_bcn_filter_cmdid = WMI_ADD_BCN_FILTER_CMDID,
+ .rmv_bcn_filter_cmdid = WMI_RMV_BCN_FILTER_CMDID,
+ .wow_add_wake_pattern_cmdid = WMI_WOW_ADD_WAKE_PATTERN_CMDID,
+ .wow_del_wake_pattern_cmdid = WMI_WOW_DEL_WAKE_PATTERN_CMDID,
+ .wow_enable_disable_wake_event_cmdid =
+ WMI_WOW_ENABLE_DISABLE_WAKE_EVENT_CMDID,
+ .wow_enable_cmdid = WMI_WOW_ENABLE_CMDID,
+ .wow_hostwakeup_from_sleep_cmdid = WMI_WOW_HOSTWAKEUP_FROM_SLEEP_CMDID,
+ .rtt_measreq_cmdid = WMI_RTT_MEASREQ_CMDID,
+ .rtt_tsf_cmdid = WMI_RTT_TSF_CMDID,
+ .vdev_spectral_scan_configure_cmdid =
+ WMI_VDEV_SPECTRAL_SCAN_CONFIGURE_CMDID,
+ .vdev_spectral_scan_enable_cmdid = WMI_VDEV_SPECTRAL_SCAN_ENABLE_CMDID,
+ .request_stats_cmdid = WMI_REQUEST_STATS_CMDID,
+ .set_arp_ns_offload_cmdid = WMI_SET_ARP_NS_OFFLOAD_CMDID,
+ .network_list_offload_config_cmdid =
+ WMI_NETWORK_LIST_OFFLOAD_CONFIG_CMDID,
+ .gtk_offload_cmdid = WMI_GTK_OFFLOAD_CMDID,
+ .csa_offload_enable_cmdid = WMI_CSA_OFFLOAD_ENABLE_CMDID,
+ .csa_offload_chanswitch_cmdid = WMI_CSA_OFFLOAD_CHANSWITCH_CMDID,
+ .chatter_set_mode_cmdid = WMI_CHATTER_SET_MODE_CMDID,
+ .peer_tid_addba_cmdid = WMI_PEER_TID_ADDBA_CMDID,
+ .peer_tid_delba_cmdid = WMI_PEER_TID_DELBA_CMDID,
+ .sta_dtim_ps_method_cmdid = WMI_STA_DTIM_PS_METHOD_CMDID,
+ .sta_uapsd_auto_trig_cmdid = WMI_STA_UAPSD_AUTO_TRIG_CMDID,
+ .sta_keepalive_cmd = WMI_STA_KEEPALIVE_CMD,
+ .echo_cmdid = WMI_ECHO_CMDID,
+ .pdev_utf_cmdid = WMI_PDEV_UTF_CMDID,
+ .dbglog_cfg_cmdid = WMI_DBGLOG_CFG_CMDID,
+ .pdev_qvit_cmdid = WMI_PDEV_QVIT_CMDID,
+ .pdev_ftm_intg_cmdid = WMI_PDEV_FTM_INTG_CMDID,
+ .vdev_set_keepalive_cmdid = WMI_VDEV_SET_KEEPALIVE_CMDID,
+ .vdev_get_keepalive_cmdid = WMI_VDEV_GET_KEEPALIVE_CMDID,
+ .force_fw_hang_cmdid = WMI_FORCE_FW_HANG_CMDID,
+ .gpio_config_cmdid = WMI_GPIO_CONFIG_CMDID,
+ .gpio_output_cmdid = WMI_GPIO_OUTPUT_CMDID,
+ .pdev_get_temperature_cmdid = WMI_CMD_UNSUPPORTED,
+ .pdev_enable_adaptive_cca_cmdid = WMI_CMD_UNSUPPORTED,
+ .scan_update_request_cmdid = WMI_CMD_UNSUPPORTED,
+ .vdev_standby_response_cmdid = WMI_CMD_UNSUPPORTED,
+ .vdev_resume_response_cmdid = WMI_CMD_UNSUPPORTED,
+ .wlan_peer_caching_add_peer_cmdid = WMI_CMD_UNSUPPORTED,
+ .wlan_peer_caching_evict_peer_cmdid = WMI_CMD_UNSUPPORTED,
+ .wlan_peer_caching_restore_peer_cmdid = WMI_CMD_UNSUPPORTED,
+ .wlan_peer_caching_print_all_peers_info_cmdid = WMI_CMD_UNSUPPORTED,
+ .peer_update_wds_entry_cmdid = WMI_CMD_UNSUPPORTED,
+ .peer_add_proxy_sta_entry_cmdid = WMI_CMD_UNSUPPORTED,
+ .rtt_keepalive_cmdid = WMI_CMD_UNSUPPORTED,
+ .oem_req_cmdid = WMI_CMD_UNSUPPORTED,
+ .nan_cmdid = WMI_CMD_UNSUPPORTED,
+ .vdev_ratemask_cmdid = WMI_CMD_UNSUPPORTED,
+ .qboost_cfg_cmdid = WMI_CMD_UNSUPPORTED,
+ .pdev_smart_ant_enable_cmdid = WMI_CMD_UNSUPPORTED,
+ .pdev_smart_ant_set_rx_antenna_cmdid = WMI_CMD_UNSUPPORTED,
+ .peer_smart_ant_set_tx_antenna_cmdid = WMI_CMD_UNSUPPORTED,
+ .peer_smart_ant_set_train_info_cmdid = WMI_CMD_UNSUPPORTED,
+ .peer_smart_ant_set_node_config_ops_cmdid = WMI_CMD_UNSUPPORTED,
+ .pdev_set_antenna_switch_table_cmdid = WMI_CMD_UNSUPPORTED,
+ .pdev_set_ctl_table_cmdid = WMI_CMD_UNSUPPORTED,
+ .pdev_set_mimogain_table_cmdid = WMI_CMD_UNSUPPORTED,
+ .pdev_ratepwr_table_cmdid = WMI_CMD_UNSUPPORTED,
+ .pdev_ratepwr_chainmsk_table_cmdid = WMI_CMD_UNSUPPORTED,
+ .pdev_fips_cmdid = WMI_CMD_UNSUPPORTED,
+ .tt_set_conf_cmdid = WMI_CMD_UNSUPPORTED,
+ .fwtest_cmdid = WMI_CMD_UNSUPPORTED,
+ .vdev_atf_request_cmdid = WMI_CMD_UNSUPPORTED,
+ .peer_atf_request_cmdid = WMI_CMD_UNSUPPORTED,
+ .pdev_get_ani_cck_config_cmdid = WMI_CMD_UNSUPPORTED,
+ .pdev_get_ani_ofdm_config_cmdid = WMI_CMD_UNSUPPORTED,
+ .pdev_reserve_ast_entry_cmdid = WMI_CMD_UNSUPPORTED,
+ .pdev_get_nfcal_power_cmdid = WMI_CMD_UNSUPPORTED,
+ .pdev_get_tpc_cmdid = WMI_CMD_UNSUPPORTED,
+ .pdev_get_ast_info_cmdid = WMI_CMD_UNSUPPORTED,
+ .vdev_set_dscp_tid_map_cmdid = WMI_CMD_UNSUPPORTED,
+ .pdev_get_info_cmdid = WMI_CMD_UNSUPPORTED,
+ .vdev_get_info_cmdid = WMI_CMD_UNSUPPORTED,
+ .vdev_filter_neighbor_rx_packets_cmdid = WMI_CMD_UNSUPPORTED,
+ .mu_cal_start_cmdid = WMI_CMD_UNSUPPORTED,
+ .set_cca_params_cmdid = WMI_CMD_UNSUPPORTED,
+ .pdev_bss_chan_info_request_cmdid = WMI_CMD_UNSUPPORTED,
+ .pdev_get_tpc_table_cmdid = WMI_CMD_UNSUPPORTED,
+ .radar_found_cmdid = WMI_CMD_UNSUPPORTED,
+};
+
+/* 10.X WMI command map */
+static struct wmi_cmd_map wmi_10x_cmd_map = {
+ .init_cmdid = WMI_10X_INIT_CMDID,
+ .start_scan_cmdid = WMI_10X_START_SCAN_CMDID,
+ .stop_scan_cmdid = WMI_10X_STOP_SCAN_CMDID,
+ .scan_chan_list_cmdid = WMI_10X_SCAN_CHAN_LIST_CMDID,
+ .scan_sch_prio_tbl_cmdid = WMI_CMD_UNSUPPORTED,
+ .scan_prob_req_oui_cmdid = WMI_CMD_UNSUPPORTED,
+ .pdev_set_regdomain_cmdid = WMI_10X_PDEV_SET_REGDOMAIN_CMDID,
+ .pdev_set_channel_cmdid = WMI_10X_PDEV_SET_CHANNEL_CMDID,
+ .pdev_set_param_cmdid = WMI_10X_PDEV_SET_PARAM_CMDID,
+ .pdev_pktlog_enable_cmdid = WMI_10X_PDEV_PKTLOG_ENABLE_CMDID,
+ .pdev_pktlog_disable_cmdid = WMI_10X_PDEV_PKTLOG_DISABLE_CMDID,
+ .pdev_set_wmm_params_cmdid = WMI_10X_PDEV_SET_WMM_PARAMS_CMDID,
+ .pdev_set_ht_cap_ie_cmdid = WMI_10X_PDEV_SET_HT_CAP_IE_CMDID,
+ .pdev_set_vht_cap_ie_cmdid = WMI_10X_PDEV_SET_VHT_CAP_IE_CMDID,
+ .pdev_set_dscp_tid_map_cmdid = WMI_10X_PDEV_SET_DSCP_TID_MAP_CMDID,
+ .pdev_set_quiet_mode_cmdid = WMI_10X_PDEV_SET_QUIET_MODE_CMDID,
+ .pdev_green_ap_ps_enable_cmdid = WMI_10X_PDEV_GREEN_AP_PS_ENABLE_CMDID,
+ .pdev_get_tpc_config_cmdid = WMI_10X_PDEV_GET_TPC_CONFIG_CMDID,
+ .pdev_set_base_macaddr_cmdid = WMI_10X_PDEV_SET_BASE_MACADDR_CMDID,
+ .vdev_create_cmdid = WMI_10X_VDEV_CREATE_CMDID,
+ .vdev_delete_cmdid = WMI_10X_VDEV_DELETE_CMDID,
+ .vdev_start_request_cmdid = WMI_10X_VDEV_START_REQUEST_CMDID,
+ .vdev_restart_request_cmdid = WMI_10X_VDEV_RESTART_REQUEST_CMDID,
+ .vdev_up_cmdid = WMI_10X_VDEV_UP_CMDID,
+ .vdev_stop_cmdid = WMI_10X_VDEV_STOP_CMDID,
+ .vdev_down_cmdid = WMI_10X_VDEV_DOWN_CMDID,
+ .vdev_set_param_cmdid = WMI_10X_VDEV_SET_PARAM_CMDID,
+ .vdev_install_key_cmdid = WMI_10X_VDEV_INSTALL_KEY_CMDID,
+ .peer_create_cmdid = WMI_10X_PEER_CREATE_CMDID,
+ .peer_delete_cmdid = WMI_10X_PEER_DELETE_CMDID,
+ .peer_flush_tids_cmdid = WMI_10X_PEER_FLUSH_TIDS_CMDID,
+ .peer_set_param_cmdid = WMI_10X_PEER_SET_PARAM_CMDID,
+ .peer_assoc_cmdid = WMI_10X_PEER_ASSOC_CMDID,
+ .peer_add_wds_entry_cmdid = WMI_10X_PEER_ADD_WDS_ENTRY_CMDID,
+ .peer_remove_wds_entry_cmdid = WMI_10X_PEER_REMOVE_WDS_ENTRY_CMDID,
+ .peer_mcast_group_cmdid = WMI_10X_PEER_MCAST_GROUP_CMDID,
+ .bcn_tx_cmdid = WMI_10X_BCN_TX_CMDID,
+ .pdev_send_bcn_cmdid = WMI_10X_PDEV_SEND_BCN_CMDID,
+ .bcn_tmpl_cmdid = WMI_CMD_UNSUPPORTED,
+ .bcn_filter_rx_cmdid = WMI_10X_BCN_FILTER_RX_CMDID,
+ .prb_req_filter_rx_cmdid = WMI_10X_PRB_REQ_FILTER_RX_CMDID,
+ .mgmt_tx_cmdid = WMI_10X_MGMT_TX_CMDID,
+ .prb_tmpl_cmdid = WMI_CMD_UNSUPPORTED,
+ .addba_clear_resp_cmdid = WMI_10X_ADDBA_CLEAR_RESP_CMDID,
+ .addba_send_cmdid = WMI_10X_ADDBA_SEND_CMDID,
+ .addba_status_cmdid = WMI_10X_ADDBA_STATUS_CMDID,
+ .delba_send_cmdid = WMI_10X_DELBA_SEND_CMDID,
+ .addba_set_resp_cmdid = WMI_10X_ADDBA_SET_RESP_CMDID,
+ .send_singleamsdu_cmdid = WMI_10X_SEND_SINGLEAMSDU_CMDID,
+ .sta_powersave_mode_cmdid = WMI_10X_STA_POWERSAVE_MODE_CMDID,
+ .sta_powersave_param_cmdid = WMI_10X_STA_POWERSAVE_PARAM_CMDID,
+ .sta_mimo_ps_mode_cmdid = WMI_10X_STA_MIMO_PS_MODE_CMDID,
+ .pdev_dfs_enable_cmdid = WMI_10X_PDEV_DFS_ENABLE_CMDID,
+ .pdev_dfs_disable_cmdid = WMI_10X_PDEV_DFS_DISABLE_CMDID,
+ .roam_scan_mode = WMI_10X_ROAM_SCAN_MODE,
+ .roam_scan_rssi_threshold = WMI_10X_ROAM_SCAN_RSSI_THRESHOLD,
+ .roam_scan_period = WMI_10X_ROAM_SCAN_PERIOD,
+ .roam_scan_rssi_change_threshold =
+ WMI_10X_ROAM_SCAN_RSSI_CHANGE_THRESHOLD,
+ .roam_ap_profile = WMI_10X_ROAM_AP_PROFILE,
+ .ofl_scan_add_ap_profile = WMI_10X_OFL_SCAN_ADD_AP_PROFILE,
+ .ofl_scan_remove_ap_profile = WMI_10X_OFL_SCAN_REMOVE_AP_PROFILE,
+ .ofl_scan_period = WMI_10X_OFL_SCAN_PERIOD,
+ .p2p_dev_set_device_info = WMI_10X_P2P_DEV_SET_DEVICE_INFO,
+ .p2p_dev_set_discoverability = WMI_10X_P2P_DEV_SET_DISCOVERABILITY,
+ .p2p_go_set_beacon_ie = WMI_10X_P2P_GO_SET_BEACON_IE,
+ .p2p_go_set_probe_resp_ie = WMI_10X_P2P_GO_SET_PROBE_RESP_IE,
+ .p2p_set_vendor_ie_data_cmdid = WMI_CMD_UNSUPPORTED,
+ .ap_ps_peer_param_cmdid = WMI_10X_AP_PS_PEER_PARAM_CMDID,
+ .ap_ps_peer_uapsd_coex_cmdid = WMI_CMD_UNSUPPORTED,
+ .peer_rate_retry_sched_cmdid = WMI_10X_PEER_RATE_RETRY_SCHED_CMDID,
+ .wlan_profile_trigger_cmdid = WMI_10X_WLAN_PROFILE_TRIGGER_CMDID,
+ .wlan_profile_set_hist_intvl_cmdid =
+ WMI_10X_WLAN_PROFILE_SET_HIST_INTVL_CMDID,
+ .wlan_profile_get_profile_data_cmdid =
+ WMI_10X_WLAN_PROFILE_GET_PROFILE_DATA_CMDID,
+ .wlan_profile_enable_profile_id_cmdid =
+ WMI_10X_WLAN_PROFILE_ENABLE_PROFILE_ID_CMDID,
+ .wlan_profile_list_profile_id_cmdid =
+ WMI_10X_WLAN_PROFILE_LIST_PROFILE_ID_CMDID,
+ .pdev_suspend_cmdid = WMI_10X_PDEV_SUSPEND_CMDID,
+ .pdev_resume_cmdid = WMI_10X_PDEV_RESUME_CMDID,
+ .add_bcn_filter_cmdid = WMI_10X_ADD_BCN_FILTER_CMDID,
+ .rmv_bcn_filter_cmdid = WMI_10X_RMV_BCN_FILTER_CMDID,
+ .wow_add_wake_pattern_cmdid = WMI_10X_WOW_ADD_WAKE_PATTERN_CMDID,
+ .wow_del_wake_pattern_cmdid = WMI_10X_WOW_DEL_WAKE_PATTERN_CMDID,
+ .wow_enable_disable_wake_event_cmdid =
+ WMI_10X_WOW_ENABLE_DISABLE_WAKE_EVENT_CMDID,
+ .wow_enable_cmdid = WMI_10X_WOW_ENABLE_CMDID,
+ .wow_hostwakeup_from_sleep_cmdid =
+ WMI_10X_WOW_HOSTWAKEUP_FROM_SLEEP_CMDID,
+ .rtt_measreq_cmdid = WMI_10X_RTT_MEASREQ_CMDID,
+ .rtt_tsf_cmdid = WMI_10X_RTT_TSF_CMDID,
+ .vdev_spectral_scan_configure_cmdid =
+ WMI_10X_VDEV_SPECTRAL_SCAN_CONFIGURE_CMDID,
+ .vdev_spectral_scan_enable_cmdid =
+ WMI_10X_VDEV_SPECTRAL_SCAN_ENABLE_CMDID,
+ .request_stats_cmdid = WMI_10X_REQUEST_STATS_CMDID,
+ .set_arp_ns_offload_cmdid = WMI_CMD_UNSUPPORTED,
+ .network_list_offload_config_cmdid = WMI_CMD_UNSUPPORTED,
+ .gtk_offload_cmdid = WMI_CMD_UNSUPPORTED,
+ .csa_offload_enable_cmdid = WMI_CMD_UNSUPPORTED,
+ .csa_offload_chanswitch_cmdid = WMI_CMD_UNSUPPORTED,
+ .chatter_set_mode_cmdid = WMI_CMD_UNSUPPORTED,
+ .peer_tid_addba_cmdid = WMI_CMD_UNSUPPORTED,
+ .peer_tid_delba_cmdid = WMI_CMD_UNSUPPORTED,
+ .sta_dtim_ps_method_cmdid = WMI_CMD_UNSUPPORTED,
+ .sta_uapsd_auto_trig_cmdid = WMI_CMD_UNSUPPORTED,
+ .sta_keepalive_cmd = WMI_CMD_UNSUPPORTED,
+ .echo_cmdid = WMI_10X_ECHO_CMDID,
+ .pdev_utf_cmdid = WMI_10X_PDEV_UTF_CMDID,
+ .dbglog_cfg_cmdid = WMI_10X_DBGLOG_CFG_CMDID,
+ .pdev_qvit_cmdid = WMI_10X_PDEV_QVIT_CMDID,
+ .pdev_ftm_intg_cmdid = WMI_CMD_UNSUPPORTED,
+ .vdev_set_keepalive_cmdid = WMI_CMD_UNSUPPORTED,
+ .vdev_get_keepalive_cmdid = WMI_CMD_UNSUPPORTED,
+ .force_fw_hang_cmdid = WMI_CMD_UNSUPPORTED,
+ .gpio_config_cmdid = WMI_10X_GPIO_CONFIG_CMDID,
+ .gpio_output_cmdid = WMI_10X_GPIO_OUTPUT_CMDID,
+ .pdev_get_temperature_cmdid = WMI_CMD_UNSUPPORTED,
+ .pdev_enable_adaptive_cca_cmdid = WMI_CMD_UNSUPPORTED,
+ .scan_update_request_cmdid = WMI_CMD_UNSUPPORTED,
+ .vdev_standby_response_cmdid = WMI_CMD_UNSUPPORTED,
+ .vdev_resume_response_cmdid = WMI_CMD_UNSUPPORTED,
+ .wlan_peer_caching_add_peer_cmdid = WMI_CMD_UNSUPPORTED,
+ .wlan_peer_caching_evict_peer_cmdid = WMI_CMD_UNSUPPORTED,
+ .wlan_peer_caching_restore_peer_cmdid = WMI_CMD_UNSUPPORTED,
+ .wlan_peer_caching_print_all_peers_info_cmdid = WMI_CMD_UNSUPPORTED,
+ .peer_update_wds_entry_cmdid = WMI_CMD_UNSUPPORTED,
+ .peer_add_proxy_sta_entry_cmdid = WMI_CMD_UNSUPPORTED,
+ .rtt_keepalive_cmdid = WMI_CMD_UNSUPPORTED,
+ .oem_req_cmdid = WMI_CMD_UNSUPPORTED,
+ .nan_cmdid = WMI_CMD_UNSUPPORTED,
+ .vdev_ratemask_cmdid = WMI_CMD_UNSUPPORTED,
+ .qboost_cfg_cmdid = WMI_CMD_UNSUPPORTED,
+ .pdev_smart_ant_enable_cmdid = WMI_CMD_UNSUPPORTED,
+ .pdev_smart_ant_set_rx_antenna_cmdid = WMI_CMD_UNSUPPORTED,
+ .peer_smart_ant_set_tx_antenna_cmdid = WMI_CMD_UNSUPPORTED,
+ .peer_smart_ant_set_train_info_cmdid = WMI_CMD_UNSUPPORTED,
+ .peer_smart_ant_set_node_config_ops_cmdid = WMI_CMD_UNSUPPORTED,
+ .pdev_set_antenna_switch_table_cmdid = WMI_CMD_UNSUPPORTED,
+ .pdev_set_ctl_table_cmdid = WMI_CMD_UNSUPPORTED,
+ .pdev_set_mimogain_table_cmdid = WMI_CMD_UNSUPPORTED,
+ .pdev_ratepwr_table_cmdid = WMI_CMD_UNSUPPORTED,
+ .pdev_ratepwr_chainmsk_table_cmdid = WMI_CMD_UNSUPPORTED,
+ .pdev_fips_cmdid = WMI_CMD_UNSUPPORTED,
+ .tt_set_conf_cmdid = WMI_CMD_UNSUPPORTED,
+ .fwtest_cmdid = WMI_CMD_UNSUPPORTED,
+ .vdev_atf_request_cmdid = WMI_CMD_UNSUPPORTED,
+ .peer_atf_request_cmdid = WMI_CMD_UNSUPPORTED,
+ .pdev_get_ani_cck_config_cmdid = WMI_CMD_UNSUPPORTED,
+ .pdev_get_ani_ofdm_config_cmdid = WMI_CMD_UNSUPPORTED,
+ .pdev_reserve_ast_entry_cmdid = WMI_CMD_UNSUPPORTED,
+ .pdev_get_nfcal_power_cmdid = WMI_CMD_UNSUPPORTED,
+ .pdev_get_tpc_cmdid = WMI_CMD_UNSUPPORTED,
+ .pdev_get_ast_info_cmdid = WMI_CMD_UNSUPPORTED,
+ .vdev_set_dscp_tid_map_cmdid = WMI_CMD_UNSUPPORTED,
+ .pdev_get_info_cmdid = WMI_CMD_UNSUPPORTED,
+ .vdev_get_info_cmdid = WMI_CMD_UNSUPPORTED,
+ .vdev_filter_neighbor_rx_packets_cmdid = WMI_CMD_UNSUPPORTED,
+ .mu_cal_start_cmdid = WMI_CMD_UNSUPPORTED,
+ .set_cca_params_cmdid = WMI_CMD_UNSUPPORTED,
+ .pdev_bss_chan_info_request_cmdid = WMI_CMD_UNSUPPORTED,
+ .pdev_get_tpc_table_cmdid = WMI_CMD_UNSUPPORTED,
+ .radar_found_cmdid = WMI_CMD_UNSUPPORTED,
+};
+
+/* 10.2.4 WMI command map */
+static struct wmi_cmd_map wmi_10_2_4_cmd_map = {
+ .init_cmdid = WMI_10_2_INIT_CMDID,
+ .start_scan_cmdid = WMI_10_2_START_SCAN_CMDID,
+ .stop_scan_cmdid = WMI_10_2_STOP_SCAN_CMDID,
+ .scan_chan_list_cmdid = WMI_10_2_SCAN_CHAN_LIST_CMDID,
+ .scan_sch_prio_tbl_cmdid = WMI_CMD_UNSUPPORTED,
+ .scan_prob_req_oui_cmdid = WMI_CMD_UNSUPPORTED,
+ .pdev_set_regdomain_cmdid = WMI_10_2_PDEV_SET_REGDOMAIN_CMDID,
+ .pdev_set_channel_cmdid = WMI_10_2_PDEV_SET_CHANNEL_CMDID,
+ .pdev_set_param_cmdid = WMI_10_2_PDEV_SET_PARAM_CMDID,
+ .pdev_pktlog_enable_cmdid = WMI_10_2_PDEV_PKTLOG_ENABLE_CMDID,
+ .pdev_pktlog_disable_cmdid = WMI_10_2_PDEV_PKTLOG_DISABLE_CMDID,
+ .pdev_set_wmm_params_cmdid = WMI_10_2_PDEV_SET_WMM_PARAMS_CMDID,
+ .pdev_set_ht_cap_ie_cmdid = WMI_10_2_PDEV_SET_HT_CAP_IE_CMDID,
+ .pdev_set_vht_cap_ie_cmdid = WMI_10_2_PDEV_SET_VHT_CAP_IE_CMDID,
+ .pdev_set_quiet_mode_cmdid = WMI_10_2_PDEV_SET_QUIET_MODE_CMDID,
+ .pdev_green_ap_ps_enable_cmdid = WMI_10_2_PDEV_GREEN_AP_PS_ENABLE_CMDID,
+ .pdev_get_tpc_config_cmdid = WMI_10_2_PDEV_GET_TPC_CONFIG_CMDID,
+ .pdev_set_base_macaddr_cmdid = WMI_10_2_PDEV_SET_BASE_MACADDR_CMDID,
+ .vdev_create_cmdid = WMI_10_2_VDEV_CREATE_CMDID,
+ .vdev_delete_cmdid = WMI_10_2_VDEV_DELETE_CMDID,
+ .vdev_start_request_cmdid = WMI_10_2_VDEV_START_REQUEST_CMDID,
+ .vdev_restart_request_cmdid = WMI_10_2_VDEV_RESTART_REQUEST_CMDID,
+ .vdev_up_cmdid = WMI_10_2_VDEV_UP_CMDID,
+ .vdev_stop_cmdid = WMI_10_2_VDEV_STOP_CMDID,
+ .vdev_down_cmdid = WMI_10_2_VDEV_DOWN_CMDID,
+ .vdev_set_param_cmdid = WMI_10_2_VDEV_SET_PARAM_CMDID,
+ .vdev_install_key_cmdid = WMI_10_2_VDEV_INSTALL_KEY_CMDID,
+ .peer_create_cmdid = WMI_10_2_PEER_CREATE_CMDID,
+ .peer_delete_cmdid = WMI_10_2_PEER_DELETE_CMDID,
+ .peer_flush_tids_cmdid = WMI_10_2_PEER_FLUSH_TIDS_CMDID,
+ .peer_set_param_cmdid = WMI_10_2_PEER_SET_PARAM_CMDID,
+ .peer_assoc_cmdid = WMI_10_2_PEER_ASSOC_CMDID,
+ .peer_add_wds_entry_cmdid = WMI_10_2_PEER_ADD_WDS_ENTRY_CMDID,
+ .peer_remove_wds_entry_cmdid = WMI_10_2_PEER_REMOVE_WDS_ENTRY_CMDID,
+ .peer_mcast_group_cmdid = WMI_10_2_PEER_MCAST_GROUP_CMDID,
+ .bcn_tx_cmdid = WMI_10_2_BCN_TX_CMDID,
+ .pdev_send_bcn_cmdid = WMI_10_2_PDEV_SEND_BCN_CMDID,
+ .bcn_tmpl_cmdid = WMI_CMD_UNSUPPORTED,
+ .bcn_filter_rx_cmdid = WMI_10_2_BCN_FILTER_RX_CMDID,
+ .prb_req_filter_rx_cmdid = WMI_10_2_PRB_REQ_FILTER_RX_CMDID,
+ .mgmt_tx_cmdid = WMI_10_2_MGMT_TX_CMDID,
+ .prb_tmpl_cmdid = WMI_CMD_UNSUPPORTED,
+ .addba_clear_resp_cmdid = WMI_10_2_ADDBA_CLEAR_RESP_CMDID,
+ .addba_send_cmdid = WMI_10_2_ADDBA_SEND_CMDID,
+ .addba_status_cmdid = WMI_10_2_ADDBA_STATUS_CMDID,
+ .delba_send_cmdid = WMI_10_2_DELBA_SEND_CMDID,
+ .addba_set_resp_cmdid = WMI_10_2_ADDBA_SET_RESP_CMDID,
+ .send_singleamsdu_cmdid = WMI_10_2_SEND_SINGLEAMSDU_CMDID,
+ .sta_powersave_mode_cmdid = WMI_10_2_STA_POWERSAVE_MODE_CMDID,
+ .sta_powersave_param_cmdid = WMI_10_2_STA_POWERSAVE_PARAM_CMDID,
+ .sta_mimo_ps_mode_cmdid = WMI_10_2_STA_MIMO_PS_MODE_CMDID,
+ .pdev_dfs_enable_cmdid = WMI_10_2_PDEV_DFS_ENABLE_CMDID,
+ .pdev_dfs_disable_cmdid = WMI_10_2_PDEV_DFS_DISABLE_CMDID,
+ .roam_scan_mode = WMI_10_2_ROAM_SCAN_MODE,
+ .roam_scan_rssi_threshold = WMI_10_2_ROAM_SCAN_RSSI_THRESHOLD,
+ .roam_scan_period = WMI_10_2_ROAM_SCAN_PERIOD,
+ .roam_scan_rssi_change_threshold =
+ WMI_10_2_ROAM_SCAN_RSSI_CHANGE_THRESHOLD,
+ .roam_ap_profile = WMI_10_2_ROAM_AP_PROFILE,
+ .ofl_scan_add_ap_profile = WMI_10_2_OFL_SCAN_ADD_AP_PROFILE,
+ .ofl_scan_remove_ap_profile = WMI_10_2_OFL_SCAN_REMOVE_AP_PROFILE,
+ .ofl_scan_period = WMI_10_2_OFL_SCAN_PERIOD,
+ .p2p_dev_set_device_info = WMI_10_2_P2P_DEV_SET_DEVICE_INFO,
+ .p2p_dev_set_discoverability = WMI_10_2_P2P_DEV_SET_DISCOVERABILITY,
+ .p2p_go_set_beacon_ie = WMI_10_2_P2P_GO_SET_BEACON_IE,
+ .p2p_go_set_probe_resp_ie = WMI_10_2_P2P_GO_SET_PROBE_RESP_IE,
+ .p2p_set_vendor_ie_data_cmdid = WMI_CMD_UNSUPPORTED,
+ .ap_ps_peer_param_cmdid = WMI_10_2_AP_PS_PEER_PARAM_CMDID,
+ .ap_ps_peer_uapsd_coex_cmdid = WMI_CMD_UNSUPPORTED,
+ .peer_rate_retry_sched_cmdid = WMI_10_2_PEER_RATE_RETRY_SCHED_CMDID,
+ .wlan_profile_trigger_cmdid = WMI_10_2_WLAN_PROFILE_TRIGGER_CMDID,
+ .wlan_profile_set_hist_intvl_cmdid =
+ WMI_10_2_WLAN_PROFILE_SET_HIST_INTVL_CMDID,
+ .wlan_profile_get_profile_data_cmdid =
+ WMI_10_2_WLAN_PROFILE_GET_PROFILE_DATA_CMDID,
+ .wlan_profile_enable_profile_id_cmdid =
+ WMI_10_2_WLAN_PROFILE_ENABLE_PROFILE_ID_CMDID,
+ .wlan_profile_list_profile_id_cmdid =
+ WMI_10_2_WLAN_PROFILE_LIST_PROFILE_ID_CMDID,
+ .pdev_suspend_cmdid = WMI_10_2_PDEV_SUSPEND_CMDID,
+ .pdev_resume_cmdid = WMI_10_2_PDEV_RESUME_CMDID,
+ .add_bcn_filter_cmdid = WMI_10_2_ADD_BCN_FILTER_CMDID,
+ .rmv_bcn_filter_cmdid = WMI_10_2_RMV_BCN_FILTER_CMDID,
+ .wow_add_wake_pattern_cmdid = WMI_10_2_WOW_ADD_WAKE_PATTERN_CMDID,
+ .wow_del_wake_pattern_cmdid = WMI_10_2_WOW_DEL_WAKE_PATTERN_CMDID,
+ .wow_enable_disable_wake_event_cmdid =
+ WMI_10_2_WOW_ENABLE_DISABLE_WAKE_EVENT_CMDID,
+ .wow_enable_cmdid = WMI_10_2_WOW_ENABLE_CMDID,
+ .wow_hostwakeup_from_sleep_cmdid =
+ WMI_10_2_WOW_HOSTWAKEUP_FROM_SLEEP_CMDID,
+ .rtt_measreq_cmdid = WMI_10_2_RTT_MEASREQ_CMDID,
+ .rtt_tsf_cmdid = WMI_10_2_RTT_TSF_CMDID,
+ .vdev_spectral_scan_configure_cmdid =
+ WMI_10_2_VDEV_SPECTRAL_SCAN_CONFIGURE_CMDID,
+ .vdev_spectral_scan_enable_cmdid =
+ WMI_10_2_VDEV_SPECTRAL_SCAN_ENABLE_CMDID,
+ .request_stats_cmdid = WMI_10_2_REQUEST_STATS_CMDID,
+ .set_arp_ns_offload_cmdid = WMI_CMD_UNSUPPORTED,
+ .network_list_offload_config_cmdid = WMI_CMD_UNSUPPORTED,
+ .gtk_offload_cmdid = WMI_CMD_UNSUPPORTED,
+ .csa_offload_enable_cmdid = WMI_CMD_UNSUPPORTED,
+ .csa_offload_chanswitch_cmdid = WMI_CMD_UNSUPPORTED,
+ .chatter_set_mode_cmdid = WMI_CMD_UNSUPPORTED,
+ .peer_tid_addba_cmdid = WMI_CMD_UNSUPPORTED,
+ .peer_tid_delba_cmdid = WMI_CMD_UNSUPPORTED,
+ .sta_dtim_ps_method_cmdid = WMI_CMD_UNSUPPORTED,
+ .sta_uapsd_auto_trig_cmdid = WMI_CMD_UNSUPPORTED,
+ .sta_keepalive_cmd = WMI_CMD_UNSUPPORTED,
+ .echo_cmdid = WMI_10_2_ECHO_CMDID,
+ .pdev_utf_cmdid = WMI_10_2_PDEV_UTF_CMDID,
+ .dbglog_cfg_cmdid = WMI_10_2_DBGLOG_CFG_CMDID,
+ .pdev_qvit_cmdid = WMI_10_2_PDEV_QVIT_CMDID,
+ .pdev_ftm_intg_cmdid = WMI_CMD_UNSUPPORTED,
+ .vdev_set_keepalive_cmdid = WMI_CMD_UNSUPPORTED,
+ .vdev_get_keepalive_cmdid = WMI_CMD_UNSUPPORTED,
+ .force_fw_hang_cmdid = WMI_CMD_UNSUPPORTED,
+ .gpio_config_cmdid = WMI_10_2_GPIO_CONFIG_CMDID,
+ .gpio_output_cmdid = WMI_10_2_GPIO_OUTPUT_CMDID,
+ .pdev_get_temperature_cmdid = WMI_10_2_PDEV_GET_TEMPERATURE_CMDID,
+ .pdev_enable_adaptive_cca_cmdid = WMI_10_2_SET_CCA_PARAMS,
+ .scan_update_request_cmdid = WMI_CMD_UNSUPPORTED,
+ .vdev_standby_response_cmdid = WMI_CMD_UNSUPPORTED,
+ .vdev_resume_response_cmdid = WMI_CMD_UNSUPPORTED,
+ .wlan_peer_caching_add_peer_cmdid = WMI_CMD_UNSUPPORTED,
+ .wlan_peer_caching_evict_peer_cmdid = WMI_CMD_UNSUPPORTED,
+ .wlan_peer_caching_restore_peer_cmdid = WMI_CMD_UNSUPPORTED,
+ .wlan_peer_caching_print_all_peers_info_cmdid = WMI_CMD_UNSUPPORTED,
+ .peer_update_wds_entry_cmdid = WMI_CMD_UNSUPPORTED,
+ .peer_add_proxy_sta_entry_cmdid = WMI_CMD_UNSUPPORTED,
+ .rtt_keepalive_cmdid = WMI_CMD_UNSUPPORTED,
+ .oem_req_cmdid = WMI_CMD_UNSUPPORTED,
+ .nan_cmdid = WMI_CMD_UNSUPPORTED,
+ .vdev_ratemask_cmdid = WMI_CMD_UNSUPPORTED,
+ .qboost_cfg_cmdid = WMI_CMD_UNSUPPORTED,
+ .pdev_smart_ant_enable_cmdid = WMI_CMD_UNSUPPORTED,
+ .pdev_smart_ant_set_rx_antenna_cmdid = WMI_CMD_UNSUPPORTED,
+ .peer_smart_ant_set_tx_antenna_cmdid = WMI_CMD_UNSUPPORTED,
+ .peer_smart_ant_set_train_info_cmdid = WMI_CMD_UNSUPPORTED,
+ .peer_smart_ant_set_node_config_ops_cmdid = WMI_CMD_UNSUPPORTED,
+ .pdev_set_antenna_switch_table_cmdid = WMI_CMD_UNSUPPORTED,
+ .pdev_set_ctl_table_cmdid = WMI_CMD_UNSUPPORTED,
+ .pdev_set_mimogain_table_cmdid = WMI_CMD_UNSUPPORTED,
+ .pdev_ratepwr_table_cmdid = WMI_CMD_UNSUPPORTED,
+ .pdev_ratepwr_chainmsk_table_cmdid = WMI_CMD_UNSUPPORTED,
+ .pdev_fips_cmdid = WMI_CMD_UNSUPPORTED,
+ .tt_set_conf_cmdid = WMI_CMD_UNSUPPORTED,
+ .fwtest_cmdid = WMI_CMD_UNSUPPORTED,
+ .vdev_atf_request_cmdid = WMI_CMD_UNSUPPORTED,
+ .peer_atf_request_cmdid = WMI_CMD_UNSUPPORTED,
+ .pdev_get_ani_cck_config_cmdid = WMI_CMD_UNSUPPORTED,
+ .pdev_get_ani_ofdm_config_cmdid = WMI_CMD_UNSUPPORTED,
+ .pdev_reserve_ast_entry_cmdid = WMI_CMD_UNSUPPORTED,
+ .pdev_get_nfcal_power_cmdid = WMI_CMD_UNSUPPORTED,
+ .pdev_get_tpc_cmdid = WMI_CMD_UNSUPPORTED,
+ .pdev_get_ast_info_cmdid = WMI_CMD_UNSUPPORTED,
+ .vdev_set_dscp_tid_map_cmdid = WMI_CMD_UNSUPPORTED,
+ .pdev_get_info_cmdid = WMI_CMD_UNSUPPORTED,
+ .vdev_get_info_cmdid = WMI_CMD_UNSUPPORTED,
+ .vdev_filter_neighbor_rx_packets_cmdid = WMI_CMD_UNSUPPORTED,
+ .mu_cal_start_cmdid = WMI_CMD_UNSUPPORTED,
+ .set_cca_params_cmdid = WMI_CMD_UNSUPPORTED,
+ .pdev_bss_chan_info_request_cmdid =
+ WMI_10_2_PDEV_BSS_CHAN_INFO_REQUEST_CMDID,
+ .pdev_get_tpc_table_cmdid = WMI_CMD_UNSUPPORTED,
+ .radar_found_cmdid = WMI_CMD_UNSUPPORTED,
+};
+
+/* 10.4 WMI command map */
+static struct wmi_cmd_map wmi_10_4_cmd_map = {
+ .init_cmdid = WMI_10_4_INIT_CMDID,
+ .start_scan_cmdid = WMI_10_4_START_SCAN_CMDID,
+ .stop_scan_cmdid = WMI_10_4_STOP_SCAN_CMDID,
+ .scan_chan_list_cmdid = WMI_10_4_SCAN_CHAN_LIST_CMDID,
+ .scan_sch_prio_tbl_cmdid = WMI_10_4_SCAN_SCH_PRIO_TBL_CMDID,
+ .scan_prob_req_oui_cmdid = WMI_CMD_UNSUPPORTED,
+ .pdev_set_regdomain_cmdid = WMI_10_4_PDEV_SET_REGDOMAIN_CMDID,
+ .pdev_set_channel_cmdid = WMI_10_4_PDEV_SET_CHANNEL_CMDID,
+ .pdev_set_param_cmdid = WMI_10_4_PDEV_SET_PARAM_CMDID,
+ .pdev_pktlog_enable_cmdid = WMI_10_4_PDEV_PKTLOG_ENABLE_CMDID,
+ .pdev_pktlog_disable_cmdid = WMI_10_4_PDEV_PKTLOG_DISABLE_CMDID,
+ .pdev_set_wmm_params_cmdid = WMI_10_4_PDEV_SET_WMM_PARAMS_CMDID,
+ .pdev_set_ht_cap_ie_cmdid = WMI_10_4_PDEV_SET_HT_CAP_IE_CMDID,
+ .pdev_set_vht_cap_ie_cmdid = WMI_10_4_PDEV_SET_VHT_CAP_IE_CMDID,
+ .pdev_set_dscp_tid_map_cmdid = WMI_10_4_PDEV_SET_DSCP_TID_MAP_CMDID,
+ .pdev_set_quiet_mode_cmdid = WMI_10_4_PDEV_SET_QUIET_MODE_CMDID,
+ .pdev_green_ap_ps_enable_cmdid = WMI_10_4_PDEV_GREEN_AP_PS_ENABLE_CMDID,
+ .pdev_get_tpc_config_cmdid = WMI_10_4_PDEV_GET_TPC_CONFIG_CMDID,
+ .pdev_set_base_macaddr_cmdid = WMI_10_4_PDEV_SET_BASE_MACADDR_CMDID,
+ .vdev_create_cmdid = WMI_10_4_VDEV_CREATE_CMDID,
+ .vdev_delete_cmdid = WMI_10_4_VDEV_DELETE_CMDID,
+ .vdev_start_request_cmdid = WMI_10_4_VDEV_START_REQUEST_CMDID,
+ .vdev_restart_request_cmdid = WMI_10_4_VDEV_RESTART_REQUEST_CMDID,
+ .vdev_up_cmdid = WMI_10_4_VDEV_UP_CMDID,
+ .vdev_stop_cmdid = WMI_10_4_VDEV_STOP_CMDID,
+ .vdev_down_cmdid = WMI_10_4_VDEV_DOWN_CMDID,
+ .vdev_set_param_cmdid = WMI_10_4_VDEV_SET_PARAM_CMDID,
+ .vdev_install_key_cmdid = WMI_10_4_VDEV_INSTALL_KEY_CMDID,
+ .peer_create_cmdid = WMI_10_4_PEER_CREATE_CMDID,
+ .peer_delete_cmdid = WMI_10_4_PEER_DELETE_CMDID,
+ .peer_flush_tids_cmdid = WMI_10_4_PEER_FLUSH_TIDS_CMDID,
+ .peer_set_param_cmdid = WMI_10_4_PEER_SET_PARAM_CMDID,
+ .peer_assoc_cmdid = WMI_10_4_PEER_ASSOC_CMDID,
+ .peer_add_wds_entry_cmdid = WMI_10_4_PEER_ADD_WDS_ENTRY_CMDID,
+ .peer_remove_wds_entry_cmdid = WMI_10_4_PEER_REMOVE_WDS_ENTRY_CMDID,
+ .peer_mcast_group_cmdid = WMI_10_4_PEER_MCAST_GROUP_CMDID,
+ .bcn_tx_cmdid = WMI_10_4_BCN_TX_CMDID,
+ .pdev_send_bcn_cmdid = WMI_10_4_PDEV_SEND_BCN_CMDID,
+ .bcn_tmpl_cmdid = WMI_10_4_BCN_PRB_TMPL_CMDID,
+ .bcn_filter_rx_cmdid = WMI_10_4_BCN_FILTER_RX_CMDID,
+ .prb_req_filter_rx_cmdid = WMI_10_4_PRB_REQ_FILTER_RX_CMDID,
+ .mgmt_tx_cmdid = WMI_10_4_MGMT_TX_CMDID,
+ .prb_tmpl_cmdid = WMI_10_4_PRB_TMPL_CMDID,
+ .addba_clear_resp_cmdid = WMI_10_4_ADDBA_CLEAR_RESP_CMDID,
+ .addba_send_cmdid = WMI_10_4_ADDBA_SEND_CMDID,
+ .addba_status_cmdid = WMI_10_4_ADDBA_STATUS_CMDID,
+ .delba_send_cmdid = WMI_10_4_DELBA_SEND_CMDID,
+ .addba_set_resp_cmdid = WMI_10_4_ADDBA_SET_RESP_CMDID,
+ .send_singleamsdu_cmdid = WMI_10_4_SEND_SINGLEAMSDU_CMDID,
+ .sta_powersave_mode_cmdid = WMI_10_4_STA_POWERSAVE_MODE_CMDID,
+ .sta_powersave_param_cmdid = WMI_10_4_STA_POWERSAVE_PARAM_CMDID,
+ .sta_mimo_ps_mode_cmdid = WMI_10_4_STA_MIMO_PS_MODE_CMDID,
+ .pdev_dfs_enable_cmdid = WMI_10_4_PDEV_DFS_ENABLE_CMDID,
+ .pdev_dfs_disable_cmdid = WMI_10_4_PDEV_DFS_DISABLE_CMDID,
+ .roam_scan_mode = WMI_10_4_ROAM_SCAN_MODE,
+ .roam_scan_rssi_threshold = WMI_10_4_ROAM_SCAN_RSSI_THRESHOLD,
+ .roam_scan_period = WMI_10_4_ROAM_SCAN_PERIOD,
+ .roam_scan_rssi_change_threshold =
+ WMI_10_4_ROAM_SCAN_RSSI_CHANGE_THRESHOLD,
+ .roam_ap_profile = WMI_10_4_ROAM_AP_PROFILE,
+ .ofl_scan_add_ap_profile = WMI_10_4_OFL_SCAN_ADD_AP_PROFILE,
+ .ofl_scan_remove_ap_profile = WMI_10_4_OFL_SCAN_REMOVE_AP_PROFILE,
+ .ofl_scan_period = WMI_10_4_OFL_SCAN_PERIOD,
+ .p2p_dev_set_device_info = WMI_10_4_P2P_DEV_SET_DEVICE_INFO,
+ .p2p_dev_set_discoverability = WMI_10_4_P2P_DEV_SET_DISCOVERABILITY,
+ .p2p_go_set_beacon_ie = WMI_10_4_P2P_GO_SET_BEACON_IE,
+ .p2p_go_set_probe_resp_ie = WMI_10_4_P2P_GO_SET_PROBE_RESP_IE,
+ .p2p_set_vendor_ie_data_cmdid = WMI_10_4_P2P_SET_VENDOR_IE_DATA_CMDID,
+ .ap_ps_peer_param_cmdid = WMI_10_4_AP_PS_PEER_PARAM_CMDID,
+ .ap_ps_peer_uapsd_coex_cmdid = WMI_10_4_AP_PS_PEER_UAPSD_COEX_CMDID,
+ .peer_rate_retry_sched_cmdid = WMI_10_4_PEER_RATE_RETRY_SCHED_CMDID,
+ .wlan_profile_trigger_cmdid = WMI_10_4_WLAN_PROFILE_TRIGGER_CMDID,
+ .wlan_profile_set_hist_intvl_cmdid =
+ WMI_10_4_WLAN_PROFILE_SET_HIST_INTVL_CMDID,
+ .wlan_profile_get_profile_data_cmdid =
+ WMI_10_4_WLAN_PROFILE_GET_PROFILE_DATA_CMDID,
+ .wlan_profile_enable_profile_id_cmdid =
+ WMI_10_4_WLAN_PROFILE_ENABLE_PROFILE_ID_CMDID,
+ .wlan_profile_list_profile_id_cmdid =
+ WMI_10_4_WLAN_PROFILE_LIST_PROFILE_ID_CMDID,
+ .pdev_suspend_cmdid = WMI_10_4_PDEV_SUSPEND_CMDID,
+ .pdev_resume_cmdid = WMI_10_4_PDEV_RESUME_CMDID,
+ .add_bcn_filter_cmdid = WMI_10_4_ADD_BCN_FILTER_CMDID,
+ .rmv_bcn_filter_cmdid = WMI_10_4_RMV_BCN_FILTER_CMDID,
+ .wow_add_wake_pattern_cmdid = WMI_10_4_WOW_ADD_WAKE_PATTERN_CMDID,
+ .wow_del_wake_pattern_cmdid = WMI_10_4_WOW_DEL_WAKE_PATTERN_CMDID,
+ .wow_enable_disable_wake_event_cmdid =
+ WMI_10_4_WOW_ENABLE_DISABLE_WAKE_EVENT_CMDID,
+ .wow_enable_cmdid = WMI_10_4_WOW_ENABLE_CMDID,
+ .wow_hostwakeup_from_sleep_cmdid =
+ WMI_10_4_WOW_HOSTWAKEUP_FROM_SLEEP_CMDID,
+ .rtt_measreq_cmdid = WMI_10_4_RTT_MEASREQ_CMDID,
+ .rtt_tsf_cmdid = WMI_10_4_RTT_TSF_CMDID,
+ .vdev_spectral_scan_configure_cmdid =
+ WMI_10_4_VDEV_SPECTRAL_SCAN_CONFIGURE_CMDID,
+ .vdev_spectral_scan_enable_cmdid =
+ WMI_10_4_VDEV_SPECTRAL_SCAN_ENABLE_CMDID,
+ .request_stats_cmdid = WMI_10_4_REQUEST_STATS_CMDID,
+ .set_arp_ns_offload_cmdid = WMI_CMD_UNSUPPORTED,
+ .network_list_offload_config_cmdid = WMI_CMD_UNSUPPORTED,
+ .gtk_offload_cmdid = WMI_10_4_GTK_OFFLOAD_CMDID,
+ .csa_offload_enable_cmdid = WMI_10_4_CSA_OFFLOAD_ENABLE_CMDID,
+ .csa_offload_chanswitch_cmdid = WMI_10_4_CSA_OFFLOAD_CHANSWITCH_CMDID,
+ .chatter_set_mode_cmdid = WMI_CMD_UNSUPPORTED,
+ .peer_tid_addba_cmdid = WMI_CMD_UNSUPPORTED,
+ .peer_tid_delba_cmdid = WMI_CMD_UNSUPPORTED,
+ .sta_dtim_ps_method_cmdid = WMI_CMD_UNSUPPORTED,
+ .sta_uapsd_auto_trig_cmdid = WMI_CMD_UNSUPPORTED,
+ .sta_keepalive_cmd = WMI_CMD_UNSUPPORTED,
+ .echo_cmdid = WMI_10_4_ECHO_CMDID,
+ .pdev_utf_cmdid = WMI_10_4_PDEV_UTF_CMDID,
+ .dbglog_cfg_cmdid = WMI_10_4_DBGLOG_CFG_CMDID,
+ .pdev_qvit_cmdid = WMI_10_4_PDEV_QVIT_CMDID,
+ .pdev_ftm_intg_cmdid = WMI_CMD_UNSUPPORTED,
+ .vdev_set_keepalive_cmdid = WMI_10_4_VDEV_SET_KEEPALIVE_CMDID,
+ .vdev_get_keepalive_cmdid = WMI_10_4_VDEV_GET_KEEPALIVE_CMDID,
+ .force_fw_hang_cmdid = WMI_10_4_FORCE_FW_HANG_CMDID,
+ .gpio_config_cmdid = WMI_10_4_GPIO_CONFIG_CMDID,
+ .gpio_output_cmdid = WMI_10_4_GPIO_OUTPUT_CMDID,
+ .pdev_get_temperature_cmdid = WMI_10_4_PDEV_GET_TEMPERATURE_CMDID,
+ .vdev_set_wmm_params_cmdid = WMI_CMD_UNSUPPORTED,
+ .adaptive_qcs_cmdid = WMI_CMD_UNSUPPORTED,
+ .scan_update_request_cmdid = WMI_10_4_SCAN_UPDATE_REQUEST_CMDID,
+ .vdev_standby_response_cmdid = WMI_10_4_VDEV_STANDBY_RESPONSE_CMDID,
+ .vdev_resume_response_cmdid = WMI_10_4_VDEV_RESUME_RESPONSE_CMDID,
+ .wlan_peer_caching_add_peer_cmdid =
+ WMI_10_4_WLAN_PEER_CACHING_ADD_PEER_CMDID,
+ .wlan_peer_caching_evict_peer_cmdid =
+ WMI_10_4_WLAN_PEER_CACHING_EVICT_PEER_CMDID,
+ .wlan_peer_caching_restore_peer_cmdid =
+ WMI_10_4_WLAN_PEER_CACHING_RESTORE_PEER_CMDID,
+ .wlan_peer_caching_print_all_peers_info_cmdid =
+ WMI_10_4_WLAN_PEER_CACHING_PRINT_ALL_PEERS_INFO_CMDID,
+ .peer_update_wds_entry_cmdid = WMI_10_4_PEER_UPDATE_WDS_ENTRY_CMDID,
+ .peer_add_proxy_sta_entry_cmdid =
+ WMI_10_4_PEER_ADD_PROXY_STA_ENTRY_CMDID,
+ .rtt_keepalive_cmdid = WMI_10_4_RTT_KEEPALIVE_CMDID,
+ .oem_req_cmdid = WMI_10_4_OEM_REQ_CMDID,
+ .nan_cmdid = WMI_10_4_NAN_CMDID,
+ .vdev_ratemask_cmdid = WMI_10_4_VDEV_RATEMASK_CMDID,
+ .qboost_cfg_cmdid = WMI_10_4_QBOOST_CFG_CMDID,
+ .pdev_smart_ant_enable_cmdid = WMI_10_4_PDEV_SMART_ANT_ENABLE_CMDID,
+ .pdev_smart_ant_set_rx_antenna_cmdid =
+ WMI_10_4_PDEV_SMART_ANT_SET_RX_ANTENNA_CMDID,
+ .peer_smart_ant_set_tx_antenna_cmdid =
+ WMI_10_4_PEER_SMART_ANT_SET_TX_ANTENNA_CMDID,
+ .peer_smart_ant_set_train_info_cmdid =
+ WMI_10_4_PEER_SMART_ANT_SET_TRAIN_INFO_CMDID,
+ .peer_smart_ant_set_node_config_ops_cmdid =
+ WMI_10_4_PEER_SMART_ANT_SET_NODE_CONFIG_OPS_CMDID,
+ .pdev_set_antenna_switch_table_cmdid =
+ WMI_10_4_PDEV_SET_ANTENNA_SWITCH_TABLE_CMDID,
+ .pdev_set_ctl_table_cmdid = WMI_10_4_PDEV_SET_CTL_TABLE_CMDID,
+ .pdev_set_mimogain_table_cmdid = WMI_10_4_PDEV_SET_MIMOGAIN_TABLE_CMDID,
+ .pdev_ratepwr_table_cmdid = WMI_10_4_PDEV_RATEPWR_TABLE_CMDID,
+ .pdev_ratepwr_chainmsk_table_cmdid =
+ WMI_10_4_PDEV_RATEPWR_CHAINMSK_TABLE_CMDID,
+ .pdev_fips_cmdid = WMI_10_4_PDEV_FIPS_CMDID,
+ .tt_set_conf_cmdid = WMI_10_4_TT_SET_CONF_CMDID,
+ .fwtest_cmdid = WMI_10_4_FWTEST_CMDID,
+ .vdev_atf_request_cmdid = WMI_10_4_VDEV_ATF_REQUEST_CMDID,
+ .peer_atf_request_cmdid = WMI_10_4_PEER_ATF_REQUEST_CMDID,
+ .pdev_get_ani_cck_config_cmdid = WMI_10_4_PDEV_GET_ANI_CCK_CONFIG_CMDID,
+ .pdev_get_ani_ofdm_config_cmdid =
+ WMI_10_4_PDEV_GET_ANI_OFDM_CONFIG_CMDID,
+ .pdev_reserve_ast_entry_cmdid = WMI_10_4_PDEV_RESERVE_AST_ENTRY_CMDID,
+ .pdev_get_nfcal_power_cmdid = WMI_10_4_PDEV_GET_NFCAL_POWER_CMDID,
+ .pdev_get_tpc_cmdid = WMI_10_4_PDEV_GET_TPC_CMDID,
+ .pdev_get_ast_info_cmdid = WMI_10_4_PDEV_GET_AST_INFO_CMDID,
+ .vdev_set_dscp_tid_map_cmdid = WMI_10_4_VDEV_SET_DSCP_TID_MAP_CMDID,
+ .pdev_get_info_cmdid = WMI_10_4_PDEV_GET_INFO_CMDID,
+ .vdev_get_info_cmdid = WMI_10_4_VDEV_GET_INFO_CMDID,
+ .vdev_filter_neighbor_rx_packets_cmdid =
+ WMI_10_4_VDEV_FILTER_NEIGHBOR_RX_PACKETS_CMDID,
+ .mu_cal_start_cmdid = WMI_10_4_MU_CAL_START_CMDID,
+ .set_cca_params_cmdid = WMI_10_4_SET_CCA_PARAMS_CMDID,
+ .pdev_bss_chan_info_request_cmdid =
+ WMI_10_4_PDEV_BSS_CHAN_INFO_REQUEST_CMDID,
+ .ext_resource_cfg_cmdid = WMI_10_4_EXT_RESOURCE_CFG_CMDID,
+ .vdev_set_ie_cmdid = WMI_10_4_VDEV_SET_IE_CMDID,
+ .set_lteu_config_cmdid = WMI_10_4_SET_LTEU_CONFIG_CMDID,
+ .atf_ssid_grouping_request_cmdid =
+ WMI_10_4_ATF_SSID_GROUPING_REQUEST_CMDID,
+ .peer_atf_ext_request_cmdid = WMI_10_4_PEER_ATF_EXT_REQUEST_CMDID,
+ .set_periodic_channel_stats_cfg_cmdid =
+ WMI_10_4_SET_PERIODIC_CHANNEL_STATS_CONFIG,
+ .peer_bwf_request_cmdid = WMI_10_4_PEER_BWF_REQUEST_CMDID,
+ .btcoex_cfg_cmdid = WMI_10_4_BTCOEX_CFG_CMDID,
+ .peer_tx_mu_txmit_count_cmdid = WMI_10_4_PEER_TX_MU_TXMIT_COUNT_CMDID,
+ .peer_tx_mu_txmit_rstcnt_cmdid = WMI_10_4_PEER_TX_MU_TXMIT_RSTCNT_CMDID,
+ .peer_gid_userpos_list_cmdid = WMI_10_4_PEER_GID_USERPOS_LIST_CMDID,
+ .pdev_check_cal_version_cmdid = WMI_10_4_PDEV_CHECK_CAL_VERSION_CMDID,
+ .coex_version_cfg_cmid = WMI_10_4_COEX_VERSION_CFG_CMID,
+ .pdev_get_rx_filter_cmdid = WMI_10_4_PDEV_GET_RX_FILTER_CMDID,
+ .pdev_extended_nss_cfg_cmdid = WMI_10_4_PDEV_EXTENDED_NSS_CFG_CMDID,
+ .vdev_set_scan_nac_rssi_cmdid = WMI_10_4_VDEV_SET_SCAN_NAC_RSSI_CMDID,
+ .prog_gpio_band_select_cmdid = WMI_10_4_PROG_GPIO_BAND_SELECT_CMDID,
+ .config_smart_logging_cmdid = WMI_10_4_CONFIG_SMART_LOGGING_CMDID,
+ .debug_fatal_condition_cmdid = WMI_10_4_DEBUG_FATAL_CONDITION_CMDID,
+ .get_tsf_timer_cmdid = WMI_10_4_GET_TSF_TIMER_CMDID,
+ .pdev_get_tpc_table_cmdid = WMI_10_4_PDEV_GET_TPC_TABLE_CMDID,
+ .vdev_sifs_trigger_time_cmdid = WMI_10_4_VDEV_SIFS_TRIGGER_TIME_CMDID,
+ .pdev_wds_entry_list_cmdid = WMI_10_4_PDEV_WDS_ENTRY_LIST_CMDID,
+ .tdls_set_state_cmdid = WMI_10_4_TDLS_SET_STATE_CMDID,
+ .tdls_peer_update_cmdid = WMI_10_4_TDLS_PEER_UPDATE_CMDID,
+ .tdls_set_offchan_mode_cmdid = WMI_10_4_TDLS_SET_OFFCHAN_MODE_CMDID,
+ .radar_found_cmdid = WMI_10_4_RADAR_FOUND_CMDID,
+};
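+
+/* Each supported firmware branch numbers its WMI commands differently, so
+ * these maps translate the driver's abstract command ids into the ABI
+ * values of the firmware that is actually running. Entries set to
+ * WMI_CMD_UNSUPPORTED make ath10k_wmi_cmd_send() fail fast instead of
+ * sending an id the firmware would misinterpret. A lookup goes through the
+ * map installed in ar->wmi.cmd, e.g. (illustrative):
+ *
+ *   ret = ath10k_wmi_cmd_send(ar, skb, ar->wmi.cmd->pdev_suspend_cmdid);
+ */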
+
+/* MAIN WMI VDEV param map */
+static struct wmi_vdev_param_map wmi_vdev_param_map = {
+ .rts_threshold = WMI_VDEV_PARAM_RTS_THRESHOLD,
+ .fragmentation_threshold = WMI_VDEV_PARAM_FRAGMENTATION_THRESHOLD,
+ .beacon_interval = WMI_VDEV_PARAM_BEACON_INTERVAL,
+ .listen_interval = WMI_VDEV_PARAM_LISTEN_INTERVAL,
+ .multicast_rate = WMI_VDEV_PARAM_MULTICAST_RATE,
+ .mgmt_tx_rate = WMI_VDEV_PARAM_MGMT_TX_RATE,
+ .slot_time = WMI_VDEV_PARAM_SLOT_TIME,
+ .preamble = WMI_VDEV_PARAM_PREAMBLE,
+ .swba_time = WMI_VDEV_PARAM_SWBA_TIME,
+ .wmi_vdev_stats_update_period = WMI_VDEV_STATS_UPDATE_PERIOD,
+ .wmi_vdev_pwrsave_ageout_time = WMI_VDEV_PWRSAVE_AGEOUT_TIME,
+ .wmi_vdev_host_swba_interval = WMI_VDEV_HOST_SWBA_INTERVAL,
+ .dtim_period = WMI_VDEV_PARAM_DTIM_PERIOD,
+ .wmi_vdev_oc_scheduler_air_time_limit =
+ WMI_VDEV_OC_SCHEDULER_AIR_TIME_LIMIT,
+ .wds = WMI_VDEV_PARAM_WDS,
+ .atim_window = WMI_VDEV_PARAM_ATIM_WINDOW,
+ .bmiss_count_max = WMI_VDEV_PARAM_BMISS_COUNT_MAX,
+ .bmiss_first_bcnt = WMI_VDEV_PARAM_BMISS_FIRST_BCNT,
+ .bmiss_final_bcnt = WMI_VDEV_PARAM_BMISS_FINAL_BCNT,
+ .feature_wmm = WMI_VDEV_PARAM_FEATURE_WMM,
+ .chwidth = WMI_VDEV_PARAM_CHWIDTH,
+ .chextoffset = WMI_VDEV_PARAM_CHEXTOFFSET,
+ .disable_htprotection = WMI_VDEV_PARAM_DISABLE_HTPROTECTION,
+ .sta_quickkickout = WMI_VDEV_PARAM_STA_QUICKKICKOUT,
+ .mgmt_rate = WMI_VDEV_PARAM_MGMT_RATE,
+ .protection_mode = WMI_VDEV_PARAM_PROTECTION_MODE,
+ .fixed_rate = WMI_VDEV_PARAM_FIXED_RATE,
+ .sgi = WMI_VDEV_PARAM_SGI,
+ .ldpc = WMI_VDEV_PARAM_LDPC,
+ .tx_stbc = WMI_VDEV_PARAM_TX_STBC,
+ .rx_stbc = WMI_VDEV_PARAM_RX_STBC,
+ .intra_bss_fwd = WMI_VDEV_PARAM_INTRA_BSS_FWD,
+ .def_keyid = WMI_VDEV_PARAM_DEF_KEYID,
+ .nss = WMI_VDEV_PARAM_NSS,
+ .bcast_data_rate = WMI_VDEV_PARAM_BCAST_DATA_RATE,
+ .mcast_data_rate = WMI_VDEV_PARAM_MCAST_DATA_RATE,
+ .mcast_indicate = WMI_VDEV_PARAM_MCAST_INDICATE,
+ .dhcp_indicate = WMI_VDEV_PARAM_DHCP_INDICATE,
+ .unknown_dest_indicate = WMI_VDEV_PARAM_UNKNOWN_DEST_INDICATE,
+ .ap_keepalive_min_idle_inactive_time_secs =
+ WMI_VDEV_PARAM_AP_KEEPALIVE_MIN_IDLE_INACTIVE_TIME_SECS,
+ .ap_keepalive_max_idle_inactive_time_secs =
+ WMI_VDEV_PARAM_AP_KEEPALIVE_MAX_IDLE_INACTIVE_TIME_SECS,
+ .ap_keepalive_max_unresponsive_time_secs =
+ WMI_VDEV_PARAM_AP_KEEPALIVE_MAX_UNRESPONSIVE_TIME_SECS,
+ .ap_enable_nawds = WMI_VDEV_PARAM_AP_ENABLE_NAWDS,
+ .mcast2ucast_set = WMI_VDEV_PARAM_UNSUPPORTED,
+ .enable_rtscts = WMI_VDEV_PARAM_ENABLE_RTSCTS,
+ .txbf = WMI_VDEV_PARAM_TXBF,
+ .packet_powersave = WMI_VDEV_PARAM_PACKET_POWERSAVE,
+ .drop_unencry = WMI_VDEV_PARAM_DROP_UNENCRY,
+ .tx_encap_type = WMI_VDEV_PARAM_TX_ENCAP_TYPE,
+ .ap_detect_out_of_sync_sleeping_sta_time_secs =
+ WMI_VDEV_PARAM_UNSUPPORTED,
+ .rc_num_retries = WMI_VDEV_PARAM_UNSUPPORTED,
+ .cabq_maxdur = WMI_VDEV_PARAM_UNSUPPORTED,
+ .mfptest_set = WMI_VDEV_PARAM_UNSUPPORTED,
+ .rts_fixed_rate = WMI_VDEV_PARAM_UNSUPPORTED,
+ .vht_sgimask = WMI_VDEV_PARAM_UNSUPPORTED,
+ .vht80_ratemask = WMI_VDEV_PARAM_UNSUPPORTED,
+ .early_rx_adjust_enable = WMI_VDEV_PARAM_UNSUPPORTED,
+ .early_rx_tgt_bmiss_num = WMI_VDEV_PARAM_UNSUPPORTED,
+ .early_rx_bmiss_sample_cycle = WMI_VDEV_PARAM_UNSUPPORTED,
+ .early_rx_slop_step = WMI_VDEV_PARAM_UNSUPPORTED,
+ .early_rx_init_slop = WMI_VDEV_PARAM_UNSUPPORTED,
+ .early_rx_adjust_pause = WMI_VDEV_PARAM_UNSUPPORTED,
+ .proxy_sta = WMI_VDEV_PARAM_UNSUPPORTED,
+ .meru_vc = WMI_VDEV_PARAM_UNSUPPORTED,
+ .rx_decap_type = WMI_VDEV_PARAM_UNSUPPORTED,
+ .bw_nss_ratemask = WMI_VDEV_PARAM_UNSUPPORTED,
+};
+
+/* 10.X WMI VDEV param map */
+static struct wmi_vdev_param_map wmi_10x_vdev_param_map = {
+ .rts_threshold = WMI_10X_VDEV_PARAM_RTS_THRESHOLD,
+ .fragmentation_threshold = WMI_10X_VDEV_PARAM_FRAGMENTATION_THRESHOLD,
+ .beacon_interval = WMI_10X_VDEV_PARAM_BEACON_INTERVAL,
+ .listen_interval = WMI_10X_VDEV_PARAM_LISTEN_INTERVAL,
+ .multicast_rate = WMI_10X_VDEV_PARAM_MULTICAST_RATE,
+ .mgmt_tx_rate = WMI_10X_VDEV_PARAM_MGMT_TX_RATE,
+ .slot_time = WMI_10X_VDEV_PARAM_SLOT_TIME,
+ .preamble = WMI_10X_VDEV_PARAM_PREAMBLE,
+ .swba_time = WMI_10X_VDEV_PARAM_SWBA_TIME,
+ .wmi_vdev_stats_update_period = WMI_10X_VDEV_STATS_UPDATE_PERIOD,
+ .wmi_vdev_pwrsave_ageout_time = WMI_10X_VDEV_PWRSAVE_AGEOUT_TIME,
+ .wmi_vdev_host_swba_interval = WMI_10X_VDEV_HOST_SWBA_INTERVAL,
+ .dtim_period = WMI_10X_VDEV_PARAM_DTIM_PERIOD,
+ .wmi_vdev_oc_scheduler_air_time_limit =
+ WMI_10X_VDEV_OC_SCHEDULER_AIR_TIME_LIMIT,
+ .wds = WMI_10X_VDEV_PARAM_WDS,
+ .atim_window = WMI_10X_VDEV_PARAM_ATIM_WINDOW,
+ .bmiss_count_max = WMI_10X_VDEV_PARAM_BMISS_COUNT_MAX,
+ .bmiss_first_bcnt = WMI_VDEV_PARAM_UNSUPPORTED,
+ .bmiss_final_bcnt = WMI_VDEV_PARAM_UNSUPPORTED,
+ .feature_wmm = WMI_10X_VDEV_PARAM_FEATURE_WMM,
+ .chwidth = WMI_10X_VDEV_PARAM_CHWIDTH,
+ .chextoffset = WMI_10X_VDEV_PARAM_CHEXTOFFSET,
+ .disable_htprotection = WMI_10X_VDEV_PARAM_DISABLE_HTPROTECTION,
+ .sta_quickkickout = WMI_10X_VDEV_PARAM_STA_QUICKKICKOUT,
+ .mgmt_rate = WMI_10X_VDEV_PARAM_MGMT_RATE,
+ .protection_mode = WMI_10X_VDEV_PARAM_PROTECTION_MODE,
+ .fixed_rate = WMI_10X_VDEV_PARAM_FIXED_RATE,
+ .sgi = WMI_10X_VDEV_PARAM_SGI,
+ .ldpc = WMI_10X_VDEV_PARAM_LDPC,
+ .tx_stbc = WMI_10X_VDEV_PARAM_TX_STBC,
+ .rx_stbc = WMI_10X_VDEV_PARAM_RX_STBC,
+ .intra_bss_fwd = WMI_10X_VDEV_PARAM_INTRA_BSS_FWD,
+ .def_keyid = WMI_10X_VDEV_PARAM_DEF_KEYID,
+ .nss = WMI_10X_VDEV_PARAM_NSS,
+ .bcast_data_rate = WMI_10X_VDEV_PARAM_BCAST_DATA_RATE,
+ .mcast_data_rate = WMI_10X_VDEV_PARAM_MCAST_DATA_RATE,
+ .mcast_indicate = WMI_10X_VDEV_PARAM_MCAST_INDICATE,
+ .dhcp_indicate = WMI_10X_VDEV_PARAM_DHCP_INDICATE,
+ .unknown_dest_indicate = WMI_10X_VDEV_PARAM_UNKNOWN_DEST_INDICATE,
+ .ap_keepalive_min_idle_inactive_time_secs =
+ WMI_10X_VDEV_PARAM_AP_KEEPALIVE_MIN_IDLE_INACTIVE_TIME_SECS,
+ .ap_keepalive_max_idle_inactive_time_secs =
+ WMI_10X_VDEV_PARAM_AP_KEEPALIVE_MAX_IDLE_INACTIVE_TIME_SECS,
+ .ap_keepalive_max_unresponsive_time_secs =
+ WMI_10X_VDEV_PARAM_AP_KEEPALIVE_MAX_UNRESPONSIVE_TIME_SECS,
+ .ap_enable_nawds = WMI_10X_VDEV_PARAM_AP_ENABLE_NAWDS,
+ .mcast2ucast_set = WMI_10X_VDEV_PARAM_MCAST2UCAST_SET,
+ .enable_rtscts = WMI_10X_VDEV_PARAM_ENABLE_RTSCTS,
+ .txbf = WMI_VDEV_PARAM_UNSUPPORTED,
+ .packet_powersave = WMI_VDEV_PARAM_UNSUPPORTED,
+ .drop_unencry = WMI_VDEV_PARAM_UNSUPPORTED,
+ .tx_encap_type = WMI_VDEV_PARAM_UNSUPPORTED,
+ .ap_detect_out_of_sync_sleeping_sta_time_secs =
+ WMI_10X_VDEV_PARAM_AP_DETECT_OUT_OF_SYNC_SLEEPING_STA_TIME_SECS,
+ .rc_num_retries = WMI_VDEV_PARAM_UNSUPPORTED,
+ .cabq_maxdur = WMI_VDEV_PARAM_UNSUPPORTED,
+ .mfptest_set = WMI_VDEV_PARAM_UNSUPPORTED,
+ .rts_fixed_rate = WMI_VDEV_PARAM_UNSUPPORTED,
+ .vht_sgimask = WMI_VDEV_PARAM_UNSUPPORTED,
+ .vht80_ratemask = WMI_VDEV_PARAM_UNSUPPORTED,
+ .early_rx_adjust_enable = WMI_VDEV_PARAM_UNSUPPORTED,
+ .early_rx_tgt_bmiss_num = WMI_VDEV_PARAM_UNSUPPORTED,
+ .early_rx_bmiss_sample_cycle = WMI_VDEV_PARAM_UNSUPPORTED,
+ .early_rx_slop_step = WMI_VDEV_PARAM_UNSUPPORTED,
+ .early_rx_init_slop = WMI_VDEV_PARAM_UNSUPPORTED,
+ .early_rx_adjust_pause = WMI_VDEV_PARAM_UNSUPPORTED,
+ .proxy_sta = WMI_VDEV_PARAM_UNSUPPORTED,
+ .meru_vc = WMI_VDEV_PARAM_UNSUPPORTED,
+ .rx_decap_type = WMI_VDEV_PARAM_UNSUPPORTED,
+ .bw_nss_ratemask = WMI_VDEV_PARAM_UNSUPPORTED,
+};
+
+static struct wmi_vdev_param_map wmi_10_2_4_vdev_param_map = {
+ .rts_threshold = WMI_10X_VDEV_PARAM_RTS_THRESHOLD,
+ .fragmentation_threshold = WMI_10X_VDEV_PARAM_FRAGMENTATION_THRESHOLD,
+ .beacon_interval = WMI_10X_VDEV_PARAM_BEACON_INTERVAL,
+ .listen_interval = WMI_10X_VDEV_PARAM_LISTEN_INTERVAL,
+ .multicast_rate = WMI_10X_VDEV_PARAM_MULTICAST_RATE,
+ .mgmt_tx_rate = WMI_10X_VDEV_PARAM_MGMT_TX_RATE,
+ .slot_time = WMI_10X_VDEV_PARAM_SLOT_TIME,
+ .preamble = WMI_10X_VDEV_PARAM_PREAMBLE,
+ .swba_time = WMI_10X_VDEV_PARAM_SWBA_TIME,
+ .wmi_vdev_stats_update_period = WMI_10X_VDEV_STATS_UPDATE_PERIOD,
+ .wmi_vdev_pwrsave_ageout_time = WMI_10X_VDEV_PWRSAVE_AGEOUT_TIME,
+ .wmi_vdev_host_swba_interval = WMI_10X_VDEV_HOST_SWBA_INTERVAL,
+ .dtim_period = WMI_10X_VDEV_PARAM_DTIM_PERIOD,
+ .wmi_vdev_oc_scheduler_air_time_limit =
+ WMI_10X_VDEV_OC_SCHEDULER_AIR_TIME_LIMIT,
+ .wds = WMI_10X_VDEV_PARAM_WDS,
+ .atim_window = WMI_10X_VDEV_PARAM_ATIM_WINDOW,
+ .bmiss_count_max = WMI_10X_VDEV_PARAM_BMISS_COUNT_MAX,
+ .bmiss_first_bcnt = WMI_VDEV_PARAM_UNSUPPORTED,
+ .bmiss_final_bcnt = WMI_VDEV_PARAM_UNSUPPORTED,
+ .feature_wmm = WMI_10X_VDEV_PARAM_FEATURE_WMM,
+ .chwidth = WMI_10X_VDEV_PARAM_CHWIDTH,
+ .chextoffset = WMI_10X_VDEV_PARAM_CHEXTOFFSET,
+ .disable_htprotection = WMI_10X_VDEV_PARAM_DISABLE_HTPROTECTION,
+ .sta_quickkickout = WMI_10X_VDEV_PARAM_STA_QUICKKICKOUT,
+ .mgmt_rate = WMI_10X_VDEV_PARAM_MGMT_RATE,
+ .protection_mode = WMI_10X_VDEV_PARAM_PROTECTION_MODE,
+ .fixed_rate = WMI_10X_VDEV_PARAM_FIXED_RATE,
+ .sgi = WMI_10X_VDEV_PARAM_SGI,
+ .ldpc = WMI_10X_VDEV_PARAM_LDPC,
+ .tx_stbc = WMI_10X_VDEV_PARAM_TX_STBC,
+ .rx_stbc = WMI_10X_VDEV_PARAM_RX_STBC,
+ .intra_bss_fwd = WMI_10X_VDEV_PARAM_INTRA_BSS_FWD,
+ .def_keyid = WMI_10X_VDEV_PARAM_DEF_KEYID,
+ .nss = WMI_10X_VDEV_PARAM_NSS,
+ .bcast_data_rate = WMI_10X_VDEV_PARAM_BCAST_DATA_RATE,
+ .mcast_data_rate = WMI_10X_VDEV_PARAM_MCAST_DATA_RATE,
+ .mcast_indicate = WMI_10X_VDEV_PARAM_MCAST_INDICATE,
+ .dhcp_indicate = WMI_10X_VDEV_PARAM_DHCP_INDICATE,
+ .unknown_dest_indicate = WMI_10X_VDEV_PARAM_UNKNOWN_DEST_INDICATE,
+ .ap_keepalive_min_idle_inactive_time_secs =
+ WMI_10X_VDEV_PARAM_AP_KEEPALIVE_MIN_IDLE_INACTIVE_TIME_SECS,
+ .ap_keepalive_max_idle_inactive_time_secs =
+ WMI_10X_VDEV_PARAM_AP_KEEPALIVE_MAX_IDLE_INACTIVE_TIME_SECS,
+ .ap_keepalive_max_unresponsive_time_secs =
+ WMI_10X_VDEV_PARAM_AP_KEEPALIVE_MAX_UNRESPONSIVE_TIME_SECS,
+ .ap_enable_nawds = WMI_10X_VDEV_PARAM_AP_ENABLE_NAWDS,
+ .mcast2ucast_set = WMI_10X_VDEV_PARAM_MCAST2UCAST_SET,
+ .enable_rtscts = WMI_10X_VDEV_PARAM_ENABLE_RTSCTS,
+ .txbf = WMI_VDEV_PARAM_UNSUPPORTED,
+ .packet_powersave = WMI_VDEV_PARAM_UNSUPPORTED,
+ .drop_unencry = WMI_VDEV_PARAM_UNSUPPORTED,
+ .tx_encap_type = WMI_VDEV_PARAM_UNSUPPORTED,
+ .ap_detect_out_of_sync_sleeping_sta_time_secs =
+ WMI_10X_VDEV_PARAM_AP_DETECT_OUT_OF_SYNC_SLEEPING_STA_TIME_SECS,
+ .rc_num_retries = WMI_VDEV_PARAM_UNSUPPORTED,
+ .cabq_maxdur = WMI_VDEV_PARAM_UNSUPPORTED,
+ .mfptest_set = WMI_VDEV_PARAM_UNSUPPORTED,
+ .rts_fixed_rate = WMI_VDEV_PARAM_UNSUPPORTED,
+ .vht_sgimask = WMI_VDEV_PARAM_UNSUPPORTED,
+ .vht80_ratemask = WMI_VDEV_PARAM_UNSUPPORTED,
+ .early_rx_adjust_enable = WMI_VDEV_PARAM_UNSUPPORTED,
+ .early_rx_tgt_bmiss_num = WMI_VDEV_PARAM_UNSUPPORTED,
+ .early_rx_bmiss_sample_cycle = WMI_VDEV_PARAM_UNSUPPORTED,
+ .early_rx_slop_step = WMI_VDEV_PARAM_UNSUPPORTED,
+ .early_rx_init_slop = WMI_VDEV_PARAM_UNSUPPORTED,
+ .early_rx_adjust_pause = WMI_VDEV_PARAM_UNSUPPORTED,
+ .proxy_sta = WMI_VDEV_PARAM_UNSUPPORTED,
+ .meru_vc = WMI_VDEV_PARAM_UNSUPPORTED,
+ .rx_decap_type = WMI_VDEV_PARAM_UNSUPPORTED,
+ .bw_nss_ratemask = WMI_VDEV_PARAM_UNSUPPORTED,
+};
+
+static struct wmi_vdev_param_map wmi_10_4_vdev_param_map = {
+ .rts_threshold = WMI_10_4_VDEV_PARAM_RTS_THRESHOLD,
+ .fragmentation_threshold = WMI_10_4_VDEV_PARAM_FRAGMENTATION_THRESHOLD,
+ .beacon_interval = WMI_10_4_VDEV_PARAM_BEACON_INTERVAL,
+ .listen_interval = WMI_10_4_VDEV_PARAM_LISTEN_INTERVAL,
+ .multicast_rate = WMI_10_4_VDEV_PARAM_MULTICAST_RATE,
+ .mgmt_tx_rate = WMI_10_4_VDEV_PARAM_MGMT_TX_RATE,
+ .slot_time = WMI_10_4_VDEV_PARAM_SLOT_TIME,
+ .preamble = WMI_10_4_VDEV_PARAM_PREAMBLE,
+ .swba_time = WMI_10_4_VDEV_PARAM_SWBA_TIME,
+ .wmi_vdev_stats_update_period = WMI_10_4_VDEV_STATS_UPDATE_PERIOD,
+ .wmi_vdev_pwrsave_ageout_time = WMI_10_4_VDEV_PWRSAVE_AGEOUT_TIME,
+ .wmi_vdev_host_swba_interval = WMI_10_4_VDEV_HOST_SWBA_INTERVAL,
+ .dtim_period = WMI_10_4_VDEV_PARAM_DTIM_PERIOD,
+ .wmi_vdev_oc_scheduler_air_time_limit =
+ WMI_10_4_VDEV_OC_SCHEDULER_AIR_TIME_LIMIT,
+ .wds = WMI_10_4_VDEV_PARAM_WDS,
+ .atim_window = WMI_10_4_VDEV_PARAM_ATIM_WINDOW,
+ .bmiss_count_max = WMI_10_4_VDEV_PARAM_BMISS_COUNT_MAX,
+ .bmiss_first_bcnt = WMI_10_4_VDEV_PARAM_BMISS_FIRST_BCNT,
+ .bmiss_final_bcnt = WMI_10_4_VDEV_PARAM_BMISS_FINAL_BCNT,
+ .feature_wmm = WMI_10_4_VDEV_PARAM_FEATURE_WMM,
+ .chwidth = WMI_10_4_VDEV_PARAM_CHWIDTH,
+ .chextoffset = WMI_10_4_VDEV_PARAM_CHEXTOFFSET,
+ .disable_htprotection = WMI_10_4_VDEV_PARAM_DISABLE_HTPROTECTION,
+ .sta_quickkickout = WMI_10_4_VDEV_PARAM_STA_QUICKKICKOUT,
+ .mgmt_rate = WMI_10_4_VDEV_PARAM_MGMT_RATE,
+ .protection_mode = WMI_10_4_VDEV_PARAM_PROTECTION_MODE,
+ .fixed_rate = WMI_10_4_VDEV_PARAM_FIXED_RATE,
+ .sgi = WMI_10_4_VDEV_PARAM_SGI,
+ .ldpc = WMI_10_4_VDEV_PARAM_LDPC,
+ .tx_stbc = WMI_10_4_VDEV_PARAM_TX_STBC,
+ .rx_stbc = WMI_10_4_VDEV_PARAM_RX_STBC,
+ .intra_bss_fwd = WMI_10_4_VDEV_PARAM_INTRA_BSS_FWD,
+ .def_keyid = WMI_10_4_VDEV_PARAM_DEF_KEYID,
+ .nss = WMI_10_4_VDEV_PARAM_NSS,
+ .bcast_data_rate = WMI_10_4_VDEV_PARAM_BCAST_DATA_RATE,
+ .mcast_data_rate = WMI_10_4_VDEV_PARAM_MCAST_DATA_RATE,
+ .mcast_indicate = WMI_10_4_VDEV_PARAM_MCAST_INDICATE,
+ .dhcp_indicate = WMI_10_4_VDEV_PARAM_DHCP_INDICATE,
+ .unknown_dest_indicate = WMI_10_4_VDEV_PARAM_UNKNOWN_DEST_INDICATE,
+ .ap_keepalive_min_idle_inactive_time_secs =
+ WMI_10_4_VDEV_PARAM_AP_KEEPALIVE_MIN_IDLE_INACTIVE_TIME_SECS,
+ .ap_keepalive_max_idle_inactive_time_secs =
+ WMI_10_4_VDEV_PARAM_AP_KEEPALIVE_MAX_IDLE_INACTIVE_TIME_SECS,
+ .ap_keepalive_max_unresponsive_time_secs =
+ WMI_10_4_VDEV_PARAM_AP_KEEPALIVE_MAX_UNRESPONSIVE_TIME_SECS,
+ .ap_enable_nawds = WMI_10_4_VDEV_PARAM_AP_ENABLE_NAWDS,
+ .mcast2ucast_set = WMI_10_4_VDEV_PARAM_MCAST2UCAST_SET,
+ .enable_rtscts = WMI_10_4_VDEV_PARAM_ENABLE_RTSCTS,
+ .txbf = WMI_10_4_VDEV_PARAM_TXBF,
+ .packet_powersave = WMI_10_4_VDEV_PARAM_PACKET_POWERSAVE,
+ .drop_unencry = WMI_10_4_VDEV_PARAM_DROP_UNENCRY,
+ .tx_encap_type = WMI_10_4_VDEV_PARAM_TX_ENCAP_TYPE,
+ .ap_detect_out_of_sync_sleeping_sta_time_secs =
+ WMI_10_4_VDEV_PARAM_AP_DETECT_OUT_OF_SYNC_SLEEPING_STA_TIME_SECS,
+ .rc_num_retries = WMI_10_4_VDEV_PARAM_RC_NUM_RETRIES,
+ .cabq_maxdur = WMI_10_4_VDEV_PARAM_CABQ_MAXDUR,
+ .mfptest_set = WMI_10_4_VDEV_PARAM_MFPTEST_SET,
+ .rts_fixed_rate = WMI_10_4_VDEV_PARAM_RTS_FIXED_RATE,
+ .vht_sgimask = WMI_10_4_VDEV_PARAM_VHT_SGIMASK,
+ .vht80_ratemask = WMI_10_4_VDEV_PARAM_VHT80_RATEMASK,
+ .early_rx_adjust_enable = WMI_10_4_VDEV_PARAM_EARLY_RX_ADJUST_ENABLE,
+ .early_rx_tgt_bmiss_num = WMI_10_4_VDEV_PARAM_EARLY_RX_TGT_BMISS_NUM,
+ .early_rx_bmiss_sample_cycle =
+ WMI_10_4_VDEV_PARAM_EARLY_RX_BMISS_SAMPLE_CYCLE,
+ .early_rx_slop_step = WMI_10_4_VDEV_PARAM_EARLY_RX_SLOP_STEP,
+ .early_rx_init_slop = WMI_10_4_VDEV_PARAM_EARLY_RX_INIT_SLOP,
+ .early_rx_adjust_pause = WMI_10_4_VDEV_PARAM_EARLY_RX_ADJUST_PAUSE,
+ .proxy_sta = WMI_10_4_VDEV_PARAM_PROXY_STA,
+ .meru_vc = WMI_10_4_VDEV_PARAM_MERU_VC,
+ .rx_decap_type = WMI_10_4_VDEV_PARAM_RX_DECAP_TYPE,
+ .bw_nss_ratemask = WMI_10_4_VDEV_PARAM_BW_NSS_RATEMASK,
+ .inc_tsf = WMI_10_4_VDEV_PARAM_TSF_INCREMENT,
+ .dec_tsf = WMI_10_4_VDEV_PARAM_TSF_DECREMENT,
+};
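+
+/* The vdev parameter maps above work the same way: common code names a
+ * parameter abstractly and the table installed in ar->wmi.vdev_param
+ * supplies the firmware-specific id, with WMI_VDEV_PARAM_UNSUPPORTED
+ * marking knobs a branch does not expose. An illustrative caller:
+ *
+ *   ret = ath10k_wmi_vdev_set_param(ar, arvif->vdev_id,
+ *                                   ar->wmi.vdev_param->rts_threshold, rts);
+ */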
+
+static struct wmi_pdev_param_map wmi_pdev_param_map = {
+ .tx_chain_mask = WMI_PDEV_PARAM_TX_CHAIN_MASK,
+ .rx_chain_mask = WMI_PDEV_PARAM_RX_CHAIN_MASK,
+ .txpower_limit2g = WMI_PDEV_PARAM_TXPOWER_LIMIT2G,
+ .txpower_limit5g = WMI_PDEV_PARAM_TXPOWER_LIMIT5G,
+ .txpower_scale = WMI_PDEV_PARAM_TXPOWER_SCALE,
+ .beacon_gen_mode = WMI_PDEV_PARAM_BEACON_GEN_MODE,
+ .beacon_tx_mode = WMI_PDEV_PARAM_BEACON_TX_MODE,
+ .resmgr_offchan_mode = WMI_PDEV_PARAM_RESMGR_OFFCHAN_MODE,
+ .protection_mode = WMI_PDEV_PARAM_PROTECTION_MODE,
+ .dynamic_bw = WMI_PDEV_PARAM_DYNAMIC_BW,
+ .non_agg_sw_retry_th = WMI_PDEV_PARAM_NON_AGG_SW_RETRY_TH,
+ .agg_sw_retry_th = WMI_PDEV_PARAM_AGG_SW_RETRY_TH,
+ .sta_kickout_th = WMI_PDEV_PARAM_STA_KICKOUT_TH,
+ .ac_aggrsize_scaling = WMI_PDEV_PARAM_AC_AGGRSIZE_SCALING,
+ .ltr_enable = WMI_PDEV_PARAM_LTR_ENABLE,
+ .ltr_ac_latency_be = WMI_PDEV_PARAM_LTR_AC_LATENCY_BE,
+ .ltr_ac_latency_bk = WMI_PDEV_PARAM_LTR_AC_LATENCY_BK,
+ .ltr_ac_latency_vi = WMI_PDEV_PARAM_LTR_AC_LATENCY_VI,
+ .ltr_ac_latency_vo = WMI_PDEV_PARAM_LTR_AC_LATENCY_VO,
+ .ltr_ac_latency_timeout = WMI_PDEV_PARAM_LTR_AC_LATENCY_TIMEOUT,
+ .ltr_sleep_override = WMI_PDEV_PARAM_LTR_SLEEP_OVERRIDE,
+ .ltr_rx_override = WMI_PDEV_PARAM_LTR_RX_OVERRIDE,
+ .ltr_tx_activity_timeout = WMI_PDEV_PARAM_LTR_TX_ACTIVITY_TIMEOUT,
+ .l1ss_enable = WMI_PDEV_PARAM_L1SS_ENABLE,
+ .dsleep_enable = WMI_PDEV_PARAM_DSLEEP_ENABLE,
+ .pcielp_txbuf_flush = WMI_PDEV_PARAM_PCIELP_TXBUF_FLUSH,
+ .pcielp_txbuf_watermark = WMI_PDEV_PARAM_PCIELP_TXBUF_TMO_EN,
+ .pcielp_txbuf_tmo_en = WMI_PDEV_PARAM_PCIELP_TXBUF_TMO_EN,
+ .pcielp_txbuf_tmo_value = WMI_PDEV_PARAM_PCIELP_TXBUF_TMO_VALUE,
+ .pdev_stats_update_period = WMI_PDEV_PARAM_PDEV_STATS_UPDATE_PERIOD,
+ .vdev_stats_update_period = WMI_PDEV_PARAM_VDEV_STATS_UPDATE_PERIOD,
+ .peer_stats_update_period = WMI_PDEV_PARAM_PEER_STATS_UPDATE_PERIOD,
+ .bcnflt_stats_update_period = WMI_PDEV_PARAM_BCNFLT_STATS_UPDATE_PERIOD,
+ .pmf_qos = WMI_PDEV_PARAM_PMF_QOS,
+ .arp_ac_override = WMI_PDEV_PARAM_ARP_AC_OVERRIDE,
+ .dcs = WMI_PDEV_PARAM_DCS,
+ .ani_enable = WMI_PDEV_PARAM_ANI_ENABLE,
+ .ani_poll_period = WMI_PDEV_PARAM_ANI_POLL_PERIOD,
+ .ani_listen_period = WMI_PDEV_PARAM_ANI_LISTEN_PERIOD,
+ .ani_ofdm_level = WMI_PDEV_PARAM_ANI_OFDM_LEVEL,
+ .ani_cck_level = WMI_PDEV_PARAM_ANI_CCK_LEVEL,
+ .dyntxchain = WMI_PDEV_PARAM_DYNTXCHAIN,
+ .proxy_sta = WMI_PDEV_PARAM_PROXY_STA,
+ .idle_ps_config = WMI_PDEV_PARAM_IDLE_PS_CONFIG,
+ .power_gating_sleep = WMI_PDEV_PARAM_POWER_GATING_SLEEP,
+ .fast_channel_reset = WMI_PDEV_PARAM_UNSUPPORTED,
+ .burst_dur = WMI_PDEV_PARAM_UNSUPPORTED,
+ .burst_enable = WMI_PDEV_PARAM_UNSUPPORTED,
+ .cal_period = WMI_PDEV_PARAM_UNSUPPORTED,
+ .aggr_burst = WMI_PDEV_PARAM_UNSUPPORTED,
+ .rx_decap_mode = WMI_PDEV_PARAM_UNSUPPORTED,
+ .smart_antenna_default_antenna = WMI_PDEV_PARAM_UNSUPPORTED,
+ .igmpmld_override = WMI_PDEV_PARAM_UNSUPPORTED,
+ .igmpmld_tid = WMI_PDEV_PARAM_UNSUPPORTED,
+ .antenna_gain = WMI_PDEV_PARAM_UNSUPPORTED,
+ .rx_filter = WMI_PDEV_PARAM_UNSUPPORTED,
+ .set_mcast_to_ucast_tid = WMI_PDEV_PARAM_UNSUPPORTED,
+ .proxy_sta_mode = WMI_PDEV_PARAM_UNSUPPORTED,
+ .set_mcast2ucast_mode = WMI_PDEV_PARAM_UNSUPPORTED,
+ .set_mcast2ucast_buffer = WMI_PDEV_PARAM_UNSUPPORTED,
+ .remove_mcast2ucast_buffer = WMI_PDEV_PARAM_UNSUPPORTED,
+ .peer_sta_ps_statechg_enable = WMI_PDEV_PARAM_UNSUPPORTED,
+ .igmpmld_ac_override = WMI_PDEV_PARAM_UNSUPPORTED,
+ .block_interbss = WMI_PDEV_PARAM_UNSUPPORTED,
+ .set_disable_reset_cmdid = WMI_PDEV_PARAM_UNSUPPORTED,
+ .set_msdu_ttl_cmdid = WMI_PDEV_PARAM_UNSUPPORTED,
+ .set_ppdu_duration_cmdid = WMI_PDEV_PARAM_UNSUPPORTED,
+ .txbf_sound_period_cmdid = WMI_PDEV_PARAM_UNSUPPORTED,
+ .set_promisc_mode_cmdid = WMI_PDEV_PARAM_UNSUPPORTED,
+ .set_burst_mode_cmdid = WMI_PDEV_PARAM_UNSUPPORTED,
+ .en_stats = WMI_PDEV_PARAM_UNSUPPORTED,
+ .mu_group_policy = WMI_PDEV_PARAM_UNSUPPORTED,
+ .noise_detection = WMI_PDEV_PARAM_UNSUPPORTED,
+ .noise_threshold = WMI_PDEV_PARAM_UNSUPPORTED,
+ .dpd_enable = WMI_PDEV_PARAM_UNSUPPORTED,
+ .set_mcast_bcast_echo = WMI_PDEV_PARAM_UNSUPPORTED,
+ .atf_strict_sch = WMI_PDEV_PARAM_UNSUPPORTED,
+ .atf_sched_duration = WMI_PDEV_PARAM_UNSUPPORTED,
+ .ant_plzn = WMI_PDEV_PARAM_UNSUPPORTED,
+ .mgmt_retry_limit = WMI_PDEV_PARAM_UNSUPPORTED,
+ .sensitivity_level = WMI_PDEV_PARAM_UNSUPPORTED,
+ .signed_txpower_2g = WMI_PDEV_PARAM_UNSUPPORTED,
+ .signed_txpower_5g = WMI_PDEV_PARAM_UNSUPPORTED,
+ .enable_per_tid_amsdu = WMI_PDEV_PARAM_UNSUPPORTED,
+ .enable_per_tid_ampdu = WMI_PDEV_PARAM_UNSUPPORTED,
+ .cca_threshold = WMI_PDEV_PARAM_UNSUPPORTED,
+ .rts_fixed_rate = WMI_PDEV_PARAM_UNSUPPORTED,
+ .pdev_reset = WMI_PDEV_PARAM_UNSUPPORTED,
+ .wapi_mbssid_offset = WMI_PDEV_PARAM_UNSUPPORTED,
+ .arp_srcaddr = WMI_PDEV_PARAM_UNSUPPORTED,
+ .arp_dstaddr = WMI_PDEV_PARAM_UNSUPPORTED,
+ .enable_btcoex = WMI_PDEV_PARAM_UNSUPPORTED,
+};
+
+static struct wmi_pdev_param_map wmi_10x_pdev_param_map = {
+ .tx_chain_mask = WMI_10X_PDEV_PARAM_TX_CHAIN_MASK,
+ .rx_chain_mask = WMI_10X_PDEV_PARAM_RX_CHAIN_MASK,
+ .txpower_limit2g = WMI_10X_PDEV_PARAM_TXPOWER_LIMIT2G,
+ .txpower_limit5g = WMI_10X_PDEV_PARAM_TXPOWER_LIMIT5G,
+ .txpower_scale = WMI_10X_PDEV_PARAM_TXPOWER_SCALE,
+ .beacon_gen_mode = WMI_10X_PDEV_PARAM_BEACON_GEN_MODE,
+ .beacon_tx_mode = WMI_10X_PDEV_PARAM_BEACON_TX_MODE,
+ .resmgr_offchan_mode = WMI_10X_PDEV_PARAM_RESMGR_OFFCHAN_MODE,
+ .protection_mode = WMI_10X_PDEV_PARAM_PROTECTION_MODE,
+ .dynamic_bw = WMI_10X_PDEV_PARAM_DYNAMIC_BW,
+ .non_agg_sw_retry_th = WMI_10X_PDEV_PARAM_NON_AGG_SW_RETRY_TH,
+ .agg_sw_retry_th = WMI_10X_PDEV_PARAM_AGG_SW_RETRY_TH,
+ .sta_kickout_th = WMI_10X_PDEV_PARAM_STA_KICKOUT_TH,
+ .ac_aggrsize_scaling = WMI_10X_PDEV_PARAM_AC_AGGRSIZE_SCALING,
+ .ltr_enable = WMI_10X_PDEV_PARAM_LTR_ENABLE,
+ .ltr_ac_latency_be = WMI_10X_PDEV_PARAM_LTR_AC_LATENCY_BE,
+ .ltr_ac_latency_bk = WMI_10X_PDEV_PARAM_LTR_AC_LATENCY_BK,
+ .ltr_ac_latency_vi = WMI_10X_PDEV_PARAM_LTR_AC_LATENCY_VI,
+ .ltr_ac_latency_vo = WMI_10X_PDEV_PARAM_LTR_AC_LATENCY_VO,
+ .ltr_ac_latency_timeout = WMI_10X_PDEV_PARAM_LTR_AC_LATENCY_TIMEOUT,
+ .ltr_sleep_override = WMI_10X_PDEV_PARAM_LTR_SLEEP_OVERRIDE,
+ .ltr_rx_override = WMI_10X_PDEV_PARAM_LTR_RX_OVERRIDE,
+ .ltr_tx_activity_timeout = WMI_10X_PDEV_PARAM_LTR_TX_ACTIVITY_TIMEOUT,
+ .l1ss_enable = WMI_10X_PDEV_PARAM_L1SS_ENABLE,
+ .dsleep_enable = WMI_10X_PDEV_PARAM_DSLEEP_ENABLE,
+ .pcielp_txbuf_flush = WMI_PDEV_PARAM_UNSUPPORTED,
+ .pcielp_txbuf_watermark = WMI_PDEV_PARAM_UNSUPPORTED,
+ .pcielp_txbuf_tmo_en = WMI_PDEV_PARAM_UNSUPPORTED,
+ .pcielp_txbuf_tmo_value = WMI_PDEV_PARAM_UNSUPPORTED,
+ .pdev_stats_update_period = WMI_10X_PDEV_PARAM_PDEV_STATS_UPDATE_PERIOD,
+ .vdev_stats_update_period = WMI_10X_PDEV_PARAM_VDEV_STATS_UPDATE_PERIOD,
+ .peer_stats_update_period = WMI_10X_PDEV_PARAM_PEER_STATS_UPDATE_PERIOD,
+ .bcnflt_stats_update_period =
+ WMI_10X_PDEV_PARAM_BCNFLT_STATS_UPDATE_PERIOD,
+ .pmf_qos = WMI_10X_PDEV_PARAM_PMF_QOS,
+ .arp_ac_override = WMI_10X_PDEV_PARAM_ARPDHCP_AC_OVERRIDE,
+ .dcs = WMI_10X_PDEV_PARAM_DCS,
+ .ani_enable = WMI_10X_PDEV_PARAM_ANI_ENABLE,
+ .ani_poll_period = WMI_10X_PDEV_PARAM_ANI_POLL_PERIOD,
+ .ani_listen_period = WMI_10X_PDEV_PARAM_ANI_LISTEN_PERIOD,
+ .ani_ofdm_level = WMI_10X_PDEV_PARAM_ANI_OFDM_LEVEL,
+ .ani_cck_level = WMI_10X_PDEV_PARAM_ANI_CCK_LEVEL,
+ .dyntxchain = WMI_10X_PDEV_PARAM_DYNTXCHAIN,
+ .proxy_sta = WMI_PDEV_PARAM_UNSUPPORTED,
+ .idle_ps_config = WMI_PDEV_PARAM_UNSUPPORTED,
+ .power_gating_sleep = WMI_PDEV_PARAM_UNSUPPORTED,
+ .fast_channel_reset = WMI_10X_PDEV_PARAM_FAST_CHANNEL_RESET,
+ .burst_dur = WMI_10X_PDEV_PARAM_BURST_DUR,
+ .burst_enable = WMI_10X_PDEV_PARAM_BURST_ENABLE,
+ .cal_period = WMI_10X_PDEV_PARAM_CAL_PERIOD,
+ .aggr_burst = WMI_PDEV_PARAM_UNSUPPORTED,
+ .rx_decap_mode = WMI_PDEV_PARAM_UNSUPPORTED,
+ .smart_antenna_default_antenna = WMI_PDEV_PARAM_UNSUPPORTED,
+ .igmpmld_override = WMI_PDEV_PARAM_UNSUPPORTED,
+ .igmpmld_tid = WMI_PDEV_PARAM_UNSUPPORTED,
+ .antenna_gain = WMI_PDEV_PARAM_UNSUPPORTED,
+ .rx_filter = WMI_PDEV_PARAM_UNSUPPORTED,
+ .set_mcast_to_ucast_tid = WMI_PDEV_PARAM_UNSUPPORTED,
+ .proxy_sta_mode = WMI_PDEV_PARAM_UNSUPPORTED,
+ .set_mcast2ucast_mode = WMI_PDEV_PARAM_UNSUPPORTED,
+ .set_mcast2ucast_buffer = WMI_PDEV_PARAM_UNSUPPORTED,
+ .remove_mcast2ucast_buffer = WMI_PDEV_PARAM_UNSUPPORTED,
+ .peer_sta_ps_statechg_enable = WMI_PDEV_PARAM_UNSUPPORTED,
+ .igmpmld_ac_override = WMI_PDEV_PARAM_UNSUPPORTED,
+ .block_interbss = WMI_PDEV_PARAM_UNSUPPORTED,
+ .set_disable_reset_cmdid = WMI_PDEV_PARAM_UNSUPPORTED,
+ .set_msdu_ttl_cmdid = WMI_PDEV_PARAM_UNSUPPORTED,
+ .set_ppdu_duration_cmdid = WMI_PDEV_PARAM_UNSUPPORTED,
+ .txbf_sound_period_cmdid = WMI_PDEV_PARAM_UNSUPPORTED,
+ .set_promisc_mode_cmdid = WMI_PDEV_PARAM_UNSUPPORTED,
+ .set_burst_mode_cmdid = WMI_PDEV_PARAM_UNSUPPORTED,
+ .en_stats = WMI_PDEV_PARAM_UNSUPPORTED,
+ .mu_group_policy = WMI_PDEV_PARAM_UNSUPPORTED,
+ .noise_detection = WMI_PDEV_PARAM_UNSUPPORTED,
+ .noise_threshold = WMI_PDEV_PARAM_UNSUPPORTED,
+ .dpd_enable = WMI_PDEV_PARAM_UNSUPPORTED,
+ .set_mcast_bcast_echo = WMI_PDEV_PARAM_UNSUPPORTED,
+ .atf_strict_sch = WMI_PDEV_PARAM_UNSUPPORTED,
+ .atf_sched_duration = WMI_PDEV_PARAM_UNSUPPORTED,
+ .ant_plzn = WMI_PDEV_PARAM_UNSUPPORTED,
+ .mgmt_retry_limit = WMI_PDEV_PARAM_UNSUPPORTED,
+ .sensitivity_level = WMI_PDEV_PARAM_UNSUPPORTED,
+ .signed_txpower_2g = WMI_PDEV_PARAM_UNSUPPORTED,
+ .signed_txpower_5g = WMI_PDEV_PARAM_UNSUPPORTED,
+ .enable_per_tid_amsdu = WMI_PDEV_PARAM_UNSUPPORTED,
+ .enable_per_tid_ampdu = WMI_PDEV_PARAM_UNSUPPORTED,
+ .cca_threshold = WMI_PDEV_PARAM_UNSUPPORTED,
+ .rts_fixed_rate = WMI_PDEV_PARAM_UNSUPPORTED,
+ .pdev_reset = WMI_PDEV_PARAM_UNSUPPORTED,
+ .wapi_mbssid_offset = WMI_PDEV_PARAM_UNSUPPORTED,
+ .arp_srcaddr = WMI_PDEV_PARAM_UNSUPPORTED,
+ .arp_dstaddr = WMI_PDEV_PARAM_UNSUPPORTED,
+ .enable_btcoex = WMI_PDEV_PARAM_UNSUPPORTED,
+};
+
+static struct wmi_pdev_param_map wmi_10_2_4_pdev_param_map = {
+ .tx_chain_mask = WMI_10X_PDEV_PARAM_TX_CHAIN_MASK,
+ .rx_chain_mask = WMI_10X_PDEV_PARAM_RX_CHAIN_MASK,
+ .txpower_limit2g = WMI_10X_PDEV_PARAM_TXPOWER_LIMIT2G,
+ .txpower_limit5g = WMI_10X_PDEV_PARAM_TXPOWER_LIMIT5G,
+ .txpower_scale = WMI_10X_PDEV_PARAM_TXPOWER_SCALE,
+ .beacon_gen_mode = WMI_10X_PDEV_PARAM_BEACON_GEN_MODE,
+ .beacon_tx_mode = WMI_10X_PDEV_PARAM_BEACON_TX_MODE,
+ .resmgr_offchan_mode = WMI_10X_PDEV_PARAM_RESMGR_OFFCHAN_MODE,
+ .protection_mode = WMI_10X_PDEV_PARAM_PROTECTION_MODE,
+ .dynamic_bw = WMI_10X_PDEV_PARAM_DYNAMIC_BW,
+ .non_agg_sw_retry_th = WMI_10X_PDEV_PARAM_NON_AGG_SW_RETRY_TH,
+ .agg_sw_retry_th = WMI_10X_PDEV_PARAM_AGG_SW_RETRY_TH,
+ .sta_kickout_th = WMI_10X_PDEV_PARAM_STA_KICKOUT_TH,
+ .ac_aggrsize_scaling = WMI_10X_PDEV_PARAM_AC_AGGRSIZE_SCALING,
+ .ltr_enable = WMI_10X_PDEV_PARAM_LTR_ENABLE,
+ .ltr_ac_latency_be = WMI_10X_PDEV_PARAM_LTR_AC_LATENCY_BE,
+ .ltr_ac_latency_bk = WMI_10X_PDEV_PARAM_LTR_AC_LATENCY_BK,
+ .ltr_ac_latency_vi = WMI_10X_PDEV_PARAM_LTR_AC_LATENCY_VI,
+ .ltr_ac_latency_vo = WMI_10X_PDEV_PARAM_LTR_AC_LATENCY_VO,
+ .ltr_ac_latency_timeout = WMI_10X_PDEV_PARAM_LTR_AC_LATENCY_TIMEOUT,
+ .ltr_sleep_override = WMI_10X_PDEV_PARAM_LTR_SLEEP_OVERRIDE,
+ .ltr_rx_override = WMI_10X_PDEV_PARAM_LTR_RX_OVERRIDE,
+ .ltr_tx_activity_timeout = WMI_10X_PDEV_PARAM_LTR_TX_ACTIVITY_TIMEOUT,
+ .l1ss_enable = WMI_10X_PDEV_PARAM_L1SS_ENABLE,
+ .dsleep_enable = WMI_10X_PDEV_PARAM_DSLEEP_ENABLE,
+ .pcielp_txbuf_flush = WMI_PDEV_PARAM_UNSUPPORTED,
+ .pcielp_txbuf_watermark = WMI_PDEV_PARAM_UNSUPPORTED,
+ .pcielp_txbuf_tmo_en = WMI_PDEV_PARAM_UNSUPPORTED,
+ .pcielp_txbuf_tmo_value = WMI_PDEV_PARAM_UNSUPPORTED,
+ .pdev_stats_update_period = WMI_10X_PDEV_PARAM_PDEV_STATS_UPDATE_PERIOD,
+ .vdev_stats_update_period = WMI_10X_PDEV_PARAM_VDEV_STATS_UPDATE_PERIOD,
+ .peer_stats_update_period = WMI_10X_PDEV_PARAM_PEER_STATS_UPDATE_PERIOD,
+ .bcnflt_stats_update_period =
+ WMI_10X_PDEV_PARAM_BCNFLT_STATS_UPDATE_PERIOD,
+ .pmf_qos = WMI_10X_PDEV_PARAM_PMF_QOS,
+ .arp_ac_override = WMI_10X_PDEV_PARAM_ARPDHCP_AC_OVERRIDE,
+ .dcs = WMI_10X_PDEV_PARAM_DCS,
+ .ani_enable = WMI_10X_PDEV_PARAM_ANI_ENABLE,
+ .ani_poll_period = WMI_10X_PDEV_PARAM_ANI_POLL_PERIOD,
+ .ani_listen_period = WMI_10X_PDEV_PARAM_ANI_LISTEN_PERIOD,
+ .ani_ofdm_level = WMI_10X_PDEV_PARAM_ANI_OFDM_LEVEL,
+ .ani_cck_level = WMI_10X_PDEV_PARAM_ANI_CCK_LEVEL,
+ .dyntxchain = WMI_10X_PDEV_PARAM_DYNTXCHAIN,
+ .proxy_sta = WMI_PDEV_PARAM_UNSUPPORTED,
+ .idle_ps_config = WMI_PDEV_PARAM_UNSUPPORTED,
+ .power_gating_sleep = WMI_PDEV_PARAM_UNSUPPORTED,
+ .fast_channel_reset = WMI_10X_PDEV_PARAM_FAST_CHANNEL_RESET,
+ .burst_dur = WMI_10X_PDEV_PARAM_BURST_DUR,
+ .burst_enable = WMI_10X_PDEV_PARAM_BURST_ENABLE,
+ .cal_period = WMI_10X_PDEV_PARAM_CAL_PERIOD,
+ .aggr_burst = WMI_PDEV_PARAM_UNSUPPORTED,
+ .rx_decap_mode = WMI_PDEV_PARAM_UNSUPPORTED,
+ .smart_antenna_default_antenna = WMI_PDEV_PARAM_UNSUPPORTED,
+ .igmpmld_override = WMI_PDEV_PARAM_UNSUPPORTED,
+ .igmpmld_tid = WMI_PDEV_PARAM_UNSUPPORTED,
+ .antenna_gain = WMI_PDEV_PARAM_UNSUPPORTED,
+ .rx_filter = WMI_PDEV_PARAM_UNSUPPORTED,
+ .set_mcast_to_ucast_tid = WMI_PDEV_PARAM_UNSUPPORTED,
+ .proxy_sta_mode = WMI_PDEV_PARAM_UNSUPPORTED,
+ .set_mcast2ucast_mode = WMI_PDEV_PARAM_UNSUPPORTED,
+ .set_mcast2ucast_buffer = WMI_PDEV_PARAM_UNSUPPORTED,
+ .remove_mcast2ucast_buffer = WMI_PDEV_PARAM_UNSUPPORTED,
+ .peer_sta_ps_statechg_enable = WMI_PDEV_PARAM_UNSUPPORTED,
+ .igmpmld_ac_override = WMI_PDEV_PARAM_UNSUPPORTED,
+ .block_interbss = WMI_PDEV_PARAM_UNSUPPORTED,
+ .set_disable_reset_cmdid = WMI_PDEV_PARAM_UNSUPPORTED,
+ .set_msdu_ttl_cmdid = WMI_PDEV_PARAM_UNSUPPORTED,
+ .set_ppdu_duration_cmdid = WMI_PDEV_PARAM_UNSUPPORTED,
+ .txbf_sound_period_cmdid = WMI_PDEV_PARAM_UNSUPPORTED,
+ .set_promisc_mode_cmdid = WMI_PDEV_PARAM_UNSUPPORTED,
+ .set_burst_mode_cmdid = WMI_PDEV_PARAM_UNSUPPORTED,
+ .en_stats = WMI_PDEV_PARAM_UNSUPPORTED,
+ .mu_group_policy = WMI_PDEV_PARAM_UNSUPPORTED,
+ .noise_detection = WMI_PDEV_PARAM_UNSUPPORTED,
+ .noise_threshold = WMI_PDEV_PARAM_UNSUPPORTED,
+ .dpd_enable = WMI_PDEV_PARAM_UNSUPPORTED,
+ .set_mcast_bcast_echo = WMI_PDEV_PARAM_UNSUPPORTED,
+ .atf_strict_sch = WMI_PDEV_PARAM_UNSUPPORTED,
+ .atf_sched_duration = WMI_PDEV_PARAM_UNSUPPORTED,
+ .ant_plzn = WMI_PDEV_PARAM_UNSUPPORTED,
+ .mgmt_retry_limit = WMI_PDEV_PARAM_UNSUPPORTED,
+ .sensitivity_level = WMI_PDEV_PARAM_UNSUPPORTED,
+ .signed_txpower_2g = WMI_PDEV_PARAM_UNSUPPORTED,
+ .signed_txpower_5g = WMI_PDEV_PARAM_UNSUPPORTED,
+ .enable_per_tid_amsdu = WMI_PDEV_PARAM_UNSUPPORTED,
+ .enable_per_tid_ampdu = WMI_PDEV_PARAM_UNSUPPORTED,
+ .cca_threshold = WMI_PDEV_PARAM_UNSUPPORTED,
+ .rts_fixed_rate = WMI_PDEV_PARAM_UNSUPPORTED,
+ .pdev_reset = WMI_10X_PDEV_PARAM_PDEV_RESET,
+ .wapi_mbssid_offset = WMI_PDEV_PARAM_UNSUPPORTED,
+ .arp_srcaddr = WMI_PDEV_PARAM_UNSUPPORTED,
+ .arp_dstaddr = WMI_PDEV_PARAM_UNSUPPORTED,
+ .enable_btcoex = WMI_PDEV_PARAM_UNSUPPORTED,
+};
+
+/* firmware 10.2 specific mappings */
+static struct wmi_cmd_map wmi_10_2_cmd_map = {
+ .init_cmdid = WMI_10_2_INIT_CMDID,
+ .start_scan_cmdid = WMI_10_2_START_SCAN_CMDID,
+ .stop_scan_cmdid = WMI_10_2_STOP_SCAN_CMDID,
+ .scan_chan_list_cmdid = WMI_10_2_SCAN_CHAN_LIST_CMDID,
+ .scan_sch_prio_tbl_cmdid = WMI_CMD_UNSUPPORTED,
+ .scan_prob_req_oui_cmdid = WMI_CMD_UNSUPPORTED,
+ .pdev_set_regdomain_cmdid = WMI_10_2_PDEV_SET_REGDOMAIN_CMDID,
+ .pdev_set_channel_cmdid = WMI_10_2_PDEV_SET_CHANNEL_CMDID,
+ .pdev_set_param_cmdid = WMI_10_2_PDEV_SET_PARAM_CMDID,
+ .pdev_pktlog_enable_cmdid = WMI_10_2_PDEV_PKTLOG_ENABLE_CMDID,
+ .pdev_pktlog_disable_cmdid = WMI_10_2_PDEV_PKTLOG_DISABLE_CMDID,
+ .pdev_set_wmm_params_cmdid = WMI_10_2_PDEV_SET_WMM_PARAMS_CMDID,
+ .pdev_set_ht_cap_ie_cmdid = WMI_10_2_PDEV_SET_HT_CAP_IE_CMDID,
+ .pdev_set_vht_cap_ie_cmdid = WMI_10_2_PDEV_SET_VHT_CAP_IE_CMDID,
+ .pdev_set_quiet_mode_cmdid = WMI_10_2_PDEV_SET_QUIET_MODE_CMDID,
+ .pdev_green_ap_ps_enable_cmdid = WMI_10_2_PDEV_GREEN_AP_PS_ENABLE_CMDID,
+ .pdev_get_tpc_config_cmdid = WMI_10_2_PDEV_GET_TPC_CONFIG_CMDID,
+ .pdev_set_base_macaddr_cmdid = WMI_10_2_PDEV_SET_BASE_MACADDR_CMDID,
+ .vdev_create_cmdid = WMI_10_2_VDEV_CREATE_CMDID,
+ .vdev_delete_cmdid = WMI_10_2_VDEV_DELETE_CMDID,
+ .vdev_start_request_cmdid = WMI_10_2_VDEV_START_REQUEST_CMDID,
+ .vdev_restart_request_cmdid = WMI_10_2_VDEV_RESTART_REQUEST_CMDID,
+ .vdev_up_cmdid = WMI_10_2_VDEV_UP_CMDID,
+ .vdev_stop_cmdid = WMI_10_2_VDEV_STOP_CMDID,
+ .vdev_down_cmdid = WMI_10_2_VDEV_DOWN_CMDID,
+ .vdev_set_param_cmdid = WMI_10_2_VDEV_SET_PARAM_CMDID,
+ .vdev_install_key_cmdid = WMI_10_2_VDEV_INSTALL_KEY_CMDID,
+ .peer_create_cmdid = WMI_10_2_PEER_CREATE_CMDID,
+ .peer_delete_cmdid = WMI_10_2_PEER_DELETE_CMDID,
+ .peer_flush_tids_cmdid = WMI_10_2_PEER_FLUSH_TIDS_CMDID,
+ .peer_set_param_cmdid = WMI_10_2_PEER_SET_PARAM_CMDID,
+ .peer_assoc_cmdid = WMI_10_2_PEER_ASSOC_CMDID,
+ .peer_add_wds_entry_cmdid = WMI_10_2_PEER_ADD_WDS_ENTRY_CMDID,
+ .peer_remove_wds_entry_cmdid = WMI_10_2_PEER_REMOVE_WDS_ENTRY_CMDID,
+ .peer_mcast_group_cmdid = WMI_10_2_PEER_MCAST_GROUP_CMDID,
+ .bcn_tx_cmdid = WMI_10_2_BCN_TX_CMDID,
+ .pdev_send_bcn_cmdid = WMI_10_2_PDEV_SEND_BCN_CMDID,
+ .bcn_tmpl_cmdid = WMI_CMD_UNSUPPORTED,
+ .bcn_filter_rx_cmdid = WMI_10_2_BCN_FILTER_RX_CMDID,
+ .prb_req_filter_rx_cmdid = WMI_10_2_PRB_REQ_FILTER_RX_CMDID,
+ .mgmt_tx_cmdid = WMI_10_2_MGMT_TX_CMDID,
+ .prb_tmpl_cmdid = WMI_CMD_UNSUPPORTED,
+ .addba_clear_resp_cmdid = WMI_10_2_ADDBA_CLEAR_RESP_CMDID,
+ .addba_send_cmdid = WMI_10_2_ADDBA_SEND_CMDID,
+ .addba_status_cmdid = WMI_10_2_ADDBA_STATUS_CMDID,
+ .delba_send_cmdid = WMI_10_2_DELBA_SEND_CMDID,
+ .addba_set_resp_cmdid = WMI_10_2_ADDBA_SET_RESP_CMDID,
+ .send_singleamsdu_cmdid = WMI_10_2_SEND_SINGLEAMSDU_CMDID,
+ .sta_powersave_mode_cmdid = WMI_10_2_STA_POWERSAVE_MODE_CMDID,
+ .sta_powersave_param_cmdid = WMI_10_2_STA_POWERSAVE_PARAM_CMDID,
+ .sta_mimo_ps_mode_cmdid = WMI_10_2_STA_MIMO_PS_MODE_CMDID,
+ .pdev_dfs_enable_cmdid = WMI_10_2_PDEV_DFS_ENABLE_CMDID,
+ .pdev_dfs_disable_cmdid = WMI_10_2_PDEV_DFS_DISABLE_CMDID,
+ .roam_scan_mode = WMI_10_2_ROAM_SCAN_MODE,
+ .roam_scan_rssi_threshold = WMI_10_2_ROAM_SCAN_RSSI_THRESHOLD,
+ .roam_scan_period = WMI_10_2_ROAM_SCAN_PERIOD,
+ .roam_scan_rssi_change_threshold =
+ WMI_10_2_ROAM_SCAN_RSSI_CHANGE_THRESHOLD,
+ .roam_ap_profile = WMI_10_2_ROAM_AP_PROFILE,
+ .ofl_scan_add_ap_profile = WMI_10_2_OFL_SCAN_ADD_AP_PROFILE,
+ .ofl_scan_remove_ap_profile = WMI_10_2_OFL_SCAN_REMOVE_AP_PROFILE,
+ .ofl_scan_period = WMI_10_2_OFL_SCAN_PERIOD,
+ .p2p_dev_set_device_info = WMI_10_2_P2P_DEV_SET_DEVICE_INFO,
+ .p2p_dev_set_discoverability = WMI_10_2_P2P_DEV_SET_DISCOVERABILITY,
+ .p2p_go_set_beacon_ie = WMI_10_2_P2P_GO_SET_BEACON_IE,
+ .p2p_go_set_probe_resp_ie = WMI_10_2_P2P_GO_SET_PROBE_RESP_IE,
+ .p2p_set_vendor_ie_data_cmdid = WMI_CMD_UNSUPPORTED,
+ .ap_ps_peer_param_cmdid = WMI_10_2_AP_PS_PEER_PARAM_CMDID,
+ .ap_ps_peer_uapsd_coex_cmdid = WMI_CMD_UNSUPPORTED,
+ .peer_rate_retry_sched_cmdid = WMI_10_2_PEER_RATE_RETRY_SCHED_CMDID,
+ .wlan_profile_trigger_cmdid = WMI_10_2_WLAN_PROFILE_TRIGGER_CMDID,
+ .wlan_profile_set_hist_intvl_cmdid =
+ WMI_10_2_WLAN_PROFILE_SET_HIST_INTVL_CMDID,
+ .wlan_profile_get_profile_data_cmdid =
+ WMI_10_2_WLAN_PROFILE_GET_PROFILE_DATA_CMDID,
+ .wlan_profile_enable_profile_id_cmdid =
+ WMI_10_2_WLAN_PROFILE_ENABLE_PROFILE_ID_CMDID,
+ .wlan_profile_list_profile_id_cmdid =
+ WMI_10_2_WLAN_PROFILE_LIST_PROFILE_ID_CMDID,
+ .pdev_suspend_cmdid = WMI_10_2_PDEV_SUSPEND_CMDID,
+ .pdev_resume_cmdid = WMI_10_2_PDEV_RESUME_CMDID,
+ .add_bcn_filter_cmdid = WMI_10_2_ADD_BCN_FILTER_CMDID,
+ .rmv_bcn_filter_cmdid = WMI_10_2_RMV_BCN_FILTER_CMDID,
+ .wow_add_wake_pattern_cmdid = WMI_10_2_WOW_ADD_WAKE_PATTERN_CMDID,
+ .wow_del_wake_pattern_cmdid = WMI_10_2_WOW_DEL_WAKE_PATTERN_CMDID,
+ .wow_enable_disable_wake_event_cmdid =
+ WMI_10_2_WOW_ENABLE_DISABLE_WAKE_EVENT_CMDID,
+ .wow_enable_cmdid = WMI_10_2_WOW_ENABLE_CMDID,
+ .wow_hostwakeup_from_sleep_cmdid =
+ WMI_10_2_WOW_HOSTWAKEUP_FROM_SLEEP_CMDID,
+ .rtt_measreq_cmdid = WMI_10_2_RTT_MEASREQ_CMDID,
+ .rtt_tsf_cmdid = WMI_10_2_RTT_TSF_CMDID,
+ .vdev_spectral_scan_configure_cmdid =
+ WMI_10_2_VDEV_SPECTRAL_SCAN_CONFIGURE_CMDID,
+ .vdev_spectral_scan_enable_cmdid =
+ WMI_10_2_VDEV_SPECTRAL_SCAN_ENABLE_CMDID,
+ .request_stats_cmdid = WMI_10_2_REQUEST_STATS_CMDID,
+ .set_arp_ns_offload_cmdid = WMI_CMD_UNSUPPORTED,
+ .network_list_offload_config_cmdid = WMI_CMD_UNSUPPORTED,
+ .gtk_offload_cmdid = WMI_CMD_UNSUPPORTED,
+ .csa_offload_enable_cmdid = WMI_CMD_UNSUPPORTED,
+ .csa_offload_chanswitch_cmdid = WMI_CMD_UNSUPPORTED,
+ .chatter_set_mode_cmdid = WMI_CMD_UNSUPPORTED,
+ .peer_tid_addba_cmdid = WMI_CMD_UNSUPPORTED,
+ .peer_tid_delba_cmdid = WMI_CMD_UNSUPPORTED,
+ .sta_dtim_ps_method_cmdid = WMI_CMD_UNSUPPORTED,
+ .sta_uapsd_auto_trig_cmdid = WMI_CMD_UNSUPPORTED,
+ .sta_keepalive_cmd = WMI_CMD_UNSUPPORTED,
+ .echo_cmdid = WMI_10_2_ECHO_CMDID,
+ .pdev_utf_cmdid = WMI_10_2_PDEV_UTF_CMDID,
+ .dbglog_cfg_cmdid = WMI_10_2_DBGLOG_CFG_CMDID,
+ .pdev_qvit_cmdid = WMI_10_2_PDEV_QVIT_CMDID,
+ .pdev_ftm_intg_cmdid = WMI_CMD_UNSUPPORTED,
+ .vdev_set_keepalive_cmdid = WMI_CMD_UNSUPPORTED,
+ .vdev_get_keepalive_cmdid = WMI_CMD_UNSUPPORTED,
+ .force_fw_hang_cmdid = WMI_CMD_UNSUPPORTED,
+ .gpio_config_cmdid = WMI_10_2_GPIO_CONFIG_CMDID,
+ .gpio_output_cmdid = WMI_10_2_GPIO_OUTPUT_CMDID,
+ .pdev_get_temperature_cmdid = WMI_CMD_UNSUPPORTED,
+ .pdev_enable_adaptive_cca_cmdid = WMI_CMD_UNSUPPORTED,
+ .scan_update_request_cmdid = WMI_CMD_UNSUPPORTED,
+ .vdev_standby_response_cmdid = WMI_CMD_UNSUPPORTED,
+ .vdev_resume_response_cmdid = WMI_CMD_UNSUPPORTED,
+ .wlan_peer_caching_add_peer_cmdid = WMI_CMD_UNSUPPORTED,
+ .wlan_peer_caching_evict_peer_cmdid = WMI_CMD_UNSUPPORTED,
+ .wlan_peer_caching_restore_peer_cmdid = WMI_CMD_UNSUPPORTED,
+ .wlan_peer_caching_print_all_peers_info_cmdid = WMI_CMD_UNSUPPORTED,
+ .peer_update_wds_entry_cmdid = WMI_CMD_UNSUPPORTED,
+ .peer_add_proxy_sta_entry_cmdid = WMI_CMD_UNSUPPORTED,
+ .rtt_keepalive_cmdid = WMI_CMD_UNSUPPORTED,
+ .oem_req_cmdid = WMI_CMD_UNSUPPORTED,
+ .nan_cmdid = WMI_CMD_UNSUPPORTED,
+ .vdev_ratemask_cmdid = WMI_CMD_UNSUPPORTED,
+ .qboost_cfg_cmdid = WMI_CMD_UNSUPPORTED,
+ .pdev_smart_ant_enable_cmdid = WMI_CMD_UNSUPPORTED,
+ .pdev_smart_ant_set_rx_antenna_cmdid = WMI_CMD_UNSUPPORTED,
+ .peer_smart_ant_set_tx_antenna_cmdid = WMI_CMD_UNSUPPORTED,
+ .peer_smart_ant_set_train_info_cmdid = WMI_CMD_UNSUPPORTED,
+ .peer_smart_ant_set_node_config_ops_cmdid = WMI_CMD_UNSUPPORTED,
+ .pdev_set_antenna_switch_table_cmdid = WMI_CMD_UNSUPPORTED,
+ .pdev_set_ctl_table_cmdid = WMI_CMD_UNSUPPORTED,
+ .pdev_set_mimogain_table_cmdid = WMI_CMD_UNSUPPORTED,
+ .pdev_ratepwr_table_cmdid = WMI_CMD_UNSUPPORTED,
+ .pdev_ratepwr_chainmsk_table_cmdid = WMI_CMD_UNSUPPORTED,
+ .pdev_fips_cmdid = WMI_CMD_UNSUPPORTED,
+ .tt_set_conf_cmdid = WMI_CMD_UNSUPPORTED,
+ .fwtest_cmdid = WMI_CMD_UNSUPPORTED,
+ .vdev_atf_request_cmdid = WMI_CMD_UNSUPPORTED,
+ .peer_atf_request_cmdid = WMI_CMD_UNSUPPORTED,
+ .pdev_get_ani_cck_config_cmdid = WMI_CMD_UNSUPPORTED,
+ .pdev_get_ani_ofdm_config_cmdid = WMI_CMD_UNSUPPORTED,
+ .pdev_reserve_ast_entry_cmdid = WMI_CMD_UNSUPPORTED,
+ .pdev_get_tpc_table_cmdid = WMI_CMD_UNSUPPORTED,
+ .radar_found_cmdid = WMI_CMD_UNSUPPORTED,
+};
+
+static struct wmi_pdev_param_map wmi_10_4_pdev_param_map = {
+ .tx_chain_mask = WMI_10_4_PDEV_PARAM_TX_CHAIN_MASK,
+ .rx_chain_mask = WMI_10_4_PDEV_PARAM_RX_CHAIN_MASK,
+ .txpower_limit2g = WMI_10_4_PDEV_PARAM_TXPOWER_LIMIT2G,
+ .txpower_limit5g = WMI_10_4_PDEV_PARAM_TXPOWER_LIMIT5G,
+ .txpower_scale = WMI_10_4_PDEV_PARAM_TXPOWER_SCALE,
+ .beacon_gen_mode = WMI_10_4_PDEV_PARAM_BEACON_GEN_MODE,
+ .beacon_tx_mode = WMI_10_4_PDEV_PARAM_BEACON_TX_MODE,
+ .resmgr_offchan_mode = WMI_10_4_PDEV_PARAM_RESMGR_OFFCHAN_MODE,
+ .protection_mode = WMI_10_4_PDEV_PARAM_PROTECTION_MODE,
+ .dynamic_bw = WMI_10_4_PDEV_PARAM_DYNAMIC_BW,
+ .non_agg_sw_retry_th = WMI_10_4_PDEV_PARAM_NON_AGG_SW_RETRY_TH,
+ .agg_sw_retry_th = WMI_10_4_PDEV_PARAM_AGG_SW_RETRY_TH,
+ .sta_kickout_th = WMI_10_4_PDEV_PARAM_STA_KICKOUT_TH,
+ .ac_aggrsize_scaling = WMI_10_4_PDEV_PARAM_AC_AGGRSIZE_SCALING,
+ .ltr_enable = WMI_10_4_PDEV_PARAM_LTR_ENABLE,
+ .ltr_ac_latency_be = WMI_10_4_PDEV_PARAM_LTR_AC_LATENCY_BE,
+ .ltr_ac_latency_bk = WMI_10_4_PDEV_PARAM_LTR_AC_LATENCY_BK,
+ .ltr_ac_latency_vi = WMI_10_4_PDEV_PARAM_LTR_AC_LATENCY_VI,
+ .ltr_ac_latency_vo = WMI_10_4_PDEV_PARAM_LTR_AC_LATENCY_VO,
+ .ltr_ac_latency_timeout = WMI_10_4_PDEV_PARAM_LTR_AC_LATENCY_TIMEOUT,
+ .ltr_sleep_override = WMI_10_4_PDEV_PARAM_LTR_SLEEP_OVERRIDE,
+ .ltr_rx_override = WMI_10_4_PDEV_PARAM_LTR_RX_OVERRIDE,
+ .ltr_tx_activity_timeout = WMI_10_4_PDEV_PARAM_LTR_TX_ACTIVITY_TIMEOUT,
+ .l1ss_enable = WMI_10_4_PDEV_PARAM_L1SS_ENABLE,
+ .dsleep_enable = WMI_10_4_PDEV_PARAM_DSLEEP_ENABLE,
+ .pcielp_txbuf_flush = WMI_10_4_PDEV_PARAM_PCIELP_TXBUF_FLUSH,
+ .pcielp_txbuf_watermark = WMI_10_4_PDEV_PARAM_PCIELP_TXBUF_WATERMARK,
+ .pcielp_txbuf_tmo_en = WMI_10_4_PDEV_PARAM_PCIELP_TXBUF_TMO_EN,
+ .pcielp_txbuf_tmo_value = WMI_10_4_PDEV_PARAM_PCIELP_TXBUF_TMO_VALUE,
+ .pdev_stats_update_period =
+ WMI_10_4_PDEV_PARAM_PDEV_STATS_UPDATE_PERIOD,
+ .vdev_stats_update_period =
+ WMI_10_4_PDEV_PARAM_VDEV_STATS_UPDATE_PERIOD,
+ .peer_stats_update_period =
+ WMI_10_4_PDEV_PARAM_PEER_STATS_UPDATE_PERIOD,
+ .bcnflt_stats_update_period =
+ WMI_10_4_PDEV_PARAM_BCNFLT_STATS_UPDATE_PERIOD,
+ .pmf_qos = WMI_10_4_PDEV_PARAM_PMF_QOS,
+ .arp_ac_override = WMI_10_4_PDEV_PARAM_ARP_AC_OVERRIDE,
+ .dcs = WMI_10_4_PDEV_PARAM_DCS,
+ .ani_enable = WMI_10_4_PDEV_PARAM_ANI_ENABLE,
+ .ani_poll_period = WMI_10_4_PDEV_PARAM_ANI_POLL_PERIOD,
+ .ani_listen_period = WMI_10_4_PDEV_PARAM_ANI_LISTEN_PERIOD,
+ .ani_ofdm_level = WMI_10_4_PDEV_PARAM_ANI_OFDM_LEVEL,
+ .ani_cck_level = WMI_10_4_PDEV_PARAM_ANI_CCK_LEVEL,
+ .dyntxchain = WMI_10_4_PDEV_PARAM_DYNTXCHAIN,
+ .proxy_sta = WMI_10_4_PDEV_PARAM_PROXY_STA,
+ .idle_ps_config = WMI_10_4_PDEV_PARAM_IDLE_PS_CONFIG,
+ .power_gating_sleep = WMI_10_4_PDEV_PARAM_POWER_GATING_SLEEP,
+ .fast_channel_reset = WMI_10_4_PDEV_PARAM_FAST_CHANNEL_RESET,
+ .burst_dur = WMI_10_4_PDEV_PARAM_BURST_DUR,
+ .burst_enable = WMI_10_4_PDEV_PARAM_BURST_ENABLE,
+ .cal_period = WMI_10_4_PDEV_PARAM_CAL_PERIOD,
+ .aggr_burst = WMI_10_4_PDEV_PARAM_AGGR_BURST,
+ .rx_decap_mode = WMI_10_4_PDEV_PARAM_RX_DECAP_MODE,
+ .smart_antenna_default_antenna =
+ WMI_10_4_PDEV_PARAM_SMART_ANTENNA_DEFAULT_ANTENNA,
+ .igmpmld_override = WMI_10_4_PDEV_PARAM_IGMPMLD_OVERRIDE,
+ .igmpmld_tid = WMI_10_4_PDEV_PARAM_IGMPMLD_TID,
+ .antenna_gain = WMI_10_4_PDEV_PARAM_ANTENNA_GAIN,
+ .rx_filter = WMI_10_4_PDEV_PARAM_RX_FILTER,
+ .set_mcast_to_ucast_tid = WMI_10_4_PDEV_SET_MCAST_TO_UCAST_TID,
+ .proxy_sta_mode = WMI_10_4_PDEV_PARAM_PROXY_STA_MODE,
+ .set_mcast2ucast_mode = WMI_10_4_PDEV_PARAM_SET_MCAST2UCAST_MODE,
+ .set_mcast2ucast_buffer = WMI_10_4_PDEV_PARAM_SET_MCAST2UCAST_BUFFER,
+ .remove_mcast2ucast_buffer =
+ WMI_10_4_PDEV_PARAM_REMOVE_MCAST2UCAST_BUFFER,
+ .peer_sta_ps_statechg_enable =
+ WMI_10_4_PDEV_PEER_STA_PS_STATECHG_ENABLE,
+ .igmpmld_ac_override = WMI_10_4_PDEV_PARAM_IGMPMLD_AC_OVERRIDE,
+ .block_interbss = WMI_10_4_PDEV_PARAM_BLOCK_INTERBSS,
+ .set_disable_reset_cmdid = WMI_10_4_PDEV_PARAM_SET_DISABLE_RESET_CMDID,
+ .set_msdu_ttl_cmdid = WMI_10_4_PDEV_PARAM_SET_MSDU_TTL_CMDID,
+ .set_ppdu_duration_cmdid = WMI_10_4_PDEV_PARAM_SET_PPDU_DURATION_CMDID,
+ .txbf_sound_period_cmdid = WMI_10_4_PDEV_PARAM_TXBF_SOUND_PERIOD_CMDID,
+ .set_promisc_mode_cmdid = WMI_10_4_PDEV_PARAM_SET_PROMISC_MODE_CMDID,
+ .set_burst_mode_cmdid = WMI_10_4_PDEV_PARAM_SET_BURST_MODE_CMDID,
+ .en_stats = WMI_10_4_PDEV_PARAM_EN_STATS,
+ .mu_group_policy = WMI_10_4_PDEV_PARAM_MU_GROUP_POLICY,
+ .noise_detection = WMI_10_4_PDEV_PARAM_NOISE_DETECTION,
+ .noise_threshold = WMI_10_4_PDEV_PARAM_NOISE_THRESHOLD,
+ .dpd_enable = WMI_10_4_PDEV_PARAM_DPD_ENABLE,
+ .set_mcast_bcast_echo = WMI_10_4_PDEV_PARAM_SET_MCAST_BCAST_ECHO,
+ .atf_strict_sch = WMI_10_4_PDEV_PARAM_ATF_STRICT_SCH,
+ .atf_sched_duration = WMI_10_4_PDEV_PARAM_ATF_SCHED_DURATION,
+ .ant_plzn = WMI_10_4_PDEV_PARAM_ANT_PLZN,
+ .mgmt_retry_limit = WMI_10_4_PDEV_PARAM_MGMT_RETRY_LIMIT,
+ .sensitivity_level = WMI_10_4_PDEV_PARAM_SENSITIVITY_LEVEL,
+ .signed_txpower_2g = WMI_10_4_PDEV_PARAM_SIGNED_TXPOWER_2G,
+ .signed_txpower_5g = WMI_10_4_PDEV_PARAM_SIGNED_TXPOWER_5G,
+ .enable_per_tid_amsdu = WMI_10_4_PDEV_PARAM_ENABLE_PER_TID_AMSDU,
+ .enable_per_tid_ampdu = WMI_10_4_PDEV_PARAM_ENABLE_PER_TID_AMPDU,
+ .cca_threshold = WMI_10_4_PDEV_PARAM_CCA_THRESHOLD,
+ .rts_fixed_rate = WMI_10_4_PDEV_PARAM_RTS_FIXED_RATE,
+ .pdev_reset = WMI_10_4_PDEV_PARAM_PDEV_RESET,
+ .wapi_mbssid_offset = WMI_10_4_PDEV_PARAM_WAPI_MBSSID_OFFSET,
+ .arp_srcaddr = WMI_10_4_PDEV_PARAM_ARP_SRCADDR,
+ .arp_dstaddr = WMI_10_4_PDEV_PARAM_ARP_DSTADDR,
+ .enable_btcoex = WMI_10_4_PDEV_PARAM_ENABLE_BTCOEX,
+};
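+
+/* pdev parameters are mapped the same way and reached through
+ * ar->wmi.pdev_param, e.g. (illustrative):
+ *
+ *   ret = ath10k_wmi_pdev_set_param(ar, ar->wmi.pdev_param->pmf_qos, 1);
+ */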
+
+static const struct wmi_peer_flags_map wmi_peer_flags_map = {
+ .auth = WMI_PEER_AUTH,
+ .qos = WMI_PEER_QOS,
+ .need_ptk_4_way = WMI_PEER_NEED_PTK_4_WAY,
+ .need_gtk_2_way = WMI_PEER_NEED_GTK_2_WAY,
+ .apsd = WMI_PEER_APSD,
+ .ht = WMI_PEER_HT,
+ .bw40 = WMI_PEER_40MHZ,
+ .stbc = WMI_PEER_STBC,
+ .ldbc = WMI_PEER_LDPC,
+ .dyn_mimops = WMI_PEER_DYN_MIMOPS,
+ .static_mimops = WMI_PEER_STATIC_MIMOPS,
+ .spatial_mux = WMI_PEER_SPATIAL_MUX,
+ .vht = WMI_PEER_VHT,
+ .bw80 = WMI_PEER_80MHZ,
+ .vht_2g = WMI_PEER_VHT_2G,
+ .pmf = WMI_PEER_PMF,
+ .bw160 = WMI_PEER_160MHZ,
+};
+
+static const struct wmi_peer_flags_map wmi_10x_peer_flags_map = {
+ .auth = WMI_10X_PEER_AUTH,
+ .qos = WMI_10X_PEER_QOS,
+ .need_ptk_4_way = WMI_10X_PEER_NEED_PTK_4_WAY,
+ .need_gtk_2_way = WMI_10X_PEER_NEED_GTK_2_WAY,
+ .apsd = WMI_10X_PEER_APSD,
+ .ht = WMI_10X_PEER_HT,
+ .bw40 = WMI_10X_PEER_40MHZ,
+ .stbc = WMI_10X_PEER_STBC,
+ .ldbc = WMI_10X_PEER_LDPC,
+ .dyn_mimops = WMI_10X_PEER_DYN_MIMOPS,
+ .static_mimops = WMI_10X_PEER_STATIC_MIMOPS,
+ .spatial_mux = WMI_10X_PEER_SPATIAL_MUX,
+ .vht = WMI_10X_PEER_VHT,
+ .bw80 = WMI_10X_PEER_80MHZ,
+ .bw160 = WMI_10X_PEER_160MHZ,
+};
+
+static const struct wmi_peer_flags_map wmi_10_2_peer_flags_map = {
+ .auth = WMI_10_2_PEER_AUTH,
+ .qos = WMI_10_2_PEER_QOS,
+ .need_ptk_4_way = WMI_10_2_PEER_NEED_PTK_4_WAY,
+ .need_gtk_2_way = WMI_10_2_PEER_NEED_GTK_2_WAY,
+ .apsd = WMI_10_2_PEER_APSD,
+ .ht = WMI_10_2_PEER_HT,
+ .bw40 = WMI_10_2_PEER_40MHZ,
+ .stbc = WMI_10_2_PEER_STBC,
+ .ldbc = WMI_10_2_PEER_LDPC,
+ .dyn_mimops = WMI_10_2_PEER_DYN_MIMOPS,
+ .static_mimops = WMI_10_2_PEER_STATIC_MIMOPS,
+ .spatial_mux = WMI_10_2_PEER_SPATIAL_MUX,
+ .vht = WMI_10_2_PEER_VHT,
+ .bw80 = WMI_10_2_PEER_80MHZ,
+ .vht_2g = WMI_10_2_PEER_VHT_2G,
+ .pmf = WMI_10_2_PEER_PMF,
+ .bw160 = WMI_10_2_PEER_160MHZ,
+};
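+
+/* Peer association flags likewise differ per firmware branch: the maps
+ * above let the peer-assoc code set abstract capability bits (HT, VHT,
+ * 40/80/160 MHz, MIMO power save, ...) and have them translated to the
+ * running firmware's bit positions through ar->wmi.peer_flags.
+ */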
+
+void ath10k_wmi_put_wmi_channel(struct wmi_channel *ch,
+ const struct wmi_channel_arg *arg)
+{
+ u32 flags = 0;
+
+ memset(ch, 0, sizeof(*ch));
+
+ if (arg->passive)
+ flags |= WMI_CHAN_FLAG_PASSIVE;
+ if (arg->allow_ibss)
+ flags |= WMI_CHAN_FLAG_ADHOC_ALLOWED;
+ if (arg->allow_ht)
+ flags |= WMI_CHAN_FLAG_ALLOW_HT;
+ if (arg->allow_vht)
+ flags |= WMI_CHAN_FLAG_ALLOW_VHT;
+ if (arg->ht40plus)
+ flags |= WMI_CHAN_FLAG_HT40_PLUS;
+ if (arg->chan_radar)
+ flags |= WMI_CHAN_FLAG_DFS;
+
+ ch->mhz = __cpu_to_le32(arg->freq);
+ ch->band_center_freq1 = __cpu_to_le32(arg->band_center_freq1);
+ if (arg->mode == MODE_11AC_VHT80_80)
+ ch->band_center_freq2 = __cpu_to_le32(arg->band_center_freq2);
+ else
+ ch->band_center_freq2 = 0;
+ ch->min_power = arg->min_power;
+ ch->max_power = arg->max_power;
+ ch->reg_power = arg->max_reg_power;
+ ch->antenna_max = arg->max_antenna_gain;
+ ch->max_tx_power = arg->max_power;
+
+ /* mode and flags share one 32-bit word: mode occupies the low bits,
+ * so write it first and then OR in the flag bits
+ */
+ ch->mode = arg->mode;
+ ch->flags |= __cpu_to_le32(flags);
+}
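+
+/* An illustrative caller fills struct wmi_channel_arg from mac80211 state,
+ * roughly:
+ *
+ *   struct wmi_channel_arg arg = {};
+ *
+ *   arg.freq = channel->center_freq;
+ *   arg.band_center_freq1 = chandef->center_freq1;
+ *   arg.passive = !!(channel->flags & IEEE80211_CHAN_NO_IR);
+ *   arg.chan_radar = !!(channel->flags & IEEE80211_CHAN_RADAR);
+ *   arg.max_power = channel->max_power * 2;  (0.5 dBm units)
+ */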
+
+int ath10k_wmi_wait_for_service_ready(struct ath10k *ar)
+{
+ unsigned long time_left;
+
+ time_left = wait_for_completion_timeout(&ar->wmi.service_ready,
+ WMI_SERVICE_READY_TIMEOUT_HZ);
+ if (!time_left)
+ return -ETIMEDOUT;
+ return 0;
+}
+
+int ath10k_wmi_wait_for_unified_ready(struct ath10k *ar)
+{
+ unsigned long time_left;
+
+ time_left = wait_for_completion_timeout(&ar->wmi.unified_ready,
+ WMI_UNIFIED_READY_TIMEOUT_HZ);
+ if (!time_left)
+ return -ETIMEDOUT;
+ return 0;
+}
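+
+/* Both waits pair with completions fired from the WMI event handlers: the
+ * service ready event (which carries the firmware's service bitmap) and
+ * the ready event that signals target init is done, so core start-up can
+ * block here before issuing further commands.
+ */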
+
+struct sk_buff *ath10k_wmi_alloc_skb(struct ath10k *ar, u32 len)
+{
+ struct sk_buff *skb;
+ u32 round_len = roundup(len, 4);
+
+ skb = ath10k_htc_alloc_skb(ar, WMI_SKB_HEADROOM + round_len);
+ if (!skb)
+ return NULL;
+
+ skb_reserve(skb, WMI_SKB_HEADROOM);
+ if (!IS_ALIGNED((unsigned long)skb->data, 4))
+ ath10k_warn(ar, "Unaligned WMI skb\n");
+
+ skb_put(skb, round_len);
+ memset(skb->data, 0, round_len);
+
+ return skb;
+}
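+
+/* WMI buffers are expected to be 4-byte aligned (hence the warning above)
+ * and the length is rounded up to a multiple of 4 with the padding zeroed;
+ * WMI_SKB_HEADROOM leaves room for the struct wmi_cmd_hdr pushed later in
+ * ath10k_wmi_cmd_send_nowait() so the skb never needs reallocating.
+ */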
+
+static void ath10k_wmi_htc_tx_complete(struct ath10k *ar, struct sk_buff *skb)
+{
+ dev_kfree_skb(skb);
+}
+
+int ath10k_wmi_cmd_send_nowait(struct ath10k *ar, struct sk_buff *skb,
+ u32 cmd_id)
+{
+ struct ath10k_skb_cb *skb_cb = ATH10K_SKB_CB(skb);
+ struct wmi_cmd_hdr *cmd_hdr;
+ int ret;
+ u32 cmd = 0;
+
+ if (skb_push(skb, sizeof(struct wmi_cmd_hdr)) == NULL)
+ return -ENOMEM;
+
+ cmd |= SM(cmd_id, WMI_CMD_HDR_CMD_ID);
+
+ cmd_hdr = (struct wmi_cmd_hdr *)skb->data;
+ cmd_hdr->cmd_id = __cpu_to_le32(cmd);
+
+ memset(skb_cb, 0, sizeof(*skb_cb));
+ trace_ath10k_wmi_cmd(ar, cmd_id, skb->data, skb->len);
+ ret = ath10k_htc_send(&ar->htc, ar->wmi.eid, skb);
+
+ if (ret)
+ goto err_pull;
+
+ return 0;
+
+err_pull:
+ skb_pull(skb, sizeof(struct wmi_cmd_hdr));
+ return ret;
+}
+
+static void ath10k_wmi_tx_beacon_nowait(struct ath10k_vif *arvif)
+{
+ struct ath10k *ar = arvif->ar;
+ struct ath10k_skb_cb *cb;
+ struct sk_buff *bcn;
+ bool dtim_zero;
+ bool deliver_cab;
+ int ret;
+
+ spin_lock_bh(&ar->data_lock);
+
+ bcn = arvif->beacon;
+
+ if (!bcn)
+ goto unlock;
+
+ cb = ATH10K_SKB_CB(bcn);
+
+ switch (arvif->beacon_state) {
+ case ATH10K_BEACON_SENDING:
+ case ATH10K_BEACON_SENT:
+ break;
+ case ATH10K_BEACON_SCHEDULED:
+ arvif->beacon_state = ATH10K_BEACON_SENDING;
+ spin_unlock_bh(&ar->data_lock);
+
+ dtim_zero = !!(cb->flags & ATH10K_SKB_F_DTIM_ZERO);
+ deliver_cab = !!(cb->flags & ATH10K_SKB_F_DELIVER_CAB);
+ ret = ath10k_wmi_beacon_send_ref_nowait(arvif->ar,
+ arvif->vdev_id,
+ bcn->data, bcn->len,
+ cb->paddr,
+ dtim_zero,
+ deliver_cab);
+
+ spin_lock_bh(&ar->data_lock);
+
+ if (ret == 0)
+ arvif->beacon_state = ATH10K_BEACON_SENT;
+ else
+ arvif->beacon_state = ATH10K_BEACON_SCHEDULED;
+ }
+
+unlock:
+ spin_unlock_bh(&ar->data_lock);
+}
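+
+/* Beacons move through a small state machine (SCHEDULED -> SENDING ->
+ * SENT) under ar->data_lock; the lock is dropped around the WMI call
+ * itself, and on failure the beacon falls back to SCHEDULED so it is
+ * retried the next time tx credits become available.
+ */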
+
+static void ath10k_wmi_tx_beacons_iter(void *data, u8 *mac,
+ struct ieee80211_vif *vif)
+{
+ struct ath10k_vif *arvif = (void *)vif->drv_priv;
+
+ ath10k_wmi_tx_beacon_nowait(arvif);
+}
+
+static void ath10k_wmi_tx_beacons_nowait(struct ath10k *ar)
+{
+ ieee80211_iterate_active_interfaces_atomic(ar->hw,
+ IEEE80211_IFACE_ITER_NORMAL,
+ ath10k_wmi_tx_beacons_iter,
+ NULL);
+}
+
+static void ath10k_wmi_op_ep_tx_credits(struct ath10k *ar)
+{
+ /* try to send pending beacons first. they take priority */
+ ath10k_wmi_tx_beacons_nowait(ar);
+
+ wake_up(&ar->wmi.tx_credits_wq);
+}
+
+int ath10k_wmi_cmd_send(struct ath10k *ar, struct sk_buff *skb, u32 cmd_id)
+{
+ int ret = -EOPNOTSUPP;
+
+ might_sleep();
+
+ if (cmd_id == WMI_CMD_UNSUPPORTED) {
+ ath10k_warn(ar, "wmi command %d is not supported by firmware\n",
+ cmd_id);
+ return ret;
+ }
+
+ wait_event_timeout(ar->wmi.tx_credits_wq, ({
+ /* try to send pending beacons first. they take priority */
+ ath10k_wmi_tx_beacons_nowait(ar);
+
+ ret = ath10k_wmi_cmd_send_nowait(ar, skb, cmd_id);
+
+ if (ret && test_bit(ATH10K_FLAG_CRASH_FLUSH, &ar->dev_flags))
+ ret = -ESHUTDOWN;
+
+ (ret != -EAGAIN);
+ }), 3 * HZ);
+
+ if (ret)
+ dev_kfree_skb_any(skb);
+
+ if (ret == -EAGAIN) {
+ ath10k_warn(ar, "wmi command %d timeout, restarting hardware\n",
+ cmd_id);
+ queue_work(ar->workqueue, &ar->restart_work);
+ }
+
+ return ret;
+}
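+
+/* This is the blocking send: WMI commands ride on HTC tx credits, so when
+ * none are available ath10k_wmi_cmd_send_nowait() returns -EAGAIN and the
+ * wait_event_timeout() loop above retries (flushing pending beacons first)
+ * for up to three seconds before giving up and scheduling a hardware
+ * restart.
+ */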
+
+static struct sk_buff *
+ath10k_wmi_op_gen_mgmt_tx(struct ath10k *ar, struct sk_buff *msdu)
+{
+ struct ath10k_skb_cb *cb = ATH10K_SKB_CB(msdu);
+ struct ath10k_vif *arvif;
+ struct wmi_mgmt_tx_cmd *cmd;
+ struct ieee80211_hdr *hdr;
+ struct sk_buff *skb;
+ int len;
+ u32 vdev_id;
+ u32 buf_len = msdu->len;
+ u16 fc;
+
+ hdr = (struct ieee80211_hdr *)msdu->data;
+ fc = le16_to_cpu(hdr->frame_control);
+
+ if (cb->vif) {
+ arvif = (void *)cb->vif->drv_priv;
+ vdev_id = arvif->vdev_id;
+ } else {
+ vdev_id = 0;
+ }
+
+ if (WARN_ON_ONCE(!ieee80211_is_mgmt(hdr->frame_control)))
+ return ERR_PTR(-EINVAL);
+
+ len = sizeof(cmd->hdr) + msdu->len;
+
+ if ((ieee80211_is_action(hdr->frame_control) ||
+ ieee80211_is_deauth(hdr->frame_control) ||
+ ieee80211_is_disassoc(hdr->frame_control)) &&
+ ieee80211_has_protected(hdr->frame_control)) {
+ len += IEEE80211_CCMP_MIC_LEN;
+ buf_len += IEEE80211_CCMP_MIC_LEN;
+ }
+
+ len = round_up(len, 4);
+
+ skb = ath10k_wmi_alloc_skb(ar, len);
+ if (!skb)
+ return ERR_PTR(-ENOMEM);
+
+ cmd = (struct wmi_mgmt_tx_cmd *)skb->data;
+
+ cmd->hdr.vdev_id = __cpu_to_le32(vdev_id);
+ cmd->hdr.tx_rate = 0;
+ cmd->hdr.tx_power = 0;
+ cmd->hdr.buf_len = __cpu_to_le32(buf_len);
+
+ ether_addr_copy(cmd->hdr.peer_macaddr.addr, ieee80211_get_DA(hdr));
+ memcpy(cmd->buf, msdu->data, msdu->len);
+
+ ath10k_dbg(ar, ATH10K_DBG_WMI, "wmi mgmt tx skb %pK len %d ftype %02x stype %02x\n",
+ msdu, skb->len, fc & IEEE80211_FCTL_FTYPE,
+ fc & IEEE80211_FCTL_STYPE);
+ trace_ath10k_tx_hdr(ar, skb->data, skb->len);
+ trace_ath10k_tx_payload(ar, skb->data, skb->len);
+
+ return skb;
+}
+
+static void ath10k_wmi_event_scan_started(struct ath10k *ar)
+{
+ lockdep_assert_held(&ar->data_lock);
+
+ switch (ar->scan.state) {
+ case ATH10K_SCAN_IDLE:
+ case ATH10K_SCAN_RUNNING:
+ case ATH10K_SCAN_ABORTING:
+ ath10k_warn(ar, "received scan started event in an invalid scan state: %s (%d)\n",
+ ath10k_scan_state_str(ar->scan.state),
+ ar->scan.state);
+ break;
+ case ATH10K_SCAN_STARTING:
+ ar->scan.state = ATH10K_SCAN_RUNNING;
+
+ if (ar->scan.is_roc)
+ ieee80211_ready_on_channel(ar->hw);
+
+ complete(&ar->scan.started);
+ break;
+ }
+}
+
+static void ath10k_wmi_event_scan_start_failed(struct ath10k *ar)
+{
+ lockdep_assert_held(&ar->data_lock);
+
+ switch (ar->scan.state) {
+ case ATH10K_SCAN_IDLE:
+ case ATH10K_SCAN_RUNNING:
+ case ATH10K_SCAN_ABORTING:
+ ath10k_warn(ar, "received scan start failed event in an invalid scan state: %s (%d)\n",
+ ath10k_scan_state_str(ar->scan.state),
+ ar->scan.state);
+ break;
+ case ATH10K_SCAN_STARTING:
+ complete(&ar->scan.started);
+ __ath10k_scan_finish(ar);
+ break;
+ }
+}
+
+static void ath10k_wmi_event_scan_completed(struct ath10k *ar)
+{
+ lockdep_assert_held(&ar->data_lock);
+
+ switch (ar->scan.state) {
+ case ATH10K_SCAN_IDLE:
+ case ATH10K_SCAN_STARTING:
+ /* One suspected reason scan can be completed while starting is
+ * if firmware fails to deliver all scan events to the host,
+ * e.g. when transport pipe is full. This has been observed
+ * with spectral scan phyerr events starving wmi transport
+ * pipe. In such case the "scan completed" event should be (and
+ * is) ignored by the host as it may be just firmware's scan
+ * state machine recovering.
+ */
+ ath10k_warn(ar, "received scan completed event in an invalid scan state: %s (%d)\n",
+ ath10k_scan_state_str(ar->scan.state),
+ ar->scan.state);
+ break;
+ case ATH10K_SCAN_RUNNING:
+ case ATH10K_SCAN_ABORTING:
+ __ath10k_scan_finish(ar);
+ break;
+ }
+}
+
+static void ath10k_wmi_event_scan_bss_chan(struct ath10k *ar)
+{
+ lockdep_assert_held(&ar->data_lock);
+
+ switch (ar->scan.state) {
+ case ATH10K_SCAN_IDLE:
+ case ATH10K_SCAN_STARTING:
+ ath10k_warn(ar, "received scan bss chan event in an invalid scan state: %s (%d)\n",
+ ath10k_scan_state_str(ar->scan.state),
+ ar->scan.state);
+ break;
+ case ATH10K_SCAN_RUNNING:
+ case ATH10K_SCAN_ABORTING:
+ ar->scan_channel = NULL;
+ break;
+ }
+}
+
+static void ath10k_wmi_event_scan_foreign_chan(struct ath10k *ar, u32 freq)
+{
+ lockdep_assert_held(&ar->data_lock);
+
+ switch (ar->scan.state) {
+ case ATH10K_SCAN_IDLE:
+ case ATH10K_SCAN_STARTING:
+ ath10k_warn(ar, "received scan foreign chan event in an invalid scan state: %s (%d)\n",
+ ath10k_scan_state_str(ar->scan.state),
+ ar->scan.state);
+ break;
+ case ATH10K_SCAN_RUNNING:
+ case ATH10K_SCAN_ABORTING:
+ ar->scan_channel = ieee80211_get_channel(ar->hw->wiphy, freq);
+
+ if (ar->scan.is_roc && ar->scan.roc_freq == freq)
+ complete(&ar->scan.on_channel);
+ break;
+ }
+}
+
+static const char *
+ath10k_wmi_event_scan_type_str(enum wmi_scan_event_type type,
+ enum wmi_scan_completion_reason reason)
+{
+ switch (type) {
+ case WMI_SCAN_EVENT_STARTED:
+ return "started";
+ case WMI_SCAN_EVENT_COMPLETED:
+ switch (reason) {
+ case WMI_SCAN_REASON_COMPLETED:
+ return "completed";
+ case WMI_SCAN_REASON_CANCELLED:
+ return "completed [cancelled]";
+ case WMI_SCAN_REASON_PREEMPTED:
+ return "completed [preempted]";
+ case WMI_SCAN_REASON_TIMEDOUT:
+ return "completed [timedout]";
+ case WMI_SCAN_REASON_INTERNAL_FAILURE:
+ return "completed [internal err]";
+ case WMI_SCAN_REASON_MAX:
+ break;
+ }
+ return "completed [unknown]";
+ case WMI_SCAN_EVENT_BSS_CHANNEL:
+ return "bss channel";
+ case WMI_SCAN_EVENT_FOREIGN_CHANNEL:
+ return "foreign channel";
+ case WMI_SCAN_EVENT_DEQUEUED:
+ return "dequeued";
+ case WMI_SCAN_EVENT_PREEMPTED:
+ return "preempted";
+ case WMI_SCAN_EVENT_START_FAILED:
+ return "start failed";
+ case WMI_SCAN_EVENT_RESTARTED:
+ return "restarted";
+ case WMI_SCAN_EVENT_FOREIGN_CHANNEL_EXIT:
+ return "foreign channel exit";
+ default:
+ return "unknown";
+ }
+}
+
+static int ath10k_wmi_op_pull_scan_ev(struct ath10k *ar, struct sk_buff *skb,
+ struct wmi_scan_ev_arg *arg)
+{
+ struct wmi_scan_event *ev = (void *)skb->data;
+
+ if (skb->len < sizeof(*ev))
+ return -EPROTO;
+
+ skb_pull(skb, sizeof(*ev));
+ arg->event_type = ev->event_type;
+ arg->reason = ev->reason;
+ arg->channel_freq = ev->channel_freq;
+ arg->scan_req_id = ev->scan_req_id;
+ arg->scan_id = ev->scan_id;
+ arg->vdev_id = ev->vdev_id;
+
+ return 0;
+}
+
+int ath10k_wmi_event_scan(struct ath10k *ar, struct sk_buff *skb)
+{
+ struct wmi_scan_ev_arg arg = {};
+ enum wmi_scan_event_type event_type;
+ enum wmi_scan_completion_reason reason;
+ u32 freq;
+ u32 req_id;
+ u32 scan_id;
+ u32 vdev_id;
+ int ret;
+
+ ret = ath10k_wmi_pull_scan(ar, skb, &arg);
+ if (ret) {
+ ath10k_warn(ar, "failed to parse scan event: %d\n", ret);
+ return ret;
+ }
+
+ event_type = __le32_to_cpu(arg.event_type);
+ reason = __le32_to_cpu(arg.reason);
+ freq = __le32_to_cpu(arg.channel_freq);
+ req_id = __le32_to_cpu(arg.scan_req_id);
+ scan_id = __le32_to_cpu(arg.scan_id);
+ vdev_id = __le32_to_cpu(arg.vdev_id);
+
+ spin_lock_bh(&ar->data_lock);
+
+ ath10k_dbg(ar, ATH10K_DBG_WMI,
+ "scan event %s type %d reason %d freq %d req_id %d scan_id %d vdev_id %d state %s (%d)\n",
+ ath10k_wmi_event_scan_type_str(event_type, reason),
+ event_type, reason, freq, req_id, scan_id, vdev_id,
+ ath10k_scan_state_str(ar->scan.state), ar->scan.state);
+
+ switch (event_type) {
+ case WMI_SCAN_EVENT_STARTED:
+ ath10k_wmi_event_scan_started(ar);
+ break;
+ case WMI_SCAN_EVENT_COMPLETED:
+ ath10k_wmi_event_scan_completed(ar);
+ break;
+ case WMI_SCAN_EVENT_BSS_CHANNEL:
+ ath10k_wmi_event_scan_bss_chan(ar);
+ break;
+ case WMI_SCAN_EVENT_FOREIGN_CHANNEL:
+ ath10k_wmi_event_scan_foreign_chan(ar, freq);
+ break;
+ case WMI_SCAN_EVENT_START_FAILED:
+ ath10k_warn(ar, "received scan start failure event\n");
+ ath10k_wmi_event_scan_start_failed(ar);
+ break;
+ case WMI_SCAN_EVENT_DEQUEUED:
+ case WMI_SCAN_EVENT_PREEMPTED:
+ case WMI_SCAN_EVENT_RESTARTED:
+ case WMI_SCAN_EVENT_FOREIGN_CHANNEL_EXIT:
+ default:
+ break;
+ }
+
+ spin_unlock_bh(&ar->data_lock);
+ return 0;
+}
+
+/* If keys are configured, HW decrypts all frames
+ * with protected bit set. Mark such frames as decrypted.
+ */
+static void ath10k_wmi_handle_wep_reauth(struct ath10k *ar,
+ struct sk_buff *skb,
+ struct ieee80211_rx_status *status)
+{
+ struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data;
+ unsigned int hdrlen;
+ bool peer_key;
+ u8 *addr, keyidx;
+
+ if (!ieee80211_is_auth(hdr->frame_control) ||
+ !ieee80211_has_protected(hdr->frame_control))
+ return;
+
+ hdrlen = ieee80211_hdrlen(hdr->frame_control);
+ if (skb->len < (hdrlen + IEEE80211_WEP_IV_LEN))
+ return;
+
+ keyidx = skb->data[hdrlen + (IEEE80211_WEP_IV_LEN - 1)] >> WEP_KEYID_SHIFT;
+ addr = ieee80211_get_SA(hdr);
+
+ spin_lock_bh(&ar->data_lock);
+ peer_key = ath10k_mac_is_peer_wep_key_set(ar, addr, keyidx);
+ spin_unlock_bh(&ar->data_lock);
+
+ if (peer_key) {
+ ath10k_dbg(ar, ATH10K_DBG_MAC,
+ "mac wep key present for peer %pM\n", addr);
+ status->flag |= RX_FLAG_DECRYPTED;
+ }
+}
+
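+/* Two wire formats exist for this event: firmware advertising
+ * ATH10K_FW_FEATURE_EXT_WMI_MGMT_RX uses the v2 header (the v1 header
+ * plus extra fields), everything else the plain v1 header. Optional
+ * ext_info (e.g. the rx MAC timestamp) follows the 4-byte-aligned
+ * frame payload when WMI_RX_STATUS_EXT_INFO is set in the status.
+ */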
+static int ath10k_wmi_op_pull_mgmt_rx_ev(struct ath10k *ar, struct sk_buff *skb,
+ struct wmi_mgmt_rx_ev_arg *arg)
+{
+ struct wmi_mgmt_rx_event_v1 *ev_v1;
+ struct wmi_mgmt_rx_event_v2 *ev_v2;
+ struct wmi_mgmt_rx_hdr_v1 *ev_hdr;
+ struct wmi_mgmt_rx_ext_info *ext_info;
+ size_t pull_len;
+ u32 msdu_len;
+ u32 len;
+
+ if (test_bit(ATH10K_FW_FEATURE_EXT_WMI_MGMT_RX,
+ ar->running_fw->fw_file.fw_features)) {
+ ev_v2 = (struct wmi_mgmt_rx_event_v2 *)skb->data;
+ ev_hdr = &ev_v2->hdr.v1;
+ pull_len = sizeof(*ev_v2);
+ } else {
+ ev_v1 = (struct wmi_mgmt_rx_event_v1 *)skb->data;
+ ev_hdr = &ev_v1->hdr;
+ pull_len = sizeof(*ev_v1);
+ }
+
+ if (skb->len < pull_len)
+ return -EPROTO;
+
+ skb_pull(skb, pull_len);
+ arg->channel = ev_hdr->channel;
+ arg->buf_len = ev_hdr->buf_len;
+ arg->status = ev_hdr->status;
+ arg->snr = ev_hdr->snr;
+ arg->phy_mode = ev_hdr->phy_mode;
+ arg->rate = ev_hdr->rate;
+
+ msdu_len = __le32_to_cpu(arg->buf_len);
+ if (skb->len < msdu_len)
+ return -EPROTO;
+
+ if (le32_to_cpu(arg->status) & WMI_RX_STATUS_EXT_INFO) {
+ len = ALIGN(le32_to_cpu(arg->buf_len), 4);
+ ext_info = (struct wmi_mgmt_rx_ext_info *)(skb->data + len);
+ memcpy(&arg->ext_info, ext_info,
+ sizeof(struct wmi_mgmt_rx_ext_info));
+ }
+ /* the WMI buffer might've ended up being padded to a 4-byte boundary
+ * due to the HTC trailer with credit update. Trim the excess garbage.
+ */
+ skb_trim(skb, msdu_len);
+
+ return 0;
+}
+
+static int ath10k_wmi_10_4_op_pull_mgmt_rx_ev(struct ath10k *ar,
+ struct sk_buff *skb,
+ struct wmi_mgmt_rx_ev_arg *arg)
+{
+ struct wmi_10_4_mgmt_rx_event *ev;
+ struct wmi_10_4_mgmt_rx_hdr *ev_hdr;
+ size_t pull_len;
+ u32 msdu_len;
+ struct wmi_mgmt_rx_ext_info *ext_info;
+ u32 len;
+
+ ev = (struct wmi_10_4_mgmt_rx_event *)skb->data;
+ ev_hdr = &ev->hdr;
+ pull_len = sizeof(*ev);
+
+ if (skb->len < pull_len)
+ return -EPROTO;
+
+ skb_pull(skb, pull_len);
+ arg->channel = ev_hdr->channel;
+ arg->buf_len = ev_hdr->buf_len;
+ arg->status = ev_hdr->status;
+ arg->snr = ev_hdr->snr;
+ arg->phy_mode = ev_hdr->phy_mode;
+ arg->rate = ev_hdr->rate;
+
+ msdu_len = __le32_to_cpu(arg->buf_len);
+ if (skb->len < msdu_len)
+ return -EPROTO;
+
+ if (le32_to_cpu(arg->status) & WMI_RX_STATUS_EXT_INFO) {
+ len = ALIGN(le32_to_cpu(arg->buf_len), 4);
+ ext_info = (struct wmi_mgmt_rx_ext_info *)(skb->data + len);
+ memcpy(&arg->ext_info, ext_info,
+ sizeof(struct wmi_mgmt_rx_ext_info));
+ }
+
+ /* Make sure bytes added for padding are removed. */
+ skb_trim(skb, msdu_len);
+
+ return 0;
+}
+
+static bool ath10k_wmi_rx_is_decrypted(struct ath10k *ar,
+ struct ieee80211_hdr *hdr)
+{
+ if (!ieee80211_has_protected(hdr->frame_control))
+ return false;
+
+ /* FW delivers WEP Shared Auth frame with Protected Bit set and
+ * encrypted payload. However, in case of PMF it delivers decrypted
+ * frames with Protected Bit set.
+ */
+ if (ieee80211_is_auth(hdr->frame_control))
+ return false;
+
+ /* qca99x0 based FW delivers broadcast or multicast management frames
+ * (e.g. group privacy action frames in mesh) as encrypted payload.
+ */
+ if (is_multicast_ether_addr(ieee80211_get_DA(hdr)) &&
+ ar->hw_params.sw_decrypt_mcast_mgmt)
+ return false;
+
+ return true;
+}
+
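+/* Complete a management frame sent over WMI: look up the pending tx
+ * descriptor in the mgmt_pending_tx IDR by desc_id, unmap its DMA
+ * buffer and report ACK/no-ACK (based on the firmware status code) to
+ * mac80211.
+ */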
+static int wmi_process_mgmt_tx_comp(struct ath10k *ar, u32 desc_id,
+ u32 status)
+{
+ struct ath10k_mgmt_tx_pkt_addr *pkt_addr;
+ struct ath10k_wmi *wmi = &ar->wmi;
+ struct ieee80211_tx_info *info;
+ struct sk_buff *msdu;
+ int ret;
+
+ spin_lock_bh(&ar->data_lock);
+
+ pkt_addr = idr_find(&wmi->mgmt_pending_tx, desc_id);
+ if (!pkt_addr) {
+ ath10k_warn(ar, "received mgmt tx completion for invalid msdu_id: %d\n",
+ desc_id);
+ ret = -ENOENT;
+ goto out;
+ }
+
+ msdu = pkt_addr->vaddr;
+ dma_unmap_single(ar->dev, pkt_addr->paddr,
+ msdu->len, DMA_TO_DEVICE);
+ info = IEEE80211_SKB_CB(msdu);
+
+ if (status)
+ info->flags &= ~IEEE80211_TX_STAT_ACK;
+ else
+ info->flags |= IEEE80211_TX_STAT_ACK;
+
+ ieee80211_tx_status_irqsafe(ar->hw, msdu);
+
+ ret = 0;
+
+out:
+ idr_remove(&wmi->mgmt_pending_tx, desc_id);
+ spin_unlock_bh(&ar->data_lock);
+ return ret;
+}
+
+int ath10k_wmi_event_mgmt_tx_compl(struct ath10k *ar, struct sk_buff *skb)
+{
+ struct wmi_tlv_mgmt_tx_compl_ev_arg arg;
+ int ret;
+
+ ret = ath10k_wmi_pull_mgmt_tx_compl(ar, skb, &arg);
+ if (ret) {
+ ath10k_warn(ar, "failed to parse mgmt comp event: %d\n", ret);
+ return ret;
+ }
+
+ wmi_process_mgmt_tx_comp(ar, __le32_to_cpu(arg.desc_id),
+ __le32_to_cpu(arg.status));
+
+ ath10k_dbg(ar, ATH10K_DBG_WMI, "wmi tlv evnt mgmt tx completion\n");
+
+ return 0;
+}
+
+int ath10k_wmi_event_mgmt_rx(struct ath10k *ar, struct sk_buff *skb)
+{
+ struct wmi_mgmt_rx_ev_arg arg = {};
+ struct ieee80211_rx_status *status = IEEE80211_SKB_RXCB(skb);
+ struct ieee80211_hdr *hdr;
+ struct ieee80211_supported_band *sband;
+ u32 rx_status;
+ u32 channel;
+ u32 phy_mode;
+ u32 snr;
+ u32 rate;
+ u16 fc;
+ int ret;
+
+ ret = ath10k_wmi_pull_mgmt_rx(ar, skb, &arg);
+ if (ret) {
+ ath10k_warn(ar, "failed to parse mgmt rx event: %d\n", ret);
+ dev_kfree_skb(skb);
+ return ret;
+ }
+
+ channel = __le32_to_cpu(arg.channel);
+ rx_status = __le32_to_cpu(arg.status);
+ snr = __le32_to_cpu(arg.snr);
+ phy_mode = __le32_to_cpu(arg.phy_mode);
+ rate = __le32_to_cpu(arg.rate);
+
+ memset(status, 0, sizeof(*status));
+
+ ath10k_dbg(ar, ATH10K_DBG_MGMT,
+ "event mgmt rx status %08x\n", rx_status);
+
+ if ((test_bit(ATH10K_CAC_RUNNING, &ar->dev_flags)) ||
+ (rx_status & (WMI_RX_STATUS_ERR_DECRYPT |
+ WMI_RX_STATUS_ERR_KEY_CACHE_MISS | WMI_RX_STATUS_ERR_CRC))) {
+ dev_kfree_skb(skb);
+ return 0;
+ }
+
+ if (rx_status & WMI_RX_STATUS_ERR_MIC)
+ status->flag |= RX_FLAG_MMIC_ERROR;
+
+ if (rx_status & WMI_RX_STATUS_EXT_INFO) {
+ status->mactime =
+ __le64_to_cpu(arg.ext_info.rx_mac_timestamp);
+ status->flag |= RX_FLAG_MACTIME_END;
+ }
+ /* Hardware can Rx CCK rates on 5GHz. In that case phy_mode is set to
+ * MODE_11B. This means phy_mode is not a reliable source for the band
+ * of mgmt rx.
+ */
+ if (channel >= 1 && channel <= 14) {
+ status->band = NL80211_BAND_2GHZ;
+ } else if (channel >= 36 && channel <= ATH10K_MAX_5G_CHAN) {
+ status->band = NL80211_BAND_5GHZ;
+ } else {
+ /* Shouldn't happen unless the list of channels advertised
+ * to mac80211 has been changed.
+ */
+ WARN_ON_ONCE(1);
+ dev_kfree_skb(skb);
+ return 0;
+ }
+
+ if (phy_mode == MODE_11B && status->band == NL80211_BAND_5GHZ)
+ ath10k_dbg(ar, ATH10K_DBG_MGMT, "wmi mgmt rx 11b (CCK) on 5GHz\n");
+
+ sband = &ar->mac.sbands[status->band];
+
+ status->freq = ieee80211_channel_to_frequency(channel, status->band);
+ status->signal = snr + ATH10K_DEFAULT_NOISE_FLOOR;
+ status->rate_idx = ath10k_mac_bitrate_to_idx(sband, rate / 100);
+
+ hdr = (struct ieee80211_hdr *)skb->data;
+ fc = le16_to_cpu(hdr->frame_control);
+
+ /* Firmware is guaranteed to report all essential management frames via
+ * WMI while it can deliver some extra via HTT. Since there can be
+ * duplicates, split the reporting wrt monitor/sniffing.
+ */
+ status->flag |= RX_FLAG_SKIP_MONITOR;
+
+ ath10k_wmi_handle_wep_reauth(ar, skb, status);
+
+ if (ath10k_wmi_rx_is_decrypted(ar, hdr)) {
+ status->flag |= RX_FLAG_DECRYPTED;
+
+ if (!ieee80211_is_action(hdr->frame_control) &&
+ !ieee80211_is_deauth(hdr->frame_control) &&
+ !ieee80211_is_disassoc(hdr->frame_control)) {
+ status->flag |= RX_FLAG_IV_STRIPPED |
+ RX_FLAG_MMIC_STRIPPED;
+ hdr->frame_control = __cpu_to_le16(fc &
+ ~IEEE80211_FCTL_PROTECTED);
+ }
+ }
+
+ if (ieee80211_is_beacon(hdr->frame_control))
+ ath10k_mac_handle_beacon(ar, skb);
+
+ ath10k_dbg(ar, ATH10K_DBG_MGMT,
+ "event mgmt rx skb %pK len %d ftype %02x stype %02x\n",
+ skb, skb->len,
+ fc & IEEE80211_FCTL_FTYPE, fc & IEEE80211_FCTL_STYPE);
+
+ ath10k_dbg(ar, ATH10K_DBG_MGMT,
+ "event mgmt rx freq %d band %d snr %d, rate_idx %d\n",
+ status->freq, status->band, status->signal,
+ status->rate_idx);
+
+ ieee80211_rx_ni(ar->hw, skb);
+
+ return 0;
+}
+
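+/* Map a center frequency to its index in ar->survey[]: the index is the
+ * channel's position across all bands in wiphy registration order. An
+ * unknown frequency yields the total channel count, which the caller
+ * rejects with a bounds check.
+ */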
+static int freq_to_idx(struct ath10k *ar, int freq)
+{
+ struct ieee80211_supported_band *sband;
+ int band, ch, idx = 0;
+
+ for (band = NL80211_BAND_2GHZ; band < NUM_NL80211_BANDS; band++) {
+ sband = ar->hw->wiphy->bands[band];
+ if (!sband)
+ continue;
+
+ for (ch = 0; ch < sband->n_channels; ch++, idx++)
+ if (sband->channels[ch].center_freq == freq)
+ goto exit;
+ }
+
+exit:
+ return idx;
+}
+
+static int ath10k_wmi_op_pull_ch_info_ev(struct ath10k *ar, struct sk_buff *skb,
+ struct wmi_ch_info_ev_arg *arg)
+{
+ struct wmi_chan_info_event *ev = (void *)skb->data;
+
+ if (skb->len < sizeof(*ev))
+ return -EPROTO;
+
+ skb_pull(skb, sizeof(*ev));
+ arg->err_code = ev->err_code;
+ arg->freq = ev->freq;
+ arg->cmd_flags = ev->cmd_flags;
+ arg->noise_floor = ev->noise_floor;
+ arg->rx_clear_count = ev->rx_clear_count;
+ arg->cycle_count = ev->cycle_count;
+
+ return 0;
+}
+
+static int ath10k_wmi_10_4_op_pull_ch_info_ev(struct ath10k *ar,
+ struct sk_buff *skb,
+ struct wmi_ch_info_ev_arg *arg)
+{
+ struct wmi_10_4_chan_info_event *ev = (void *)skb->data;
+
+ if (skb->len < sizeof(*ev))
+ return -EPROTO;
+
+ skb_pull(skb, sizeof(*ev));
+ arg->err_code = ev->err_code;
+ arg->freq = ev->freq;
+ arg->cmd_flags = ev->cmd_flags;
+ arg->noise_floor = ev->noise_floor;
+ arg->rx_clear_count = ev->rx_clear_count;
+ arg->cycle_count = ev->cycle_count;
+ arg->chan_tx_pwr_range = ev->chan_tx_pwr_range;
+ arg->chan_tx_pwr_tp = ev->chan_tx_pwr_tp;
+ arg->rx_frame_count = ev->rx_frame_count;
+
+ return 0;
+}
+
+void ath10k_wmi_event_chan_info(struct ath10k *ar, struct sk_buff *skb)
+{
+ struct wmi_ch_info_ev_arg arg = {};
+ struct survey_info *survey;
+ u32 err_code, freq, cmd_flags, noise_floor, rx_clear_count, cycle_count;
+ int idx, ret;
+
+ ret = ath10k_wmi_pull_ch_info(ar, skb, &arg);
+ if (ret) {
+ ath10k_warn(ar, "failed to parse chan info event: %d\n", ret);
+ return;
+ }
+
+ err_code = __le32_to_cpu(arg.err_code);
+ freq = __le32_to_cpu(arg.freq);
+ cmd_flags = __le32_to_cpu(arg.cmd_flags);
+ noise_floor = __le32_to_cpu(arg.noise_floor);
+ rx_clear_count = __le32_to_cpu(arg.rx_clear_count);
+ cycle_count = __le32_to_cpu(arg.cycle_count);
+
+ ath10k_dbg(ar, ATH10K_DBG_WMI,
+ "chan info err_code %d freq %d cmd_flags %d noise_floor %d rx_clear_count %d cycle_count %d\n",
+ err_code, freq, cmd_flags, noise_floor, rx_clear_count,
+ cycle_count);
+
+ spin_lock_bh(&ar->data_lock);
+
+ switch (ar->scan.state) {
+ case ATH10K_SCAN_IDLE:
+ case ATH10K_SCAN_STARTING:
+ ath10k_warn(ar, "received chan info event without a scan request, ignoring\n");
+ goto exit;
+ case ATH10K_SCAN_RUNNING:
+ case ATH10K_SCAN_ABORTING:
+ break;
+ }
+
+ idx = freq_to_idx(ar, freq);
+ if (idx >= ARRAY_SIZE(ar->survey)) {
+ ath10k_warn(ar, "chan info: invalid frequency %d (idx %d out of bounds)\n",
+ freq, idx);
+ goto exit;
+ }
+
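+ /* Chan info events arrive in pairs per channel visit: the event
+ * without the COMPLETE flag arms survey reporting and refreshes
+ * the cycle counter baselines, the COMPLETE event then fills the
+ * survey entry from the counter deltas. Events flagged
+ * PRE_COMPLETE leave the stored baselines untouched.
+ */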
+ if (cmd_flags & WMI_CHAN_INFO_FLAG_COMPLETE) {
+ if (ar->ch_info_can_report_survey) {
+ survey = &ar->survey[idx];
+ survey->noise = noise_floor;
+ survey->filled = SURVEY_INFO_NOISE_DBM;
+
+ ath10k_hw_fill_survey_time(ar,
+ survey,
+ cycle_count,
+ rx_clear_count,
+ ar->survey_last_cycle_count,
+ ar->survey_last_rx_clear_count);
+ }
+
+ ar->ch_info_can_report_survey = false;
+ } else {
+ ar->ch_info_can_report_survey = true;
+ }
+
+ if (!(cmd_flags & WMI_CHAN_INFO_FLAG_PRE_COMPLETE)) {
+ ar->survey_last_rx_clear_count = rx_clear_count;
+ ar->survey_last_cycle_count = cycle_count;
+ }
+
+exit:
+ spin_unlock_bh(&ar->data_lock);
+}
+
+void ath10k_wmi_event_echo(struct ath10k *ar, struct sk_buff *skb)
+{
+ struct wmi_echo_ev_arg arg = {};
+ int ret;
+
+ ret = ath10k_wmi_pull_echo_ev(ar, skb, &arg);
+ if (ret) {
+ ath10k_warn(ar, "failed to parse echo: %d\n", ret);
+ return;
+ }
+
+ ath10k_dbg(ar, ATH10K_DBG_WMI,
+ "wmi event echo value 0x%08x\n",
+ le32_to_cpu(arg.value));
+
+ if (le32_to_cpu(arg.value) == ATH10K_WMI_BARRIER_ECHO_ID)
+ complete(&ar->wmi.barrier);
+}
+
+int ath10k_wmi_event_debug_mesg(struct ath10k *ar, struct sk_buff *skb)
+{
+ ath10k_dbg(ar, ATH10K_DBG_WMI, "wmi event debug mesg len %d\n",
+ skb->len);
+
+ trace_ath10k_wmi_dbglog(ar, skb->data, skb->len);
+
+ return 0;
+}
+
+void ath10k_wmi_pull_pdev_stats_base(const struct wmi_pdev_stats_base *src,
+ struct ath10k_fw_stats_pdev *dst)
+{
+ dst->ch_noise_floor = __le32_to_cpu(src->chan_nf);
+ dst->tx_frame_count = __le32_to_cpu(src->tx_frame_count);
+ dst->rx_frame_count = __le32_to_cpu(src->rx_frame_count);
+ dst->rx_clear_count = __le32_to_cpu(src->rx_clear_count);
+ dst->cycle_count = __le32_to_cpu(src->cycle_count);
+ dst->phy_err_count = __le32_to_cpu(src->phy_err_count);
+ dst->chan_tx_power = __le32_to_cpu(src->chan_tx_pwr);
+}
+
+void ath10k_wmi_pull_pdev_stats_tx(const struct wmi_pdev_stats_tx *src,
+ struct ath10k_fw_stats_pdev *dst)
+{
+ dst->comp_queued = __le32_to_cpu(src->comp_queued);
+ dst->comp_delivered = __le32_to_cpu(src->comp_delivered);
+ dst->msdu_enqued = __le32_to_cpu(src->msdu_enqued);
+ dst->mpdu_enqued = __le32_to_cpu(src->mpdu_enqued);
+ dst->wmm_drop = __le32_to_cpu(src->wmm_drop);
+ dst->local_enqued = __le32_to_cpu(src->local_enqued);
+ dst->local_freed = __le32_to_cpu(src->local_freed);
+ dst->hw_queued = __le32_to_cpu(src->hw_queued);
+ dst->hw_reaped = __le32_to_cpu(src->hw_reaped);
+ dst->underrun = __le32_to_cpu(src->underrun);
+ dst->tx_abort = __le32_to_cpu(src->tx_abort);
+ dst->mpdus_requed = __le32_to_cpu(src->mpdus_requed);
+ dst->tx_ko = __le32_to_cpu(src->tx_ko);
+ dst->data_rc = __le32_to_cpu(src->data_rc);
+ dst->self_triggers = __le32_to_cpu(src->self_triggers);
+ dst->sw_retry_failure = __le32_to_cpu(src->sw_retry_failure);
+ dst->illgl_rate_phy_err = __le32_to_cpu(src->illgl_rate_phy_err);
+ dst->pdev_cont_xretry = __le32_to_cpu(src->pdev_cont_xretry);
+ dst->pdev_tx_timeout = __le32_to_cpu(src->pdev_tx_timeout);
+ dst->pdev_resets = __le32_to_cpu(src->pdev_resets);
+ dst->phy_underrun = __le32_to_cpu(src->phy_underrun);
+ dst->txop_ovf = __le32_to_cpu(src->txop_ovf);
+}
+
+static void
+ath10k_wmi_10_4_pull_pdev_stats_tx(const struct wmi_10_4_pdev_stats_tx *src,
+ struct ath10k_fw_stats_pdev *dst)
+{
+ dst->comp_queued = __le32_to_cpu(src->comp_queued);
+ dst->comp_delivered = __le32_to_cpu(src->comp_delivered);
+ dst->msdu_enqued = __le32_to_cpu(src->msdu_enqued);
+ dst->mpdu_enqued = __le32_to_cpu(src->mpdu_enqued);
+ dst->wmm_drop = __le32_to_cpu(src->wmm_drop);
+ dst->local_enqued = __le32_to_cpu(src->local_enqued);
+ dst->local_freed = __le32_to_cpu(src->local_freed);
+ dst->hw_queued = __le32_to_cpu(src->hw_queued);
+ dst->hw_reaped = __le32_to_cpu(src->hw_reaped);
+ dst->underrun = __le32_to_cpu(src->underrun);
+ dst->tx_abort = __le32_to_cpu(src->tx_abort);
+ dst->mpdus_requed = __le32_to_cpu(src->mpdus_requed);
+ dst->tx_ko = __le32_to_cpu(src->tx_ko);
+ dst->data_rc = __le32_to_cpu(src->data_rc);
+ dst->self_triggers = __le32_to_cpu(src->self_triggers);
+ dst->sw_retry_failure = __le32_to_cpu(src->sw_retry_failure);
+ dst->illgl_rate_phy_err = __le32_to_cpu(src->illgl_rate_phy_err);
+ dst->pdev_cont_xretry = __le32_to_cpu(src->pdev_cont_xretry);
+ dst->pdev_tx_timeout = __le32_to_cpu(src->pdev_tx_timeout);
+ dst->pdev_resets = __le32_to_cpu(src->pdev_resets);
+ dst->phy_underrun = __le32_to_cpu(src->phy_underrun);
+ dst->txop_ovf = __le32_to_cpu(src->txop_ovf);
+ dst->hw_paused = __le32_to_cpu(src->hw_paused);
+ dst->seq_posted = __le32_to_cpu(src->seq_posted);
+ dst->seq_failed_queueing =
+ __le32_to_cpu(src->seq_failed_queueing);
+ dst->seq_completed = __le32_to_cpu(src->seq_completed);
+ dst->seq_restarted = __le32_to_cpu(src->seq_restarted);
+ dst->mu_seq_posted = __le32_to_cpu(src->mu_seq_posted);
+ dst->mpdus_sw_flush = __le32_to_cpu(src->mpdus_sw_flush);
+ dst->mpdus_hw_filter = __le32_to_cpu(src->mpdus_hw_filter);
+ dst->mpdus_truncated = __le32_to_cpu(src->mpdus_truncated);
+ dst->mpdus_ack_failed = __le32_to_cpu(src->mpdus_ack_failed);
+ dst->mpdus_expired = __le32_to_cpu(src->mpdus_expired);
+}
+
+void ath10k_wmi_pull_pdev_stats_rx(const struct wmi_pdev_stats_rx *src,
+ struct ath10k_fw_stats_pdev *dst)
+{
+ dst->mid_ppdu_route_change = __le32_to_cpu(src->mid_ppdu_route_change);
+ dst->status_rcvd = __le32_to_cpu(src->status_rcvd);
+ dst->r0_frags = __le32_to_cpu(src->r0_frags);
+ dst->r1_frags = __le32_to_cpu(src->r1_frags);
+ dst->r2_frags = __le32_to_cpu(src->r2_frags);
+ dst->r3_frags = __le32_to_cpu(src->r3_frags);
+ dst->htt_msdus = __le32_to_cpu(src->htt_msdus);
+ dst->htt_mpdus = __le32_to_cpu(src->htt_mpdus);
+ dst->loc_msdus = __le32_to_cpu(src->loc_msdus);
+ dst->loc_mpdus = __le32_to_cpu(src->loc_mpdus);
+ dst->oversize_amsdu = __le32_to_cpu(src->oversize_amsdu);
+ dst->phy_errs = __le32_to_cpu(src->phy_errs);
+ dst->phy_err_drop = __le32_to_cpu(src->phy_err_drop);
+ dst->mpdu_errs = __le32_to_cpu(src->mpdu_errs);
+}
+
+void ath10k_wmi_pull_pdev_stats_extra(const struct wmi_pdev_stats_extra *src,
+ struct ath10k_fw_stats_pdev *dst)
+{
+ dst->ack_rx_bad = __le32_to_cpu(src->ack_rx_bad);
+ dst->rts_bad = __le32_to_cpu(src->rts_bad);
+ dst->rts_good = __le32_to_cpu(src->rts_good);
+ dst->fcs_bad = __le32_to_cpu(src->fcs_bad);
+ dst->no_beacons = __le32_to_cpu(src->no_beacons);
+ dst->mib_int_count = __le32_to_cpu(src->mib_int_count);
+}
+
+void ath10k_wmi_pull_peer_stats(const struct wmi_peer_stats *src,
+ struct ath10k_fw_stats_peer *dst)
+{
+ ether_addr_copy(dst->peer_macaddr, src->peer_macaddr.addr);
+ dst->peer_rssi = __le32_to_cpu(src->peer_rssi);
+ dst->peer_tx_rate = __le32_to_cpu(src->peer_tx_rate);
+}
+
+static void
+ath10k_wmi_10_4_pull_peer_stats(const struct wmi_10_4_peer_stats *src,
+ struct ath10k_fw_stats_peer *dst)
+{
+ ether_addr_copy(dst->peer_macaddr, src->peer_macaddr.addr);
+ dst->peer_rssi = __le32_to_cpu(src->peer_rssi);
+ dst->peer_tx_rate = __le32_to_cpu(src->peer_tx_rate);
+ dst->peer_rx_rate = __le32_to_cpu(src->peer_rx_rate);
+}
+
+static void
+ath10k_wmi_10_4_pull_vdev_stats(const struct wmi_vdev_stats_extd *src,
+ struct ath10k_fw_stats_vdev_extd *dst)
+{
+ dst->vdev_id = __le32_to_cpu(src->vdev_id);
+ dst->ppdu_aggr_cnt = __le32_to_cpu(src->ppdu_aggr_cnt);
+ dst->ppdu_noack = __le32_to_cpu(src->ppdu_noack);
+ dst->mpdu_queued = __le32_to_cpu(src->mpdu_queued);
+ dst->ppdu_nonaggr_cnt = __le32_to_cpu(src->ppdu_nonaggr_cnt);
+ dst->mpdu_sw_requeued = __le32_to_cpu(src->mpdu_sw_requeued);
+ dst->mpdu_suc_retry = __le32_to_cpu(src->mpdu_suc_retry);
+ dst->mpdu_suc_multitry = __le32_to_cpu(src->mpdu_suc_multitry);
+ dst->mpdu_fail_retry = __le32_to_cpu(src->mpdu_fail_retry);
+ dst->tx_ftm_suc = __le32_to_cpu(src->tx_ftm_suc);
+ dst->tx_ftm_suc_retry = __le32_to_cpu(src->tx_ftm_suc_retry);
+ dst->tx_ftm_fail = __le32_to_cpu(src->tx_ftm_fail);
+ dst->rx_ftmr_cnt = __le32_to_cpu(src->rx_ftmr_cnt);
+ dst->rx_ftmr_dup_cnt = __le32_to_cpu(src->rx_ftmr_dup_cnt);
+ dst->rx_iftmr_cnt = __le32_to_cpu(src->rx_iftmr_cnt);
+ dst->rx_iftmr_dup_cnt = __le32_to_cpu(src->rx_iftmr_dup_cnt);
+}
+
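+/* Firmware stats arrive as one packed, variable-length event: a header
+ * with per-category record counts followed by that many fixed-size
+ * records back to back. The per-ABI pullers below walk the buffer with
+ * skb_pull() and bail out with -EPROTO if it runs short mid-record.
+ */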
+static int ath10k_wmi_main_op_pull_fw_stats(struct ath10k *ar,
+ struct sk_buff *skb,
+ struct ath10k_fw_stats *stats)
+{
+ const struct wmi_stats_event *ev = (void *)skb->data;
+ u32 num_pdev_stats, num_peer_stats;
+ int i;
+
+ if (!skb_pull(skb, sizeof(*ev)))
+ return -EPROTO;
+
+ num_pdev_stats = __le32_to_cpu(ev->num_pdev_stats);
+ num_peer_stats = __le32_to_cpu(ev->num_peer_stats);
+
+ for (i = 0; i < num_pdev_stats; i++) {
+ const struct wmi_pdev_stats *src;
+ struct ath10k_fw_stats_pdev *dst;
+
+ src = (void *)skb->data;
+ if (!skb_pull(skb, sizeof(*src)))
+ return -EPROTO;
+
+ dst = kzalloc(sizeof(*dst), GFP_ATOMIC);
+ if (!dst)
+ continue;
+
+ ath10k_wmi_pull_pdev_stats_base(&src->base, dst);
+ ath10k_wmi_pull_pdev_stats_tx(&src->tx, dst);
+ ath10k_wmi_pull_pdev_stats_rx(&src->rx, dst);
+
+ list_add_tail(&dst->list, &stats->pdevs);
+ }
+
+ /* fw doesn't implement vdev stats */
+
+ for (i = 0; i < num_peer_stats; i++) {
+ const struct wmi_peer_stats *src;
+ struct ath10k_fw_stats_peer *dst;
+
+ src = (void *)skb->data;
+ if (!skb_pull(skb, sizeof(*src)))
+ return -EPROTO;
+
+ dst = kzalloc(sizeof(*dst), GFP_ATOMIC);
+ if (!dst)
+ continue;
+
+ ath10k_wmi_pull_peer_stats(src, dst);
+ list_add_tail(&dst->list, &stats->peers);
+ }
+
+ return 0;
+}
+
+static int ath10k_wmi_10x_op_pull_fw_stats(struct ath10k *ar,
+ struct sk_buff *skb,
+ struct ath10k_fw_stats *stats)
+{
+ const struct wmi_stats_event *ev = (void *)skb->data;
+ u32 num_pdev_stats, num_peer_stats;
+ int i;
+
+ if (!skb_pull(skb, sizeof(*ev)))
+ return -EPROTO;
+
+ num_pdev_stats = __le32_to_cpu(ev->num_pdev_stats);
+ num_peer_stats = __le32_to_cpu(ev->num_peer_stats);
+
+ for (i = 0; i < num_pdev_stats; i++) {
+ const struct wmi_10x_pdev_stats *src;
+ struct ath10k_fw_stats_pdev *dst;
+
+ src = (void *)skb->data;
+ if (!skb_pull(skb, sizeof(*src)))
+ return -EPROTO;
+
+ dst = kzalloc(sizeof(*dst), GFP_ATOMIC);
+ if (!dst)
+ continue;
+
+ ath10k_wmi_pull_pdev_stats_base(&src->base, dst);
+ ath10k_wmi_pull_pdev_stats_tx(&src->tx, dst);
+ ath10k_wmi_pull_pdev_stats_rx(&src->rx, dst);
+ ath10k_wmi_pull_pdev_stats_extra(&src->extra, dst);
+
+ list_add_tail(&dst->list, &stats->pdevs);
+ }
+
+ /* fw doesn't implement vdev stats */
+
+ for (i = 0; i < num_peer_stats; i++) {
+ const struct wmi_10x_peer_stats *src;
+ struct ath10k_fw_stats_peer *dst;
+
+ src = (void *)skb->data;
+ if (!skb_pull(skb, sizeof(*src)))
+ return -EPROTO;
+
+ dst = kzalloc(sizeof(*dst), GFP_ATOMIC);
+ if (!dst)
+ continue;
+
+ ath10k_wmi_pull_peer_stats(&src->old, dst);
+
+ dst->peer_rx_rate = __le32_to_cpu(src->peer_rx_rate);
+
+ list_add_tail(&dst->list, &stats->peers);
+ }
+
+ return 0;
+}
+
+static int ath10k_wmi_10_2_op_pull_fw_stats(struct ath10k *ar,
+ struct sk_buff *skb,
+ struct ath10k_fw_stats *stats)
+{
+ const struct wmi_10_2_stats_event *ev = (void *)skb->data;
+ u32 num_pdev_stats;
+ u32 num_pdev_ext_stats;
+ u32 num_peer_stats;
+ int i;
+
+ if (!skb_pull(skb, sizeof(*ev)))
+ return -EPROTO;
+
+ num_pdev_stats = __le32_to_cpu(ev->num_pdev_stats);
+ num_pdev_ext_stats = __le32_to_cpu(ev->num_pdev_ext_stats);
+ num_peer_stats = __le32_to_cpu(ev->num_peer_stats);
+
+ for (i = 0; i < num_pdev_stats; i++) {
+ const struct wmi_10_2_pdev_stats *src;
+ struct ath10k_fw_stats_pdev *dst;
+
+ src = (void *)skb->data;
+ if (!skb_pull(skb, sizeof(*src)))
+ return -EPROTO;
+
+ dst = kzalloc(sizeof(*dst), GFP_ATOMIC);
+ if (!dst)
+ continue;
+
+ ath10k_wmi_pull_pdev_stats_base(&src->base, dst);
+ ath10k_wmi_pull_pdev_stats_tx(&src->tx, dst);
+ ath10k_wmi_pull_pdev_stats_rx(&src->rx, dst);
+ ath10k_wmi_pull_pdev_stats_extra(&src->extra, dst);
+ /* FIXME: expose 10.2 specific values */
+
+ list_add_tail(&dst->list, &stats->pdevs);
+ }
+
+ for (i = 0; i < num_pdev_ext_stats; i++) {
+ const struct wmi_10_2_pdev_ext_stats *src;
+
+ src = (void *)skb->data;
+ if (!skb_pull(skb, sizeof(*src)))
+ return -EPROTO;
+
+ /* FIXME: expose values to userspace
+ *
+ * Note: Even though this loop seems to do nothing, it is
+ * required to parse the following sub-structures properly.
+ */
+ }
+
+ /* fw doesn't implement vdev stats */
+
+ for (i = 0; i < num_peer_stats; i++) {
+ const struct wmi_10_2_peer_stats *src;
+ struct ath10k_fw_stats_peer *dst;
+
+ src = (void *)skb->data;
+ if (!skb_pull(skb, sizeof(*src)))
+ return -EPROTO;
+
+ dst = kzalloc(sizeof(*dst), GFP_ATOMIC);
+ if (!dst)
+ continue;
+
+ ath10k_wmi_pull_peer_stats(&src->old, dst);
+
+ dst->peer_rx_rate = __le32_to_cpu(src->peer_rx_rate);
+ /* FIXME: expose 10.2 specific values */
+
+ list_add_tail(&dst->list, &stats->peers);
+ }
+
+ return 0;
+}
+
+static int ath10k_wmi_10_2_4_op_pull_fw_stats(struct ath10k *ar,
+ struct sk_buff *skb,
+ struct ath10k_fw_stats *stats)
+{
+ const struct wmi_10_2_stats_event *ev = (void *)skb->data;
+ u32 num_pdev_stats;
+ u32 num_pdev_ext_stats;
+ u32 num_peer_stats;
+ int i;
+
+ if (!skb_pull(skb, sizeof(*ev)))
+ return -EPROTO;
+
+ num_pdev_stats = __le32_to_cpu(ev->num_pdev_stats);
+ num_pdev_ext_stats = __le32_to_cpu(ev->num_pdev_ext_stats);
+ num_peer_stats = __le32_to_cpu(ev->num_peer_stats);
+
+ for (i = 0; i < num_pdev_stats; i++) {
+ const struct wmi_10_2_pdev_stats *src;
+ struct ath10k_fw_stats_pdev *dst;
+
+ src = (void *)skb->data;
+ if (!skb_pull(skb, sizeof(*src)))
+ return -EPROTO;
+
+ dst = kzalloc(sizeof(*dst), GFP_ATOMIC);
+ if (!dst)
+ continue;
+
+ ath10k_wmi_pull_pdev_stats_base(&src->base, dst);
+ ath10k_wmi_pull_pdev_stats_tx(&src->tx, dst);
+ ath10k_wmi_pull_pdev_stats_rx(&src->rx, dst);
+ ath10k_wmi_pull_pdev_stats_extra(&src->extra, dst);
+ /* FIXME: expose 10.2 specific values */
+
+ list_add_tail(&dst->list, &stats->pdevs);
+ }
+
+ for (i = 0; i < num_pdev_ext_stats; i++) {
+ const struct wmi_10_2_pdev_ext_stats *src;
+
+ src = (void *)skb->data;
+ if (!skb_pull(skb, sizeof(*src)))
+ return -EPROTO;
+
+ /* FIXME: expose values to userspace
+ *
+ * Note: Even though this loop seems to do nothing, it is
+ * required to parse the following sub-structures properly.
+ */
+ }
+
+ /* fw doesn't implement vdev stats */
+
+ for (i = 0; i < num_peer_stats; i++) {
+ const struct wmi_10_2_4_ext_peer_stats *src;
+ struct ath10k_fw_stats_peer *dst;
+ int stats_len;
+
+ if (test_bit(WMI_SERVICE_PEER_STATS, ar->wmi.svc_map))
+ stats_len = sizeof(struct wmi_10_2_4_ext_peer_stats);
+ else
+ stats_len = sizeof(struct wmi_10_2_4_peer_stats);
+
+ src = (void *)skb->data;
+ if (!skb_pull(skb, stats_len))
+ return -EPROTO;
+
+ dst = kzalloc(sizeof(*dst), GFP_ATOMIC);
+ if (!dst)
+ continue;
+
+ ath10k_wmi_pull_peer_stats(&src->common.old, dst);
+
+ dst->peer_rx_rate = __le32_to_cpu(src->common.peer_rx_rate);
+
+ if (ath10k_peer_stats_enabled(ar))
+ dst->rx_duration = __le32_to_cpu(src->rx_duration);
+ /* FIXME: expose 10.2 specific values */
+
+ list_add_tail(&dst->list, &stats->peers);
+ }
+
+ return 0;
+}
+
+static int ath10k_wmi_10_4_op_pull_fw_stats(struct ath10k *ar,
+ struct sk_buff *skb,
+ struct ath10k_fw_stats *stats)
+{
+ const struct wmi_10_2_stats_event *ev = (void *)skb->data;
+ u32 num_pdev_stats;
+ u32 num_pdev_ext_stats;
+ u32 num_vdev_stats;
+ u32 num_peer_stats;
+ u32 num_bcnflt_stats;
+ u32 stats_id;
+ int i;
+
+ if (!skb_pull(skb, sizeof(*ev)))
+ return -EPROTO;
+
+ num_pdev_stats = __le32_to_cpu(ev->num_pdev_stats);
+ num_pdev_ext_stats = __le32_to_cpu(ev->num_pdev_ext_stats);
+ num_vdev_stats = __le32_to_cpu(ev->num_vdev_stats);
+ num_peer_stats = __le32_to_cpu(ev->num_peer_stats);
+ num_bcnflt_stats = __le32_to_cpu(ev->num_bcnflt_stats);
+ stats_id = __le32_to_cpu(ev->stats_id);
+
+ for (i = 0; i < num_pdev_stats; i++) {
+ const struct wmi_10_4_pdev_stats *src;
+ struct ath10k_fw_stats_pdev *dst;
+
+ src = (void *)skb->data;
+ if (!skb_pull(skb, sizeof(*src)))
+ return -EPROTO;
+
+ dst = kzalloc(sizeof(*dst), GFP_ATOMIC);
+ if (!dst)
+ continue;
+
+ ath10k_wmi_pull_pdev_stats_base(&src->base, dst);
+ ath10k_wmi_10_4_pull_pdev_stats_tx(&src->tx, dst);
+ ath10k_wmi_pull_pdev_stats_rx(&src->rx, dst);
+ dst->rx_ovfl_errs = __le32_to_cpu(src->rx_ovfl_errs);
+ ath10k_wmi_pull_pdev_stats_extra(&src->extra, dst);
+
+ list_add_tail(&dst->list, &stats->pdevs);
+ }
+
+ for (i = 0; i < num_pdev_ext_stats; i++) {
+ const struct wmi_10_2_pdev_ext_stats *src;
+
+ src = (void *)skb->data;
+ if (!skb_pull(skb, sizeof(*src)))
+ return -EPROTO;
+
+ /* FIXME: expose values to userspace
+ *
+ * Note: Even though this loop seems to do nothing, it is
+ * required to parse the following sub-structures properly.
+ */
+ }
+
+ for (i = 0; i < num_vdev_stats; i++) {
+ const struct wmi_vdev_stats *src;
+
+ /* Ignore vdev stats here as they carry only the vdev id. Actual
+ * vdev stats will be retrieved from the vdev extended stats.
+ */
+ src = (void *)skb->data;
+ if (!skb_pull(skb, sizeof(*src)))
+ return -EPROTO;
+ }
+
+ for (i = 0; i < num_peer_stats; i++) {
+ const struct wmi_10_4_peer_stats *src;
+ struct ath10k_fw_stats_peer *dst;
+
+ src = (void *)skb->data;
+ if (!skb_pull(skb, sizeof(*src)))
+ return -EPROTO;
+
+ dst = kzalloc(sizeof(*dst), GFP_ATOMIC);
+ if (!dst)
+ continue;
+
+ ath10k_wmi_10_4_pull_peer_stats(src, dst);
+ list_add_tail(&dst->list, &stats->peers);
+ }
+
+ for (i = 0; i < num_bcnflt_stats; i++) {
+ const struct wmi_10_4_bss_bcn_filter_stats *src;
+
+ src = (void *)skb->data;
+ if (!skb_pull(skb, sizeof(*src)))
+ return -EPROTO;
+
+ /* FIXME: expose values to userspace
+ *
+ * Note: Even though this loop seems to do nothing, it is
+ * required to parse the following sub-structures properly.
+ */
+ }
+
+ if (stats_id & WMI_10_4_STAT_PEER_EXTD) {
+ stats->extended = true;
+
+ for (i = 0; i < num_peer_stats; i++) {
+ const struct wmi_10_4_peer_extd_stats *src;
+ struct ath10k_fw_extd_stats_peer *dst;
+
+ src = (void *)skb->data;
+ if (!skb_pull(skb, sizeof(*src)))
+ return -EPROTO;
+
+ dst = kzalloc(sizeof(*dst), GFP_ATOMIC);
+ if (!dst)
+ continue;
+
+ ether_addr_copy(dst->peer_macaddr,
+ src->peer_macaddr.addr);
+ dst->rx_duration = __le32_to_cpu(src->rx_duration);
+ list_add_tail(&dst->list, &stats->peers_extd);
+ }
+ }
+
+ if (stats_id & WMI_10_4_STAT_VDEV_EXTD) {
+ for (i = 0; i < num_vdev_stats; i++) {
+ const struct wmi_vdev_stats_extd *src;
+ struct ath10k_fw_stats_vdev_extd *dst;
+
+ src = (void *)skb->data;
+ if (!skb_pull(skb, sizeof(*src)))
+ return -EPROTO;
+
+ dst = kzalloc(sizeof(*dst), GFP_ATOMIC);
+ if (!dst)
+ continue;
+ ath10k_wmi_10_4_pull_vdev_stats(src, dst);
+ list_add_tail(&dst->list, &stats->vdevs);
+ }
+ }
+
+ return 0;
+}
+
+void ath10k_wmi_event_update_stats(struct ath10k *ar, struct sk_buff *skb)
+{
+ ath10k_dbg(ar, ATH10K_DBG_WMI, "WMI_UPDATE_STATS_EVENTID\n");
+ ath10k_debug_fw_stats_process(ar, skb);
+}
+
+static int
+ath10k_wmi_op_pull_vdev_start_ev(struct ath10k *ar, struct sk_buff *skb,
+ struct wmi_vdev_start_ev_arg *arg)
+{
+ struct wmi_vdev_start_response_event *ev = (void *)skb->data;
+
+ if (skb->len < sizeof(*ev))
+ return -EPROTO;
+
+ skb_pull(skb, sizeof(*ev));
+ arg->vdev_id = ev->vdev_id;
+ arg->req_id = ev->req_id;
+ arg->resp_type = ev->resp_type;
+ arg->status = ev->status;
+
+ return 0;
+}
+
+void ath10k_wmi_event_vdev_start_resp(struct ath10k *ar, struct sk_buff *skb)
+{
+ struct wmi_vdev_start_ev_arg arg = {};
+ int ret;
+ u32 status;
+
+ ath10k_dbg(ar, ATH10K_DBG_WMI, "WMI_VDEV_START_RESP_EVENTID\n");
+
+ ar->last_wmi_vdev_start_status = 0;
+
+ ret = ath10k_wmi_pull_vdev_start(ar, skb, &arg);
+ if (ret) {
+ ath10k_warn(ar, "failed to parse vdev start event: %d\n", ret);
+ ar->last_wmi_vdev_start_status = ret;
+ goto out;
+ }
+
+ status = __le32_to_cpu(arg.status);
+ if (WARN_ON_ONCE(status)) {
+ ath10k_warn(ar, "vdev-start-response reports status error: %d (%s)\n",
+ status, (status == WMI_VDEV_START_CHAN_INVALID) ?
+ "chan-invalid" : "unknown");
+ /* Setup is done one way or another though, so we should
+ * still do the completion; don't return here.
+ */
+ ar->last_wmi_vdev_start_status = -EINVAL;
+ }
+
+out:
+ complete(&ar->vdev_setup_done);
+}
+
+void ath10k_wmi_event_vdev_stopped(struct ath10k *ar, struct sk_buff *skb)
+{
+ ath10k_dbg(ar, ATH10K_DBG_WMI, "WMI_VDEV_STOPPED_EVENTID\n");
+ complete(&ar->vdev_setup_done);
+}
+
+static int
+ath10k_wmi_op_pull_peer_kick_ev(struct ath10k *ar, struct sk_buff *skb,
+ struct wmi_peer_kick_ev_arg *arg)
+{
+ struct wmi_peer_sta_kickout_event *ev = (void *)skb->data;
+
+ if (skb->len < sizeof(*ev))
+ return -EPROTO;
+
+ skb_pull(skb, sizeof(*ev));
+ arg->mac_addr = ev->peer_macaddr.addr;
+
+ return 0;
+}
+
+void ath10k_wmi_event_peer_sta_kickout(struct ath10k *ar, struct sk_buff *skb)
+{
+ struct wmi_peer_kick_ev_arg arg = {};
+ struct ieee80211_sta *sta;
+ int ret;
+
+ ret = ath10k_wmi_pull_peer_kick(ar, skb, &arg);
+ if (ret) {
+ ath10k_warn(ar, "failed to parse peer kickout event: %d\n",
+ ret);
+ return;
+ }
+
+ ath10k_dbg(ar, ATH10K_DBG_WMI, "wmi event peer sta kickout %pM\n",
+ arg.mac_addr);
+
+ rcu_read_lock();
+
+ sta = ieee80211_find_sta_by_ifaddr(ar->hw, arg.mac_addr, NULL);
+ if (!sta) {
+ ath10k_warn(ar, "Spurious quick kickout for STA %pM\n",
+ arg.mac_addr);
+ goto exit;
+ }
+
+ ieee80211_report_low_ack(sta, 10);
+
+exit:
+ rcu_read_unlock();
+}
+
+/*
+ * FIXME
+ *
+ * We don't report the sleep state of connected stations to
+ * mac80211. Due to this mac80211 can't fill in the TIM IE
+ * correctly.
+ *
+ * I know of no way of getting nullfunc frames that contain
+ * sleep transition from connected stations - these do not
+ * seem to be sent from the target to the host. There also
+ * doesn't seem to be a dedicated event for that. So the
+ * only way left to do this would be to read tim_bitmap
+ * during SWBA.
+ *
+ * We could probably try using tim_bitmap from SWBA to tell
+ * mac80211 which stations are asleep and which are not. The
+ * problem here is that calling mac80211 functions so many
+ * times could take too long and make us miss the time to
+ * submit the beacon to the target.
+ *
+ * So as a workaround we try to extend the TIM IE if there is
+ * unicast traffic buffered for stations with aid > 7 and fill
+ * it in ourselves.
+ */
+static void ath10k_wmi_update_tim(struct ath10k *ar,
+ struct ath10k_vif *arvif,
+ struct sk_buff *bcn,
+ const struct wmi_tim_info_arg *tim_info)
+{
+ struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)bcn->data;
+ struct ieee80211_tim_ie *tim;
+ u8 *ies, *ie;
+ u8 ie_len, pvm_len;
+ __le32 t;
+ u32 v, tim_len;
+
+ /* When FW reports 0 in tim_len, ensure at least the first byte
+ * of tim_bitmap is considered for the pvm calculation.
+ */
+ tim_len = tim_info->tim_len ? __le32_to_cpu(tim_info->tim_len) : 1;
+
+ /* If the next SWBA has no tim_changed, the tim_bitmap is garbage;
+ * we must copy the bitmap upon change and reuse it later.
+ */
+ if (__le32_to_cpu(tim_info->tim_changed)) {
+ int i;
+
+ if (sizeof(arvif->u.ap.tim_bitmap) < tim_len) {
+ ath10k_warn(ar, "SWBA TIM field is too big (%u), truncated it to %zu",
+ tim_len, sizeof(arvif->u.ap.tim_bitmap));
+ tim_len = sizeof(arvif->u.ap.tim_bitmap);
+ }
+
+ for (i = 0; i < tim_len; i++) {
+ t = tim_info->tim_bitmap[i / 4];
+ v = __le32_to_cpu(t);
+ arvif->u.ap.tim_bitmap[i] = (v >> ((i % 4) * 8)) & 0xFF;
+ }
+
+ /* FW reports either length 0 or a length based on the max
+ * supported station count, so we calculate this on our own.
+ */
+ arvif->u.ap.tim_len = 0;
+ for (i = 0; i < tim_len; i++)
+ if (arvif->u.ap.tim_bitmap[i])
+ arvif->u.ap.tim_len = i;
+
+ arvif->u.ap.tim_len++;
+ }
+
+ ies = bcn->data;
+ ies += ieee80211_hdrlen(hdr->frame_control);
+ ies += 12; /* fixed parameters */
+
+ ie = (u8 *)cfg80211_find_ie(WLAN_EID_TIM, ies,
+ (u8 *)skb_tail_pointer(bcn) - ies);
+ if (!ie) {
+ if (arvif->vdev_type != WMI_VDEV_TYPE_IBSS)
+ ath10k_warn(ar, "no tim ie found;\n");
+ return;
+ }
+
+ tim = (void *)ie + 2;
+ ie_len = ie[1];
+ pvm_len = ie_len - 3; /* exclude dtim count, dtim period, bmap ctl */
+
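+ /* If the beacon template's partial virtual bitmap is smaller
+ * than what the firmware-reported TIM needs, grow the TIM IE in
+ * place: extend the skb tail and shift the trailing IEs back to
+ * make room.
+ */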
+ if (pvm_len < arvif->u.ap.tim_len) {
+ int expand_size = tim_len - pvm_len;
+ int move_size = skb_tail_pointer(bcn) - (ie + 2 + ie_len);
+ void *next_ie = ie + 2 + ie_len;
+
+ if (skb_put(bcn, expand_size)) {
+ memmove(next_ie + expand_size, next_ie, move_size);
+
+ ie[1] += expand_size;
+ ie_len += expand_size;
+ pvm_len += expand_size;
+ } else {
+ ath10k_warn(ar, "tim expansion failed\n");
+ }
+ }
+
+ if (pvm_len > tim_len) {
+ ath10k_warn(ar, "tim pvm length is too great (%d)\n", pvm_len);
+ return;
+ }
+
+ tim->bitmap_ctrl = !!__le32_to_cpu(tim_info->tim_mcast);
+ memcpy(tim->virtual_map, arvif->u.ap.tim_bitmap, pvm_len);
+
+ if (tim->dtim_count == 0) {
+ ATH10K_SKB_CB(bcn)->flags |= ATH10K_SKB_F_DTIM_ZERO;
+
+ if (__le32_to_cpu(tim_info->tim_mcast) == 1)
+ ATH10K_SKB_CB(bcn)->flags |= ATH10K_SKB_F_DELIVER_CAB;
+ }
+
+ ath10k_dbg(ar, ATH10K_DBG_MGMT, "dtim %d/%d mcast %d pvmlen %d\n",
+ tim->dtim_count, tim->dtim_period,
+ tim->bitmap_ctrl, pvm_len);
+}
+
+static void ath10k_wmi_update_noa(struct ath10k *ar, struct ath10k_vif *arvif,
+ struct sk_buff *bcn,
+ const struct wmi_p2p_noa_info *noa)
+{
+ if (!arvif->vif->p2p)
+ return;
+
+ ath10k_dbg(ar, ATH10K_DBG_MGMT, "noa changed: %d\n", noa->changed);
+
+ if (noa->changed & WMI_P2P_NOA_CHANGED_BIT)
+ ath10k_p2p_noa_update(arvif, noa);
+
+ if (arvif->u.ap.noa_data)
+ if (!pskb_expand_head(bcn, 0, arvif->u.ap.noa_len, GFP_ATOMIC))
+ skb_put_data(bcn, arvif->u.ap.noa_data,
+ arvif->u.ap.noa_len);
+}
+
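+/* SWBA pullers: vdev_map is a bitmask of beaconing vdevs and bcn_info[]
+ * carries one entry per set bit, in bit order. Hence the index i only
+ * advances on set bits while the map is shifted every iteration.
+ */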
+static int ath10k_wmi_op_pull_swba_ev(struct ath10k *ar, struct sk_buff *skb,
+ struct wmi_swba_ev_arg *arg)
+{
+ struct wmi_host_swba_event *ev = (void *)skb->data;
+ u32 map;
+ size_t i;
+
+ if (skb->len < sizeof(*ev))
+ return -EPROTO;
+
+ skb_pull(skb, sizeof(*ev));
+ arg->vdev_map = ev->vdev_map;
+
+ for (i = 0, map = __le32_to_cpu(ev->vdev_map); map; map >>= 1) {
+ if (!(map & BIT(0)))
+ continue;
+
+ /* If this happens there were some changes in firmware and
+ * ath10k should update the max size of the tim_info array.
+ */
+ if (WARN_ON_ONCE(i == ARRAY_SIZE(arg->tim_info)))
+ break;
+
+ if (__le32_to_cpu(ev->bcn_info[i].tim_info.tim_len) >
+ sizeof(ev->bcn_info[i].tim_info.tim_bitmap)) {
+ ath10k_warn(ar, "refusing to parse invalid swba structure\n");
+ return -EPROTO;
+ }
+
+ arg->tim_info[i].tim_len = ev->bcn_info[i].tim_info.tim_len;
+ arg->tim_info[i].tim_mcast = ev->bcn_info[i].tim_info.tim_mcast;
+ arg->tim_info[i].tim_bitmap =
+ ev->bcn_info[i].tim_info.tim_bitmap;
+ arg->tim_info[i].tim_changed =
+ ev->bcn_info[i].tim_info.tim_changed;
+ arg->tim_info[i].tim_num_ps_pending =
+ ev->bcn_info[i].tim_info.tim_num_ps_pending;
+
+ arg->noa_info[i] = &ev->bcn_info[i].p2p_noa_info;
+ i++;
+ }
+
+ return 0;
+}
+
+static int ath10k_wmi_10_2_4_op_pull_swba_ev(struct ath10k *ar,
+ struct sk_buff *skb,
+ struct wmi_swba_ev_arg *arg)
+{
+ struct wmi_10_2_4_host_swba_event *ev = (void *)skb->data;
+ u32 map;
+ size_t i;
+
+ if (skb->len < sizeof(*ev))
+ return -EPROTO;
+
+ skb_pull(skb, sizeof(*ev));
+ arg->vdev_map = ev->vdev_map;
+
+ for (i = 0, map = __le32_to_cpu(ev->vdev_map); map; map >>= 1) {
+ if (!(map & BIT(0)))
+ continue;
+
+ /* If this happens there were some changes in firmware and
+ * ath10k should update the max size of the tim_info array.
+ */
+ if (WARN_ON_ONCE(i == ARRAY_SIZE(arg->tim_info)))
+ break;
+
+ if (__le32_to_cpu(ev->bcn_info[i].tim_info.tim_len) >
+ sizeof(ev->bcn_info[i].tim_info.tim_bitmap)) {
+ ath10k_warn(ar, "refusing to parse invalid swba structure\n");
+ return -EPROTO;
+ }
+
+ arg->tim_info[i].tim_len = ev->bcn_info[i].tim_info.tim_len;
+ arg->tim_info[i].tim_mcast = ev->bcn_info[i].tim_info.tim_mcast;
+ arg->tim_info[i].tim_bitmap =
+ ev->bcn_info[i].tim_info.tim_bitmap;
+ arg->tim_info[i].tim_changed =
+ ev->bcn_info[i].tim_info.tim_changed;
+ arg->tim_info[i].tim_num_ps_pending =
+ ev->bcn_info[i].tim_info.tim_num_ps_pending;
+ i++;
+ }
+
+ return 0;
+}
+
+static int ath10k_wmi_10_4_op_pull_swba_ev(struct ath10k *ar,
+ struct sk_buff *skb,
+ struct wmi_swba_ev_arg *arg)
+{
+ struct wmi_10_4_host_swba_event *ev = (void *)skb->data;
+ u32 map, tim_len;
+ size_t i;
+
+ if (skb->len < sizeof(*ev))
+ return -EPROTO;
+
+ skb_pull(skb, sizeof(*ev));
+ arg->vdev_map = ev->vdev_map;
+
+ for (i = 0, map = __le32_to_cpu(ev->vdev_map); map; map >>= 1) {
+ if (!(map & BIT(0)))
+ continue;
+
+ /* If this happens there were some changes in firmware and
+ * ath10k should update the max size of the tim_info array.
+ */
+ if (WARN_ON_ONCE(i == ARRAY_SIZE(arg->tim_info)))
+ break;
+
+ if (__le32_to_cpu(ev->bcn_info[i].tim_info.tim_len) >
+ sizeof(ev->bcn_info[i].tim_info.tim_bitmap)) {
+ ath10k_warn(ar, "refusing to parse invalid swba structure\n");
+ return -EPROTO;
+ }
+
+ tim_len = __le32_to_cpu(ev->bcn_info[i].tim_info.tim_len);
+ if (tim_len) {
+ /* Exclude 4 byte guard length */
+ tim_len -= 4;
+ arg->tim_info[i].tim_len = __cpu_to_le32(tim_len);
+ } else {
+ arg->tim_info[i].tim_len = 0;
+ }
+
+ arg->tim_info[i].tim_mcast = ev->bcn_info[i].tim_info.tim_mcast;
+ arg->tim_info[i].tim_bitmap =
+ ev->bcn_info[i].tim_info.tim_bitmap;
+ arg->tim_info[i].tim_changed =
+ ev->bcn_info[i].tim_info.tim_changed;
+ arg->tim_info[i].tim_num_ps_pending =
+ ev->bcn_info[i].tim_info.tim_num_ps_pending;
+
+ /* 10.4 firmware doesn't have p2p support, so the notice of
+ * absence info can be ignored for now.
+ */
+
+ i++;
+ }
+
+ return 0;
+}
+
+static enum wmi_txbf_conf ath10k_wmi_10_4_txbf_conf_scheme(struct ath10k *ar)
+{
+ return WMI_TXBF_CONF_BEFORE_ASSOC;
+}
+
+void ath10k_wmi_event_host_swba(struct ath10k *ar, struct sk_buff *skb)
+{
+ struct wmi_swba_ev_arg arg = {};
+ u32 map;
+ int i = -1;
+ const struct wmi_tim_info_arg *tim_info;
+ const struct wmi_p2p_noa_info *noa_info;
+ struct ath10k_vif *arvif;
+ struct sk_buff *bcn;
+ dma_addr_t paddr;
+ int ret, vdev_id = 0;
+
+ ret = ath10k_wmi_pull_swba(ar, skb, &arg);
+ if (ret) {
+ ath10k_warn(ar, "failed to parse swba event: %d\n", ret);
+ return;
+ }
+
+ map = __le32_to_cpu(arg.vdev_map);
+
+ ath10k_dbg(ar, ATH10K_DBG_MGMT, "mgmt swba vdev_map 0x%x\n",
+ map);
+
+ for (; map; map >>= 1, vdev_id++) {
+ if (!(map & 0x1))
+ continue;
+
+ i++;
+
+ if (i >= WMI_MAX_AP_VDEV) {
+ ath10k_warn(ar, "swba has corrupted vdev map\n");
+ break;
+ }
+
+ tim_info = &arg.tim_info[i];
+ noa_info = arg.noa_info[i];
+
+ ath10k_dbg(ar, ATH10K_DBG_MGMT,
+ "mgmt event bcn_info %d tim_len %d mcast %d changed %d num_ps_pending %d bitmap 0x%08x%08x%08x%08x\n",
+ i,
+ __le32_to_cpu(tim_info->tim_len),
+ __le32_to_cpu(tim_info->tim_mcast),
+ __le32_to_cpu(tim_info->tim_changed),
+ __le32_to_cpu(tim_info->tim_num_ps_pending),
+ __le32_to_cpu(tim_info->tim_bitmap[3]),
+ __le32_to_cpu(tim_info->tim_bitmap[2]),
+ __le32_to_cpu(tim_info->tim_bitmap[1]),
+ __le32_to_cpu(tim_info->tim_bitmap[0]));
+
+ /* TODO: Only the first 4 words of tim_bitmap are dumped.
+ * Extend the debug code to dump the full tim_bitmap.
+ */
+
+ arvif = ath10k_get_arvif(ar, vdev_id);
+ if (!arvif) {
+ ath10k_warn(ar, "no vif for vdev_id %d found\n",
+ vdev_id);
+ continue;
+ }
+
+ /* mac80211 would have already asked us to stop beaconing and
+ * bring the vdev down, so continue in that case
+ */
+ if (!arvif->is_up)
+ continue;
+
+ /* There are no completions for beacons so wait for next SWBA
+ * before telling mac80211 to decrement CSA counter
+ *
+ * Once CSA counter is completed stop sending beacons until
+ * actual channel switch is done
+ */
+ if (arvif->vif->csa_active &&
+ ieee80211_csa_is_complete(arvif->vif)) {
+ ieee80211_csa_finish(arvif->vif);
+ continue;
+ }
+
+ bcn = ieee80211_beacon_get(ar->hw, arvif->vif);
+ if (!bcn) {
+ ath10k_warn(ar, "could not get mac80211 beacon\n");
+ continue;
+ }
+
+ ath10k_tx_h_seq_no(arvif->vif, bcn);
+ ath10k_wmi_update_tim(ar, arvif, bcn, tim_info);
+ ath10k_wmi_update_noa(ar, arvif, bcn, noa_info);
+
+ spin_lock_bh(&ar->data_lock);
+
+ if (arvif->beacon) {
+ switch (arvif->beacon_state) {
+ case ATH10K_BEACON_SENT:
+ break;
+ case ATH10K_BEACON_SCHEDULED:
+ ath10k_warn(ar, "SWBA overrun on vdev %d, skipped old beacon\n",
+ arvif->vdev_id);
+ break;
+ case ATH10K_BEACON_SENDING:
+ ath10k_warn(ar, "SWBA overrun on vdev %d, skipped new beacon\n",
+ arvif->vdev_id);
+ dev_kfree_skb(bcn);
+ goto skip;
+ }
+
+ ath10k_mac_vif_beacon_free(arvif);
+ }
+
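+ /* Without a preallocated beacon_buf the beacon skb is
+ * streaming-DMA mapped for this transmission only; otherwise it
+ * is copied into the per-vif coherent buffer (trimmed if
+ * oversized) and the fixed beacon_paddr is reused.
+ */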
+ if (!arvif->beacon_buf) {
+ paddr = dma_map_single(arvif->ar->dev, bcn->data,
+ bcn->len, DMA_TO_DEVICE);
+ ret = dma_mapping_error(arvif->ar->dev, paddr);
+ if (ret) {
+ ath10k_warn(ar, "failed to map beacon: %d\n",
+ ret);
+ dev_kfree_skb_any(bcn);
+ goto skip;
+ }
+
+ ATH10K_SKB_CB(bcn)->paddr = paddr;
+ } else {
+ if (bcn->len > IEEE80211_MAX_FRAME_LEN) {
+ ath10k_warn(ar, "trimming beacon %d -> %d bytes!\n",
+ bcn->len, IEEE80211_MAX_FRAME_LEN);
+ skb_trim(bcn, IEEE80211_MAX_FRAME_LEN);
+ }
+ memcpy(arvif->beacon_buf, bcn->data, bcn->len);
+ ATH10K_SKB_CB(bcn)->paddr = arvif->beacon_paddr;
+ }
+
+ arvif->beacon = bcn;
+ arvif->beacon_state = ATH10K_BEACON_SCHEDULED;
+
+ trace_ath10k_tx_hdr(ar, bcn->data, bcn->len);
+ trace_ath10k_tx_payload(ar, bcn->data, bcn->len);
+
+skip:
+ spin_unlock_bh(&ar->data_lock);
+ }
+
+ ath10k_wmi_tx_beacons_nowait(ar);
+}
+
+void ath10k_wmi_event_tbttoffset_update(struct ath10k *ar, struct sk_buff *skb)
+{
+ ath10k_dbg(ar, ATH10K_DBG_WMI, "WMI_TBTTOFFSET_UPDATE_EVENTID\n");
+}
+
+static void ath10k_radar_detected(struct ath10k *ar)
+{
+ ath10k_dbg(ar, ATH10K_DBG_REGULATORY, "dfs radar detected\n");
+ ATH10K_DFS_STAT_INC(ar, radar_detected);
+
+ /* Radar event reporting is controlled via the debugfs file
+ * dfs_block_radar_events.
+ */
+ if (ar->dfs_block_radar_events)
+ ath10k_info(ar, "DFS Radar detected, but ignored as requested\n");
+ else
+ ieee80211_radar_detected(ar->hw);
+}
+
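+/* Host DFS confirmation: send the detected pulse parameters to firmware
+ * and wait for it to confirm or reject the radar. If no confirmation
+ * event arrives within ATH10K_WMI_DFS_CONF_TIMEOUT_HZ the detection is
+ * treated as a real radar, erring on the safe side.
+ */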
+static void ath10k_radar_confirmation_work(struct work_struct *work)
+{
+ struct ath10k *ar = container_of(work, struct ath10k,
+ radar_confirmation_work);
+ struct ath10k_radar_found_info radar_info;
+ int ret, time_left;
+
+ reinit_completion(&ar->wmi.radar_confirm);
+
+ spin_lock_bh(&ar->data_lock);
+ memcpy(&radar_info, &ar->last_radar_info, sizeof(radar_info));
+ spin_unlock_bh(&ar->data_lock);
+
+ ret = ath10k_wmi_report_radar_found(ar, &radar_info);
+ if (ret) {
+ ath10k_warn(ar, "failed to send radar found %d\n", ret);
+ goto wait_complete;
+ }
+
+ time_left = wait_for_completion_timeout(&ar->wmi.radar_confirm,
+ ATH10K_WMI_DFS_CONF_TIMEOUT_HZ);
+ if (time_left) {
+ /* DFS Confirmation status event received and
+ * necessary action completed.
+ */
+ goto wait_complete;
+ } else {
+ /* DFS confirmation event not received from FW. Considering
+ * this as a real radar.
+ */
+ ath10k_dbg(ar, ATH10K_DBG_REGULATORY,
+ "dfs confirmation not received from fw, considering as radar\n");
+ goto radar_detected;
+ }
+
+radar_detected:
+ ath10k_radar_detected(ar);
+
+ /* Reset state to allow sending confirmation on consecutive radar
+ * detections, unless radar confirmation is disabled/stopped.
+ */
+wait_complete:
+ spin_lock_bh(&ar->data_lock);
+ if (ar->radar_conf_state != ATH10K_RADAR_CONFIRMATION_STOPPED)
+ ar->radar_conf_state = ATH10K_RADAR_CONFIRMATION_IDLE;
+ spin_unlock_bh(&ar->data_lock);
+}
+
+static void ath10k_dfs_radar_report(struct ath10k *ar,
+ struct wmi_phyerr_ev_arg *phyerr,
+ const struct phyerr_radar_report *rr,
+ u64 tsf)
+{
+ u32 reg0, reg1, tsf32l;
+ struct ieee80211_channel *ch;
+ struct pulse_event pe;
+ struct radar_detector_specs rs;
+ u64 tsf64;
+ u8 rssi, width;
+ struct ath10k_radar_found_info *radar_info;
+
+ reg0 = __le32_to_cpu(rr->reg0);
+ reg1 = __le32_to_cpu(rr->reg1);
+
+ ath10k_dbg(ar, ATH10K_DBG_REGULATORY,
+ "wmi phyerr radar report chirp %d max_width %d agc_total_gain %d pulse_delta_diff %d\n",
+ MS(reg0, RADAR_REPORT_REG0_PULSE_IS_CHIRP),
+ MS(reg0, RADAR_REPORT_REG0_PULSE_IS_MAX_WIDTH),
+ MS(reg0, RADAR_REPORT_REG0_AGC_TOTAL_GAIN),
+ MS(reg0, RADAR_REPORT_REG0_PULSE_DELTA_DIFF));
+ ath10k_dbg(ar, ATH10K_DBG_REGULATORY,
+ "wmi phyerr radar report pulse_delta_pean %d pulse_sidx %d fft_valid %d agc_mb_gain %d subchan_mask %d\n",
+ MS(reg0, RADAR_REPORT_REG0_PULSE_DELTA_PEAK),
+ MS(reg0, RADAR_REPORT_REG0_PULSE_SIDX),
+ MS(reg1, RADAR_REPORT_REG1_PULSE_SRCH_FFT_VALID),
+ MS(reg1, RADAR_REPORT_REG1_PULSE_AGC_MB_GAIN),
+ MS(reg1, RADAR_REPORT_REG1_PULSE_SUBCHAN_MASK));
+ ath10k_dbg(ar, ATH10K_DBG_REGULATORY,
+ "wmi phyerr radar report pulse_tsf_offset 0x%X pulse_dur: %d\n",
+ MS(reg1, RADAR_REPORT_REG1_PULSE_TSF_OFFSET),
+ MS(reg1, RADAR_REPORT_REG1_PULSE_DUR));
+
+ if (!ar->dfs_detector)
+ return;
+
+ spin_lock_bh(&ar->data_lock);
+ ch = ar->rx_channel;
+
+ /* fetch target operating channel during channel change */
+ if (!ch)
+ ch = ar->tgt_oper_chan;
+
+ spin_unlock_bh(&ar->data_lock);
+
+ if (!ch) {
+ ath10k_warn(ar, "failed to derive channel for radar pulse, treating as radar\n");
+ goto radar_detected;
+ }
+
+ /* report event to DFS pattern detector */
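+ /* the phyerr event carries only the low 32 bits of the TSF;
+ * splice them onto the upper 32 bits of the 64-bit tsf provided
+ * by the caller
+ */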
+ tsf32l = phyerr->tsf_timestamp;
+ tsf64 = tsf & (~0xFFFFFFFFULL);
+ tsf64 |= tsf32l;
+
+ width = MS(reg1, RADAR_REPORT_REG1_PULSE_DUR);
+ rssi = phyerr->rssi_combined;
+
+ /* hardware stores this as an 8 bit signed value;
+ * set it to zero if negative
+ */
+ if (rssi & 0x80)
+ rssi = 0;
+
+ pe.ts = tsf64;
+ pe.freq = ch->center_freq;
+ pe.width = width;
+ pe.rssi = rssi;
+ pe.chirp = (MS(reg0, RADAR_REPORT_REG0_PULSE_IS_CHIRP) != 0);
+ ath10k_dbg(ar, ATH10K_DBG_REGULATORY,
+ "dfs add pulse freq: %d, width: %d, rssi %d, tsf: %llX\n",
+ pe.freq, pe.width, pe.rssi, pe.ts);
+
+ ATH10K_DFS_STAT_INC(ar, pulses_detected);
+
+ if (!ar->dfs_detector->add_pulse(ar->dfs_detector, &pe, &rs)) {
+ ath10k_dbg(ar, ATH10K_DBG_REGULATORY,
+ "dfs no pulse pattern detected, yet\n");
+ return;
+ }
+
+ if ((test_bit(WMI_SERVICE_HOST_DFS_CHECK_SUPPORT, ar->wmi.svc_map)) &&
+ ar->dfs_detector->region == NL80211_DFS_FCC) {
+ /* Consecutive radar indications need not be
+ * sent to the firmware until we get confirmation
+ * for the previously detected radar.
+ */
+ spin_lock_bh(&ar->data_lock);
+ if (ar->radar_conf_state != ATH10K_RADAR_CONFIRMATION_IDLE) {
+ spin_unlock_bh(&ar->data_lock);
+ return;
+ }
+ ar->radar_conf_state = ATH10K_RADAR_CONFIRMATION_INPROGRESS;
+ radar_info = &ar->last_radar_info;
+
+ radar_info->pri_min = rs.pri_min;
+ radar_info->pri_max = rs.pri_max;
+ radar_info->width_min = rs.width_min;
+ radar_info->width_max = rs.width_max;
+ /*TODO Find sidx_min and sidx_max */
+ radar_info->sidx_min = MS(reg0, RADAR_REPORT_REG0_PULSE_SIDX);
+ radar_info->sidx_max = MS(reg0, RADAR_REPORT_REG0_PULSE_SIDX);
+
+ ath10k_dbg(ar, ATH10K_DBG_REGULATORY,
+ "sending wmi radar found cmd pri_min %d pri_max %d width_min %d width_max %d sidx_min %d sidx_max %d\n",
+ radar_info->pri_min, radar_info->pri_max,
+ radar_info->width_min, radar_info->width_max,
+ radar_info->sidx_min, radar_info->sidx_max);
+ ieee80211_queue_work(ar->hw, &ar->radar_confirmation_work);
+ spin_unlock_bh(&ar->data_lock);
+ return;
+ }
+
+radar_detected:
+ ath10k_radar_detected(ar);
+}
+
+static int ath10k_dfs_fft_report(struct ath10k *ar,
+ struct wmi_phyerr_ev_arg *phyerr,
+ const struct phyerr_fft_report *fftr,
+ u64 tsf)
+{
+ u32 reg0, reg1;
+ u8 rssi, peak_mag;
+
+ reg0 = __le32_to_cpu(fftr->reg0);
+ reg1 = __le32_to_cpu(fftr->reg1);
+ rssi = phyerr->rssi_combined;
+
+ ath10k_dbg(ar, ATH10K_DBG_REGULATORY,
+ "wmi phyerr fft report total_gain_db %d base_pwr_db %d fft_chn_idx %d peak_sidx %d\n",
+ MS(reg0, SEARCH_FFT_REPORT_REG0_TOTAL_GAIN_DB),
+ MS(reg0, SEARCH_FFT_REPORT_REG0_BASE_PWR_DB),
+ MS(reg0, SEARCH_FFT_REPORT_REG0_FFT_CHN_IDX),
+ MS(reg0, SEARCH_FFT_REPORT_REG0_PEAK_SIDX));
+ ath10k_dbg(ar, ATH10K_DBG_REGULATORY,
+ "wmi phyerr fft report rel_pwr_db %d avgpwr_db %d peak_mag %d num_store_bin %d\n",
+ MS(reg1, SEARCH_FFT_REPORT_REG1_RELPWR_DB),
+ MS(reg1, SEARCH_FFT_REPORT_REG1_AVGPWR_DB),
+ MS(reg1, SEARCH_FFT_REPORT_REG1_PEAK_MAG),
+ MS(reg1, SEARCH_FFT_REPORT_REG1_NUM_STR_BINS_IB));
+
+ peak_mag = MS(reg1, SEARCH_FFT_REPORT_REG1_PEAK_MAG);
+
+ /* False event detection: RSSI at the "possibly false" marker
+ * combined with a small FFT peak magnitude indicates a likely
+ * false radar pulse.
+ */
+ if (rssi == DFS_RSSI_POSSIBLY_FALSE &&
+ peak_mag < 2 * DFS_PEAK_MAG_THOLD_POSSIBLY_FALSE) {
+ ath10k_dbg(ar, ATH10K_DBG_REGULATORY, "dfs false pulse detected\n");
+ ATH10K_DFS_STAT_INC(ar, pulses_discarded);
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+void ath10k_wmi_event_dfs(struct ath10k *ar,
+ struct wmi_phyerr_ev_arg *phyerr,
+ u64 tsf)
+{
+ int buf_len, tlv_len, res, i = 0;
+ const struct phyerr_tlv *tlv;
+ const struct phyerr_radar_report *rr;
+ const struct phyerr_fft_report *fftr;
+ const u8 *tlv_buf;
+
+ buf_len = phyerr->buf_len;
+ ath10k_dbg(ar, ATH10K_DBG_REGULATORY,
+ "wmi event dfs err_code %d rssi %d tsfl 0x%X tsf64 0x%llX len %d\n",
+ phyerr->phy_err_code, phyerr->rssi_combined,
+ phyerr->tsf_timestamp, tsf, buf_len);
+
+ /* Skip event if DFS disabled */
+ if (!IS_ENABLED(CONFIG_ATH10K_DFS_CERTIFIED))
+ return;
+
+ ATH10K_DFS_STAT_INC(ar, pulses_total);
+
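+ /* Walk the TLV chain in the phyerr buffer; each TLV carries
+ * either a radar pulse summary or a search FFT report.
+ */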
+ while (i < buf_len) {
+ if (i + sizeof(*tlv) > buf_len) {
+ ath10k_warn(ar, "too short buf for tlv header (%d)\n",
+ i);
+ return;
+ }
+
+ tlv = (struct phyerr_tlv *)&phyerr->buf[i];
+ tlv_len = __le16_to_cpu(tlv->len);
+ tlv_buf = &phyerr->buf[i + sizeof(*tlv)];
+ ath10k_dbg(ar, ATH10K_DBG_REGULATORY,
+ "wmi event dfs tlv_len %d tlv_tag 0x%02X tlv_sig 0x%02X\n",
+ tlv_len, tlv->tag, tlv->sig);
+
+ switch (tlv->tag) {
+ case PHYERR_TLV_TAG_RADAR_PULSE_SUMMARY:
+ if (i + sizeof(*tlv) + sizeof(*rr) > buf_len) {
+ ath10k_warn(ar, "too short radar pulse summary (%d)\n",
+ i);
+ return;
+ }
+
+ rr = (struct phyerr_radar_report *)tlv_buf;
+ ath10k_dfs_radar_report(ar, phyerr, rr, tsf);
+ break;
+ case PHYERR_TLV_TAG_SEARCH_FFT_REPORT:
+ if (i + sizeof(*tlv) + sizeof(*fftr) > buf_len) {
+ ath10k_warn(ar, "too short fft report (%d)\n",
+ i);
+ return;
+ }
+
+ fftr = (struct phyerr_fft_report *)tlv_buf;
+ res = ath10k_dfs_fft_report(ar, phyerr, fftr, tsf);
+ if (res)
+ return;
+ break;
+ }
+
+ i += sizeof(*tlv) + tlv_len;
+ }
+}
+
+void ath10k_wmi_event_spectral_scan(struct ath10k *ar,
+ struct wmi_phyerr_ev_arg *phyerr,
+ u64 tsf)
+{
+ int buf_len, tlv_len, res, i = 0;
+ struct phyerr_tlv *tlv;
+ const void *tlv_buf;
+ const struct phyerr_fft_report *fftr;
+ size_t fftr_len;
+
+ buf_len = phyerr->buf_len;
+
+ while (i < buf_len) {
+ if (i + sizeof(*tlv) > buf_len) {
+ ath10k_warn(ar, "failed to parse phyerr tlv header at byte %d\n",
+ i);
+ return;
+ }
+
+ tlv = (struct phyerr_tlv *)&phyerr->buf[i];
+ tlv_len = __le16_to_cpu(tlv->len);
+ tlv_buf = &phyerr->buf[i + sizeof(*tlv)];
+
+ if (i + sizeof(*tlv) + tlv_len > buf_len) {
+ ath10k_warn(ar, "failed to parse phyerr tlv payload at byte %d\n",
+ i);
+ return;
+ }
+
+ switch (tlv->tag) {
+ case PHYERR_TLV_TAG_SEARCH_FFT_REPORT:
+ if (sizeof(*fftr) > tlv_len) {
+ ath10k_warn(ar, "failed to parse fft report at byte %d\n",
+ i);
+ return;
+ }
+
+ fftr_len = tlv_len - sizeof(*fftr);
+ fftr = tlv_buf;
+ res = ath10k_spectral_process_fft(ar, phyerr,
+ fftr, fftr_len,
+ tsf);
+ if (res < 0) {
+ ath10k_dbg(ar, ATH10K_DBG_WMI, "failed to process fft report: %d\n",
+ res);
+ return;
+ }
+ break;
+ }
+
+ i += sizeof(*tlv) + tlv_len;
+ }
+}
+
+static int ath10k_wmi_op_pull_phyerr_ev_hdr(struct ath10k *ar,
+ struct sk_buff *skb,
+ struct wmi_phyerr_hdr_arg *arg)
+{
+ struct wmi_phyerr_event *ev = (void *)skb->data;
+
+ if (skb->len < sizeof(*ev))
+ return -EPROTO;
+
+ arg->num_phyerrs = __le32_to_cpu(ev->num_phyerrs);
+ arg->tsf_l32 = __le32_to_cpu(ev->tsf_l32);
+ arg->tsf_u32 = __le32_to_cpu(ev->tsf_u32);
+ arg->buf_len = skb->len - sizeof(*ev);
+ arg->phyerrs = ev->phyerrs;
+
+ return 0;
+}
+
+static int ath10k_wmi_10_4_op_pull_phyerr_ev_hdr(struct ath10k *ar,
+ struct sk_buff *skb,
+ struct wmi_phyerr_hdr_arg *arg)
+{
+ struct wmi_10_4_phyerr_event *ev = (void *)skb->data;
+
+ if (skb->len < sizeof(*ev))
+ return -EPROTO;
+
+ /* 10.4 firmware always reports only one phyerr */
+ arg->num_phyerrs = 1;
+
+ arg->tsf_l32 = __le32_to_cpu(ev->tsf_l32);
+ arg->tsf_u32 = __le32_to_cpu(ev->tsf_u32);
+ arg->buf_len = skb->len;
+ arg->phyerrs = skb->data;
+
+ return 0;
+}
+
+int ath10k_wmi_op_pull_phyerr_ev(struct ath10k *ar,
+ const void *phyerr_buf,
+ int left_len,
+ struct wmi_phyerr_ev_arg *arg)
+{
+ const struct wmi_phyerr *phyerr = phyerr_buf;
+ int i;
+
+ if (left_len < sizeof(*phyerr)) {
+ ath10k_warn(ar, "wrong phyerr event head len %d (need: >=%zd)\n",
+ left_len, sizeof(*phyerr));
+ return -EINVAL;
+ }
+
+ arg->tsf_timestamp = __le32_to_cpu(phyerr->tsf_timestamp);
+ arg->freq1 = __le16_to_cpu(phyerr->freq1);
+ arg->freq2 = __le16_to_cpu(phyerr->freq2);
+ arg->rssi_combined = phyerr->rssi_combined;
+ arg->chan_width_mhz = phyerr->chan_width_mhz;
+ arg->buf_len = __le32_to_cpu(phyerr->buf_len);
+ arg->buf = phyerr->buf;
+ arg->hdr_len = sizeof(*phyerr);
+
+ for (i = 0; i < 4; i++)
+ arg->nf_chains[i] = __le16_to_cpu(phyerr->nf_chains[i]);
+
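+ /* Map the firmware-specific phy error code onto the generic
+ * code used by the common event handlers.
+ */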
+ switch (phyerr->phy_err_code) {
+ case PHY_ERROR_GEN_SPECTRAL_SCAN:
+ arg->phy_err_code = PHY_ERROR_SPECTRAL_SCAN;
+ break;
+ case PHY_ERROR_GEN_FALSE_RADAR_EXT:
+ arg->phy_err_code = PHY_ERROR_FALSE_RADAR_EXT;
+ break;
+ case PHY_ERROR_GEN_RADAR:
+ arg->phy_err_code = PHY_ERROR_RADAR;
+ break;
+ default:
+ arg->phy_err_code = PHY_ERROR_UNKNOWN;
+ break;
+ }
+
+ return 0;
+}
+
+static int ath10k_wmi_10_4_op_pull_phyerr_ev(struct ath10k *ar,
+ const void *phyerr_buf,
+ int left_len,
+ struct wmi_phyerr_ev_arg *arg)
+{
+ const struct wmi_10_4_phyerr_event *phyerr = phyerr_buf;
+ u32 phy_err_mask;
+ int i;
+
+ if (left_len < sizeof(*phyerr)) {
+ ath10k_warn(ar, "wrong phyerr event head len %d (need: >=%zd)\n",
+ left_len, sizeof(*phyerr));
+ return -EINVAL;
+ }
+
+ arg->tsf_timestamp = __le32_to_cpu(phyerr->tsf_timestamp);
+ arg->freq1 = __le16_to_cpu(phyerr->freq1);
+ arg->freq2 = __le16_to_cpu(phyerr->freq2);
+ arg->rssi_combined = phyerr->rssi_combined;
+ arg->chan_width_mhz = phyerr->chan_width_mhz;
+ arg->buf_len = __le32_to_cpu(phyerr->buf_len);
+ arg->buf = phyerr->buf;
+ arg->hdr_len = sizeof(*phyerr);
+
+ for (i = 0; i < 4; i++)
+ arg->nf_chains[i] = __le16_to_cpu(phyerr->nf_chains[i]);
+
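+ /* 10.4 firmware reports a phy error mask rather than a single
+ * error code; map the first mask word onto the generic code.
+ */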
+ phy_err_mask = __le32_to_cpu(phyerr->phy_err_mask[0]);
+
+ if (phy_err_mask & PHY_ERROR_10_4_SPECTRAL_SCAN_MASK)
+ arg->phy_err_code = PHY_ERROR_SPECTRAL_SCAN;
+ else if (phy_err_mask & PHY_ERROR_10_4_RADAR_MASK)
+ arg->phy_err_code = PHY_ERROR_RADAR;
+ else
+ arg->phy_err_code = PHY_ERROR_UNKNOWN;
+
+ return 0;
+}
+
+void ath10k_wmi_event_phyerr(struct ath10k *ar, struct sk_buff *skb)
+{
+ struct wmi_phyerr_hdr_arg hdr_arg = {};
+ struct wmi_phyerr_ev_arg phyerr_arg = {};
+ const void *phyerr;
+ u32 count, i, buf_len, phy_err_code;
+ u64 tsf;
+ int left_len, ret;
+
+ ATH10K_DFS_STAT_INC(ar, phy_errors);
+
+ ret = ath10k_wmi_pull_phyerr_hdr(ar, skb, &hdr_arg);
+ if (ret) {
+ ath10k_warn(ar, "failed to parse phyerr event hdr: %d\n", ret);
+ return;
+ }
+
+ /* Check number of included events */
+ count = hdr_arg.num_phyerrs;
+
+ left_len = hdr_arg.buf_len;
+
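+ /* Reassemble the 64-bit TSF from the two 32-bit halves in the
+ * event header.
+ */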
+ tsf = hdr_arg.tsf_u32;
+ tsf <<= 32;
+ tsf |= hdr_arg.tsf_l32;
+
+ ath10k_dbg(ar, ATH10K_DBG_WMI,
+ "wmi event phyerr count %d tsf64 0x%llX\n",
+ count, tsf);
+
+ phyerr = hdr_arg.phyerrs;
+ for (i = 0; i < count; i++) {
+ ret = ath10k_wmi_pull_phyerr(ar, phyerr, left_len, &phyerr_arg);
+ if (ret) {
+ ath10k_warn(ar, "failed to parse phyerr event (%d)\n",
+ i);
+ return;
+ }
+
+ left_len -= phyerr_arg.hdr_len;
+ buf_len = phyerr_arg.buf_len;
+ phy_err_code = phyerr_arg.phy_err_code;
+
+ if (left_len < buf_len) {
+ ath10k_warn(ar, "single event (%d) wrong buf len\n", i);
+ return;
+ }
+
+ left_len -= buf_len;
+
+ switch (phy_err_code) {
+ case PHY_ERROR_RADAR:
+ ath10k_wmi_event_dfs(ar, &phyerr_arg, tsf);
+ break;
+ case PHY_ERROR_SPECTRAL_SCAN:
+ ath10k_wmi_event_spectral_scan(ar, &phyerr_arg, tsf);
+ break;
+ case PHY_ERROR_FALSE_RADAR_EXT:
+ ath10k_wmi_event_dfs(ar, &phyerr_arg, tsf);
+ ath10k_wmi_event_spectral_scan(ar, &phyerr_arg, tsf);
+ break;
+ default:
+ break;
+ }
+
+ phyerr = phyerr + phyerr_arg.hdr_len + buf_len;
+ }
+}
+
+static int
+ath10k_wmi_10_4_op_pull_dfs_status_ev(struct ath10k *ar, struct sk_buff *skb,
+ struct wmi_dfs_status_ev_arg *arg)
+{
+ struct wmi_dfs_status_ev_arg *ev = (void *)skb->data;
+
+ if (skb->len < sizeof(*ev))
+ return -EPROTO;
+
+ arg->status = ev->status;
+
+ return 0;
+}
+
+static void
+ath10k_wmi_event_dfs_status_check(struct ath10k *ar, struct sk_buff *skb)
+{
+ struct wmi_dfs_status_ev_arg status_arg = {};
+ int ret;
+
+ ret = ath10k_wmi_pull_dfs_status(ar, skb, &status_arg);
+
+ if (ret) {
+ ath10k_warn(ar, "failed to parse dfs status event: %d\n", ret);
+ return;
+ }
+
+ ath10k_dbg(ar, ATH10K_DBG_REGULATORY,
+ "dfs status event received from fw: %d\n",
+ status_arg.status);
+
+ /* Even in the case of radar detection failure we follow the same
+ * behaviour as if radar were detected, i.e. we switch to a
+ * different channel.
+ */
+ if (status_arg.status == WMI_HW_RADAR_DETECTED ||
+ status_arg.status == WMI_RADAR_DETECTION_FAIL)
+ ath10k_radar_detected(ar);
+ complete(&ar->wmi.radar_confirm);
+}
+
+void ath10k_wmi_event_roam(struct ath10k *ar, struct sk_buff *skb)
+{
+ struct wmi_roam_ev_arg arg = {};
+ int ret;
+ u32 vdev_id;
+ u32 reason;
+ s32 rssi;
+
+ ret = ath10k_wmi_pull_roam_ev(ar, skb, &arg);
+ if (ret) {
+ ath10k_warn(ar, "failed to parse roam event: %d\n", ret);
+ return;
+ }
+
+ vdev_id = __le32_to_cpu(arg.vdev_id);
+ reason = __le32_to_cpu(arg.reason);
+ rssi = __le32_to_cpu(arg.rssi);
+ rssi += WMI_SPECTRAL_NOISE_FLOOR_REF_DEFAULT;
+
+ ath10k_dbg(ar, ATH10K_DBG_WMI,
+ "wmi roam event vdev %u reason 0x%08x rssi %d\n",
+ vdev_id, reason, rssi);
+
+ if (reason >= WMI_ROAM_REASON_MAX)
+ ath10k_warn(ar, "ignoring unknown roam event reason %d on vdev %i\n",
+ reason, vdev_id);
+
+ switch (reason) {
+ case WMI_ROAM_REASON_BEACON_MISS:
+ ath10k_mac_handle_beacon_miss(ar, vdev_id);
+ break;
+ case WMI_ROAM_REASON_BETTER_AP:
+ case WMI_ROAM_REASON_LOW_RSSI:
+ case WMI_ROAM_REASON_SUITABLE_AP_FOUND:
+ case WMI_ROAM_REASON_HO_FAILED:
+ ath10k_warn(ar, "ignoring not implemented roam event reason %d on vdev %i\n",
+ reason, vdev_id);
+ break;
+ }
+}
+
+void ath10k_wmi_event_profile_match(struct ath10k *ar, struct sk_buff *skb)
+{
+ ath10k_dbg(ar, ATH10K_DBG_WMI, "WMI_PROFILE_MATCH\n");
+}
+
+void ath10k_wmi_event_debug_print(struct ath10k *ar, struct sk_buff *skb)
+{
+ char buf[101], c;
+ int i;
+
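+ /* Copy at most sizeof(buf) - 1 characters, stopping at the end
+ * of the payload or at a NUL; non-printable bytes become '.'.
+ */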
+ for (i = 0; i < sizeof(buf) - 1; i++) {
+ if (i >= skb->len)
+ break;
+
+ c = skb->data[i];
+
+ if (c == '\0')
+ break;
+
+ if (isascii(c) && isprint(c))
+ buf[i] = c;
+ else
+ buf[i] = '.';
+ }
+
+ if (i == sizeof(buf) - 1)
+ ath10k_warn(ar, "wmi debug print truncated: %d\n", skb->len);
+
+ /* for some reason the debug prints end with \n, remove that */
+ if (i && skb->data[i - 1] == '\n')
+ i--;
+
+ /* the last byte is always reserved for the null character */
+ buf[i] = '\0';
+
+ ath10k_dbg(ar, ATH10K_DBG_WMI_PRINT, "wmi print '%s'\n", buf);
+}
+
+void ath10k_wmi_event_pdev_qvit(struct ath10k *ar, struct sk_buff *skb)
+{
+ ath10k_dbg(ar, ATH10K_DBG_WMI, "WMI_PDEV_QVIT_EVENTID\n");
+}
+
+void ath10k_wmi_event_wlan_profile_data(struct ath10k *ar, struct sk_buff *skb)
+{
+ ath10k_dbg(ar, ATH10K_DBG_WMI, "WMI_WLAN_PROFILE_DATA_EVENTID\n");
+}
+
+void ath10k_wmi_event_rtt_measurement_report(struct ath10k *ar,
+ struct sk_buff *skb)
+{
+ ath10k_dbg(ar, ATH10K_DBG_WMI, "WMI_RTT_MEASUREMENT_REPORT_EVENTID\n");
+}
+
+void ath10k_wmi_event_tsf_measurement_report(struct ath10k *ar,
+ struct sk_buff *skb)
+{
+ ath10k_dbg(ar, ATH10K_DBG_WMI, "WMI_TSF_MEASUREMENT_REPORT_EVENTID\n");
+}
+
+void ath10k_wmi_event_rtt_error_report(struct ath10k *ar, struct sk_buff *skb)
+{
+ ath10k_dbg(ar, ATH10K_DBG_WMI, "WMI_RTT_ERROR_REPORT_EVENTID\n");
+}
+
+void ath10k_wmi_event_wow_wakeup_host(struct ath10k *ar, struct sk_buff *skb)
+{
+ struct wmi_wow_ev_arg ev = {};
+ int ret;
+
+ complete(&ar->wow.wakeup_completed);
+
+ ret = ath10k_wmi_pull_wow_event(ar, skb, &ev);
+ if (ret) {
+ ath10k_warn(ar, "failed to parse wow wakeup event: %d\n", ret);
+ return;
+ }
+
+ ath10k_dbg(ar, ATH10K_DBG_WMI, "wow wakeup host reason %s\n",
+ wow_reason(ev.wake_reason));
+}
+
+void ath10k_wmi_event_dcs_interference(struct ath10k *ar, struct sk_buff *skb)
+{
+ ath10k_dbg(ar, ATH10K_DBG_WMI, "WMI_DCS_INTERFERENCE_EVENTID\n");
+}
+
+static u8 ath10k_tpc_config_get_rate(struct ath10k *ar,
+ struct wmi_pdev_tpc_config_event *ev,
+ u32 rate_idx, u32 num_chains,
+ u32 rate_code, u8 type)
+{
+ u8 tpc, num_streams, preamble, ch, stm_idx;
+
+ num_streams = ATH10K_HW_NSS(rate_code);
+ preamble = ATH10K_HW_PREAMBLE(rate_code);
+ ch = num_chains - 1;
+
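+ /* Clamp the per-rate target power to the regulatory maximum
+ * for this chain count.
+ */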
+ tpc = min_t(u8, ev->rates_array[rate_idx], ev->max_reg_allow_pow[ch]);
+
+ if (__le32_to_cpu(ev->num_tx_chain) <= 1)
+ goto out;
+
+ if (preamble == WMI_RATE_PREAMBLE_CCK)
+ goto out;
+
+ stm_idx = num_streams - 1;
+ if (num_chains <= num_streams)
+ goto out;
+
+ switch (type) {
+ case WMI_TPC_TABLE_TYPE_STBC:
+ tpc = min_t(u8, tpc,
+ ev->max_reg_allow_pow_agstbc[ch - 1][stm_idx]);
+ break;
+ case WMI_TPC_TABLE_TYPE_TXBF:
+ tpc = min_t(u8, tpc,
+ ev->max_reg_allow_pow_agtxbf[ch - 1][stm_idx]);
+ break;
+ case WMI_TPC_TABLE_TYPE_CDD:
+ tpc = min_t(u8, tpc,
+ ev->max_reg_allow_pow_agcdd[ch - 1][stm_idx]);
+ break;
+ default:
+ ath10k_warn(ar, "unknown wmi tpc table type: %d\n", type);
+ tpc = 0;
+ break;
+ }
+
+out:
+ return tpc;
+}
+
+static void ath10k_tpc_config_disp_tables(struct ath10k *ar,
+ struct wmi_pdev_tpc_config_event *ev,
+ struct ath10k_tpc_stats *tpc_stats,
+ u8 *rate_code, u16 *pream_table, u8 type)
+{
+ u32 i, j, pream_idx, flags;
+ u8 tpc[WMI_TPC_TX_N_CHAIN];
+ char tpc_value[WMI_TPC_TX_N_CHAIN * WMI_TPC_BUF_SIZE];
+ char buff[WMI_TPC_BUF_SIZE];
+
+ flags = __le32_to_cpu(ev->flags);
+
+ switch (type) {
+ case WMI_TPC_TABLE_TYPE_CDD:
+ if (!(flags & WMI_TPC_CONFIG_EVENT_FLAG_TABLE_CDD)) {
+ ath10k_dbg(ar, ATH10K_DBG_WMI, "CDD not supported\n");
+ tpc_stats->flag[type] = ATH10K_TPC_TABLE_TYPE_FLAG;
+ return;
+ }
+ break;
+ case WMI_TPC_TABLE_TYPE_STBC:
+ if (!(flags & WMI_TPC_CONFIG_EVENT_FLAG_TABLE_STBC)) {
+ ath10k_dbg(ar, ATH10K_DBG_WMI, "STBC not supported\n");
+ tpc_stats->flag[type] = ATH10K_TPC_TABLE_TYPE_FLAG;
+ return;
+ }
+ break;
+ case WMI_TPC_TABLE_TYPE_TXBF:
+ if (!(flags & WMI_TPC_CONFIG_EVENT_FLAG_TABLE_TXBF)) {
+ ath10k_dbg(ar, ATH10K_DBG_WMI, "TXBF not supported\n");
+ tpc_stats->flag[type] = ATH10K_TPC_TABLE_TYPE_FLAG;
+ return;
+ }
+ break;
+ default:
+ ath10k_dbg(ar, ATH10K_DBG_WMI,
+ "invalid table type in wmi tpc event: %d\n", type);
+ return;
+ }
+
+ pream_idx = 0;
+ for (i = 0; i < tpc_stats->rate_max; i++) {
+ memset(tpc_value, 0, sizeof(tpc_value));
+ memset(buff, 0, sizeof(buff));
+ if (i == pream_table[pream_idx])
+ pream_idx++;
+
+ for (j = 0; j < tpc_stats->num_tx_chain; j++) {
+ tpc[j] = ath10k_tpc_config_get_rate(ar, ev, i, j + 1,
+ rate_code[i],
+ type);
+ snprintf(buff, sizeof(buff), "%8d ", tpc[j]);
+ strlcat(tpc_value, buff, sizeof(tpc_value));
+ }
+ tpc_stats->tpc_table[type].pream_idx[i] = pream_idx;
+ tpc_stats->tpc_table[type].rate_code[i] = rate_code[i];
+ memcpy(tpc_stats->tpc_table[type].tpc_value[i],
+ tpc_value, sizeof(tpc_value));
+ }
+}
+
+void ath10k_wmi_tpc_config_get_rate_code(u8 *rate_code, u16 *pream_table,
+ u32 num_tx_chain)
+{
+ u32 i, j, pream_idx;
+ u8 rate_idx;
+
+ /* Create the rate code table based on the chains supported */
+ rate_idx = 0;
+ pream_idx = 0;
+
+ /* Fill CCK rate code */
+ for (i = 0; i < 4; i++) {
+ rate_code[rate_idx] =
+ ATH10K_HW_RATECODE(i, 0, WMI_RATE_PREAMBLE_CCK);
+ rate_idx++;
+ }
+ pream_table[pream_idx] = rate_idx;
+ pream_idx++;
+
+ /* Fill OFDM rate code */
+ for (i = 0; i < 8; i++) {
+ rate_code[rate_idx] =
+ ATH10K_HW_RATECODE(i, 0, WMI_RATE_PREAMBLE_OFDM);
+ rate_idx++;
+ }
+ pream_table[pream_idx] = rate_idx;
+ pream_idx++;
+
+ /* Fill HT20 rate code */
+ for (i = 0; i < num_tx_chain; i++) {
+ for (j = 0; j < 8; j++) {
+ rate_code[rate_idx] =
+ ATH10K_HW_RATECODE(j, i, WMI_RATE_PREAMBLE_HT);
+ rate_idx++;
+ }
+ }
+ pream_table[pream_idx] = rate_idx;
+ pream_idx++;
+
+ /* Fill HT40 rate code */
+ for (i = 0; i < num_tx_chain; i++) {
+ for (j = 0; j < 8; j++) {
+ rate_code[rate_idx] =
+ ATH10K_HW_RATECODE(j, i, WMI_RATE_PREAMBLE_HT);
+ rate_idx++;
+ }
+ }
+ pream_table[pream_idx] = rate_idx;
+ pream_idx++;
+
+ /* Fill VHT20 rate code */
+ for (i = 0; i < num_tx_chain; i++) {
+ for (j = 0; j < 10; j++) {
+ rate_code[rate_idx] =
+ ATH10K_HW_RATECODE(j, i, WMI_RATE_PREAMBLE_VHT);
+ rate_idx++;
+ }
+ }
+ pream_table[pream_idx] = rate_idx;
+ pream_idx++;
+
+ /* Fill VHT40 rate code */
+ for (i = 0; i < num_tx_chain; i++) {
+ for (j = 0; j < 10; j++) {
+ rate_code[rate_idx] =
+ ATH10K_HW_RATECODE(j, i, WMI_RATE_PREAMBLE_VHT);
+ rate_idx++;
+ }
+ }
+ pream_table[pream_idx] = rate_idx;
+ pream_idx++;
+
+ /* Fill VHT80 rate code */
+ for (i = 0; i < num_tx_chain; i++) {
+ for (j = 0; j < 10; j++) {
+ rate_code[rate_idx] =
+ ATH10K_HW_RATECODE(j, i, WMI_RATE_PREAMBLE_VHT);
+ rate_idx++;
+ }
+ }
+ pream_table[pream_idx] = rate_idx;
+ pream_idx++;
+
+ rate_code[rate_idx++] =
+ ATH10K_HW_RATECODE(0, 0, WMI_RATE_PREAMBLE_CCK);
+ rate_code[rate_idx++] =
+ ATH10K_HW_RATECODE(0, 0, WMI_RATE_PREAMBLE_OFDM);
+ rate_code[rate_idx++] =
+ ATH10K_HW_RATECODE(0, 0, WMI_RATE_PREAMBLE_CCK);
+ rate_code[rate_idx++] =
+ ATH10K_HW_RATECODE(0, 0, WMI_RATE_PREAMBLE_OFDM);
+ rate_code[rate_idx++] =
+ ATH10K_HW_RATECODE(0, 0, WMI_RATE_PREAMBLE_OFDM);
+
+ pream_table[pream_idx] = ATH10K_TPC_PREAM_TABLE_END;
+}
+
+void ath10k_wmi_event_pdev_tpc_config(struct ath10k *ar, struct sk_buff *skb)
+{
+ u32 num_tx_chain, rate_max;
+ u8 rate_code[WMI_TPC_RATE_MAX];
+ u16 pream_table[WMI_TPC_PREAM_TABLE_MAX];
+ struct wmi_pdev_tpc_config_event *ev;
+ struct ath10k_tpc_stats *tpc_stats;
+
+ ev = (struct wmi_pdev_tpc_config_event *)skb->data;
+
+ num_tx_chain = __le32_to_cpu(ev->num_tx_chain);
+
+ if (num_tx_chain > WMI_TPC_TX_N_CHAIN) {
+ ath10k_warn(ar, "number of tx chain is %d greater than TPC configured tx chain %d\n",
+ num_tx_chain, WMI_TPC_TX_N_CHAIN);
+ return;
+ }
+
+ rate_max = __le32_to_cpu(ev->rate_max);
+ if (rate_max > WMI_TPC_RATE_MAX) {
+ ath10k_warn(ar, "number of rate is %d greater than TPC configured rate %d\n",
+ rate_max, WMI_TPC_RATE_MAX);
+ rate_max = WMI_TPC_RATE_MAX;
+ }
+
+ tpc_stats = kzalloc(sizeof(*tpc_stats), GFP_ATOMIC);
+ if (!tpc_stats)
+ return;
+
+ ath10k_wmi_tpc_config_get_rate_code(rate_code, pream_table,
+ num_tx_chain);
+
+ tpc_stats->chan_freq = __le32_to_cpu(ev->chan_freq);
+ tpc_stats->phy_mode = __le32_to_cpu(ev->phy_mode);
+ tpc_stats->ctl = __le32_to_cpu(ev->ctl);
+ tpc_stats->reg_domain = __le32_to_cpu(ev->reg_domain);
+ tpc_stats->twice_antenna_gain = a_sle32_to_cpu(ev->twice_antenna_gain);
+ tpc_stats->twice_antenna_reduction =
+ __le32_to_cpu(ev->twice_antenna_reduction);
+ tpc_stats->power_limit = __le32_to_cpu(ev->power_limit);
+ tpc_stats->twice_max_rd_power = __le32_to_cpu(ev->twice_max_rd_power);
+ tpc_stats->num_tx_chain = num_tx_chain;
+ tpc_stats->rate_max = rate_max;
+
+ ath10k_tpc_config_disp_tables(ar, ev, tpc_stats,
+ rate_code, pream_table,
+ WMI_TPC_TABLE_TYPE_CDD);
+ ath10k_tpc_config_disp_tables(ar, ev, tpc_stats,
+ rate_code, pream_table,
+ WMI_TPC_TABLE_TYPE_STBC);
+ ath10k_tpc_config_disp_tables(ar, ev, tpc_stats,
+ rate_code, pream_table,
+ WMI_TPC_TABLE_TYPE_TXBF);
+
+ ath10k_debug_tpc_stats_process(ar, tpc_stats);
+
+ ath10k_dbg(ar, ATH10K_DBG_WMI,
+ "wmi event tpc config channel %d mode %d ctl %d regd %d gain %d %d limit %d max_power %d tx_chanins %d rates %d\n",
+ __le32_to_cpu(ev->chan_freq),
+ __le32_to_cpu(ev->phy_mode),
+ __le32_to_cpu(ev->ctl),
+ __le32_to_cpu(ev->reg_domain),
+ a_sle32_to_cpu(ev->twice_antenna_gain),
+ __le32_to_cpu(ev->twice_antenna_reduction),
+ __le32_to_cpu(ev->power_limit),
+ __le32_to_cpu(ev->twice_max_rd_power) / 2,
+ __le32_to_cpu(ev->num_tx_chain),
+ __le32_to_cpu(ev->rate_max));
+}
+
+static u8
+ath10k_wmi_tpc_final_get_rate(struct ath10k *ar,
+ struct wmi_pdev_tpc_final_table_event *ev,
+ u32 rate_idx, u32 num_chains,
+ u32 rate_code, u8 type, u32 pream_idx)
+{
+ u8 tpc, num_streams, preamble, ch, stm_idx;
+ s8 pow_agcdd, pow_agstbc, pow_agtxbf;
+ int pream;
+
+ num_streams = ATH10K_HW_NSS(rate_code);
+ preamble = ATH10K_HW_PREAMBLE(rate_code);
+ ch = num_chains - 1;
+ stm_idx = num_streams - 1;
+ pream = -1;
+
+ if (__le32_to_cpu(ev->chan_freq) <= 2483) {
+ switch (pream_idx) {
+ case WMI_TPC_PREAM_2GHZ_CCK:
+ pream = 0;
+ break;
+ case WMI_TPC_PREAM_2GHZ_OFDM:
+ pream = 1;
+ break;
+ case WMI_TPC_PREAM_2GHZ_HT20:
+ case WMI_TPC_PREAM_2GHZ_VHT20:
+ pream = 2;
+ break;
+ case WMI_TPC_PREAM_2GHZ_HT40:
+ case WMI_TPC_PREAM_2GHZ_VHT40:
+ pream = 3;
+ break;
+ case WMI_TPC_PREAM_2GHZ_VHT80:
+ pream = 4;
+ break;
+ default:
+ pream = -1;
+ break;
+ }
+ }
+
+ if (__le32_to_cpu(ev->chan_freq) >= 5180) {
+ switch (pream_idx) {
+ case WMI_TPC_PREAM_5GHZ_OFDM:
+ pream = 0;
+ break;
+ case WMI_TPC_PREAM_5GHZ_HT20:
+ case WMI_TPC_PREAM_5GHZ_VHT20:
+ pream = 1;
+ break;
+ case WMI_TPC_PREAM_5GHZ_HT40:
+ case WMI_TPC_PREAM_5GHZ_VHT40:
+ pream = 2;
+ break;
+ case WMI_TPC_PREAM_5GHZ_VHT80:
+ pream = 3;
+ break;
+ case WMI_TPC_PREAM_5GHZ_HTCUP:
+ pream = 4;
+ break;
+ default:
+ pream = -1;
+ break;
+ }
+ }
+
+ if (pream == -1) {
+ ath10k_warn(ar, "unknown wmi tpc final index and frequency: %u, %u\n",
+ pream_idx, __le32_to_cpu(ev->chan_freq));
+ tpc = 0;
+ goto out;
+ }
+
+ if (pream == 4)
+ tpc = min_t(u8, ev->rates_array[rate_idx],
+ ev->max_reg_allow_pow[ch]);
+ else
+ tpc = min_t(u8, min_t(u8, ev->rates_array[rate_idx],
+ ev->max_reg_allow_pow[ch]),
+ ev->ctl_power_table[0][pream][stm_idx]);
+
+ if (__le32_to_cpu(ev->num_tx_chain) <= 1)
+ goto out;
+
+ if (preamble == WMI_RATE_PREAMBLE_CCK)
+ goto out;
+
+ if (num_chains <= num_streams)
+ goto out;
+
+ switch (type) {
+ case WMI_TPC_TABLE_TYPE_STBC:
+ pow_agstbc = ev->max_reg_allow_pow_agstbc[ch - 1][stm_idx];
+ if (pream == 4)
+ tpc = min_t(u8, tpc, pow_agstbc);
+ else
+ tpc = min_t(u8, min_t(u8, tpc, pow_agstbc),
+ ev->ctl_power_table[0][pream][stm_idx]);
+ break;
+ case WMI_TPC_TABLE_TYPE_TXBF:
+ pow_agtxbf = ev->max_reg_allow_pow_agtxbf[ch - 1][stm_idx];
+ if (pream == 4)
+ tpc = min_t(u8, tpc, pow_agtxbf);
+ else
+ tpc = min_t(u8, min_t(u8, tpc, pow_agtxbf),
+ ev->ctl_power_table[1][pream][stm_idx]);
+ break;
+ case WMI_TPC_TABLE_TYPE_CDD:
+ pow_agcdd = ev->max_reg_allow_pow_agcdd[ch - 1][stm_idx];
+ if (pream == 4)
+ tpc = min_t(u8, tpc, pow_agcdd);
+ else
+ tpc = min_t(u8, min_t(u8, tpc, pow_agcdd),
+ ev->ctl_power_table[0][pream][stm_idx]);
+ break;
+ default:
+ ath10k_warn(ar, "unknown wmi tpc final table type: %d\n", type);
+ tpc = 0;
+ break;
+ }
+
+out:
+ return tpc;
+}
+
+static void
+ath10k_wmi_tpc_stats_final_disp_tables(struct ath10k *ar,
+ struct wmi_pdev_tpc_final_table_event *ev,
+ struct ath10k_tpc_stats_final *tpc_stats,
+ u8 *rate_code, u16 *pream_table, u8 type)
+{
+ u32 i, j, pream_idx, flags;
+ u8 tpc[WMI_TPC_TX_N_CHAIN];
+ char tpc_value[WMI_TPC_TX_N_CHAIN * WMI_TPC_BUF_SIZE];
+ char buff[WMI_TPC_BUF_SIZE];
+
+ flags = __le32_to_cpu(ev->flags);
+
+ switch (type) {
+ case WMI_TPC_TABLE_TYPE_CDD:
+ if (!(flags & WMI_TPC_CONFIG_EVENT_FLAG_TABLE_CDD)) {
+ ath10k_dbg(ar, ATH10K_DBG_WMI, "CDD not supported\n");
+ tpc_stats->flag[type] = ATH10K_TPC_TABLE_TYPE_FLAG;
+ return;
+ }
+ break;
+ case WMI_TPC_TABLE_TYPE_STBC:
+ if (!(flags & WMI_TPC_CONFIG_EVENT_FLAG_TABLE_STBC)) {
+ ath10k_dbg(ar, ATH10K_DBG_WMI, "STBC not supported\n");
+ tpc_stats->flag[type] = ATH10K_TPC_TABLE_TYPE_FLAG;
+ return;
+ }
+ break;
+ case WMI_TPC_TABLE_TYPE_TXBF:
+ if (!(flags & WMI_TPC_CONFIG_EVENT_FLAG_TABLE_TXBF)) {
+ ath10k_dbg(ar, ATH10K_DBG_WMI, "TXBF not supported\n");
+ tpc_stats->flag[type] = ATH10K_TPC_TABLE_TYPE_FLAG;
+ return;
+ }
+ break;
+ default:
+ ath10k_dbg(ar, ATH10K_DBG_WMI,
+ "invalid table type in wmi tpc event: %d\n", type);
+ return;
+ }
+
+ pream_idx = 0;
+ for (i = 0; i < tpc_stats->rate_max; i++) {
+ memset(tpc_value, 0, sizeof(tpc_value));
+ memset(buff, 0, sizeof(buff));
+ if (i == pream_table[pream_idx])
+ pream_idx++;
+
+ for (j = 0; j < tpc_stats->num_tx_chain; j++) {
+ tpc[j] = ath10k_wmi_tpc_final_get_rate(ar, ev, i, j + 1,
+ rate_code[i],
+ type, pream_idx);
+ snprintf(buff, sizeof(buff), "%8d ", tpc[j]);
+ strlcat(tpc_value, buff, sizeof(tpc_value));
+ }
+ tpc_stats->tpc_table_final[type].pream_idx[i] = pream_idx;
+ tpc_stats->tpc_table_final[type].rate_code[i] = rate_code[i];
+ memcpy(tpc_stats->tpc_table_final[type].tpc_value[i],
+ tpc_value, sizeof(tpc_value));
+ }
+}
+
+void ath10k_wmi_event_tpc_final_table(struct ath10k *ar, struct sk_buff *skb)
+{
+ u32 num_tx_chain, rate_max;
+ u8 rate_code[WMI_TPC_FINAL_RATE_MAX];
+ u16 pream_table[WMI_TPC_PREAM_TABLE_MAX];
+ struct wmi_pdev_tpc_final_table_event *ev;
+ struct ath10k_tpc_stats_final *tpc_stats;
+
+ ev = (struct wmi_pdev_tpc_final_table_event *)skb->data;
+
+ num_tx_chain = __le32_to_cpu(ev->num_tx_chain);
+ if (num_tx_chain > WMI_TPC_TX_N_CHAIN) {
+ ath10k_warn(ar, "number of tx chain is %d greater than TPC final configured tx chain %d\n",
+ num_tx_chain, WMI_TPC_TX_N_CHAIN);
+ return;
+ }
+
+ rate_max = __le32_to_cpu(ev->rate_max);
+ if (rate_max > WMI_TPC_FINAL_RATE_MAX) {
+ ath10k_warn(ar, "number of rate is %d greater than TPC final configured rate %d\n",
+ rate_max, WMI_TPC_FINAL_RATE_MAX);
+ rate_max = WMI_TPC_FINAL_RATE_MAX;
+ }
+
+ tpc_stats = kzalloc(sizeof(*tpc_stats), GFP_ATOMIC);
+ if (!tpc_stats)
+ return;
+
+ ath10k_wmi_tpc_config_get_rate_code(rate_code, pream_table,
+ num_tx_chain);
+
+ tpc_stats->chan_freq = __le32_to_cpu(ev->chan_freq);
+ tpc_stats->phy_mode = __le32_to_cpu(ev->phy_mode);
+ tpc_stats->ctl = __le32_to_cpu(ev->ctl);
+ tpc_stats->reg_domain = __le32_to_cpu(ev->reg_domain);
+ tpc_stats->twice_antenna_gain = a_sle32_to_cpu(ev->twice_antenna_gain);
+ tpc_stats->twice_antenna_reduction =
+ __le32_to_cpu(ev->twice_antenna_reduction);
+ tpc_stats->power_limit = __le32_to_cpu(ev->power_limit);
+ tpc_stats->twice_max_rd_power = __le32_to_cpu(ev->twice_max_rd_power);
+ tpc_stats->num_tx_chain = num_tx_chain;
+ tpc_stats->rate_max = rate_max;
+
+ ath10k_wmi_tpc_stats_final_disp_tables(ar, ev, tpc_stats,
+ rate_code, pream_table,
+ WMI_TPC_TABLE_TYPE_CDD);
+ ath10k_wmi_tpc_stats_final_disp_tables(ar, ev, tpc_stats,
+ rate_code, pream_table,
+ WMI_TPC_TABLE_TYPE_STBC);
+ ath10k_wmi_tpc_stats_final_disp_tables(ar, ev, tpc_stats,
+ rate_code, pream_table,
+ WMI_TPC_TABLE_TYPE_TXBF);
+
+ ath10k_debug_tpc_stats_final_process(ar, tpc_stats);
+
+ ath10k_dbg(ar, ATH10K_DBG_WMI,
+ "wmi event tpc final table channel %d mode %d ctl %d regd %d gain %d %d limit %d max_power %d tx_chanins %d rates %d\n",
+ __le32_to_cpu(ev->chan_freq),
+ __le32_to_cpu(ev->phy_mode),
+ __le32_to_cpu(ev->ctl),
+ __le32_to_cpu(ev->reg_domain),
+ a_sle32_to_cpu(ev->twice_antenna_gain),
+ __le32_to_cpu(ev->twice_antenna_reduction),
+ __le32_to_cpu(ev->power_limit),
+ __le32_to_cpu(ev->twice_max_rd_power) / 2,
+ __le32_to_cpu(ev->num_tx_chain),
+ __le32_to_cpu(ev->rate_max));
+}
+
+static void
+ath10k_wmi_handle_tdls_peer_event(struct ath10k *ar, struct sk_buff *skb)
+{
+ struct wmi_tdls_peer_event *ev;
+ struct ath10k_peer *peer;
+ struct ath10k_vif *arvif;
+ int vdev_id;
+ int peer_status;
+ int peer_reason;
+ u8 reason;
+
+ if (skb->len < sizeof(*ev)) {
+ ath10k_err(ar, "received tdls peer event with invalid size (%d bytes)\n",
+ skb->len);
+ return;
+ }
+
+ ev = (struct wmi_tdls_peer_event *)skb->data;
+ vdev_id = __le32_to_cpu(ev->vdev_id);
+ peer_status = __le32_to_cpu(ev->peer_status);
+ peer_reason = __le32_to_cpu(ev->peer_reason);
+
+ spin_lock_bh(&ar->data_lock);
+ peer = ath10k_peer_find(ar, vdev_id, ev->peer_macaddr.addr);
+ spin_unlock_bh(&ar->data_lock);
+
+ if (!peer) {
+ ath10k_warn(ar, "failed to find peer entry for %pM\n",
+ ev->peer_macaddr.addr);
+ return;
+ }
+
+ switch (peer_status) {
+ case WMI_TDLS_SHOULD_TEARDOWN:
+ switch (peer_reason) {
+ case WMI_TDLS_TEARDOWN_REASON_PTR_TIMEOUT:
+ case WMI_TDLS_TEARDOWN_REASON_NO_RESPONSE:
+ case WMI_TDLS_TEARDOWN_REASON_RSSI:
+ reason = WLAN_REASON_TDLS_TEARDOWN_UNREACHABLE;
+ break;
+ default:
+ reason = WLAN_REASON_TDLS_TEARDOWN_UNSPECIFIED;
+ break;
+ }
+
+ arvif = ath10k_get_arvif(ar, vdev_id);
+ if (!arvif) {
+ ath10k_warn(ar, "received tdls peer event for invalid vdev id %u\n",
+ vdev_id);
+ return;
+ }
+
+ ieee80211_tdls_oper_request(arvif->vif, ev->peer_macaddr.addr,
+ NL80211_TDLS_TEARDOWN, reason,
+ GFP_ATOMIC);
+
+ ath10k_dbg(ar, ATH10K_DBG_WMI,
+ "received tdls teardown event for peer %pM reason %u\n",
+ ev->peer_macaddr.addr, peer_reason);
+ break;
+ default:
+ ath10k_dbg(ar, ATH10K_DBG_WMI,
+ "received unknown tdls peer event %u\n",
+ peer_status);
+ break;
+ }
+}
+
+void ath10k_wmi_event_pdev_ftm_intg(struct ath10k *ar, struct sk_buff *skb)
+{
+ ath10k_dbg(ar, ATH10K_DBG_WMI, "WMI_PDEV_FTM_INTG_EVENTID\n");
+}
+
+void ath10k_wmi_event_gtk_offload_status(struct ath10k *ar, struct sk_buff *skb)
+{
+ ath10k_dbg(ar, ATH10K_DBG_WMI, "WMI_GTK_OFFLOAD_STATUS_EVENTID\n");
+}
+
+void ath10k_wmi_event_gtk_rekey_fail(struct ath10k *ar, struct sk_buff *skb)
+{
+ ath10k_dbg(ar, ATH10K_DBG_WMI, "WMI_GTK_REKEY_FAIL_EVENTID\n");
+}
+
+void ath10k_wmi_event_delba_complete(struct ath10k *ar, struct sk_buff *skb)
+{
+ ath10k_dbg(ar, ATH10K_DBG_WMI, "WMI_TX_DELBA_COMPLETE_EVENTID\n");
+}
+
+void ath10k_wmi_event_addba_complete(struct ath10k *ar, struct sk_buff *skb)
+{
+ ath10k_dbg(ar, ATH10K_DBG_WMI, "WMI_TX_ADDBA_COMPLETE_EVENTID\n");
+}
+
+void ath10k_wmi_event_vdev_install_key_complete(struct ath10k *ar,
+ struct sk_buff *skb)
+{
+ ath10k_dbg(ar, ATH10K_DBG_WMI, "WMI_VDEV_INSTALL_KEY_COMPLETE_EVENTID\n");
+}
+
+void ath10k_wmi_event_inst_rssi_stats(struct ath10k *ar, struct sk_buff *skb)
+{
+ ath10k_dbg(ar, ATH10K_DBG_WMI, "WMI_INST_RSSI_STATS_EVENTID\n");
+}
+
+void ath10k_wmi_event_vdev_standby_req(struct ath10k *ar, struct sk_buff *skb)
+{
+ ath10k_dbg(ar, ATH10K_DBG_WMI, "WMI_VDEV_STANDBY_REQ_EVENTID\n");
+}
+
+void ath10k_wmi_event_vdev_resume_req(struct ath10k *ar, struct sk_buff *skb)
+{
+ ath10k_dbg(ar, ATH10K_DBG_WMI, "WMI_VDEV_RESUME_REQ_EVENTID\n");
+}
+
+static int ath10k_wmi_alloc_chunk(struct ath10k *ar, u32 req_id,
+ u32 num_units, u32 unit_len)
+{
+ dma_addr_t paddr;
+ u32 pool_size;
+ int idx = ar->wmi.num_mem_chunks;
+ void *vaddr;
+
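+ /* Pad each unit to a 4-byte boundary when sizing the pool. */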
+ pool_size = num_units * round_up(unit_len, 4);
+ vaddr = dma_zalloc_coherent(ar->dev, pool_size, &paddr, GFP_KERNEL);
+
+ if (!vaddr)
+ return -ENOMEM;
+
+ ar->wmi.mem_chunks[idx].vaddr = vaddr;
+ ar->wmi.mem_chunks[idx].paddr = paddr;
+ ar->wmi.mem_chunks[idx].len = pool_size;
+ ar->wmi.mem_chunks[idx].req_id = req_id;
+ ar->wmi.num_mem_chunks++;
+
+ return num_units;
+}
+
+static int ath10k_wmi_alloc_host_mem(struct ath10k *ar, u32 req_id,
+ u32 num_units, u32 unit_len)
+{
+ int ret;
+
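+ /* Allocate the requested units chunk by chunk until the whole
+ * request is satisfied.
+ */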
+ while (num_units) {
+ ret = ath10k_wmi_alloc_chunk(ar, req_id, num_units, unit_len);
+ if (ret < 0)
+ return ret;
+
+ num_units -= ret;
+ }
+
+ return 0;
+}
+
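+/* Check whether the host memory chunks allocated earlier still match the
+ * firmware's current memory requirements; if they do, the allocation can
+ * be reused as-is.
+ */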
+static bool
+ath10k_wmi_is_host_mem_allocated(struct ath10k *ar,
+ const struct wlan_host_mem_req **mem_reqs,
+ u32 num_mem_reqs)
+{
+ u32 req_id, num_units, unit_size, num_unit_info;
+ u32 pool_size;
+ int i, j;
+ bool found;
+
+ if (ar->wmi.num_mem_chunks != num_mem_reqs)
+ return false;
+
+ for (i = 0; i < num_mem_reqs; ++i) {
+ req_id = __le32_to_cpu(mem_reqs[i]->req_id);
+ num_units = __le32_to_cpu(mem_reqs[i]->num_units);
+ unit_size = __le32_to_cpu(mem_reqs[i]->unit_size);
+ num_unit_info = __le32_to_cpu(mem_reqs[i]->num_unit_info);
+
+ if (num_unit_info & NUM_UNITS_IS_NUM_ACTIVE_PEERS) {
+ if (ar->num_active_peers)
+ num_units = ar->num_active_peers + 1;
+ else
+ num_units = ar->max_num_peers + 1;
+ } else if (num_unit_info & NUM_UNITS_IS_NUM_PEERS) {
+ num_units = ar->max_num_peers + 1;
+ } else if (num_unit_info & NUM_UNITS_IS_NUM_VDEVS) {
+ num_units = ar->max_num_vdevs + 1;
+ }
+
+ found = false;
+ for (j = 0; j < ar->wmi.num_mem_chunks; j++) {
+ if (ar->wmi.mem_chunks[j].req_id == req_id) {
+ pool_size = num_units * round_up(unit_size, 4);
+ if (ar->wmi.mem_chunks[j].len == pool_size) {
+ found = true;
+ break;
+ }
+ }
+ }
+ if (!found)
+ return false;
+ }
+
+ return true;
+}
+
+static int
+ath10k_wmi_main_op_pull_svc_rdy_ev(struct ath10k *ar, struct sk_buff *skb,
+ struct wmi_svc_rdy_ev_arg *arg)
+{
+ struct wmi_service_ready_event *ev;
+ size_t i, n;
+
+ if (skb->len < sizeof(*ev))
+ return -EPROTO;
+
+ ev = (void *)skb->data;
+ skb_pull(skb, sizeof(*ev));
+ arg->min_tx_power = ev->hw_min_tx_power;
+ arg->max_tx_power = ev->hw_max_tx_power;
+ arg->ht_cap = ev->ht_cap_info;
+ arg->vht_cap = ev->vht_cap_info;
+ arg->sw_ver0 = ev->sw_version;
+ arg->sw_ver1 = ev->sw_version_1;
+ arg->phy_capab = ev->phy_capability;
+ arg->num_rf_chains = ev->num_rf_chains;
+ arg->eeprom_rd = ev->hal_reg_capabilities.eeprom_rd;
+ arg->low_5ghz_chan = ev->hal_reg_capabilities.low_5ghz_chan;
+ arg->high_5ghz_chan = ev->hal_reg_capabilities.high_5ghz_chan;
+ arg->num_mem_reqs = ev->num_mem_reqs;
+ arg->service_map = ev->wmi_service_bitmap;
+ arg->service_map_len = sizeof(ev->wmi_service_bitmap);
+
+ n = min_t(size_t, __le32_to_cpu(arg->num_mem_reqs),
+ ARRAY_SIZE(arg->mem_reqs));
+ for (i = 0; i < n; i++)
+ arg->mem_reqs[i] = &ev->mem_reqs[i];
+
+ if (skb->len <
+ __le32_to_cpu(arg->num_mem_reqs) * sizeof(arg->mem_reqs[0]))
+ return -EPROTO;
+
+ return 0;
+}
+
+static int
+ath10k_wmi_10x_op_pull_svc_rdy_ev(struct ath10k *ar, struct sk_buff *skb,
+ struct wmi_svc_rdy_ev_arg *arg)
+{
+ struct wmi_10x_service_ready_event *ev;
+ int i, n;
+
+ if (skb->len < sizeof(*ev))
+ return -EPROTO;
+
+ ev = (void *)skb->data;
+ skb_pull(skb, sizeof(*ev));
+ arg->min_tx_power = ev->hw_min_tx_power;
+ arg->max_tx_power = ev->hw_max_tx_power;
+ arg->ht_cap = ev->ht_cap_info;
+ arg->vht_cap = ev->vht_cap_info;
+ arg->sw_ver0 = ev->sw_version;
+ arg->phy_capab = ev->phy_capability;
+ arg->num_rf_chains = ev->num_rf_chains;
+ arg->eeprom_rd = ev->hal_reg_capabilities.eeprom_rd;
+ arg->low_5ghz_chan = ev->hal_reg_capabilities.low_5ghz_chan;
+ arg->high_5ghz_chan = ev->hal_reg_capabilities.high_5ghz_chan;
+ arg->num_mem_reqs = ev->num_mem_reqs;
+ arg->service_map = ev->wmi_service_bitmap;
+ arg->service_map_len = sizeof(ev->wmi_service_bitmap);
+
+ n = min_t(size_t, __le32_to_cpu(arg->num_mem_reqs),
+ ARRAY_SIZE(arg->mem_reqs));
+ for (i = 0; i < n; i++)
+ arg->mem_reqs[i] = &ev->mem_reqs[i];
+
+ if (skb->len <
+ __le32_to_cpu(arg->num_mem_reqs) * sizeof(arg->mem_reqs[0]))
+ return -EPROTO;
+
+ return 0;
+}
+
+static void ath10k_wmi_event_service_ready_work(struct work_struct *work)
+{
+ struct ath10k *ar = container_of(work, struct ath10k, svc_rdy_work);
+ struct sk_buff *skb = ar->svc_rdy_skb;
+ struct wmi_svc_rdy_ev_arg arg = {};
+ u32 num_units, req_id, unit_size, num_mem_reqs, num_unit_info, i;
+ int ret;
+ bool allocated;
+
+ if (!skb) {
+ ath10k_warn(ar, "invalid service ready event skb\n");
+ return;
+ }
+
+ ret = ath10k_wmi_pull_svc_rdy(ar, skb, &arg);
+ if (ret) {
+ ath10k_warn(ar, "failed to parse service ready: %d\n", ret);
+ return;
+ }
+
+ ath10k_wmi_map_svc(ar, arg.service_map, ar->wmi.svc_map,
+ arg.service_map_len);
+
+ ar->hw_min_tx_power = __le32_to_cpu(arg.min_tx_power);
+ ar->hw_max_tx_power = __le32_to_cpu(arg.max_tx_power);
+ ar->ht_cap_info = __le32_to_cpu(arg.ht_cap);
+ ar->vht_cap_info = __le32_to_cpu(arg.vht_cap);
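+ /* sw_ver0 packs major/minor, sw_ver1 packs release/build. */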
+ ar->fw_version_major =
+ (__le32_to_cpu(arg.sw_ver0) & 0xff000000) >> 24;
+ ar->fw_version_minor = (__le32_to_cpu(arg.sw_ver0) & 0x00ffffff);
+ ar->fw_version_release =
+ (__le32_to_cpu(arg.sw_ver1) & 0xffff0000) >> 16;
+ ar->fw_version_build = (__le32_to_cpu(arg.sw_ver1) & 0x0000ffff);
+ ar->phy_capability = __le32_to_cpu(arg.phy_capab);
+ ar->num_rf_chains = __le32_to_cpu(arg.num_rf_chains);
+ ar->hw_eeprom_rd = __le32_to_cpu(arg.eeprom_rd);
+ ar->low_5ghz_chan = __le32_to_cpu(arg.low_5ghz_chan);
+ ar->high_5ghz_chan = __le32_to_cpu(arg.high_5ghz_chan);
+
+ ath10k_dbg_dump(ar, ATH10K_DBG_WMI, NULL, "wmi svc: ",
+ arg.service_map, arg.service_map_len);
+
+ if (ar->num_rf_chains > ar->max_spatial_stream) {
+ ath10k_warn(ar, "hardware advertises support for more spatial streams than it should (%d > %d)\n",
+ ar->num_rf_chains, ar->max_spatial_stream);
+ ar->num_rf_chains = ar->max_spatial_stream;
+ }
+
+ if (!ar->cfg_tx_chainmask) {
+ ar->cfg_tx_chainmask = (1 << ar->num_rf_chains) - 1;
+ ar->cfg_rx_chainmask = (1 << ar->num_rf_chains) - 1;
+ }
+
+ if (strlen(ar->hw->wiphy->fw_version) == 0) {
+ snprintf(ar->hw->wiphy->fw_version,
+ sizeof(ar->hw->wiphy->fw_version),
+ "%u.%u.%u.%u",
+ ar->fw_version_major,
+ ar->fw_version_minor,
+ ar->fw_version_release,
+ ar->fw_version_build);
+ }
+
+ num_mem_reqs = __le32_to_cpu(arg.num_mem_reqs);
+ if (num_mem_reqs > WMI_MAX_MEM_REQS) {
+ ath10k_warn(ar, "requested memory chunks number (%d) exceeds the limit\n",
+ num_mem_reqs);
+ return;
+ }
+
+ if (test_bit(WMI_SERVICE_PEER_CACHING, ar->wmi.svc_map)) {
+ if (test_bit(ATH10K_FW_FEATURE_PEER_FLOW_CONTROL,
+ ar->running_fw->fw_file.fw_features))
+ ar->num_active_peers = TARGET_10_4_QCACHE_ACTIVE_PEERS_PFC +
+ ar->max_num_vdevs;
+ else
+ ar->num_active_peers = TARGET_10_4_QCACHE_ACTIVE_PEERS +
+ ar->max_num_vdevs;
+
+ ar->max_num_peers = TARGET_10_4_NUM_QCACHE_PEERS_MAX +
+ ar->max_num_vdevs;
+ ar->num_tids = ar->num_active_peers * 2;
+ ar->max_num_stations = TARGET_10_4_NUM_QCACHE_PEERS_MAX;
+ }
+
+ /* TODO: Adjust max peer count for cases like WMI_SERVICE_RATECTRL_CACHE
+ * and WMI_SERVICE_IRAM_TIDS, etc.
+ */
+
+ allocated = ath10k_wmi_is_host_mem_allocated(ar, arg.mem_reqs,
+ num_mem_reqs);
+ if (allocated)
+ goto skip_mem_alloc;
+
+ /* Either this event is received during boot time or there is a change
+ * in memory requirement from firmware when compared to last request.
+ * Free any old memory and do a fresh allocation based on the current
+ * memory requirement.
+ */
+ ath10k_wmi_free_host_mem(ar);
+
+ for (i = 0; i < num_mem_reqs; ++i) {
+ req_id = __le32_to_cpu(arg.mem_reqs[i]->req_id);
+ num_units = __le32_to_cpu(arg.mem_reqs[i]->num_units);
+ unit_size = __le32_to_cpu(arg.mem_reqs[i]->unit_size);
+ num_unit_info = __le32_to_cpu(arg.mem_reqs[i]->num_unit_info);
+
+ if (num_unit_info & NUM_UNITS_IS_NUM_ACTIVE_PEERS) {
+ if (ar->num_active_peers)
+ num_units = ar->num_active_peers + 1;
+ else
+ num_units = ar->max_num_peers + 1;
+ } else if (num_unit_info & NUM_UNITS_IS_NUM_PEERS) {
+ /* The number of units to allocate is the number of
+ * peers, plus 1 extra for the self peer on the target.
+ * This needs to stay tied together; otherwise host and
+ * target can get out of sync.
+ */
+ num_units = ar->max_num_peers + 1;
+ } else if (num_unit_info & NUM_UNITS_IS_NUM_VDEVS) {
+ num_units = ar->max_num_vdevs + 1;
+ }
+
+ ath10k_dbg(ar, ATH10K_DBG_WMI,
+ "wmi mem_req_id %d num_units %d num_unit_info %d unit size %d actual units %d\n",
+ req_id,
+ __le32_to_cpu(arg.mem_reqs[i]->num_units),
+ num_unit_info,
+ unit_size,
+ num_units);
+
+ ret = ath10k_wmi_alloc_host_mem(ar, req_id, num_units,
+ unit_size);
+ if (ret)
+ return;
+ }
+
+skip_mem_alloc:
+ ath10k_dbg(ar, ATH10K_DBG_WMI,
+ "wmi event service ready min_tx_power 0x%08x max_tx_power 0x%08x ht_cap 0x%08x vht_cap 0x%08x sw_ver0 0x%08x sw_ver1 0x%08x fw_build 0x%08x phy_capab 0x%08x num_rf_chains 0x%08x eeprom_rd 0x%08x num_mem_reqs 0x%08x\n",
+ __le32_to_cpu(arg.min_tx_power),
+ __le32_to_cpu(arg.max_tx_power),
+ __le32_to_cpu(arg.ht_cap),
+ __le32_to_cpu(arg.vht_cap),
+ __le32_to_cpu(arg.sw_ver0),
+ __le32_to_cpu(arg.sw_ver1),
+ __le32_to_cpu(arg.fw_build),
+ __le32_to_cpu(arg.phy_capab),
+ __le32_to_cpu(arg.num_rf_chains),
+ __le32_to_cpu(arg.eeprom_rd),
+ __le32_to_cpu(arg.num_mem_reqs));
+
+ dev_kfree_skb(skb);
+ ar->svc_rdy_skb = NULL;
+ complete(&ar->wmi.service_ready);
+}
+
+void ath10k_wmi_event_service_ready(struct ath10k *ar, struct sk_buff *skb)
+{
+ ar->svc_rdy_skb = skb;
+ queue_work(ar->workqueue_aux, &ar->svc_rdy_work);
+}
+
+static int ath10k_wmi_op_pull_rdy_ev(struct ath10k *ar, struct sk_buff *skb,
+ struct wmi_rdy_ev_arg *arg)
+{
+ struct wmi_ready_event *ev = (void *)skb->data;
+
+ if (skb->len < sizeof(*ev))
+ return -EPROTO;
+
+ skb_pull(skb, sizeof(*ev));
+ arg->sw_version = ev->sw_version;
+ arg->abi_version = ev->abi_version;
+ arg->status = ev->status;
+ arg->mac_addr = ev->mac_addr.addr;
+
+ return 0;
+}
+
+static int ath10k_wmi_op_pull_roam_ev(struct ath10k *ar, struct sk_buff *skb,
+ struct wmi_roam_ev_arg *arg)
+{
+ struct wmi_roam_ev *ev = (void *)skb->data;
+
+ if (skb->len < sizeof(*ev))
+ return -EPROTO;
+
+ skb_pull(skb, sizeof(*ev));
+ arg->vdev_id = ev->vdev_id;
+ arg->reason = ev->reason;
+
+ return 0;
+}
+
+static int ath10k_wmi_op_pull_echo_ev(struct ath10k *ar,
+ struct sk_buff *skb,
+ struct wmi_echo_ev_arg *arg)
+{
+ struct wmi_echo_event *ev = (void *)skb->data;
+
+ if (skb->len < sizeof(*ev))
+ return -EPROTO;
+
+ arg->value = ev->value;
+
+ return 0;
+}
+
+int ath10k_wmi_event_ready(struct ath10k *ar, struct sk_buff *skb)
+{
+ struct wmi_rdy_ev_arg arg = {};
+ int ret;
+
+ ret = ath10k_wmi_pull_rdy(ar, skb, &arg);
+ if (ret) {
+ ath10k_warn(ar, "failed to parse ready event: %d\n", ret);
+ return ret;
+ }
+
+ ath10k_dbg(ar, ATH10K_DBG_WMI,
+ "wmi event ready sw_version %u abi_version %u mac_addr %pM status %d\n",
+ __le32_to_cpu(arg.sw_version),
+ __le32_to_cpu(arg.abi_version),
+ arg.mac_addr,
+ __le32_to_cpu(arg.status));
+
+ ether_addr_copy(ar->mac_addr, arg.mac_addr);
+ complete(&ar->wmi.unified_ready);
+ return 0;
+}
+
+void ath10k_wmi_event_service_available(struct ath10k *ar, struct sk_buff *skb)
+{
+ int ret;
+ struct wmi_svc_avail_ev_arg arg = {};
+
+ ret = ath10k_wmi_pull_svc_avail(ar, skb, &arg);
+ if (ret) {
+ ath10k_warn(ar, "failed to parse service available event: %d\n",
+ ret);
+ }
+
+ /* Initialization of "arg.service_map_ext_valid" to zero is
+ * necessary for the logic below to work.
+ */
+ if (arg.service_map_ext_valid)
+ ath10k_wmi_map_svc_ext(ar, arg.service_map_ext, ar->wmi.svc_map,
+ __le32_to_cpu(arg.service_map_ext_len));
+}
+
+static int ath10k_wmi_event_temperature(struct ath10k *ar, struct sk_buff *skb)
+{
+ const struct wmi_pdev_temperature_event *ev;
+
+ ev = (struct wmi_pdev_temperature_event *)skb->data;
+ if (WARN_ON(skb->len < sizeof(*ev)))
+ return -EPROTO;
+
+ ath10k_thermal_event_temperature(ar, __le32_to_cpu(ev->temperature));
+ return 0;
+}
+
+static int ath10k_wmi_event_pdev_bss_chan_info(struct ath10k *ar,
+ struct sk_buff *skb)
+{
+ struct wmi_pdev_bss_chan_info_event *ev;
+ struct survey_info *survey;
+ u64 busy, total, tx, rx, rx_bss;
+ u32 freq, noise_floor;
+ u32 cc_freq_hz = ar->hw_params.channel_counters_freq_hz;
+ int idx;
+
+ ev = (struct wmi_pdev_bss_chan_info_event *)skb->data;
+ if (WARN_ON(skb->len < sizeof(*ev)))
+ return -EPROTO;
+
+ freq = __le32_to_cpu(ev->freq);
+ noise_floor = __le32_to_cpu(ev->noise_floor);
+ busy = __le64_to_cpu(ev->cycle_busy);
+ total = __le64_to_cpu(ev->cycle_total);
+ tx = __le64_to_cpu(ev->cycle_tx);
+ rx = __le64_to_cpu(ev->cycle_rx);
+ rx_bss = __le64_to_cpu(ev->cycle_rx_bss);
+
+ ath10k_dbg(ar, ATH10K_DBG_WMI,
+ "wmi event pdev bss chan info:\n freq: %d noise: %d cycle: busy %llu total %llu tx %llu rx %llu rx_bss %llu\n",
+ freq, noise_floor, busy, total, tx, rx, rx_bss);
+
+ spin_lock_bh(&ar->data_lock);
+ idx = freq_to_idx(ar, freq);
+ if (idx >= ARRAY_SIZE(ar->survey)) {
+ ath10k_warn(ar, "bss chan info: invalid frequency %d (idx %d out of bounds)\n",
+ freq, idx);
+ goto exit;
+ }
+
+ survey = &ar->survey[idx];
+
+ survey->noise = noise_floor;
+ survey->time = div_u64(total, cc_freq_hz);
+ survey->time_busy = div_u64(busy, cc_freq_hz);
+ survey->time_rx = div_u64(rx_bss, cc_freq_hz);
+ survey->time_tx = div_u64(tx, cc_freq_hz);
+ survey->filled |= (SURVEY_INFO_NOISE_DBM |
+ SURVEY_INFO_TIME |
+ SURVEY_INFO_TIME_BUSY |
+ SURVEY_INFO_TIME_RX |
+ SURVEY_INFO_TIME_TX);
+exit:
+ spin_unlock_bh(&ar->data_lock);
+ complete(&ar->bss_survey_done);
+ return 0;
+}
+
+static inline void ath10k_wmi_queue_set_coverage_class_work(struct ath10k *ar)
+{
+ if (ar->hw_params.hw_ops->set_coverage_class) {
+ spin_lock_bh(&ar->data_lock);
+
+ /* This call only ensures that the modified coverage class
+ * persists in case the firmware sets the registers back to
+ * their default value. So calling it is only necessary if the
+ * coverage class has a non-zero value.
+ */
+ if (ar->fw_coverage.coverage_class)
+ queue_work(ar->workqueue, &ar->set_coverage_class_work);
+
+ spin_unlock_bh(&ar->data_lock);
+ }
+}
+
+static void ath10k_wmi_op_rx(struct ath10k *ar, struct sk_buff *skb)
+{
+ struct wmi_cmd_hdr *cmd_hdr;
+ enum wmi_event_id id;
+
+ cmd_hdr = (struct wmi_cmd_hdr *)skb->data;
+ id = MS(__le32_to_cpu(cmd_hdr->cmd_id), WMI_CMD_HDR_CMD_ID);
+
+ if (!skb_pull(skb, sizeof(struct wmi_cmd_hdr)))
+ goto out;
+
+ trace_ath10k_wmi_event(ar, id, skb->data, skb->len);
+
+ switch (id) {
+ case WMI_MGMT_RX_EVENTID:
+ ath10k_wmi_event_mgmt_rx(ar, skb);
+ /* mgmt_rx() owns the skb now! */
+ return;
+ case WMI_SCAN_EVENTID:
+ ath10k_wmi_event_scan(ar, skb);
+ ath10k_wmi_queue_set_coverage_class_work(ar);
+ break;
+ case WMI_CHAN_INFO_EVENTID:
+ ath10k_wmi_event_chan_info(ar, skb);
+ break;
+ case WMI_ECHO_EVENTID:
+ ath10k_wmi_event_echo(ar, skb);
+ break;
+ case WMI_DEBUG_MESG_EVENTID:
+ ath10k_wmi_event_debug_mesg(ar, skb);
+ ath10k_wmi_queue_set_coverage_class_work(ar);
+ break;
+ case WMI_UPDATE_STATS_EVENTID:
+ ath10k_wmi_event_update_stats(ar, skb);
+ break;
+ case WMI_VDEV_START_RESP_EVENTID:
+ ath10k_wmi_event_vdev_start_resp(ar, skb);
+ ath10k_wmi_queue_set_coverage_class_work(ar);
+ break;
+ case WMI_VDEV_STOPPED_EVENTID:
+ ath10k_wmi_event_vdev_stopped(ar, skb);
+ ath10k_wmi_queue_set_coverage_class_work(ar);
+ break;
+ case WMI_PEER_STA_KICKOUT_EVENTID:
+ ath10k_wmi_event_peer_sta_kickout(ar, skb);
+ break;
+ case WMI_HOST_SWBA_EVENTID:
+ ath10k_wmi_event_host_swba(ar, skb);
+ break;
+ case WMI_TBTTOFFSET_UPDATE_EVENTID:
+ ath10k_wmi_event_tbttoffset_update(ar, skb);
+ break;
+ case WMI_PHYERR_EVENTID:
+ ath10k_wmi_event_phyerr(ar, skb);
+ break;
+ case WMI_ROAM_EVENTID:
+ ath10k_wmi_event_roam(ar, skb);
+ ath10k_wmi_queue_set_coverage_class_work(ar);
+ break;
+ case WMI_PROFILE_MATCH:
+ ath10k_wmi_event_profile_match(ar, skb);
+ break;
+ case WMI_DEBUG_PRINT_EVENTID:
+ ath10k_wmi_event_debug_print(ar, skb);
+ ath10k_wmi_queue_set_coverage_class_work(ar);
+ break;
+ case WMI_PDEV_QVIT_EVENTID:
+ ath10k_wmi_event_pdev_qvit(ar, skb);
+ break;
+ case WMI_WLAN_PROFILE_DATA_EVENTID:
+ ath10k_wmi_event_wlan_profile_data(ar, skb);
+ break;
+ case WMI_RTT_MEASUREMENT_REPORT_EVENTID:
+ ath10k_wmi_event_rtt_measurement_report(ar, skb);
+ break;
+ case WMI_TSF_MEASUREMENT_REPORT_EVENTID:
+ ath10k_wmi_event_tsf_measurement_report(ar, skb);
+ break;
+ case WMI_RTT_ERROR_REPORT_EVENTID:
+ ath10k_wmi_event_rtt_error_report(ar, skb);
+ break;
+ case WMI_WOW_WAKEUP_HOST_EVENTID:
+ ath10k_wmi_event_wow_wakeup_host(ar, skb);
+ break;
+ case WMI_DCS_INTERFERENCE_EVENTID:
+ ath10k_wmi_event_dcs_interference(ar, skb);
+ break;
+ case WMI_PDEV_TPC_CONFIG_EVENTID:
+ ath10k_wmi_event_pdev_tpc_config(ar, skb);
+ break;
+ case WMI_PDEV_FTM_INTG_EVENTID:
+ ath10k_wmi_event_pdev_ftm_intg(ar, skb);
+ break;
+ case WMI_GTK_OFFLOAD_STATUS_EVENTID:
+ ath10k_wmi_event_gtk_offload_status(ar, skb);
+ break;
+ case WMI_GTK_REKEY_FAIL_EVENTID:
+ ath10k_wmi_event_gtk_rekey_fail(ar, skb);
+ break;
+ case WMI_TX_DELBA_COMPLETE_EVENTID:
+ ath10k_wmi_event_delba_complete(ar, skb);
+ break;
+ case WMI_TX_ADDBA_COMPLETE_EVENTID:
+ ath10k_wmi_event_addba_complete(ar, skb);
+ break;
+ case WMI_VDEV_INSTALL_KEY_COMPLETE_EVENTID:
+ ath10k_wmi_event_vdev_install_key_complete(ar, skb);
+ break;
+ case WMI_SERVICE_READY_EVENTID:
+ ath10k_wmi_event_service_ready(ar, skb);
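+ /* the service ready worker owns the skb now! */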
+ return;
+ case WMI_READY_EVENTID:
+ ath10k_wmi_event_ready(ar, skb);
+ ath10k_wmi_queue_set_coverage_class_work(ar);
+ break;
+ case WMI_SERVICE_AVAILABLE_EVENTID:
+ ath10k_wmi_event_service_available(ar, skb);
+ break;
+ default:
+ ath10k_warn(ar, "Unknown eventid: %d\n", id);
+ break;
+ }
+
+out:
+ dev_kfree_skb(skb);
+}
+
+static void ath10k_wmi_10_1_op_rx(struct ath10k *ar, struct sk_buff *skb)
+{
+ struct wmi_cmd_hdr *cmd_hdr;
+ enum wmi_10x_event_id id;
+ bool consumed;
+
+ cmd_hdr = (struct wmi_cmd_hdr *)skb->data;
+ id = MS(__le32_to_cpu(cmd_hdr->cmd_id), WMI_CMD_HDR_CMD_ID);
+
+ if (!skb_pull(skb, sizeof(struct wmi_cmd_hdr)))
+ goto out;
+
+ trace_ath10k_wmi_event(ar, id, skb->data, skb->len);
+
+ consumed = ath10k_tm_event_wmi(ar, id, skb);
+
+ /* The ready event must be handled normally even in UTF mode so
+ * that we know the UTF firmware has booted; all other WMI events
+ * are simply passed through to testmode.
+ */
+ if (consumed && id != WMI_10X_READY_EVENTID) {
+ ath10k_dbg(ar, ATH10K_DBG_WMI,
+ "wmi testmode consumed 0x%x\n", id);
+ goto out;
+ }
+
+ switch (id) {
+ case WMI_10X_MGMT_RX_EVENTID:
+ ath10k_wmi_event_mgmt_rx(ar, skb);
+ /* mgmt_rx() owns the skb now! */
+ return;
+ case WMI_10X_SCAN_EVENTID:
+ ath10k_wmi_event_scan(ar, skb);
+ ath10k_wmi_queue_set_coverage_class_work(ar);
+ break;
+ case WMI_10X_CHAN_INFO_EVENTID:
+ ath10k_wmi_event_chan_info(ar, skb);
+ break;
+ case WMI_10X_ECHO_EVENTID:
+ ath10k_wmi_event_echo(ar, skb);
+ break;
+ case WMI_10X_DEBUG_MESG_EVENTID:
+ ath10k_wmi_event_debug_mesg(ar, skb);
+ ath10k_wmi_queue_set_coverage_class_work(ar);
+ break;
+ case WMI_10X_UPDATE_STATS_EVENTID:
+ ath10k_wmi_event_update_stats(ar, skb);
+ break;
+ case WMI_10X_VDEV_START_RESP_EVENTID:
+ ath10k_wmi_event_vdev_start_resp(ar, skb);
+ ath10k_wmi_queue_set_coverage_class_work(ar);
+ break;
+ case WMI_10X_VDEV_STOPPED_EVENTID:
+ ath10k_wmi_event_vdev_stopped(ar, skb);
+ ath10k_wmi_queue_set_coverage_class_work(ar);
+ break;
+ case WMI_10X_PEER_STA_KICKOUT_EVENTID:
+ ath10k_wmi_event_peer_sta_kickout(ar, skb);
+ break;
+ case WMI_10X_HOST_SWBA_EVENTID:
+ ath10k_wmi_event_host_swba(ar, skb);
+ break;
+ case WMI_10X_TBTTOFFSET_UPDATE_EVENTID:
+ ath10k_wmi_event_tbttoffset_update(ar, skb);
+ break;
+ case WMI_10X_PHYERR_EVENTID:
+ ath10k_wmi_event_phyerr(ar, skb);
+ break;
+ case WMI_10X_ROAM_EVENTID:
+ ath10k_wmi_event_roam(ar, skb);
+ ath10k_wmi_queue_set_coverage_class_work(ar);
+ break;
+ case WMI_10X_PROFILE_MATCH:
+ ath10k_wmi_event_profile_match(ar, skb);
+ break;
+ case WMI_10X_DEBUG_PRINT_EVENTID:
+ ath10k_wmi_event_debug_print(ar, skb);
+ ath10k_wmi_queue_set_coverage_class_work(ar);
+ break;
+ case WMI_10X_PDEV_QVIT_EVENTID:
+ ath10k_wmi_event_pdev_qvit(ar, skb);
+ break;
+ case WMI_10X_WLAN_PROFILE_DATA_EVENTID:
+ ath10k_wmi_event_wlan_profile_data(ar, skb);
+ break;
+ case WMI_10X_RTT_MEASUREMENT_REPORT_EVENTID:
+ ath10k_wmi_event_rtt_measurement_report(ar, skb);
+ break;
+ case WMI_10X_TSF_MEASUREMENT_REPORT_EVENTID:
+ ath10k_wmi_event_tsf_measurement_report(ar, skb);
+ break;
+ case WMI_10X_RTT_ERROR_REPORT_EVENTID:
+ ath10k_wmi_event_rtt_error_report(ar, skb);
+ break;
+ case WMI_10X_WOW_WAKEUP_HOST_EVENTID:
+ ath10k_wmi_event_wow_wakeup_host(ar, skb);
+ break;
+ case WMI_10X_DCS_INTERFERENCE_EVENTID:
+ ath10k_wmi_event_dcs_interference(ar, skb);
+ break;
+ case WMI_10X_PDEV_TPC_CONFIG_EVENTID:
+ ath10k_wmi_event_pdev_tpc_config(ar, skb);
+ break;
+ case WMI_10X_INST_RSSI_STATS_EVENTID:
+ ath10k_wmi_event_inst_rssi_stats(ar, skb);
+ break;
+ case WMI_10X_VDEV_STANDBY_REQ_EVENTID:
+ ath10k_wmi_event_vdev_standby_req(ar, skb);
+ break;
+ case WMI_10X_VDEV_RESUME_REQ_EVENTID:
+ ath10k_wmi_event_vdev_resume_req(ar, skb);
+ break;
+ case WMI_10X_SERVICE_READY_EVENTID:
+ ath10k_wmi_event_service_ready(ar, skb);
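+ /* the service ready worker owns the skb now! */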
+ return;
+ case WMI_10X_READY_EVENTID:
+ ath10k_wmi_event_ready(ar, skb);
+ ath10k_wmi_queue_set_coverage_class_work(ar);
+ break;
+ case WMI_10X_PDEV_UTF_EVENTID:
+ /* ignore utf events */
+ break;
+ default:
+ ath10k_warn(ar, "Unknown eventid: %d\n", id);
+ break;
+ }
+
+out:
+ dev_kfree_skb(skb);
+}
+
+static void ath10k_wmi_10_2_op_rx(struct ath10k *ar, struct sk_buff *skb)
+{
+ struct wmi_cmd_hdr *cmd_hdr;
+ enum wmi_10_2_event_id id;
+ bool consumed;
+
+ cmd_hdr = (struct wmi_cmd_hdr *)skb->data;
+ id = MS(__le32_to_cpu(cmd_hdr->cmd_id), WMI_CMD_HDR_CMD_ID);
+
+ if (!skb_pull(skb, sizeof(struct wmi_cmd_hdr)))
+ goto out;
+
+ trace_ath10k_wmi_event(ar, id, skb->data, skb->len);
+
+ consumed = ath10k_tm_event_wmi(ar, id, skb);
+
+ /* The ready event must be handled normally even in UTF mode so
+ * that we know the UTF firmware has booted; all other WMI events
+ * are simply passed through to testmode.
+ */
+ if (consumed && id != WMI_10_2_READY_EVENTID) {
+ ath10k_dbg(ar, ATH10K_DBG_WMI,
+ "wmi testmode consumed 0x%x\n", id);
+ goto out;
+ }
+
+ switch (id) {
+ case WMI_10_2_MGMT_RX_EVENTID:
+ ath10k_wmi_event_mgmt_rx(ar, skb);
+ /* mgmt_rx() owns the skb now! */
+ return;
+ case WMI_10_2_SCAN_EVENTID:
+ ath10k_wmi_event_scan(ar, skb);
+ ath10k_wmi_queue_set_coverage_class_work(ar);
+ break;
+ case WMI_10_2_CHAN_INFO_EVENTID:
+ ath10k_wmi_event_chan_info(ar, skb);
+ break;
+ case WMI_10_2_ECHO_EVENTID:
+ ath10k_wmi_event_echo(ar, skb);
+ break;
+ case WMI_10_2_DEBUG_MESG_EVENTID:
+ ath10k_wmi_event_debug_mesg(ar, skb);
+ ath10k_wmi_queue_set_coverage_class_work(ar);
+ break;
+ case WMI_10_2_UPDATE_STATS_EVENTID:
+ ath10k_wmi_event_update_stats(ar, skb);
+ break;
+ case WMI_10_2_VDEV_START_RESP_EVENTID:
+ ath10k_wmi_event_vdev_start_resp(ar, skb);
+ ath10k_wmi_queue_set_coverage_class_work(ar);
+ break;
+ case WMI_10_2_VDEV_STOPPED_EVENTID:
+ ath10k_wmi_event_vdev_stopped(ar, skb);
+ ath10k_wmi_queue_set_coverage_class_work(ar);
+ break;
+ case WMI_10_2_PEER_STA_KICKOUT_EVENTID:
+ ath10k_wmi_event_peer_sta_kickout(ar, skb);
+ break;
+ case WMI_10_2_HOST_SWBA_EVENTID:
+ ath10k_wmi_event_host_swba(ar, skb);
+ break;
+ case WMI_10_2_TBTTOFFSET_UPDATE_EVENTID:
+ ath10k_wmi_event_tbttoffset_update(ar, skb);
+ break;
+ case WMI_10_2_PHYERR_EVENTID:
+ ath10k_wmi_event_phyerr(ar, skb);
+ break;
+ case WMI_10_2_ROAM_EVENTID:
+ ath10k_wmi_event_roam(ar, skb);
+ ath10k_wmi_queue_set_coverage_class_work(ar);
+ break;
+ case WMI_10_2_PROFILE_MATCH:
+ ath10k_wmi_event_profile_match(ar, skb);
+ break;
+ case WMI_10_2_DEBUG_PRINT_EVENTID:
+ ath10k_wmi_event_debug_print(ar, skb);
+ ath10k_wmi_queue_set_coverage_class_work(ar);
+ break;
+ case WMI_10_2_PDEV_QVIT_EVENTID:
+ ath10k_wmi_event_pdev_qvit(ar, skb);
+ break;
+ case WMI_10_2_WLAN_PROFILE_DATA_EVENTID:
+ ath10k_wmi_event_wlan_profile_data(ar, skb);
+ break;
+ case WMI_10_2_RTT_MEASUREMENT_REPORT_EVENTID:
+ ath10k_wmi_event_rtt_measurement_report(ar, skb);
+ break;
+ case WMI_10_2_TSF_MEASUREMENT_REPORT_EVENTID:
+ ath10k_wmi_event_tsf_measurement_report(ar, skb);
+ break;
+ case WMI_10_2_RTT_ERROR_REPORT_EVENTID:
+ ath10k_wmi_event_rtt_error_report(ar, skb);
+ break;
+ case WMI_10_2_WOW_WAKEUP_HOST_EVENTID:
+ ath10k_wmi_event_wow_wakeup_host(ar, skb);
+ break;
+ case WMI_10_2_DCS_INTERFERENCE_EVENTID:
+ ath10k_wmi_event_dcs_interference(ar, skb);
+ break;
+ case WMI_10_2_PDEV_TPC_CONFIG_EVENTID:
+ ath10k_wmi_event_pdev_tpc_config(ar, skb);
+ break;
+ case WMI_10_2_INST_RSSI_STATS_EVENTID:
+ ath10k_wmi_event_inst_rssi_stats(ar, skb);
+ break;
+ case WMI_10_2_VDEV_STANDBY_REQ_EVENTID:
+ ath10k_wmi_event_vdev_standby_req(ar, skb);
+ ath10k_wmi_queue_set_coverage_class_work(ar);
+ break;
+ case WMI_10_2_VDEV_RESUME_REQ_EVENTID:
+ ath10k_wmi_event_vdev_resume_req(ar, skb);
+ ath10k_wmi_queue_set_coverage_class_work(ar);
+ break;
+ case WMI_10_2_SERVICE_READY_EVENTID:
+ ath10k_wmi_event_service_ready(ar, skb);
+ return;
+ case WMI_10_2_READY_EVENTID:
+ ath10k_wmi_event_ready(ar, skb);
+ ath10k_wmi_queue_set_coverage_class_work(ar);
+ break;
+ case WMI_10_2_PDEV_TEMPERATURE_EVENTID:
+ ath10k_wmi_event_temperature(ar, skb);
+ break;
+ case WMI_10_2_PDEV_BSS_CHAN_INFO_EVENTID:
+ ath10k_wmi_event_pdev_bss_chan_info(ar, skb);
+ break;
+ case WMI_10_2_RTT_KEEPALIVE_EVENTID:
+ case WMI_10_2_GPIO_INPUT_EVENTID:
+ case WMI_10_2_PEER_RATECODE_LIST_EVENTID:
+ case WMI_10_2_GENERIC_BUFFER_EVENTID:
+ case WMI_10_2_MCAST_BUF_RELEASE_EVENTID:
+ case WMI_10_2_MCAST_LIST_AGEOUT_EVENTID:
+ case WMI_10_2_WDS_PEER_EVENTID:
+ ath10k_dbg(ar, ATH10K_DBG_WMI,
+ "received event id %d not implemented\n", id);
+ break;
+ default:
+ ath10k_warn(ar, "Unknown eventid: %d\n", id);
+ break;
+ }
+
+out:
+ dev_kfree_skb(skb);
+}
+
+static void ath10k_wmi_10_4_op_rx(struct ath10k *ar, struct sk_buff *skb)
+{
+ struct wmi_cmd_hdr *cmd_hdr;
+ enum wmi_10_4_event_id id;
+ bool consumed;
+
+ cmd_hdr = (struct wmi_cmd_hdr *)skb->data;
+ id = MS(__le32_to_cpu(cmd_hdr->cmd_id), WMI_CMD_HDR_CMD_ID);
+
+ if (!skb_pull(skb, sizeof(struct wmi_cmd_hdr)))
+ goto out;
+
+ trace_ath10k_wmi_event(ar, id, skb->data, skb->len);
+
+ consumed = ath10k_tm_event_wmi(ar, id, skb);
+
+ /* The ready event must be handled normally even in UTF mode so that
+ * we know the UTF firmware has booted; all other WMI events are
+ * simply passed through to testmode.
+ */
+ if (consumed && id != WMI_10_4_READY_EVENTID) {
+ ath10k_dbg(ar, ATH10K_DBG_WMI,
+ "wmi testmode consumed 0x%x\n", id);
+ goto out;
+ }
+
+ switch (id) {
+ case WMI_10_4_MGMT_RX_EVENTID:
+ ath10k_wmi_event_mgmt_rx(ar, skb);
+ /* mgmt_rx() owns the skb now! */
+ return;
+ case WMI_10_4_ECHO_EVENTID:
+ ath10k_wmi_event_echo(ar, skb);
+ break;
+ case WMI_10_4_DEBUG_MESG_EVENTID:
+ ath10k_wmi_event_debug_mesg(ar, skb);
+ ath10k_wmi_queue_set_coverage_class_work(ar);
+ break;
+ case WMI_10_4_SERVICE_READY_EVENTID:
+ ath10k_wmi_event_service_ready(ar, skb);
+ return;
+ case WMI_10_4_SCAN_EVENTID:
+ ath10k_wmi_event_scan(ar, skb);
+ ath10k_wmi_queue_set_coverage_class_work(ar);
+ break;
+ case WMI_10_4_CHAN_INFO_EVENTID:
+ ath10k_wmi_event_chan_info(ar, skb);
+ break;
+ case WMI_10_4_PHYERR_EVENTID:
+ ath10k_wmi_event_phyerr(ar, skb);
+ break;
+ case WMI_10_4_READY_EVENTID:
+ ath10k_wmi_event_ready(ar, skb);
+ ath10k_wmi_queue_set_coverage_class_work(ar);
+ break;
+ case WMI_10_4_PEER_STA_KICKOUT_EVENTID:
+ ath10k_wmi_event_peer_sta_kickout(ar, skb);
+ break;
+ case WMI_10_4_ROAM_EVENTID:
+ ath10k_wmi_event_roam(ar, skb);
+ ath10k_wmi_queue_set_coverage_class_work(ar);
+ break;
+ case WMI_10_4_HOST_SWBA_EVENTID:
+ ath10k_wmi_event_host_swba(ar, skb);
+ break;
+ case WMI_10_4_TBTTOFFSET_UPDATE_EVENTID:
+ ath10k_wmi_event_tbttoffset_update(ar, skb);
+ break;
+ case WMI_10_4_DEBUG_PRINT_EVENTID:
+ ath10k_wmi_event_debug_print(ar, skb);
+ ath10k_wmi_queue_set_coverage_class_work(ar);
+ break;
+ case WMI_10_4_VDEV_START_RESP_EVENTID:
+ ath10k_wmi_event_vdev_start_resp(ar, skb);
+ ath10k_wmi_queue_set_coverage_class_work(ar);
+ break;
+ case WMI_10_4_VDEV_STOPPED_EVENTID:
+ ath10k_wmi_event_vdev_stopped(ar, skb);
+ ath10k_wmi_queue_set_coverage_class_work(ar);
+ break;
+ case WMI_10_4_WOW_WAKEUP_HOST_EVENTID:
+ case WMI_10_4_PEER_RATECODE_LIST_EVENTID:
+ case WMI_10_4_WDS_PEER_EVENTID:
+ case WMI_10_4_DEBUG_FATAL_CONDITION_EVENTID:
+ ath10k_dbg(ar, ATH10K_DBG_WMI,
+ "received event id %d not implemented\n", id);
+ break;
+ case WMI_10_4_UPDATE_STATS_EVENTID:
+ ath10k_wmi_event_update_stats(ar, skb);
+ break;
+ case WMI_10_4_PDEV_TEMPERATURE_EVENTID:
+ ath10k_wmi_event_temperature(ar, skb);
+ break;
+ case WMI_10_4_PDEV_BSS_CHAN_INFO_EVENTID:
+ ath10k_wmi_event_pdev_bss_chan_info(ar, skb);
+ break;
+ case WMI_10_4_PDEV_TPC_CONFIG_EVENTID:
+ ath10k_wmi_event_pdev_tpc_config(ar, skb);
+ break;
+ case WMI_10_4_TDLS_PEER_EVENTID:
+ ath10k_wmi_handle_tdls_peer_event(ar, skb);
+ break;
+ case WMI_10_4_PDEV_TPC_TABLE_EVENTID:
+ ath10k_wmi_event_tpc_final_table(ar, skb);
+ break;
+ case WMI_10_4_DFS_STATUS_CHECK_EVENTID:
+ ath10k_wmi_event_dfs_status_check(ar, skb);
+ break;
+ default:
+ ath10k_warn(ar, "Unknown eventid: %d\n", id);
+ break;
+ }
+
+out:
+ dev_kfree_skb(skb);
+}
+
+static void ath10k_wmi_process_rx(struct ath10k *ar, struct sk_buff *skb)
+{
+ int ret;
+
+ ret = ath10k_wmi_rx(ar, skb);
+ if (ret)
+ ath10k_warn(ar, "failed to process wmi rx: %d\n", ret);
+}
+
+int ath10k_wmi_connect(struct ath10k *ar)
+{
+ int status;
+ struct ath10k_htc_svc_conn_req conn_req;
+ struct ath10k_htc_svc_conn_resp conn_resp;
+
+ memset(&ar->wmi.svc_map, 0, sizeof(ar->wmi.svc_map));
+
+ memset(&conn_req, 0, sizeof(conn_req));
+ memset(&conn_resp, 0, sizeof(conn_resp));
+
+ /* these fields are the same for all service endpoints */
+ conn_req.ep_ops.ep_tx_complete = ath10k_wmi_htc_tx_complete;
+ conn_req.ep_ops.ep_rx_complete = ath10k_wmi_process_rx;
+ conn_req.ep_ops.ep_tx_credits = ath10k_wmi_op_ep_tx_credits;
+
+ /* connect to control service */
+ conn_req.service_id = ATH10K_HTC_SVC_ID_WMI_CONTROL;
+
+ status = ath10k_htc_connect_service(&ar->htc, &conn_req, &conn_resp);
+ if (status) {
+ ath10k_warn(ar, "failed to connect to WMI CONTROL service status: %d\n",
+ status);
+ return status;
+ }
+
+ ar->wmi.eid = conn_resp.eid;
+ return 0;
+}
+
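+/* Editor's note (illustrative sketch, not part of this patch): every WMI
+ * command built by the gen_* helpers below is sent over the single HTC
+ * control endpoint recorded in ar->wmi.eid above. A typical caller pairs
+ * a gen op with ath10k_wmi_cmd_send(), roughly:
+ *
+ *     skb = ath10k_wmi_op_gen_pdev_resume(ar);
+ *     if (IS_ERR(skb))
+ *             return PTR_ERR(skb);
+ *     return ath10k_wmi_cmd_send(ar, skb, ar->wmi.cmd->pdev_resume_cmdid);
+ *
+ * ath10k_wmi_cmd_send() and the per-firmware cmdid map (ar->wmi.cmd) are
+ * assumed here; they live elsewhere in this file and in wmi.h.
+ */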
+static struct sk_buff *
+ath10k_wmi_op_gen_pdev_set_rd(struct ath10k *ar, u16 rd, u16 rd2g, u16 rd5g,
+ u16 ctl2g, u16 ctl5g,
+ enum wmi_dfs_region dfs_reg)
+{
+ struct wmi_pdev_set_regdomain_cmd *cmd;
+ struct sk_buff *skb;
+
+ skb = ath10k_wmi_alloc_skb(ar, sizeof(*cmd));
+ if (!skb)
+ return ERR_PTR(-ENOMEM);
+
+ cmd = (struct wmi_pdev_set_regdomain_cmd *)skb->data;
+ cmd->reg_domain = __cpu_to_le32(rd);
+ cmd->reg_domain_2G = __cpu_to_le32(rd2g);
+ cmd->reg_domain_5G = __cpu_to_le32(rd5g);
+ cmd->conformance_test_limit_2G = __cpu_to_le32(ctl2g);
+ cmd->conformance_test_limit_5G = __cpu_to_le32(ctl5g);
+
+ ath10k_dbg(ar, ATH10K_DBG_WMI,
+ "wmi pdev regdomain rd %x rd2g %x rd5g %x ctl2g %x ctl5g %x\n",
+ rd, rd2g, rd5g, ctl2g, ctl5g);
+ return skb;
+}
+
+static struct sk_buff *
+ath10k_wmi_10x_op_gen_pdev_set_rd(struct ath10k *ar, u16 rd, u16 rd2g, u16
+ rd5g, u16 ctl2g, u16 ctl5g,
+ enum wmi_dfs_region dfs_reg)
+{
+ struct wmi_pdev_set_regdomain_cmd_10x *cmd;
+ struct sk_buff *skb;
+
+ skb = ath10k_wmi_alloc_skb(ar, sizeof(*cmd));
+ if (!skb)
+ return ERR_PTR(-ENOMEM);
+
+ cmd = (struct wmi_pdev_set_regdomain_cmd_10x *)skb->data;
+ cmd->reg_domain = __cpu_to_le32(rd);
+ cmd->reg_domain_2G = __cpu_to_le32(rd2g);
+ cmd->reg_domain_5G = __cpu_to_le32(rd5g);
+ cmd->conformance_test_limit_2G = __cpu_to_le32(ctl2g);
+ cmd->conformance_test_limit_5G = __cpu_to_le32(ctl5g);
+ cmd->dfs_domain = __cpu_to_le32(dfs_reg);
+
+ ath10k_dbg(ar, ATH10K_DBG_WMI,
+ "wmi pdev regdomain rd %x rd2g %x rd5g %x ctl2g %x ctl5g %x dfs_region %x\n",
+ rd, rd2g, rd5g, ctl2g, ctl5g, dfs_reg);
+ return skb;
+}
+
+static struct sk_buff *
+ath10k_wmi_op_gen_pdev_suspend(struct ath10k *ar, u32 suspend_opt)
+{
+ struct wmi_pdev_suspend_cmd *cmd;
+ struct sk_buff *skb;
+
+ skb = ath10k_wmi_alloc_skb(ar, sizeof(*cmd));
+ if (!skb)
+ return ERR_PTR(-ENOMEM);
+
+ cmd = (struct wmi_pdev_suspend_cmd *)skb->data;
+ cmd->suspend_opt = __cpu_to_le32(suspend_opt);
+
+ return skb;
+}
+
+static struct sk_buff *
+ath10k_wmi_op_gen_pdev_resume(struct ath10k *ar)
+{
+ struct sk_buff *skb;
+
+ skb = ath10k_wmi_alloc_skb(ar, 0);
+ if (!skb)
+ return ERR_PTR(-ENOMEM);
+
+ return skb;
+}
+
+static struct sk_buff *
+ath10k_wmi_op_gen_pdev_set_param(struct ath10k *ar, u32 id, u32 value)
+{
+ struct wmi_pdev_set_param_cmd *cmd;
+ struct sk_buff *skb;
+
+ if (id == WMI_PDEV_PARAM_UNSUPPORTED) {
+ ath10k_warn(ar, "pdev param %d not supported by firmware\n",
+ id);
+ return ERR_PTR(-EOPNOTSUPP);
+ }
+
+ skb = ath10k_wmi_alloc_skb(ar, sizeof(*cmd));
+ if (!skb)
+ return ERR_PTR(-ENOMEM);
+
+ cmd = (struct wmi_pdev_set_param_cmd *)skb->data;
+ cmd->param_id = __cpu_to_le32(id);
+ cmd->param_value = __cpu_to_le32(value);
+
+ ath10k_dbg(ar, ATH10K_DBG_WMI, "wmi pdev set param %d value %d\n",
+ id, value);
+ return skb;
+}
+
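+/* Editor's note (illustrative, not part of this patch): pdev params are
+ * resolved through a per-firmware map so that unsupported knobs fail with
+ * -EOPNOTSUPP instead of confusing the firmware, which is why the sentinel
+ * WMI_PDEV_PARAM_UNSUPPORTED is rejected above. A sketch of a caller,
+ * assuming the usual map and cmdid names from wmi.h:
+ *
+ *     u32 param = ar->wmi.pdev_param->dynamic_bw;
+ *
+ *     skb = ath10k_wmi_op_gen_pdev_set_param(ar, param, 1);
+ *     if (IS_ERR(skb))
+ *             return PTR_ERR(skb);
+ *     return ath10k_wmi_cmd_send(ar, skb,
+ *                                ar->wmi.cmd->pdev_set_param_cmdid);
+ */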
+void ath10k_wmi_put_host_mem_chunks(struct ath10k *ar,
+ struct wmi_host_mem_chunks *chunks)
+{
+ struct host_memory_chunk *chunk;
+ int i;
+
+ chunks->count = __cpu_to_le32(ar->wmi.num_mem_chunks);
+
+ for (i = 0; i < ar->wmi.num_mem_chunks; i++) {
+ chunk = &chunks->items[i];
+ chunk->ptr = __cpu_to_le32(ar->wmi.mem_chunks[i].paddr);
+ chunk->size = __cpu_to_le32(ar->wmi.mem_chunks[i].len);
+ chunk->req_id = __cpu_to_le32(ar->wmi.mem_chunks[i].req_id);
+
+ ath10k_dbg(ar, ATH10K_DBG_WMI,
+ "wmi chunk %d len %d requested, addr 0x%llx\n",
+ i,
+ ar->wmi.mem_chunks[i].len,
+ (unsigned long long)ar->wmi.mem_chunks[i].paddr);
+ }
+}
+
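+/* Editor's note: the chunk array written above is a flexible trailer on
+ * the init command, which is why each gen_init variant below sizes its
+ * buffer as
+ *
+ *     len = sizeof(*cmd) +
+ *           (sizeof(struct host_memory_chunk) * ar->wmi.num_mem_chunks);
+ *
+ * so ar->wmi.num_mem_chunks must be final before the init command is built.
+ */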
+static struct sk_buff *ath10k_wmi_op_gen_init(struct ath10k *ar)
+{
+ struct wmi_init_cmd *cmd;
+ struct sk_buff *buf;
+ struct wmi_resource_config config = {};
+ u32 len, val;
+
+ config.num_vdevs = __cpu_to_le32(TARGET_NUM_VDEVS);
+ config.num_peers = __cpu_to_le32(TARGET_NUM_PEERS);
+ config.num_offload_peers = __cpu_to_le32(TARGET_NUM_OFFLOAD_PEERS);
+
+ config.num_offload_reorder_bufs =
+ __cpu_to_le32(TARGET_NUM_OFFLOAD_REORDER_BUFS);
+
+ config.num_peer_keys = __cpu_to_le32(TARGET_NUM_PEER_KEYS);
+ config.num_tids = __cpu_to_le32(TARGET_NUM_TIDS);
+ config.ast_skid_limit = __cpu_to_le32(TARGET_AST_SKID_LIMIT);
+ config.tx_chain_mask = __cpu_to_le32(TARGET_TX_CHAIN_MASK);
+ config.rx_chain_mask = __cpu_to_le32(TARGET_RX_CHAIN_MASK);
+ config.rx_timeout_pri_vo = __cpu_to_le32(TARGET_RX_TIMEOUT_LO_PRI);
+ config.rx_timeout_pri_vi = __cpu_to_le32(TARGET_RX_TIMEOUT_LO_PRI);
+ config.rx_timeout_pri_be = __cpu_to_le32(TARGET_RX_TIMEOUT_LO_PRI);
+ config.rx_timeout_pri_bk = __cpu_to_le32(TARGET_RX_TIMEOUT_HI_PRI);
+ config.rx_decap_mode = __cpu_to_le32(ar->wmi.rx_decap_mode);
+ config.scan_max_pending_reqs =
+ __cpu_to_le32(TARGET_SCAN_MAX_PENDING_REQS);
+
+ config.bmiss_offload_max_vdev =
+ __cpu_to_le32(TARGET_BMISS_OFFLOAD_MAX_VDEV);
+
+ config.roam_offload_max_vdev =
+ __cpu_to_le32(TARGET_ROAM_OFFLOAD_MAX_VDEV);
+
+ config.roam_offload_max_ap_profiles =
+ __cpu_to_le32(TARGET_ROAM_OFFLOAD_MAX_AP_PROFILES);
+
+ config.num_mcast_groups = __cpu_to_le32(TARGET_NUM_MCAST_GROUPS);
+ config.num_mcast_table_elems =
+ __cpu_to_le32(TARGET_NUM_MCAST_TABLE_ELEMS);
+
+ config.mcast2ucast_mode = __cpu_to_le32(TARGET_MCAST2UCAST_MODE);
+ config.tx_dbg_log_size = __cpu_to_le32(TARGET_TX_DBG_LOG_SIZE);
+ config.num_wds_entries = __cpu_to_le32(TARGET_NUM_WDS_ENTRIES);
+ config.dma_burst_size = __cpu_to_le32(TARGET_DMA_BURST_SIZE);
+ config.mac_aggr_delim = __cpu_to_le32(TARGET_MAC_AGGR_DELIM);
+
+ val = TARGET_RX_SKIP_DEFRAG_TIMEOUT_DUP_DETECTION_CHECK;
+ config.rx_skip_defrag_timeout_dup_detection_check = __cpu_to_le32(val);
+
+ config.vow_config = __cpu_to_le32(TARGET_VOW_CONFIG);
+
+ config.gtk_offload_max_vdev =
+ __cpu_to_le32(TARGET_GTK_OFFLOAD_MAX_VDEV);
+
+ config.num_msdu_desc = __cpu_to_le32(TARGET_NUM_MSDU_DESC);
+ config.max_frag_entries = __cpu_to_le32(TARGET_MAX_FRAG_ENTRIES);
+
+ len = sizeof(*cmd) +
+ (sizeof(struct host_memory_chunk) * ar->wmi.num_mem_chunks);
+
+ buf = ath10k_wmi_alloc_skb(ar, len);
+ if (!buf)
+ return ERR_PTR(-ENOMEM);
+
+ cmd = (struct wmi_init_cmd *)buf->data;
+
+ memcpy(&cmd->resource_config, &config, sizeof(config));
+ ath10k_wmi_put_host_mem_chunks(ar, &cmd->mem_chunks);
+
+ ath10k_dbg(ar, ATH10K_DBG_WMI, "wmi init\n");
+ return buf;
+}
+
+static struct sk_buff *ath10k_wmi_10_1_op_gen_init(struct ath10k *ar)
+{
+ struct wmi_init_cmd_10x *cmd;
+ struct sk_buff *buf;
+ struct wmi_resource_config_10x config = {};
+ u32 len, val;
+
+ config.num_vdevs = __cpu_to_le32(TARGET_10X_NUM_VDEVS);
+ config.num_peers = __cpu_to_le32(TARGET_10X_NUM_PEERS);
+ config.num_peer_keys = __cpu_to_le32(TARGET_10X_NUM_PEER_KEYS);
+ config.num_tids = __cpu_to_le32(TARGET_10X_NUM_TIDS);
+ config.ast_skid_limit = __cpu_to_le32(TARGET_10X_AST_SKID_LIMIT);
+ config.tx_chain_mask = __cpu_to_le32(TARGET_10X_TX_CHAIN_MASK);
+ config.rx_chain_mask = __cpu_to_le32(TARGET_10X_RX_CHAIN_MASK);
+ config.rx_timeout_pri_vo = __cpu_to_le32(TARGET_10X_RX_TIMEOUT_LO_PRI);
+ config.rx_timeout_pri_vi = __cpu_to_le32(TARGET_10X_RX_TIMEOUT_LO_PRI);
+ config.rx_timeout_pri_be = __cpu_to_le32(TARGET_10X_RX_TIMEOUT_LO_PRI);
+ config.rx_timeout_pri_bk = __cpu_to_le32(TARGET_10X_RX_TIMEOUT_HI_PRI);
+ config.rx_decap_mode = __cpu_to_le32(ar->wmi.rx_decap_mode);
+ config.scan_max_pending_reqs =
+ __cpu_to_le32(TARGET_10X_SCAN_MAX_PENDING_REQS);
+
+ config.bmiss_offload_max_vdev =
+ __cpu_to_le32(TARGET_10X_BMISS_OFFLOAD_MAX_VDEV);
+
+ config.roam_offload_max_vdev =
+ __cpu_to_le32(TARGET_10X_ROAM_OFFLOAD_MAX_VDEV);
+
+ config.roam_offload_max_ap_profiles =
+ __cpu_to_le32(TARGET_10X_ROAM_OFFLOAD_MAX_AP_PROFILES);
+
+ config.num_mcast_groups = __cpu_to_le32(TARGET_10X_NUM_MCAST_GROUPS);
+ config.num_mcast_table_elems =
+ __cpu_to_le32(TARGET_10X_NUM_MCAST_TABLE_ELEMS);
+
+ config.mcast2ucast_mode = __cpu_to_le32(TARGET_10X_MCAST2UCAST_MODE);
+ config.tx_dbg_log_size = __cpu_to_le32(TARGET_10X_TX_DBG_LOG_SIZE);
+ config.num_wds_entries = __cpu_to_le32(TARGET_10X_NUM_WDS_ENTRIES);
+ config.dma_burst_size = __cpu_to_le32(TARGET_10X_DMA_BURST_SIZE);
+ config.mac_aggr_delim = __cpu_to_le32(TARGET_10X_MAC_AGGR_DELIM);
+
+ val = TARGET_10X_RX_SKIP_DEFRAG_TIMEOUT_DUP_DETECTION_CHECK;
+ config.rx_skip_defrag_timeout_dup_detection_check = __cpu_to_le32(val);
+
+ config.vow_config = __cpu_to_le32(TARGET_10X_VOW_CONFIG);
+
+ config.num_msdu_desc = __cpu_to_le32(TARGET_10X_NUM_MSDU_DESC);
+ config.max_frag_entries = __cpu_to_le32(TARGET_10X_MAX_FRAG_ENTRIES);
+
+ len = sizeof(*cmd) +
+ (sizeof(struct host_memory_chunk) * ar->wmi.num_mem_chunks);
+
+ buf = ath10k_wmi_alloc_skb(ar, len);
+ if (!buf)
+ return ERR_PTR(-ENOMEM);
+
+ cmd = (struct wmi_init_cmd_10x *)buf->data;
+
+ memcpy(&cmd->resource_config, &config, sizeof(config));
+ ath10k_wmi_put_host_mem_chunks(ar, &cmd->mem_chunks);
+
+ ath10k_dbg(ar, ATH10K_DBG_WMI, "wmi init 10x\n");
+ return buf;
+}
+
+static struct sk_buff *ath10k_wmi_10_2_op_gen_init(struct ath10k *ar)
+{
+ struct wmi_init_cmd_10_2 *cmd;
+ struct sk_buff *buf;
+ struct wmi_resource_config_10x config = {};
+ u32 len, val, features;
+
+ config.num_vdevs = __cpu_to_le32(TARGET_10X_NUM_VDEVS);
+ config.num_peer_keys = __cpu_to_le32(TARGET_10X_NUM_PEER_KEYS);
+
+ if (ath10k_peer_stats_enabled(ar)) {
+ config.num_peers = __cpu_to_le32(TARGET_10X_TX_STATS_NUM_PEERS);
+ config.num_tids = __cpu_to_le32(TARGET_10X_TX_STATS_NUM_TIDS);
+ } else {
+ config.num_peers = __cpu_to_le32(TARGET_10X_NUM_PEERS);
+ config.num_tids = __cpu_to_le32(TARGET_10X_NUM_TIDS);
+ }
+
+ config.ast_skid_limit = __cpu_to_le32(TARGET_10X_AST_SKID_LIMIT);
+ config.tx_chain_mask = __cpu_to_le32(TARGET_10X_TX_CHAIN_MASK);
+ config.rx_chain_mask = __cpu_to_le32(TARGET_10X_RX_CHAIN_MASK);
+ config.rx_timeout_pri_vo = __cpu_to_le32(TARGET_10X_RX_TIMEOUT_LO_PRI);
+ config.rx_timeout_pri_vi = __cpu_to_le32(TARGET_10X_RX_TIMEOUT_LO_PRI);
+ config.rx_timeout_pri_be = __cpu_to_le32(TARGET_10X_RX_TIMEOUT_LO_PRI);
+ config.rx_timeout_pri_bk = __cpu_to_le32(TARGET_10X_RX_TIMEOUT_HI_PRI);
+ config.rx_decap_mode = __cpu_to_le32(ar->wmi.rx_decap_mode);
+
+ config.scan_max_pending_reqs =
+ __cpu_to_le32(TARGET_10X_SCAN_MAX_PENDING_REQS);
+
+ config.bmiss_offload_max_vdev =
+ __cpu_to_le32(TARGET_10X_BMISS_OFFLOAD_MAX_VDEV);
+
+ config.roam_offload_max_vdev =
+ __cpu_to_le32(TARGET_10X_ROAM_OFFLOAD_MAX_VDEV);
+
+ config.roam_offload_max_ap_profiles =
+ __cpu_to_le32(TARGET_10X_ROAM_OFFLOAD_MAX_AP_PROFILES);
+
+ config.num_mcast_groups = __cpu_to_le32(TARGET_10X_NUM_MCAST_GROUPS);
+ config.num_mcast_table_elems =
+ __cpu_to_le32(TARGET_10X_NUM_MCAST_TABLE_ELEMS);
+
+ config.mcast2ucast_mode = __cpu_to_le32(TARGET_10X_MCAST2UCAST_MODE);
+ config.tx_dbg_log_size = __cpu_to_le32(TARGET_10X_TX_DBG_LOG_SIZE);
+ config.num_wds_entries = __cpu_to_le32(TARGET_10X_NUM_WDS_ENTRIES);
+ config.dma_burst_size = __cpu_to_le32(TARGET_10_2_DMA_BURST_SIZE);
+ config.mac_aggr_delim = __cpu_to_le32(TARGET_10X_MAC_AGGR_DELIM);
+
+ val = TARGET_10X_RX_SKIP_DEFRAG_TIMEOUT_DUP_DETECTION_CHECK;
+ config.rx_skip_defrag_timeout_dup_detection_check = __cpu_to_le32(val);
+
+ config.vow_config = __cpu_to_le32(TARGET_10X_VOW_CONFIG);
+
+ config.num_msdu_desc = __cpu_to_le32(TARGET_10X_NUM_MSDU_DESC);
+ config.max_frag_entries = __cpu_to_le32(TARGET_10X_MAX_FRAG_ENTRIES);
+
+ len = sizeof(*cmd) +
+ (sizeof(struct host_memory_chunk) * ar->wmi.num_mem_chunks);
+
+ buf = ath10k_wmi_alloc_skb(ar, len);
+ if (!buf)
+ return ERR_PTR(-ENOMEM);
+
+ cmd = (struct wmi_init_cmd_10_2 *)buf->data;
+
+ features = WMI_10_2_RX_BATCH_MODE;
+
+ if (test_bit(ATH10K_FLAG_BTCOEX, &ar->dev_flags) &&
+ test_bit(WMI_SERVICE_COEX_GPIO, ar->wmi.svc_map))
+ features |= WMI_10_2_COEX_GPIO;
+
+ if (ath10k_peer_stats_enabled(ar))
+ features |= WMI_10_2_PEER_STATS;
+
+ if (test_bit(WMI_SERVICE_BSS_CHANNEL_INFO_64, ar->wmi.svc_map))
+ features |= WMI_10_2_BSS_CHAN_INFO;
+
+ cmd->resource_config.feature_mask = __cpu_to_le32(features);
+
+ memcpy(&cmd->resource_config.common, &config, sizeof(config));
+ ath10k_wmi_put_host_mem_chunks(ar, &cmd->mem_chunks);
+
+ ath10k_dbg(ar, ATH10K_DBG_WMI, "wmi init 10.2\n");
+ return buf;
+}
+
+static struct sk_buff *ath10k_wmi_10_4_op_gen_init(struct ath10k *ar)
+{
+ struct wmi_init_cmd_10_4 *cmd;
+ struct sk_buff *buf;
+ struct wmi_resource_config_10_4 config = {};
+ u32 len;
+
+ config.num_vdevs = __cpu_to_le32(ar->max_num_vdevs);
+ config.num_peers = __cpu_to_le32(ar->max_num_peers);
+ config.num_active_peers = __cpu_to_le32(ar->num_active_peers);
+ config.num_tids = __cpu_to_le32(ar->num_tids);
+
+ config.num_offload_peers = __cpu_to_le32(TARGET_10_4_NUM_OFFLOAD_PEERS);
+ config.num_offload_reorder_buffs =
+ __cpu_to_le32(TARGET_10_4_NUM_OFFLOAD_REORDER_BUFFS);
+ config.num_peer_keys = __cpu_to_le32(TARGET_10_4_NUM_PEER_KEYS);
+ config.ast_skid_limit = __cpu_to_le32(TARGET_10_4_AST_SKID_LIMIT);
+ config.tx_chain_mask = __cpu_to_le32(ar->hw_params.tx_chain_mask);
+ config.rx_chain_mask = __cpu_to_le32(ar->hw_params.rx_chain_mask);
+
+ config.rx_timeout_pri[0] = __cpu_to_le32(TARGET_10_4_RX_TIMEOUT_LO_PRI);
+ config.rx_timeout_pri[1] = __cpu_to_le32(TARGET_10_4_RX_TIMEOUT_LO_PRI);
+ config.rx_timeout_pri[2] = __cpu_to_le32(TARGET_10_4_RX_TIMEOUT_LO_PRI);
+ config.rx_timeout_pri[3] = __cpu_to_le32(TARGET_10_4_RX_TIMEOUT_HI_PRI);
+
+ config.rx_decap_mode = __cpu_to_le32(ar->wmi.rx_decap_mode);
+ config.scan_max_pending_req = __cpu_to_le32(TARGET_10_4_SCAN_MAX_REQS);
+ config.bmiss_offload_max_vdev =
+ __cpu_to_le32(TARGET_10_4_BMISS_OFFLOAD_MAX_VDEV);
+ config.roam_offload_max_vdev =
+ __cpu_to_le32(TARGET_10_4_ROAM_OFFLOAD_MAX_VDEV);
+ config.roam_offload_max_ap_profiles =
+ __cpu_to_le32(TARGET_10_4_ROAM_OFFLOAD_MAX_PROFILES);
+ config.num_mcast_groups = __cpu_to_le32(TARGET_10_4_NUM_MCAST_GROUPS);
+ config.num_mcast_table_elems =
+ __cpu_to_le32(TARGET_10_4_NUM_MCAST_TABLE_ELEMS);
+
+ config.mcast2ucast_mode = __cpu_to_le32(TARGET_10_4_MCAST2UCAST_MODE);
+ config.tx_dbg_log_size = __cpu_to_le32(TARGET_10_4_TX_DBG_LOG_SIZE);
+ config.num_wds_entries = __cpu_to_le32(TARGET_10_4_NUM_WDS_ENTRIES);
+ config.dma_burst_size = __cpu_to_le32(TARGET_10_4_DMA_BURST_SIZE);
+ config.mac_aggr_delim = __cpu_to_le32(TARGET_10_4_MAC_AGGR_DELIM);
+
+ config.rx_skip_defrag_timeout_dup_detection_check =
+ __cpu_to_le32(TARGET_10_4_RX_SKIP_DEFRAG_TIMEOUT_DUP_DETECTION_CHECK);
+
+ config.vow_config = __cpu_to_le32(TARGET_10_4_VOW_CONFIG);
+ config.gtk_offload_max_vdev =
+ __cpu_to_le32(TARGET_10_4_GTK_OFFLOAD_MAX_VDEV);
+ config.num_msdu_desc = __cpu_to_le32(ar->htt.max_num_pending_tx);
+ config.max_frag_entries = __cpu_to_le32(TARGET_10_4_11AC_TX_MAX_FRAGS);
+ config.max_peer_ext_stats =
+ __cpu_to_le32(TARGET_10_4_MAX_PEER_EXT_STATS);
+ config.smart_ant_cap = __cpu_to_le32(TARGET_10_4_SMART_ANT_CAP);
+
+ config.bk_minfree = __cpu_to_le32(TARGET_10_4_BK_MIN_FREE);
+ config.be_minfree = __cpu_to_le32(TARGET_10_4_BE_MIN_FREE);
+ config.vi_minfree = __cpu_to_le32(TARGET_10_4_VI_MIN_FREE);
+ config.vo_minfree = __cpu_to_le32(TARGET_10_4_VO_MIN_FREE);
+
+ config.rx_batchmode = __cpu_to_le32(TARGET_10_4_RX_BATCH_MODE);
+ config.tt_support =
+ __cpu_to_le32(TARGET_10_4_THERMAL_THROTTLING_CONFIG);
+ config.atf_config = __cpu_to_le32(TARGET_10_4_ATF_CONFIG);
+ config.iphdr_pad_config = __cpu_to_le32(TARGET_10_4_IPHDR_PAD_CONFIG);
+ config.qwrap_config = __cpu_to_le32(TARGET_10_4_QWRAP_CONFIG);
+
+ len = sizeof(*cmd) +
+ (sizeof(struct host_memory_chunk) * ar->wmi.num_mem_chunks);
+
+ buf = ath10k_wmi_alloc_skb(ar, len);
+ if (!buf)
+ return ERR_PTR(-ENOMEM);
+
+ cmd = (struct wmi_init_cmd_10_4 *)buf->data;
+ memcpy(&cmd->resource_config, &config, sizeof(config));
+ ath10k_wmi_put_host_mem_chunks(ar, &cmd->mem_chunks);
+
+ ath10k_dbg(ar, ATH10K_DBG_WMI, "wmi init 10.4\n");
+ return buf;
+}
+
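+/* Editor's note: unlike the main/10.x/10.2 variants above, the 10.4 init
+ * command takes num_vdevs/num_peers/num_active_peers/num_tids from fields
+ * on struct ath10k (ar->max_num_vdevs etc.) that are filled in elsewhere,
+ * rather than directly from compile-time TARGET_* constants; presumably so
+ * different 10.4 targets can run with different limits.
+ */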
+int ath10k_wmi_start_scan_verify(const struct wmi_start_scan_arg *arg)
+{
+ if (arg->ie_len > WLAN_SCAN_PARAMS_MAX_IE_LEN)
+ return -EINVAL;
+ if (arg->n_channels > ARRAY_SIZE(arg->channels))
+ return -EINVAL;
+ if (arg->n_ssids > WLAN_SCAN_PARAMS_MAX_SSID)
+ return -EINVAL;
+ if (arg->n_bssids > WLAN_SCAN_PARAMS_MAX_BSSID)
+ return -EINVAL;
+
+ return 0;
+}
+
+static size_t
+ath10k_wmi_start_scan_tlvs_len(const struct wmi_start_scan_arg *arg)
+{
+ int len = 0;
+
+ if (arg->ie_len) {
+ len += sizeof(struct wmi_ie_data);
+ len += roundup(arg->ie_len, 4);
+ }
+
+ if (arg->n_channels) {
+ len += sizeof(struct wmi_chan_list);
+ len += sizeof(__le32) * arg->n_channels;
+ }
+
+ if (arg->n_ssids) {
+ len += sizeof(struct wmi_ssid_list);
+ len += sizeof(struct wmi_ssid) * arg->n_ssids;
+ }
+
+ if (arg->n_bssids) {
+ len += sizeof(struct wmi_bssid_list);
+ len += sizeof(struct wmi_mac_addr) * arg->n_bssids;
+ }
+
+ return len;
+}
+
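+/* Editor's note: the TLV area is a packed sequence of optional sections,
+ * so the total is just the sum of whichever sections are present. As a
+ * worked example, a scan with two channels, one SSID, one BSSID and no
+ * extra IEs needs
+ *
+ *     sizeof(struct wmi_chan_list)  + 2 * sizeof(__le32) +
+ *     sizeof(struct wmi_ssid_list)  + 1 * sizeof(struct wmi_ssid) +
+ *     sizeof(struct wmi_bssid_list) + 1 * sizeof(struct wmi_mac_addr)
+ *
+ * bytes; IE data, when present, is padded up to a 4-byte boundary.
+ */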
+void ath10k_wmi_put_start_scan_common(struct wmi_start_scan_common *cmn,
+ const struct wmi_start_scan_arg *arg)
+{
+ u32 scan_id;
+ u32 scan_req_id;
+
+ scan_id = WMI_HOST_SCAN_REQ_ID_PREFIX;
+ scan_id |= arg->scan_id;
+
+ scan_req_id = WMI_HOST_SCAN_REQUESTOR_ID_PREFIX;
+ scan_req_id |= arg->scan_req_id;
+
+ cmn->scan_id = __cpu_to_le32(scan_id);
+ cmn->scan_req_id = __cpu_to_le32(scan_req_id);
+ cmn->vdev_id = __cpu_to_le32(arg->vdev_id);
+ cmn->scan_priority = __cpu_to_le32(arg->scan_priority);
+ cmn->notify_scan_events = __cpu_to_le32(arg->notify_scan_events);
+ cmn->dwell_time_active = __cpu_to_le32(arg->dwell_time_active);
+ cmn->dwell_time_passive = __cpu_to_le32(arg->dwell_time_passive);
+ cmn->min_rest_time = __cpu_to_le32(arg->min_rest_time);
+ cmn->max_rest_time = __cpu_to_le32(arg->max_rest_time);
+ cmn->repeat_probe_time = __cpu_to_le32(arg->repeat_probe_time);
+ cmn->probe_spacing_time = __cpu_to_le32(arg->probe_spacing_time);
+ cmn->idle_time = __cpu_to_le32(arg->idle_time);
+ cmn->max_scan_time = __cpu_to_le32(arg->max_scan_time);
+ cmn->probe_delay = __cpu_to_le32(arg->probe_delay);
+ cmn->scan_ctrl_flags = __cpu_to_le32(arg->scan_ctrl_flags);
+}
+
+static void
+ath10k_wmi_put_start_scan_tlvs(struct wmi_start_scan_tlvs *tlvs,
+ const struct wmi_start_scan_arg *arg)
+{
+ struct wmi_ie_data *ie;
+ struct wmi_chan_list *channels;
+ struct wmi_ssid_list *ssids;
+ struct wmi_bssid_list *bssids;
+ void *ptr = tlvs->tlvs;
+ int i;
+
+ if (arg->n_channels) {
+ channels = ptr;
+ channels->tag = __cpu_to_le32(WMI_CHAN_LIST_TAG);
+ channels->num_chan = __cpu_to_le32(arg->n_channels);
+
+ for (i = 0; i < arg->n_channels; i++)
+ channels->channel_list[i].freq =
+ __cpu_to_le16(arg->channels[i]);
+
+ ptr += sizeof(*channels);
+ ptr += sizeof(__le32) * arg->n_channels;
+ }
+
+ if (arg->n_ssids) {
+ ssids = ptr;
+ ssids->tag = __cpu_to_le32(WMI_SSID_LIST_TAG);
+ ssids->num_ssids = __cpu_to_le32(arg->n_ssids);
+
+ for (i = 0; i < arg->n_ssids; i++) {
+ ssids->ssids[i].ssid_len =
+ __cpu_to_le32(arg->ssids[i].len);
+ memcpy(&ssids->ssids[i].ssid,
+ arg->ssids[i].ssid,
+ arg->ssids[i].len);
+ }
+
+ ptr += sizeof(*ssids);
+ ptr += sizeof(struct wmi_ssid) * arg->n_ssids;
+ }
+
+ if (arg->n_bssids) {
+ bssids = ptr;
+ bssids->tag = __cpu_to_le32(WMI_BSSID_LIST_TAG);
+ bssids->num_bssid = __cpu_to_le32(arg->n_bssids);
+
+ for (i = 0; i < arg->n_bssids; i++)
+ ether_addr_copy(bssids->bssid_list[i].addr,
+ arg->bssids[i].bssid);
+
+ ptr += sizeof(*bssids);
+ ptr += sizeof(struct wmi_mac_addr) * arg->n_bssids;
+ }
+
+ if (arg->ie_len) {
+ ie = ptr;
+ ie->tag = __cpu_to_le32(WMI_IE_TAG);
+ ie->ie_len = __cpu_to_le32(arg->ie_len);
+ memcpy(ie->ie_data, arg->ie, arg->ie_len);
+
+ ptr += sizeof(*ie);
+ ptr += roundup(arg->ie_len, 4);
+ }
+}
+
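+/* Editor's note: the fill order here (channels, SSIDs, BSSIDs, IEs)
+ * differs from the order the length helper sums the sections in; that is
+ * harmless because each section's size is independent of its position,
+ * but the two functions must agree on which sections exist for a given
+ * argument or the buffer will be mis-sized.
+ */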
+static struct sk_buff *
+ath10k_wmi_op_gen_start_scan(struct ath10k *ar,
+ const struct wmi_start_scan_arg *arg)
+{
+ struct wmi_start_scan_cmd *cmd;
+ struct sk_buff *skb;
+ size_t len;
+ int ret;
+
+ ret = ath10k_wmi_start_scan_verify(arg);
+ if (ret)
+ return ERR_PTR(ret);
+
+ len = sizeof(*cmd) + ath10k_wmi_start_scan_tlvs_len(arg);
+ skb = ath10k_wmi_alloc_skb(ar, len);
+ if (!skb)
+ return ERR_PTR(-ENOMEM);
+
+ cmd = (struct wmi_start_scan_cmd *)skb->data;
+
+ ath10k_wmi_put_start_scan_common(&cmd->common, arg);
+ ath10k_wmi_put_start_scan_tlvs(&cmd->tlvs, arg);
+
+ cmd->burst_duration_ms = __cpu_to_le32(0);
+
+ ath10k_dbg(ar, ATH10K_DBG_WMI, "wmi start scan\n");
+ return skb;
+}
+
+static struct sk_buff *
+ath10k_wmi_10x_op_gen_start_scan(struct ath10k *ar,
+ const struct wmi_start_scan_arg *arg)
+{
+ struct wmi_10x_start_scan_cmd *cmd;
+ struct sk_buff *skb;
+ size_t len;
+ int ret;
+
+ ret = ath10k_wmi_start_scan_verify(arg);
+ if (ret)
+ return ERR_PTR(ret);
+
+ len = sizeof(*cmd) + ath10k_wmi_start_scan_tlvs_len(arg);
+ skb = ath10k_wmi_alloc_skb(ar, len);
+ if (!skb)
+ return ERR_PTR(-ENOMEM);
+
+ cmd = (struct wmi_10x_start_scan_cmd *)skb->data;
+
+ ath10k_wmi_put_start_scan_common(&cmd->common, arg);
+ ath10k_wmi_put_start_scan_tlvs(&cmd->tlvs, arg);
+
+ ath10k_dbg(ar, ATH10K_DBG_WMI, "wmi 10x start scan\n");
+ return skb;
+}
+
+void ath10k_wmi_start_scan_init(struct ath10k *ar,
+ struct wmi_start_scan_arg *arg)
+{
+ /* set up commonly used values */
+ arg->scan_req_id = 1;
+ arg->scan_priority = WMI_SCAN_PRIORITY_LOW;
+ arg->dwell_time_active = 50;
+ arg->dwell_time_passive = 150;
+ arg->min_rest_time = 50;
+ arg->max_rest_time = 500;
+ arg->repeat_probe_time = 0;
+ arg->probe_spacing_time = 0;
+ arg->idle_time = 0;
+ arg->max_scan_time = 20000;
+ arg->probe_delay = 5;
+ arg->notify_scan_events = WMI_SCAN_EVENT_STARTED
+ | WMI_SCAN_EVENT_COMPLETED
+ | WMI_SCAN_EVENT_BSS_CHANNEL
+ | WMI_SCAN_EVENT_FOREIGN_CHANNEL
+ | WMI_SCAN_EVENT_FOREIGN_CHANNEL_EXIT
+ | WMI_SCAN_EVENT_DEQUEUED;
+ arg->scan_ctrl_flags |= WMI_SCAN_CHAN_STAT_EVENT;
+ arg->n_bssids = 1;
+ arg->bssids[0].bssid = "\xFF\xFF\xFF\xFF\xFF\xFF";
+}
+
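+/* Editor's note (illustrative): callers are expected to seed the argument
+ * with these defaults and then override only what they need before calling
+ * a gen_start_scan op, along the lines of:
+ *
+ *     struct wmi_start_scan_arg arg = {};
+ *
+ *     ath10k_wmi_start_scan_init(ar, &arg);
+ *     arg.vdev_id = vdev_id;
+ *     arg.scan_id = ATH10K_SCAN_ID;      (constant assumed from core.h)
+ *     arg.dwell_time_active = 200;       (override a single default)
+ */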
+static struct sk_buff *
+ath10k_wmi_op_gen_stop_scan(struct ath10k *ar,
+ const struct wmi_stop_scan_arg *arg)
+{
+ struct wmi_stop_scan_cmd *cmd;
+ struct sk_buff *skb;
+ u32 scan_id;
+ u32 req_id;
+
+ if (arg->req_id > 0xFFF)
+ return ERR_PTR(-EINVAL);
+ if (arg->req_type == WMI_SCAN_STOP_ONE && arg->u.scan_id > 0xFFF)
+ return ERR_PTR(-EINVAL);
+
+ skb = ath10k_wmi_alloc_skb(ar, sizeof(*cmd));
+ if (!skb)
+ return ERR_PTR(-ENOMEM);
+
+ scan_id = arg->u.scan_id;
+ scan_id |= WMI_HOST_SCAN_REQ_ID_PREFIX;
+
+ req_id = arg->req_id;
+ req_id |= WMI_HOST_SCAN_REQUESTOR_ID_PREFIX;
+
+ cmd = (struct wmi_stop_scan_cmd *)skb->data;
+ cmd->req_type = __cpu_to_le32(arg->req_type);
+ cmd->vdev_id = __cpu_to_le32(arg->u.vdev_id);
+ cmd->scan_id = __cpu_to_le32(scan_id);
+ cmd->scan_req_id = __cpu_to_le32(req_id);
+
+ ath10k_dbg(ar, ATH10K_DBG_WMI,
+ "wmi stop scan reqid %d req_type %d vdev/scan_id %d\n",
+ arg->req_id, arg->req_type, arg->u.scan_id);
+ return skb;
+}
+
+static struct sk_buff *
+ath10k_wmi_op_gen_vdev_create(struct ath10k *ar, u32 vdev_id,
+ enum wmi_vdev_type type,
+ enum wmi_vdev_subtype subtype,
+ const u8 macaddr[ETH_ALEN])
+{
+ struct wmi_vdev_create_cmd *cmd;
+ struct sk_buff *skb;
+
+ skb = ath10k_wmi_alloc_skb(ar, sizeof(*cmd));
+ if (!skb)
+ return ERR_PTR(-ENOMEM);
+
+ cmd = (struct wmi_vdev_create_cmd *)skb->data;
+ cmd->vdev_id = __cpu_to_le32(vdev_id);
+ cmd->vdev_type = __cpu_to_le32(type);
+ cmd->vdev_subtype = __cpu_to_le32(subtype);
+ ether_addr_copy(cmd->vdev_macaddr.addr, macaddr);
+
+ ath10k_dbg(ar, ATH10K_DBG_WMI,
+ "WMI vdev create: id %d type %d subtype %d macaddr %pM\n",
+ vdev_id, type, subtype, macaddr);
+ return skb;
+}
+
+static struct sk_buff *
+ath10k_wmi_op_gen_vdev_delete(struct ath10k *ar, u32 vdev_id)
+{
+ struct wmi_vdev_delete_cmd *cmd;
+ struct sk_buff *skb;
+
+ skb = ath10k_wmi_alloc_skb(ar, sizeof(*cmd));
+ if (!skb)
+ return ERR_PTR(-ENOMEM);
+
+ cmd = (struct wmi_vdev_delete_cmd *)skb->data;
+ cmd->vdev_id = __cpu_to_le32(vdev_id);
+
+ ath10k_dbg(ar, ATH10K_DBG_WMI,
+ "WMI vdev delete id %d\n", vdev_id);
+ return skb;
+}
+
+static struct sk_buff *
+ath10k_wmi_op_gen_vdev_start(struct ath10k *ar,
+ const struct wmi_vdev_start_request_arg *arg,
+ bool restart)
+{
+ struct wmi_vdev_start_request_cmd *cmd;
+ struct sk_buff *skb;
+ const char *cmdname;
+ u32 flags = 0;
+
+ if (WARN_ON(arg->hidden_ssid && !arg->ssid))
+ return ERR_PTR(-EINVAL);
+ if (WARN_ON(arg->ssid_len > sizeof(cmd->ssid.ssid)))
+ return ERR_PTR(-EINVAL);
+
+ if (restart)
+ cmdname = "restart";
+ else
+ cmdname = "start";
+
+ skb = ath10k_wmi_alloc_skb(ar, sizeof(*cmd));
+ if (!skb)
+ return ERR_PTR(-ENOMEM);
+
+ if (arg->hidden_ssid)
+ flags |= WMI_VDEV_START_HIDDEN_SSID;
+ if (arg->pmf_enabled)
+ flags |= WMI_VDEV_START_PMF_ENABLED;
+
+ cmd = (struct wmi_vdev_start_request_cmd *)skb->data;
+ cmd->vdev_id = __cpu_to_le32(arg->vdev_id);
+ cmd->disable_hw_ack = __cpu_to_le32(arg->disable_hw_ack);
+ cmd->beacon_interval = __cpu_to_le32(arg->bcn_intval);
+ cmd->dtim_period = __cpu_to_le32(arg->dtim_period);
+ cmd->flags = __cpu_to_le32(flags);
+ cmd->bcn_tx_rate = __cpu_to_le32(arg->bcn_tx_rate);
+ cmd->bcn_tx_power = __cpu_to_le32(arg->bcn_tx_power);
+
+ if (arg->ssid) {
+ cmd->ssid.ssid_len = __cpu_to_le32(arg->ssid_len);
+ memcpy(cmd->ssid.ssid, arg->ssid, arg->ssid_len);
+ }
+
+ ath10k_wmi_put_wmi_channel(&cmd->chan, &arg->channel);
+
+ ath10k_dbg(ar, ATH10K_DBG_WMI,
+ "wmi vdev %s id 0x%x flags: 0x%0X, freq %d, mode %d, ch_flags: 0x%0X, max_power: %d\n",
+ cmdname, arg->vdev_id,
+ flags, arg->channel.freq, arg->channel.mode,
+ __le32_to_cpu(cmd->chan.flags), arg->channel.max_power);
+
+ return skb;
+}
+
+static struct sk_buff *
+ath10k_wmi_op_gen_vdev_stop(struct ath10k *ar, u32 vdev_id)
+{
+ struct wmi_vdev_stop_cmd *cmd;
+ struct sk_buff *skb;
+
+ skb = ath10k_wmi_alloc_skb(ar, sizeof(*cmd));
+ if (!skb)
+ return ERR_PTR(-ENOMEM);
+
+ cmd = (struct wmi_vdev_stop_cmd *)skb->data;
+ cmd->vdev_id = __cpu_to_le32(vdev_id);
+
+ ath10k_dbg(ar, ATH10K_DBG_WMI, "wmi vdev stop id 0x%x\n", vdev_id);
+ return skb;
+}
+
+static struct sk_buff *
+ath10k_wmi_op_gen_vdev_up(struct ath10k *ar, u32 vdev_id, u32 aid,
+ const u8 *bssid)
+{
+ struct wmi_vdev_up_cmd *cmd;
+ struct sk_buff *skb;
+
+ skb = ath10k_wmi_alloc_skb(ar, sizeof(*cmd));
+ if (!skb)
+ return ERR_PTR(-ENOMEM);
+
+ cmd = (struct wmi_vdev_up_cmd *)skb->data;
+ cmd->vdev_id = __cpu_to_le32(vdev_id);
+ cmd->vdev_assoc_id = __cpu_to_le32(aid);
+ ether_addr_copy(cmd->vdev_bssid.addr, bssid);
+
+ ath10k_dbg(ar, ATH10K_DBG_WMI,
+ "wmi mgmt vdev up id 0x%x assoc id %d bssid %pM\n",
+ vdev_id, aid, bssid);
+ return skb;
+}
+
+static struct sk_buff *
+ath10k_wmi_op_gen_vdev_down(struct ath10k *ar, u32 vdev_id)
+{
+ struct wmi_vdev_down_cmd *cmd;
+ struct sk_buff *skb;
+
+ skb = ath10k_wmi_alloc_skb(ar, sizeof(*cmd));
+ if (!skb)
+ return ERR_PTR(-ENOMEM);
+
+ cmd = (struct wmi_vdev_down_cmd *)skb->data;
+ cmd->vdev_id = __cpu_to_le32(vdev_id);
+
+ ath10k_dbg(ar, ATH10K_DBG_WMI,
+ "wmi mgmt vdev down id 0x%x\n", vdev_id);
+ return skb;
+}
+
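+/* Editor's note: the helpers above map onto the firmware's vdev lifecycle.
+ * The usual ordering, with the completion events handled earlier in this
+ * file, is roughly:
+ *
+ *     create -> start (WMI_..._VDEV_START_RESP_EVENTID) -> up
+ *     down -> stop (WMI_..._VDEV_STOPPED_EVENTID) -> delete
+ */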
+static struct sk_buff *
+ath10k_wmi_op_gen_vdev_set_param(struct ath10k *ar, u32 vdev_id,
+ u32 param_id, u32 param_value)
+{
+ struct wmi_vdev_set_param_cmd *cmd;
+ struct sk_buff *skb;
+
+ if (param_id == WMI_VDEV_PARAM_UNSUPPORTED) {
+ ath10k_dbg(ar, ATH10K_DBG_WMI,
+ "vdev param %d not supported by firmware\n",
+ param_id);
+ return ERR_PTR(-EOPNOTSUPP);
+ }
+
+ skb = ath10k_wmi_alloc_skb(ar, sizeof(*cmd));
+ if (!skb)
+ return ERR_PTR(-ENOMEM);
+
+ cmd = (struct wmi_vdev_set_param_cmd *)skb->data;
+ cmd->vdev_id = __cpu_to_le32(vdev_id);
+ cmd->param_id = __cpu_to_le32(param_id);
+ cmd->param_value = __cpu_to_le32(param_value);
+
+ ath10k_dbg(ar, ATH10K_DBG_WMI,
+ "wmi vdev id 0x%x set param %d value %d\n",
+ vdev_id, param_id, param_value);
+ return skb;
+}
+
+static struct sk_buff *
+ath10k_wmi_op_gen_vdev_install_key(struct ath10k *ar,
+ const struct wmi_vdev_install_key_arg *arg)
+{
+ struct wmi_vdev_install_key_cmd *cmd;
+ struct sk_buff *skb;
+
+ if (arg->key_cipher == WMI_CIPHER_NONE && arg->key_data != NULL)
+ return ERR_PTR(-EINVAL);
+ if (arg->key_cipher != WMI_CIPHER_NONE && arg->key_data == NULL)
+ return ERR_PTR(-EINVAL);
+
+ skb = ath10k_wmi_alloc_skb(ar, sizeof(*cmd) + arg->key_len);
+ if (!skb)
+ return ERR_PTR(-ENOMEM);
+
+ cmd = (struct wmi_vdev_install_key_cmd *)skb->data;
+ cmd->vdev_id = __cpu_to_le32(arg->vdev_id);
+ cmd->key_idx = __cpu_to_le32(arg->key_idx);
+ cmd->key_flags = __cpu_to_le32(arg->key_flags);
+ cmd->key_cipher = __cpu_to_le32(arg->key_cipher);
+ cmd->key_len = __cpu_to_le32(arg->key_len);
+ cmd->key_txmic_len = __cpu_to_le32(arg->key_txmic_len);
+ cmd->key_rxmic_len = __cpu_to_le32(arg->key_rxmic_len);
+
+ if (arg->macaddr)
+ ether_addr_copy(cmd->peer_macaddr.addr, arg->macaddr);
+ if (arg->key_data)
+ memcpy(cmd->key_data, arg->key_data, arg->key_len);
+
+ ath10k_dbg(ar, ATH10K_DBG_WMI,
+ "wmi vdev install key idx %d cipher %d len %d\n",
+ arg->key_idx, arg->key_cipher, arg->key_len);
+ return skb;
+}
+
+static struct sk_buff *
+ath10k_wmi_op_gen_vdev_spectral_conf(struct ath10k *ar,
+ const struct wmi_vdev_spectral_conf_arg *arg)
+{
+ struct wmi_vdev_spectral_conf_cmd *cmd;
+ struct sk_buff *skb;
+
+ skb = ath10k_wmi_alloc_skb(ar, sizeof(*cmd));
+ if (!skb)
+ return ERR_PTR(-ENOMEM);
+
+ cmd = (struct wmi_vdev_spectral_conf_cmd *)skb->data;
+ cmd->vdev_id = __cpu_to_le32(arg->vdev_id);
+ cmd->scan_count = __cpu_to_le32(arg->scan_count);
+ cmd->scan_period = __cpu_to_le32(arg->scan_period);
+ cmd->scan_priority = __cpu_to_le32(arg->scan_priority);
+ cmd->scan_fft_size = __cpu_to_le32(arg->scan_fft_size);
+ cmd->scan_gc_ena = __cpu_to_le32(arg->scan_gc_ena);
+ cmd->scan_restart_ena = __cpu_to_le32(arg->scan_restart_ena);
+ cmd->scan_noise_floor_ref = __cpu_to_le32(arg->scan_noise_floor_ref);
+ cmd->scan_init_delay = __cpu_to_le32(arg->scan_init_delay);
+ cmd->scan_nb_tone_thr = __cpu_to_le32(arg->scan_nb_tone_thr);
+ cmd->scan_str_bin_thr = __cpu_to_le32(arg->scan_str_bin_thr);
+ cmd->scan_wb_rpt_mode = __cpu_to_le32(arg->scan_wb_rpt_mode);
+ cmd->scan_rssi_rpt_mode = __cpu_to_le32(arg->scan_rssi_rpt_mode);
+ cmd->scan_rssi_thr = __cpu_to_le32(arg->scan_rssi_thr);
+ cmd->scan_pwr_format = __cpu_to_le32(arg->scan_pwr_format);
+ cmd->scan_rpt_mode = __cpu_to_le32(arg->scan_rpt_mode);
+ cmd->scan_bin_scale = __cpu_to_le32(arg->scan_bin_scale);
+ cmd->scan_dbm_adj = __cpu_to_le32(arg->scan_dbm_adj);
+ cmd->scan_chn_mask = __cpu_to_le32(arg->scan_chn_mask);
+
+ return skb;
+}
+
+static struct sk_buff *
+ath10k_wmi_op_gen_vdev_spectral_enable(struct ath10k *ar, u32 vdev_id,
+ u32 trigger, u32 enable)
+{
+ struct wmi_vdev_spectral_enable_cmd *cmd;
+ struct sk_buff *skb;
+
+ skb = ath10k_wmi_alloc_skb(ar, sizeof(*cmd));
+ if (!skb)
+ return ERR_PTR(-ENOMEM);
+
+ cmd = (struct wmi_vdev_spectral_enable_cmd *)skb->data;
+ cmd->vdev_id = __cpu_to_le32(vdev_id);
+ cmd->trigger_cmd = __cpu_to_le32(trigger);
+ cmd->enable_cmd = __cpu_to_le32(enable);
+
+ return skb;
+}
+
+static struct sk_buff *
+ath10k_wmi_op_gen_peer_create(struct ath10k *ar, u32 vdev_id,
+ const u8 peer_addr[ETH_ALEN],
+ enum wmi_peer_type peer_type)
+{
+ struct wmi_peer_create_cmd *cmd;
+ struct sk_buff *skb;
+
+ skb = ath10k_wmi_alloc_skb(ar, sizeof(*cmd));
+ if (!skb)
+ return ERR_PTR(-ENOMEM);
+
+ cmd = (struct wmi_peer_create_cmd *)skb->data;
+ cmd->vdev_id = __cpu_to_le32(vdev_id);
+ ether_addr_copy(cmd->peer_macaddr.addr, peer_addr);
+ cmd->peer_type = __cpu_to_le32(peer_type);
+
+ ath10k_dbg(ar, ATH10K_DBG_WMI,
+ "wmi peer create vdev_id %d peer_addr %pM\n",
+ vdev_id, peer_addr);
+ return skb;
+}
+
+static struct sk_buff *
+ath10k_wmi_op_gen_peer_delete(struct ath10k *ar, u32 vdev_id,
+ const u8 peer_addr[ETH_ALEN])
+{
+ struct wmi_peer_delete_cmd *cmd;
+ struct sk_buff *skb;
+
+ skb = ath10k_wmi_alloc_skb(ar, sizeof(*cmd));
+ if (!skb)
+ return ERR_PTR(-ENOMEM);
+
+ cmd = (struct wmi_peer_delete_cmd *)skb->data;
+ cmd->vdev_id = __cpu_to_le32(vdev_id);
+ ether_addr_copy(cmd->peer_macaddr.addr, peer_addr);
+
+ ath10k_dbg(ar, ATH10K_DBG_WMI,
+ "wmi peer delete vdev_id %d peer_addr %pM\n",
+ vdev_id, peer_addr);
+ return skb;
+}
+
+static struct sk_buff *
+ath10k_wmi_op_gen_peer_flush(struct ath10k *ar, u32 vdev_id,
+ const u8 peer_addr[ETH_ALEN], u32 tid_bitmap)
+{
+ struct wmi_peer_flush_tids_cmd *cmd;
+ struct sk_buff *skb;
+
+ skb = ath10k_wmi_alloc_skb(ar, sizeof(*cmd));
+ if (!skb)
+ return ERR_PTR(-ENOMEM);
+
+ cmd = (struct wmi_peer_flush_tids_cmd *)skb->data;
+ cmd->vdev_id = __cpu_to_le32(vdev_id);
+ cmd->peer_tid_bitmap = __cpu_to_le32(tid_bitmap);
+ ether_addr_copy(cmd->peer_macaddr.addr, peer_addr);
+
+ ath10k_dbg(ar, ATH10K_DBG_WMI,
+ "wmi peer flush vdev_id %d peer_addr %pM tids %08x\n",
+ vdev_id, peer_addr, tid_bitmap);
+ return skb;
+}
+
+static struct sk_buff *
+ath10k_wmi_op_gen_peer_set_param(struct ath10k *ar, u32 vdev_id,
+ const u8 *peer_addr,
+ enum wmi_peer_param param_id,
+ u32 param_value)
+{
+ struct wmi_peer_set_param_cmd *cmd;
+ struct sk_buff *skb;
+
+ skb = ath10k_wmi_alloc_skb(ar, sizeof(*cmd));
+ if (!skb)
+ return ERR_PTR(-ENOMEM);
+
+ cmd = (struct wmi_peer_set_param_cmd *)skb->data;
+ cmd->vdev_id = __cpu_to_le32(vdev_id);
+ cmd->param_id = __cpu_to_le32(param_id);
+ cmd->param_value = __cpu_to_le32(param_value);
+ ether_addr_copy(cmd->peer_macaddr.addr, peer_addr);
+
+ ath10k_dbg(ar, ATH10K_DBG_WMI,
+ "wmi vdev %d peer 0x%pM set param %d value %d\n",
+ vdev_id, peer_addr, param_id, param_value);
+ return skb;
+}
+
+static struct sk_buff *
+ath10k_wmi_op_gen_set_psmode(struct ath10k *ar, u32 vdev_id,
+ enum wmi_sta_ps_mode psmode)
+{
+ struct wmi_sta_powersave_mode_cmd *cmd;
+ struct sk_buff *skb;
+
+ skb = ath10k_wmi_alloc_skb(ar, sizeof(*cmd));
+ if (!skb)
+ return ERR_PTR(-ENOMEM);
+
+ cmd = (struct wmi_sta_powersave_mode_cmd *)skb->data;
+ cmd->vdev_id = __cpu_to_le32(vdev_id);
+ cmd->sta_ps_mode = __cpu_to_le32(psmode);
+
+ ath10k_dbg(ar, ATH10K_DBG_WMI,
+ "wmi set powersave id 0x%x mode %d\n",
+ vdev_id, psmode);
+ return skb;
+}
+
+static struct sk_buff *
+ath10k_wmi_op_gen_set_sta_ps(struct ath10k *ar, u32 vdev_id,
+ enum wmi_sta_powersave_param param_id,
+ u32 value)
+{
+ struct wmi_sta_powersave_param_cmd *cmd;
+ struct sk_buff *skb;
+
+ skb = ath10k_wmi_alloc_skb(ar, sizeof(*cmd));
+ if (!skb)
+ return ERR_PTR(-ENOMEM);
+
+ cmd = (struct wmi_sta_powersave_param_cmd *)skb->data;
+ cmd->vdev_id = __cpu_to_le32(vdev_id);
+ cmd->param_id = __cpu_to_le32(param_id);
+ cmd->param_value = __cpu_to_le32(value);
+
+ ath10k_dbg(ar, ATH10K_DBG_WMI,
+ "wmi sta ps param vdev_id 0x%x param %d value %d\n",
+ vdev_id, param_id, value);
+ return skb;
+}
+
+static struct sk_buff *
+ath10k_wmi_op_gen_set_ap_ps(struct ath10k *ar, u32 vdev_id, const u8 *mac,
+ enum wmi_ap_ps_peer_param param_id, u32 value)
+{
+ struct wmi_ap_ps_peer_cmd *cmd;
+ struct sk_buff *skb;
+
+ if (!mac)
+ return ERR_PTR(-EINVAL);
+
+ skb = ath10k_wmi_alloc_skb(ar, sizeof(*cmd));
+ if (!skb)
+ return ERR_PTR(-ENOMEM);
+
+ cmd = (struct wmi_ap_ps_peer_cmd *)skb->data;
+ cmd->vdev_id = __cpu_to_le32(vdev_id);
+ cmd->param_id = __cpu_to_le32(param_id);
+ cmd->param_value = __cpu_to_le32(value);
+ ether_addr_copy(cmd->peer_macaddr.addr, mac);
+
+ ath10k_dbg(ar, ATH10K_DBG_WMI,
+ "wmi ap ps param vdev_id 0x%X param %d value %d mac_addr %pM\n",
+ vdev_id, param_id, value, mac);
+ return skb;
+}
+
+static struct sk_buff *
+ath10k_wmi_op_gen_scan_chan_list(struct ath10k *ar,
+ const struct wmi_scan_chan_list_arg *arg)
+{
+ struct wmi_scan_chan_list_cmd *cmd;
+ struct sk_buff *skb;
+ struct wmi_channel_arg *ch;
+ struct wmi_channel *ci;
+ int len;
+ int i;
+
+ len = sizeof(*cmd) + arg->n_channels * sizeof(struct wmi_channel);
+
+ skb = ath10k_wmi_alloc_skb(ar, len);
+ if (!skb)
+ return ERR_PTR(-ENOMEM);
+
+ cmd = (struct wmi_scan_chan_list_cmd *)skb->data;
+ cmd->num_scan_chans = __cpu_to_le32(arg->n_channels);
+
+ for (i = 0; i < arg->n_channels; i++) {
+ ch = &arg->channels[i];
+ ci = &cmd->chan_info[i];
+
+ ath10k_wmi_put_wmi_channel(ci, ch);
+ }
+
+ return skb;
+}
+
+static void
+ath10k_wmi_peer_assoc_fill(struct ath10k *ar, void *buf,
+ const struct wmi_peer_assoc_complete_arg *arg)
+{
+ struct wmi_common_peer_assoc_complete_cmd *cmd = buf;
+
+ cmd->vdev_id = __cpu_to_le32(arg->vdev_id);
+ cmd->peer_new_assoc = __cpu_to_le32(arg->peer_reassoc ? 0 : 1);
+ cmd->peer_associd = __cpu_to_le32(arg->peer_aid);
+ cmd->peer_flags = __cpu_to_le32(arg->peer_flags);
+ cmd->peer_caps = __cpu_to_le32(arg->peer_caps);
+ cmd->peer_listen_intval = __cpu_to_le32(arg->peer_listen_intval);
+ cmd->peer_ht_caps = __cpu_to_le32(arg->peer_ht_caps);
+ cmd->peer_max_mpdu = __cpu_to_le32(arg->peer_max_mpdu);
+ cmd->peer_mpdu_density = __cpu_to_le32(arg->peer_mpdu_density);
+ cmd->peer_rate_caps = __cpu_to_le32(arg->peer_rate_caps);
+ cmd->peer_nss = __cpu_to_le32(arg->peer_num_spatial_streams);
+ cmd->peer_vht_caps = __cpu_to_le32(arg->peer_vht_caps);
+ cmd->peer_phymode = __cpu_to_le32(arg->peer_phymode);
+
+ ether_addr_copy(cmd->peer_macaddr.addr, arg->addr);
+
+ cmd->peer_legacy_rates.num_rates =
+ __cpu_to_le32(arg->peer_legacy_rates.num_rates);
+ memcpy(cmd->peer_legacy_rates.rates, arg->peer_legacy_rates.rates,
+ arg->peer_legacy_rates.num_rates);
+
+ cmd->peer_ht_rates.num_rates =
+ __cpu_to_le32(arg->peer_ht_rates.num_rates);
+ memcpy(cmd->peer_ht_rates.rates, arg->peer_ht_rates.rates,
+ arg->peer_ht_rates.num_rates);
+
+ cmd->peer_vht_rates.rx_max_rate =
+ __cpu_to_le32(arg->peer_vht_rates.rx_max_rate);
+ cmd->peer_vht_rates.rx_mcs_set =
+ __cpu_to_le32(arg->peer_vht_rates.rx_mcs_set);
+ cmd->peer_vht_rates.tx_max_rate =
+ __cpu_to_le32(arg->peer_vht_rates.tx_max_rate);
+ cmd->peer_vht_rates.tx_mcs_set =
+ __cpu_to_le32(arg->peer_vht_rates.tx_mcs_set);
+}
+
+static void
+ath10k_wmi_peer_assoc_fill_main(struct ath10k *ar, void *buf,
+ const struct wmi_peer_assoc_complete_arg *arg)
+{
+ struct wmi_main_peer_assoc_complete_cmd *cmd = buf;
+
+ ath10k_wmi_peer_assoc_fill(ar, buf, arg);
+ memset(cmd->peer_ht_info, 0, sizeof(cmd->peer_ht_info));
+}
+
+static void
+ath10k_wmi_peer_assoc_fill_10_1(struct ath10k *ar, void *buf,
+ const struct wmi_peer_assoc_complete_arg *arg)
+{
+ ath10k_wmi_peer_assoc_fill(ar, buf, arg);
+}
+
+static void
+ath10k_wmi_peer_assoc_fill_10_2(struct ath10k *ar, void *buf,
+ const struct wmi_peer_assoc_complete_arg *arg)
+{
+ struct wmi_10_2_peer_assoc_complete_cmd *cmd = buf;
+ int max_mcs, max_nss;
+ u32 info0;
+
+ /* TODO: Is using max values okay with firmware? */
+ max_mcs = 0xf;
+ max_nss = 0xf;
+
+ info0 = SM(max_mcs, WMI_PEER_ASSOC_INFO0_MAX_MCS_IDX) |
+ SM(max_nss, WMI_PEER_ASSOC_INFO0_MAX_NSS);
+
+ ath10k_wmi_peer_assoc_fill(ar, buf, arg);
+ cmd->info0 = __cpu_to_le32(info0);
+}
+
+static void
+ath10k_wmi_peer_assoc_fill_10_4(struct ath10k *ar, void *buf,
+ const struct wmi_peer_assoc_complete_arg *arg)
+{
+ struct wmi_10_4_peer_assoc_complete_cmd *cmd = buf;
+
+ ath10k_wmi_peer_assoc_fill_10_2(ar, buf, arg);
+ if (arg->peer_bw_rxnss_override)
+ cmd->peer_bw_rxnss_override =
+ __cpu_to_le32((arg->peer_bw_rxnss_override - 1) |
+ BIT(PEER_BW_RXNSS_OVERRIDE_OFFSET));
+ else
+ cmd->peer_bw_rxnss_override = 0;
+}
+
+static int
+ath10k_wmi_peer_assoc_check_arg(const struct wmi_peer_assoc_complete_arg *arg)
+{
+ if (arg->peer_mpdu_density > 16)
+ return -EINVAL;
+ if (arg->peer_legacy_rates.num_rates > MAX_SUPPORTED_RATES)
+ return -EINVAL;
+ if (arg->peer_ht_rates.num_rates > MAX_SUPPORTED_RATES)
+ return -EINVAL;
+
+ return 0;
+}
+
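+/* Editor's note: the 16 above is in microseconds; the argument is
+ * presumably the decoded 802.11 minimum MPDU start spacing (0..16 us),
+ * not the raw 3-bit HT capability field.
+ */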
+static struct sk_buff *
+ath10k_wmi_op_gen_peer_assoc(struct ath10k *ar,
+ const struct wmi_peer_assoc_complete_arg *arg)
+{
+ size_t len = sizeof(struct wmi_main_peer_assoc_complete_cmd);
+ struct sk_buff *skb;
+ int ret;
+
+ ret = ath10k_wmi_peer_assoc_check_arg(arg);
+ if (ret)
+ return ERR_PTR(ret);
+
+ skb = ath10k_wmi_alloc_skb(ar, len);
+ if (!skb)
+ return ERR_PTR(-ENOMEM);
+
+ ath10k_wmi_peer_assoc_fill_main(ar, skb->data, arg);
+
+ ath10k_dbg(ar, ATH10K_DBG_WMI,
+ "wmi peer assoc vdev %d addr %pM (%s)\n",
+ arg->vdev_id, arg->addr,
+ arg->peer_reassoc ? "reassociate" : "new");
+ return skb;
+}
+
+static struct sk_buff *
+ath10k_wmi_10_1_op_gen_peer_assoc(struct ath10k *ar,
+ const struct wmi_peer_assoc_complete_arg *arg)
+{
+ size_t len = sizeof(struct wmi_10_1_peer_assoc_complete_cmd);
+ struct sk_buff *skb;
+ int ret;
+
+ ret = ath10k_wmi_peer_assoc_check_arg(arg);
+ if (ret)
+ return ERR_PTR(ret);
+
+ skb = ath10k_wmi_alloc_skb(ar, len);
+ if (!skb)
+ return ERR_PTR(-ENOMEM);
+
+ ath10k_wmi_peer_assoc_fill_10_1(ar, skb->data, arg);
+
+ ath10k_dbg(ar, ATH10K_DBG_WMI,
+ "wmi peer assoc vdev %d addr %pM (%s)\n",
+ arg->vdev_id, arg->addr,
+ arg->peer_reassoc ? "reassociate" : "new");
+ return skb;
+}
+
+static struct sk_buff *
+ath10k_wmi_10_2_op_gen_peer_assoc(struct ath10k *ar,
+ const struct wmi_peer_assoc_complete_arg *arg)
+{
+ size_t len = sizeof(struct wmi_10_2_peer_assoc_complete_cmd);
+ struct sk_buff *skb;
+ int ret;
+
+ ret = ath10k_wmi_peer_assoc_check_arg(arg);
+ if (ret)
+ return ERR_PTR(ret);
+
+ skb = ath10k_wmi_alloc_skb(ar, len);
+ if (!skb)
+ return ERR_PTR(-ENOMEM);
+
+ ath10k_wmi_peer_assoc_fill_10_2(ar, skb->data, arg);
+
+ ath10k_dbg(ar, ATH10K_DBG_WMI,
+ "wmi peer assoc vdev %d addr %pM (%s)\n",
+ arg->vdev_id, arg->addr,
+ arg->peer_reassoc ? "reassociate" : "new");
+ return skb;
+}
+
+static struct sk_buff *
+ath10k_wmi_10_4_op_gen_peer_assoc(struct ath10k *ar,
+ const struct wmi_peer_assoc_complete_arg *arg)
+{
+ size_t len = sizeof(struct wmi_10_4_peer_assoc_complete_cmd);
+ struct sk_buff *skb;
+ int ret;
+
+ ret = ath10k_wmi_peer_assoc_check_arg(arg);
+ if (ret)
+ return ERR_PTR(ret);
+
+ skb = ath10k_wmi_alloc_skb(ar, len);
+ if (!skb)
+ return ERR_PTR(-ENOMEM);
+
+ ath10k_wmi_peer_assoc_fill_10_4(ar, skb->data, arg);
+
+ ath10k_dbg(ar, ATH10K_DBG_WMI,
+ "wmi peer assoc vdev %d addr %pM (%s)\n",
+ arg->vdev_id, arg->addr,
+ arg->peer_reassoc ? "reassociate" : "new");
+ return skb;
+}
+
+static struct sk_buff *
+ath10k_wmi_10_2_op_gen_pdev_get_temperature(struct ath10k *ar)
+{
+ struct sk_buff *skb;
+
+ skb = ath10k_wmi_alloc_skb(ar, 0);
+ if (!skb)
+ return ERR_PTR(-ENOMEM);
+
+ ath10k_dbg(ar, ATH10K_DBG_WMI, "wmi pdev get temperature\n");
+ return skb;
+}
+
+static struct sk_buff *
+ath10k_wmi_10_2_op_gen_pdev_bss_chan_info(struct ath10k *ar,
+ enum wmi_bss_survey_req_type type)
+{
+ struct wmi_pdev_chan_info_req_cmd *cmd;
+ struct sk_buff *skb;
+
+ skb = ath10k_wmi_alloc_skb(ar, sizeof(*cmd));
+ if (!skb)
+ return ERR_PTR(-ENOMEM);
+
+ cmd = (struct wmi_pdev_chan_info_req_cmd *)skb->data;
+ cmd->type = __cpu_to_le32(type);
+
+ ath10k_dbg(ar, ATH10K_DBG_WMI,
+ "wmi pdev bss info request type %d\n", type);
+
+ return skb;
+}
+
+/* This function assumes the beacon is already DMA mapped */
+static struct sk_buff *
+ath10k_wmi_op_gen_beacon_dma(struct ath10k *ar, u32 vdev_id, const void *bcn,
+ size_t bcn_len, u32 bcn_paddr, bool dtim_zero,
+ bool deliver_cab)
+{
+ struct wmi_bcn_tx_ref_cmd *cmd;
+ struct sk_buff *skb;
+ struct ieee80211_hdr *hdr;
+ u16 fc;
+
+ skb = ath10k_wmi_alloc_skb(ar, sizeof(*cmd));
+ if (!skb)
+ return ERR_PTR(-ENOMEM);
+
+ hdr = (struct ieee80211_hdr *)bcn;
+ fc = le16_to_cpu(hdr->frame_control);
+
+ cmd = (struct wmi_bcn_tx_ref_cmd *)skb->data;
+ cmd->vdev_id = __cpu_to_le32(vdev_id);
+ cmd->data_len = __cpu_to_le32(bcn_len);
+ cmd->data_ptr = __cpu_to_le32(bcn_paddr);
+ cmd->msdu_id = 0;
+ cmd->frame_control = __cpu_to_le32(fc);
+ cmd->flags = 0;
+ cmd->antenna_mask = __cpu_to_le32(WMI_BCN_TX_REF_DEF_ANTENNA);
+
+ if (dtim_zero)
+ cmd->flags |= __cpu_to_le32(WMI_BCN_TX_REF_FLAG_DTIM_ZERO);
+
+ if (deliver_cab)
+ cmd->flags |= __cpu_to_le32(WMI_BCN_TX_REF_FLAG_DELIVER_CAB);
+
+ return skb;
+}
+
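+/* Editor's note: only the DMA address and length are sent above, never the
+ * beacon bytes themselves, so the caller's mapping must stay valid until
+ * the firmware has consumed the frame; hence the "already DMA mapped"
+ * contract stated before the function.
+ */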
+void ath10k_wmi_set_wmm_param(struct wmi_wmm_params *params,
+ const struct wmi_wmm_params_arg *arg)
+{
+ params->cwmin = __cpu_to_le32(arg->cwmin);
+ params->cwmax = __cpu_to_le32(arg->cwmax);
+ params->aifs = __cpu_to_le32(arg->aifs);
+ params->txop = __cpu_to_le32(arg->txop);
+ params->acm = __cpu_to_le32(arg->acm);
+ params->no_ack = __cpu_to_le32(arg->no_ack);
+}
+
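+/* Editor's note (illustrative sketch): a caller fills one
+ * wmi_wmm_params_arg per access category and lets the helper above
+ * serialize it, e.g.:
+ *
+ *     struct wmi_wmm_params_arg vo = {
+ *             .cwmin  = 2,
+ *             .cwmax  = 3,
+ *             .aifs   = 2,
+ *             .txop   = 47,   (802.11 TXOP units assumed)
+ *             .acm    = 0,
+ *             .no_ack = 0,
+ *     };
+ *
+ *     ath10k_wmi_set_wmm_param(&cmd->ac_vo, &vo);
+ */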
+static struct sk_buff *
+ath10k_wmi_op_gen_pdev_set_wmm(struct ath10k *ar,
+ const struct wmi_wmm_params_all_arg *arg)
+{
+ struct wmi_pdev_set_wmm_params *cmd;
+ struct sk_buff *skb;
+
+ skb = ath10k_wmi_alloc_skb(ar, sizeof(*cmd));
+ if (!skb)
+ return ERR_PTR(-ENOMEM);
+
+ cmd = (struct wmi_pdev_set_wmm_params *)skb->data;
+ ath10k_wmi_set_wmm_param(&cmd->ac_be, &arg->ac_be);
+ ath10k_wmi_set_wmm_param(&cmd->ac_bk, &arg->ac_bk);
+ ath10k_wmi_set_wmm_param(&cmd->ac_vi, &arg->ac_vi);
+ ath10k_wmi_set_wmm_param(&cmd->ac_vo, &arg->ac_vo);
+
+ ath10k_dbg(ar, ATH10K_DBG_WMI, "wmi pdev set wmm params\n");
+ return skb;
+}
+
+static struct sk_buff *
+ath10k_wmi_op_gen_request_stats(struct ath10k *ar, u32 stats_mask)
+{
+ struct wmi_request_stats_cmd *cmd;
+ struct sk_buff *skb;
+
+ skb = ath10k_wmi_alloc_skb(ar, sizeof(*cmd));
+ if (!skb)
+ return ERR_PTR(-ENOMEM);
+
+ cmd = (struct wmi_request_stats_cmd *)skb->data;
+ cmd->stats_id = __cpu_to_le32(stats_mask);
+
+ ath10k_dbg(ar, ATH10K_DBG_WMI, "wmi request stats 0x%08x\n",
+ stats_mask);
+ return skb;
+}
+
+static struct sk_buff *
+ath10k_wmi_op_gen_force_fw_hang(struct ath10k *ar,
+ enum wmi_force_fw_hang_type type, u32 delay_ms)
+{
+ struct wmi_force_fw_hang_cmd *cmd;
+ struct sk_buff *skb;
+
+ skb = ath10k_wmi_alloc_skb(ar, sizeof(*cmd));
+ if (!skb)
+ return ERR_PTR(-ENOMEM);
+
+ cmd = (struct wmi_force_fw_hang_cmd *)skb->data;
+ cmd->type = __cpu_to_le32(type);
+ cmd->delay_ms = __cpu_to_le32(delay_ms);
+
+ ath10k_dbg(ar, ATH10K_DBG_WMI, "wmi force fw hang %d delay %d\n",
+ type, delay_ms);
+ return skb;
+}
+
+static struct sk_buff *
+ath10k_wmi_op_gen_dbglog_cfg(struct ath10k *ar, u64 module_enable,
+ u32 log_level)
+{
+ struct wmi_dbglog_cfg_cmd *cmd;
+ struct sk_buff *skb;
+ u32 cfg;
+
+ skb = ath10k_wmi_alloc_skb(ar, sizeof(*cmd));
+ if (!skb)
+ return ERR_PTR(-ENOMEM);
+
+ cmd = (struct wmi_dbglog_cfg_cmd *)skb->data;
+
+ if (module_enable) {
+ cfg = SM(log_level,
+ ATH10K_DBGLOG_CFG_LOG_LVL);
+ } else {
+ /* restore defaults: all modules at WARN level */
+ cfg = SM(ATH10K_DBGLOG_LEVEL_WARN,
+ ATH10K_DBGLOG_CFG_LOG_LVL);
+ module_enable = ~0;
+ }
+
+ cmd->module_enable = __cpu_to_le32(module_enable);
+ cmd->module_valid = __cpu_to_le32(~0);
+ cmd->config_enable = __cpu_to_le32(cfg);
+ cmd->config_valid = __cpu_to_le32(ATH10K_DBGLOG_CFG_LOG_LVL_MASK);
+
+ ath10k_dbg(ar, ATH10K_DBG_WMI,
+ "wmi dbglog cfg modules %08x %08x config %08x %08x\n",
+ __le32_to_cpu(cmd->module_enable),
+ __le32_to_cpu(cmd->module_valid),
+ __le32_to_cpu(cmd->config_enable),
+ __le32_to_cpu(cmd->config_valid));
+ return skb;
+}
+
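+/* Editor's note: SM() is the usual ath10k shift-and-mask helper, roughly
+ * ((value << FIELD_LSB) & FIELD_MASK), so both dbglog variants pack the
+ * requested level into the LOG_LVL field of config_enable. Passing
+ * module_enable == 0 is the reset case: every module is re-enabled at
+ * WARN level rather than silenced.
+ */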
+static struct sk_buff *
+ath10k_wmi_10_4_op_gen_dbglog_cfg(struct ath10k *ar, u64 module_enable,
+ u32 log_level)
+{
+ struct wmi_10_4_dbglog_cfg_cmd *cmd;
+ struct sk_buff *skb;
+ u32 cfg;
+
+ skb = ath10k_wmi_alloc_skb(ar, sizeof(*cmd));
+ if (!skb)
+ return ERR_PTR(-ENOMEM);
+
+ cmd = (struct wmi_10_4_dbglog_cfg_cmd *)skb->data;
+
+ if (module_enable) {
+ cfg = SM(log_level,
+ ATH10K_DBGLOG_CFG_LOG_LVL);
+ } else {
+ /* restore defaults: all modules at WARN level */
+ cfg = SM(ATH10K_DBGLOG_LEVEL_WARN,
+ ATH10K_DBGLOG_CFG_LOG_LVL);
+ module_enable = ~0;
+ }
+
+ cmd->module_enable = __cpu_to_le64(module_enable);
+ cmd->module_valid = __cpu_to_le64(~0);
+ cmd->config_enable = __cpu_to_le32(cfg);
+ cmd->config_valid = __cpu_to_le32(ATH10K_DBGLOG_CFG_LOG_LVL_MASK);
+
+ ath10k_dbg(ar, ATH10K_DBG_WMI,
+ "wmi dbglog cfg modules 0x%016llx 0x%016llx config %08x %08x\n",
+ __le64_to_cpu(cmd->module_enable),
+ __le64_to_cpu(cmd->module_valid),
+ __le32_to_cpu(cmd->config_enable),
+ __le32_to_cpu(cmd->config_valid));
+ return skb;
+}
+
+static struct sk_buff *
+ath10k_wmi_op_gen_pktlog_enable(struct ath10k *ar, u32 ev_bitmap)
+{
+ struct wmi_pdev_pktlog_enable_cmd *cmd;
+ struct sk_buff *skb;
+
+ skb = ath10k_wmi_alloc_skb(ar, sizeof(*cmd));
+ if (!skb)
+ return ERR_PTR(-ENOMEM);
+
+ ev_bitmap &= ATH10K_PKTLOG_ANY;
+
+ cmd = (struct wmi_pdev_pktlog_enable_cmd *)skb->data;
+ cmd->ev_bitmap = __cpu_to_le32(ev_bitmap);
+
+ ath10k_dbg(ar, ATH10K_DBG_WMI, "wmi enable pktlog filter 0x%08x\n",
+ ev_bitmap);
+ return skb;
+}
+
+static struct sk_buff *
+ath10k_wmi_op_gen_pktlog_disable(struct ath10k *ar)
+{
+ struct sk_buff *skb;
+
+ skb = ath10k_wmi_alloc_skb(ar, 0);
+ if (!skb)
+ return ERR_PTR(-ENOMEM);
+
+ ath10k_dbg(ar, ATH10K_DBG_WMI, "wmi disable pktlog\n");
+ return skb;
+}
+
+static struct sk_buff *
+ath10k_wmi_op_gen_pdev_set_quiet_mode(struct ath10k *ar, u32 period,
+ u32 duration, u32 next_offset,
+ u32 enabled)
+{
+ struct wmi_pdev_set_quiet_cmd *cmd;
+ struct sk_buff *skb;
+
+ skb = ath10k_wmi_alloc_skb(ar, sizeof(*cmd));
+ if (!skb)
+ return ERR_PTR(-ENOMEM);
+
+ cmd = (struct wmi_pdev_set_quiet_cmd *)skb->data;
+ cmd->period = __cpu_to_le32(period);
+ cmd->duration = __cpu_to_le32(duration);
+ cmd->next_start = __cpu_to_le32(next_offset);
+ cmd->enabled = __cpu_to_le32(enabled);
+
+ ath10k_dbg(ar, ATH10K_DBG_WMI,
+ "wmi quiet param: period %u duration %u enabled %d\n",
+ period, duration, enabled);
+ return skb;
+}
+
+static struct sk_buff *
+ath10k_wmi_op_gen_addba_clear_resp(struct ath10k *ar, u32 vdev_id,
+ const u8 *mac)
+{
+ struct wmi_addba_clear_resp_cmd *cmd;
+ struct sk_buff *skb;
+
+ if (!mac)
+ return ERR_PTR(-EINVAL);
+
+ skb = ath10k_wmi_alloc_skb(ar, sizeof(*cmd));
+ if (!skb)
+ return ERR_PTR(-ENOMEM);
+
+ cmd = (struct wmi_addba_clear_resp_cmd *)skb->data;
+ cmd->vdev_id = __cpu_to_le32(vdev_id);
+ ether_addr_copy(cmd->peer_macaddr.addr, mac);
+
+ ath10k_dbg(ar, ATH10K_DBG_WMI,
+ "wmi addba clear resp vdev_id 0x%X mac_addr %pM\n",
+ vdev_id, mac);
+ return skb;
+}
+
+static struct sk_buff *
+ath10k_wmi_op_gen_addba_send(struct ath10k *ar, u32 vdev_id, const u8 *mac,
+ u32 tid, u32 buf_size)
+{
+ struct wmi_addba_send_cmd *cmd;
+ struct sk_buff *skb;
+
+ if (!mac)
+ return ERR_PTR(-EINVAL);
+
+ skb = ath10k_wmi_alloc_skb(ar, sizeof(*cmd));
+ if (!skb)
+ return ERR_PTR(-ENOMEM);
+
+ cmd = (struct wmi_addba_send_cmd *)skb->data;
+ cmd->vdev_id = __cpu_to_le32(vdev_id);
+ ether_addr_copy(cmd->peer_macaddr.addr, mac);
+ cmd->tid = __cpu_to_le32(tid);
+ cmd->buffersize = __cpu_to_le32(buf_size);
+
+ ath10k_dbg(ar, ATH10K_DBG_WMI,
+ "wmi addba send vdev_id 0x%X mac_addr %pM tid %u bufsize %u\n",
+ vdev_id, mac, tid, buf_size);
+ return skb;
+}
+
+static struct sk_buff *
+ath10k_wmi_op_gen_addba_set_resp(struct ath10k *ar, u32 vdev_id, const u8 *mac,
+ u32 tid, u32 status)
+{
+ struct wmi_addba_setresponse_cmd *cmd;
+ struct sk_buff *skb;
+
+ if (!mac)
+ return ERR_PTR(-EINVAL);
+
+ skb = ath10k_wmi_alloc_skb(ar, sizeof(*cmd));
+ if (!skb)
+ return ERR_PTR(-ENOMEM);
+
+ cmd = (struct wmi_addba_setresponse_cmd *)skb->data;
+ cmd->vdev_id = __cpu_to_le32(vdev_id);
+ ether_addr_copy(cmd->peer_macaddr.addr, mac);
+ cmd->tid = __cpu_to_le32(tid);
+ cmd->statuscode = __cpu_to_le32(status);
+
+ ath10k_dbg(ar, ATH10K_DBG_WMI,
+ "wmi addba set resp vdev_id 0x%X mac_addr %pM tid %u status %u\n",
+ vdev_id, mac, tid, status);
+ return skb;
+}
+
+static struct sk_buff *
+ath10k_wmi_op_gen_delba_send(struct ath10k *ar, u32 vdev_id, const u8 *mac,
+ u32 tid, u32 initiator, u32 reason)
+{
+ struct wmi_delba_send_cmd *cmd;
+ struct sk_buff *skb;
+
+ if (!mac)
+ return ERR_PTR(-EINVAL);
+
+ skb = ath10k_wmi_alloc_skb(ar, sizeof(*cmd));
+ if (!skb)
+ return ERR_PTR(-ENOMEM);
+
+ cmd = (struct wmi_delba_send_cmd *)skb->data;
+ cmd->vdev_id = __cpu_to_le32(vdev_id);
+ ether_addr_copy(cmd->peer_macaddr.addr, mac);
+ cmd->tid = __cpu_to_le32(tid);
+ cmd->initiator = __cpu_to_le32(initiator);
+ cmd->reasoncode = __cpu_to_le32(reason);
+
+ ath10k_dbg(ar, ATH10K_DBG_WMI,
+ "wmi delba send vdev_id 0x%X mac_addr %pM tid %u initiator %u reason %u\n",
+ vdev_id, mac, tid, initiator, reason);
+ return skb;
+}
+
+static struct sk_buff *
+ath10k_wmi_10_2_4_op_gen_pdev_get_tpc_config(struct ath10k *ar, u32 param)
+{
+ struct wmi_pdev_get_tpc_config_cmd *cmd;
+ struct sk_buff *skb;
+
+ skb = ath10k_wmi_alloc_skb(ar, sizeof(*cmd));
+ if (!skb)
+ return ERR_PTR(-ENOMEM);
+
+ cmd = (struct wmi_pdev_get_tpc_config_cmd *)skb->data;
+ cmd->param = __cpu_to_le32(param);
+
+ ath10k_dbg(ar, ATH10K_DBG_WMI,
+ "wmi pdev get tpc config param %d\n", param);
+ return skb;
+}
+
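+/*
+ * Count the entries on a firmware stats list. The list must be kept
+ * stable by the caller, typically by holding ar->data_lock as the
+ * fw_stats_fill implementations below do.
+ */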
+size_t ath10k_wmi_fw_stats_num_peers(struct list_head *head)
+{
+ struct ath10k_fw_stats_peer *i;
+ size_t num = 0;
+
+ list_for_each_entry(i, head, list)
+ ++num;
+
+ return num;
+}
+
+size_t ath10k_wmi_fw_stats_num_vdevs(struct list_head *head)
+{
+ struct ath10k_fw_stats_vdev *i;
+ size_t num = 0;
+
+ list_for_each_entry(i, head, list)
+ ++num;
+
+ return num;
+}
+
+static void
+ath10k_wmi_fw_pdev_base_stats_fill(const struct ath10k_fw_stats_pdev *pdev,
+ char *buf, u32 *length)
+{
+ u32 len = *length;
+ u32 buf_len = ATH10K_FW_STATS_BUF_SIZE;
+
+ len += scnprintf(buf + len, buf_len - len, "\n");
+ len += scnprintf(buf + len, buf_len - len, "%30s\n",
+ "ath10k PDEV stats");
+ len += scnprintf(buf + len, buf_len - len, "%30s\n\n",
+ "=================");
+
+ len += scnprintf(buf + len, buf_len - len, "%30s %10d\n",
+ "Channel noise floor", pdev->ch_noise_floor);
+ len += scnprintf(buf + len, buf_len - len, "%30s %10u\n",
+ "Channel TX power", pdev->chan_tx_power);
+ len += scnprintf(buf + len, buf_len - len, "%30s %10u\n",
+ "TX frame count", pdev->tx_frame_count);
+ len += scnprintf(buf + len, buf_len - len, "%30s %10u\n",
+ "RX frame count", pdev->rx_frame_count);
+ len += scnprintf(buf + len, buf_len - len, "%30s %10u\n",
+ "RX clear count", pdev->rx_clear_count);
+ len += scnprintf(buf + len, buf_len - len, "%30s %10u\n",
+ "Cycle count", pdev->cycle_count);
+ len += scnprintf(buf + len, buf_len - len, "%30s %10u\n",
+ "PHY error count", pdev->phy_err_count);
+
+ *length = len;
+}
+
+static void
+ath10k_wmi_fw_pdev_extra_stats_fill(const struct ath10k_fw_stats_pdev *pdev,
+ char *buf, u32 *length)
+{
+ u32 len = *length;
+ u32 buf_len = ATH10K_FW_STATS_BUF_SIZE;
+
+ len += scnprintf(buf + len, buf_len - len, "%30s %10u\n",
+ "RTS bad count", pdev->rts_bad);
+ len += scnprintf(buf + len, buf_len - len, "%30s %10u\n",
+ "RTS good count", pdev->rts_good);
+ len += scnprintf(buf + len, buf_len - len, "%30s %10u\n",
+ "FCS bad count", pdev->fcs_bad);
+ len += scnprintf(buf + len, buf_len - len, "%30s %10u\n",
+ "No beacon count", pdev->no_beacons);
+ len += scnprintf(buf + len, buf_len - len, "%30s %10u\n",
+ "MIB int count", pdev->mib_int_count);
+
+ len += scnprintf(buf + len, buf_len - len, "\n");
+ *length = len;
+}
+
+static void
+ath10k_wmi_fw_pdev_tx_stats_fill(const struct ath10k_fw_stats_pdev *pdev,
+ char *buf, u32 *length)
+{
+ u32 len = *length;
+ u32 buf_len = ATH10K_FW_STATS_BUF_SIZE;
+
+ len += scnprintf(buf + len, buf_len - len, "\n%30s\n",
+ "ath10k PDEV TX stats");
+ len += scnprintf(buf + len, buf_len - len, "%30s\n\n",
+ "=================");
+
+ len += scnprintf(buf + len, buf_len - len, "%30s %10d\n",
+ "HTT cookies queued", pdev->comp_queued);
+ len += scnprintf(buf + len, buf_len - len, "%30s %10d\n",
+ "HTT cookies disp.", pdev->comp_delivered);
+ len += scnprintf(buf + len, buf_len - len, "%30s %10d\n",
+ "MSDU queued", pdev->msdu_enqued);
+ len += scnprintf(buf + len, buf_len - len, "%30s %10d\n",
+ "MPDU queued", pdev->mpdu_enqued);
+ len += scnprintf(buf + len, buf_len - len, "%30s %10d\n",
+ "MSDUs dropped", pdev->wmm_drop);
+ len += scnprintf(buf + len, buf_len - len, "%30s %10d\n",
+ "Local enqued", pdev->local_enqued);
+ len += scnprintf(buf + len, buf_len - len, "%30s %10d\n",
+ "Local freed", pdev->local_freed);
+ len += scnprintf(buf + len, buf_len - len, "%30s %10d\n",
+ "HW queued", pdev->hw_queued);
+ len += scnprintf(buf + len, buf_len - len, "%30s %10d\n",
+ "PPDUs reaped", pdev->hw_reaped);
+ len += scnprintf(buf + len, buf_len - len, "%30s %10d\n",
+ "Num underruns", pdev->underrun);
+ len += scnprintf(buf + len, buf_len - len, "%30s %10d\n",
+ "PPDUs cleaned", pdev->tx_abort);
+ len += scnprintf(buf + len, buf_len - len, "%30s %10d\n",
+			 "MPDUs requeued", pdev->mpdus_requed);
+ len += scnprintf(buf + len, buf_len - len, "%30s %10d\n",
+ "Excessive retries", pdev->tx_ko);
+ len += scnprintf(buf + len, buf_len - len, "%30s %10d\n",
+ "HW rate", pdev->data_rc);
+ len += scnprintf(buf + len, buf_len - len, "%30s %10d\n",
+ "Sched self triggers", pdev->self_triggers);
+ len += scnprintf(buf + len, buf_len - len, "%30s %10d\n",
+ "Dropped due to SW retries",
+ pdev->sw_retry_failure);
+ len += scnprintf(buf + len, buf_len - len, "%30s %10d\n",
+ "Illegal rate phy errors",
+ pdev->illgl_rate_phy_err);
+ len += scnprintf(buf + len, buf_len - len, "%30s %10d\n",
+ "Pdev continuous xretry", pdev->pdev_cont_xretry);
+ len += scnprintf(buf + len, buf_len - len, "%30s %10d\n",
+ "TX timeout", pdev->pdev_tx_timeout);
+ len += scnprintf(buf + len, buf_len - len, "%30s %10d\n",
+ "PDEV resets", pdev->pdev_resets);
+ len += scnprintf(buf + len, buf_len - len, "%30s %10d\n",
+ "PHY underrun", pdev->phy_underrun);
+ len += scnprintf(buf + len, buf_len - len, "%30s %10d\n",
+ "MPDU is more than txop limit", pdev->txop_ovf);
+ *length = len;
+}
+
+static void
+ath10k_wmi_fw_pdev_rx_stats_fill(const struct ath10k_fw_stats_pdev *pdev,
+ char *buf, u32 *length)
+{
+ u32 len = *length;
+ u32 buf_len = ATH10K_FW_STATS_BUF_SIZE;
+
+ len += scnprintf(buf + len, buf_len - len, "\n%30s\n",
+ "ath10k PDEV RX stats");
+ len += scnprintf(buf + len, buf_len - len, "%30s\n\n",
+ "=================");
+
+ len += scnprintf(buf + len, buf_len - len, "%30s %10d\n",
+ "Mid PPDU route change",
+ pdev->mid_ppdu_route_change);
+ len += scnprintf(buf + len, buf_len - len, "%30s %10d\n",
+ "Tot. number of statuses", pdev->status_rcvd);
+	len += scnprintf(buf + len, buf_len - len, "%30s %10d\n",
+			 "Extra frags on ring 0", pdev->r0_frags);
+	len += scnprintf(buf + len, buf_len - len, "%30s %10d\n",
+			 "Extra frags on ring 1", pdev->r1_frags);
+	len += scnprintf(buf + len, buf_len - len, "%30s %10d\n",
+			 "Extra frags on ring 2", pdev->r2_frags);
+	len += scnprintf(buf + len, buf_len - len, "%30s %10d\n",
+			 "Extra frags on ring 3", pdev->r3_frags);
+ len += scnprintf(buf + len, buf_len - len, "%30s %10d\n",
+ "MSDUs delivered to HTT", pdev->htt_msdus);
+ len += scnprintf(buf + len, buf_len - len, "%30s %10d\n",
+ "MPDUs delivered to HTT", pdev->htt_mpdus);
+ len += scnprintf(buf + len, buf_len - len, "%30s %10d\n",
+ "MSDUs delivered to stack", pdev->loc_msdus);
+ len += scnprintf(buf + len, buf_len - len, "%30s %10d\n",
+ "MPDUs delivered to stack", pdev->loc_mpdus);
+ len += scnprintf(buf + len, buf_len - len, "%30s %10d\n",
+			 "Oversized A-MSDUs", pdev->oversize_amsdu);
+ len += scnprintf(buf + len, buf_len - len, "%30s %10d\n",
+ "PHY errors", pdev->phy_errs);
+ len += scnprintf(buf + len, buf_len - len, "%30s %10d\n",
+ "PHY errors drops", pdev->phy_err_drop);
+ len += scnprintf(buf + len, buf_len - len, "%30s %10d\n",
+ "MPDU errors (FCS, MIC, ENC)", pdev->mpdu_errs);
+ *length = len;
+}
+
+static void
+ath10k_wmi_fw_vdev_stats_fill(const struct ath10k_fw_stats_vdev *vdev,
+ char *buf, u32 *length)
+{
+ u32 len = *length;
+ u32 buf_len = ATH10K_FW_STATS_BUF_SIZE;
+ int i;
+
+ len += scnprintf(buf + len, buf_len - len, "%30s %u\n",
+ "vdev id", vdev->vdev_id);
+ len += scnprintf(buf + len, buf_len - len, "%30s %u\n",
+ "beacon snr", vdev->beacon_snr);
+ len += scnprintf(buf + len, buf_len - len, "%30s %u\n",
+ "data snr", vdev->data_snr);
+ len += scnprintf(buf + len, buf_len - len, "%30s %u\n",
+ "num rx frames", vdev->num_rx_frames);
+ len += scnprintf(buf + len, buf_len - len, "%30s %u\n",
+ "num rts fail", vdev->num_rts_fail);
+ len += scnprintf(buf + len, buf_len - len, "%30s %u\n",
+ "num rts success", vdev->num_rts_success);
+ len += scnprintf(buf + len, buf_len - len, "%30s %u\n",
+ "num rx err", vdev->num_rx_err);
+ len += scnprintf(buf + len, buf_len - len, "%30s %u\n",
+ "num rx discard", vdev->num_rx_discard);
+ len += scnprintf(buf + len, buf_len - len, "%30s %u\n",
+ "num tx not acked", vdev->num_tx_not_acked);
+
+	for (i = 0; i < ARRAY_SIZE(vdev->num_tx_frames); i++)
+ len += scnprintf(buf + len, buf_len - len,
+ "%25s [%02d] %u\n",
+ "num tx frames", i,
+ vdev->num_tx_frames[i]);
+
+	for (i = 0; i < ARRAY_SIZE(vdev->num_tx_frames_retries); i++)
+ len += scnprintf(buf + len, buf_len - len,
+ "%25s [%02d] %u\n",
+ "num tx frames retries", i,
+ vdev->num_tx_frames_retries[i]);
+
+	for (i = 0; i < ARRAY_SIZE(vdev->num_tx_frames_failures); i++)
+ len += scnprintf(buf + len, buf_len - len,
+ "%25s [%02d] %u\n",
+ "num tx frames failures", i,
+ vdev->num_tx_frames_failures[i]);
+
+	for (i = 0; i < ARRAY_SIZE(vdev->tx_rate_history); i++)
+ len += scnprintf(buf + len, buf_len - len,
+ "%25s [%02d] 0x%08x\n",
+ "tx rate history", i,
+ vdev->tx_rate_history[i]);
+
+	for (i = 0; i < ARRAY_SIZE(vdev->beacon_rssi_history); i++)
+ len += scnprintf(buf + len, buf_len - len,
+ "%25s [%02d] %u\n",
+ "beacon rssi history", i,
+ vdev->beacon_rssi_history[i]);
+
+ len += scnprintf(buf + len, buf_len - len, "\n");
+ *length = len;
+}
+
+static void
+ath10k_wmi_fw_peer_stats_fill(const struct ath10k_fw_stats_peer *peer,
+ char *buf, u32 *length)
+{
+ u32 len = *length;
+ u32 buf_len = ATH10K_FW_STATS_BUF_SIZE;
+
+ len += scnprintf(buf + len, buf_len - len, "%30s %pM\n",
+ "Peer MAC address", peer->peer_macaddr);
+ len += scnprintf(buf + len, buf_len - len, "%30s %u\n",
+ "Peer RSSI", peer->peer_rssi);
+ len += scnprintf(buf + len, buf_len - len, "%30s %u\n",
+ "Peer TX rate", peer->peer_tx_rate);
+ len += scnprintf(buf + len, buf_len - len, "%30s %u\n",
+ "Peer RX rate", peer->peer_rx_rate);
+ len += scnprintf(buf + len, buf_len - len, "%30s %u\n",
+ "Peer RX duration", peer->rx_duration);
+
+ len += scnprintf(buf + len, buf_len - len, "\n");
+ *length = len;
+}
+
+void ath10k_wmi_main_op_fw_stats_fill(struct ath10k *ar,
+ struct ath10k_fw_stats *fw_stats,
+ char *buf)
+{
+ u32 len = 0;
+ u32 buf_len = ATH10K_FW_STATS_BUF_SIZE;
+ const struct ath10k_fw_stats_pdev *pdev;
+ const struct ath10k_fw_stats_vdev *vdev;
+ const struct ath10k_fw_stats_peer *peer;
+ size_t num_peers;
+ size_t num_vdevs;
+
+ spin_lock_bh(&ar->data_lock);
+
+ pdev = list_first_entry_or_null(&fw_stats->pdevs,
+ struct ath10k_fw_stats_pdev, list);
+ if (!pdev) {
+ ath10k_warn(ar, "failed to get pdev stats\n");
+ goto unlock;
+ }
+
+ num_peers = ath10k_wmi_fw_stats_num_peers(&fw_stats->peers);
+ num_vdevs = ath10k_wmi_fw_stats_num_vdevs(&fw_stats->vdevs);
+
+ ath10k_wmi_fw_pdev_base_stats_fill(pdev, buf, &len);
+ ath10k_wmi_fw_pdev_tx_stats_fill(pdev, buf, &len);
+ ath10k_wmi_fw_pdev_rx_stats_fill(pdev, buf, &len);
+
+ len += scnprintf(buf + len, buf_len - len, "\n");
+ len += scnprintf(buf + len, buf_len - len, "%30s (%zu)\n",
+ "ath10k VDEV stats", num_vdevs);
+ len += scnprintf(buf + len, buf_len - len, "%30s\n\n",
+ "=================");
+
+ list_for_each_entry(vdev, &fw_stats->vdevs, list) {
+ ath10k_wmi_fw_vdev_stats_fill(vdev, buf, &len);
+ }
+
+ len += scnprintf(buf + len, buf_len - len, "\n");
+ len += scnprintf(buf + len, buf_len - len, "%30s (%zu)\n",
+ "ath10k PEER stats", num_peers);
+ len += scnprintf(buf + len, buf_len - len, "%30s\n\n",
+ "=================");
+
+ list_for_each_entry(peer, &fw_stats->peers, list) {
+ ath10k_wmi_fw_peer_stats_fill(peer, buf, &len);
+ }
+
+unlock:
+ spin_unlock_bh(&ar->data_lock);
+
+ if (len >= buf_len)
+ buf[len - 1] = 0;
+ else
+ buf[len] = 0;
+}
+
+void ath10k_wmi_10x_op_fw_stats_fill(struct ath10k *ar,
+ struct ath10k_fw_stats *fw_stats,
+ char *buf)
+{
+ unsigned int len = 0;
+ unsigned int buf_len = ATH10K_FW_STATS_BUF_SIZE;
+ const struct ath10k_fw_stats_pdev *pdev;
+ const struct ath10k_fw_stats_vdev *vdev;
+ const struct ath10k_fw_stats_peer *peer;
+ size_t num_peers;
+ size_t num_vdevs;
+
+ spin_lock_bh(&ar->data_lock);
+
+ pdev = list_first_entry_or_null(&fw_stats->pdevs,
+ struct ath10k_fw_stats_pdev, list);
+ if (!pdev) {
+ ath10k_warn(ar, "failed to get pdev stats\n");
+ goto unlock;
+ }
+
+ num_peers = ath10k_wmi_fw_stats_num_peers(&fw_stats->peers);
+ num_vdevs = ath10k_wmi_fw_stats_num_vdevs(&fw_stats->vdevs);
+
+ ath10k_wmi_fw_pdev_base_stats_fill(pdev, buf, &len);
+ ath10k_wmi_fw_pdev_extra_stats_fill(pdev, buf, &len);
+ ath10k_wmi_fw_pdev_tx_stats_fill(pdev, buf, &len);
+ ath10k_wmi_fw_pdev_rx_stats_fill(pdev, buf, &len);
+
+ len += scnprintf(buf + len, buf_len - len, "\n");
+ len += scnprintf(buf + len, buf_len - len, "%30s (%zu)\n",
+ "ath10k VDEV stats", num_vdevs);
+ len += scnprintf(buf + len, buf_len - len, "%30s\n\n",
+ "=================");
+
+ list_for_each_entry(vdev, &fw_stats->vdevs, list) {
+ ath10k_wmi_fw_vdev_stats_fill(vdev, buf, &len);
+ }
+
+ len += scnprintf(buf + len, buf_len - len, "\n");
+ len += scnprintf(buf + len, buf_len - len, "%30s (%zu)\n",
+ "ath10k PEER stats", num_peers);
+ len += scnprintf(buf + len, buf_len - len, "%30s\n\n",
+ "=================");
+
+ list_for_each_entry(peer, &fw_stats->peers, list) {
+ ath10k_wmi_fw_peer_stats_fill(peer, buf, &len);
+ }
+
+unlock:
+ spin_unlock_bh(&ar->data_lock);
+
+ if (len >= buf_len)
+ buf[len - 1] = 0;
+ else
+ buf[len] = 0;
+}
+
+static struct sk_buff *
+ath10k_wmi_op_gen_pdev_enable_adaptive_cca(struct ath10k *ar, u8 enable,
+ u32 detect_level, u32 detect_margin)
+{
+ struct wmi_pdev_set_adaptive_cca_params *cmd;
+ struct sk_buff *skb;
+
+ skb = ath10k_wmi_alloc_skb(ar, sizeof(*cmd));
+ if (!skb)
+ return ERR_PTR(-ENOMEM);
+
+ cmd = (struct wmi_pdev_set_adaptive_cca_params *)skb->data;
+ cmd->enable = __cpu_to_le32(enable);
+ cmd->cca_detect_level = __cpu_to_le32(detect_level);
+ cmd->cca_detect_margin = __cpu_to_le32(detect_margin);
+
+ ath10k_dbg(ar, ATH10K_DBG_WMI,
+ "wmi pdev set adaptive cca params enable:%d detection level:%d detection margin:%d\n",
+ enable, detect_level, detect_margin);
+ return skb;
+}
+
+static void
+ath10k_wmi_fw_vdev_stats_extd_fill(const struct ath10k_fw_stats_vdev_extd *vdev,
+ char *buf, u32 *length)
+{
+ u32 len = *length;
+ u32 buf_len = ATH10K_FW_STATS_BUF_SIZE;
+ u32 val;
+
+ len += scnprintf(buf + len, buf_len - len, "%30s %u\n",
+ "vdev id", vdev->vdev_id);
+ len += scnprintf(buf + len, buf_len - len, "%30s %u\n",
+ "ppdu aggr count", vdev->ppdu_aggr_cnt);
+ len += scnprintf(buf + len, buf_len - len, "%30s %u\n",
+ "ppdu noack", vdev->ppdu_noack);
+ len += scnprintf(buf + len, buf_len - len, "%30s %u\n",
+ "mpdu queued", vdev->mpdu_queued);
+ len += scnprintf(buf + len, buf_len - len, "%30s %u\n",
+ "ppdu nonaggr count", vdev->ppdu_nonaggr_cnt);
+ len += scnprintf(buf + len, buf_len - len, "%30s %u\n",
+ "mpdu sw requeued", vdev->mpdu_sw_requeued);
+ len += scnprintf(buf + len, buf_len - len, "%30s %u\n",
+ "mpdu success retry", vdev->mpdu_suc_retry);
+ len += scnprintf(buf + len, buf_len - len, "%30s %u\n",
+ "mpdu success multitry", vdev->mpdu_suc_multitry);
+ len += scnprintf(buf + len, buf_len - len, "%30s %u\n",
+ "mpdu fail retry", vdev->mpdu_fail_retry);
+ val = vdev->tx_ftm_suc;
+ if (val & WMI_VDEV_STATS_FTM_COUNT_VALID)
+ len += scnprintf(buf + len, buf_len - len, "%30s %u\n",
+ "tx ftm success",
+ MS(val, WMI_VDEV_STATS_FTM_COUNT));
+ val = vdev->tx_ftm_suc_retry;
+ if (val & WMI_VDEV_STATS_FTM_COUNT_VALID)
+ len += scnprintf(buf + len, buf_len - len, "%30s %u\n",
+ "tx ftm success retry",
+ MS(val, WMI_VDEV_STATS_FTM_COUNT));
+ val = vdev->tx_ftm_fail;
+ if (val & WMI_VDEV_STATS_FTM_COUNT_VALID)
+ len += scnprintf(buf + len, buf_len - len, "%30s %u\n",
+ "tx ftm fail",
+ MS(val, WMI_VDEV_STATS_FTM_COUNT));
+ val = vdev->rx_ftmr_cnt;
+ if (val & WMI_VDEV_STATS_FTM_COUNT_VALID)
+ len += scnprintf(buf + len, buf_len - len, "%30s %u\n",
+ "rx ftm request count",
+ MS(val, WMI_VDEV_STATS_FTM_COUNT));
+ val = vdev->rx_ftmr_dup_cnt;
+ if (val & WMI_VDEV_STATS_FTM_COUNT_VALID)
+ len += scnprintf(buf + len, buf_len - len, "%30s %u\n",
+ "rx ftm request dup count",
+ MS(val, WMI_VDEV_STATS_FTM_COUNT));
+ val = vdev->rx_iftmr_cnt;
+ if (val & WMI_VDEV_STATS_FTM_COUNT_VALID)
+ len += scnprintf(buf + len, buf_len - len, "%30s %u\n",
+ "rx initial ftm req count",
+ MS(val, WMI_VDEV_STATS_FTM_COUNT));
+ val = vdev->rx_iftmr_dup_cnt;
+ if (val & WMI_VDEV_STATS_FTM_COUNT_VALID)
+ len += scnprintf(buf + len, buf_len - len, "%30s %u\n",
+ "rx initial ftm req dup cnt",
+ MS(val, WMI_VDEV_STATS_FTM_COUNT));
+ len += scnprintf(buf + len, buf_len - len, "\n");
+
+ *length = len;
+}
+
+void ath10k_wmi_10_4_op_fw_stats_fill(struct ath10k *ar,
+ struct ath10k_fw_stats *fw_stats,
+ char *buf)
+{
+ u32 len = 0;
+ u32 buf_len = ATH10K_FW_STATS_BUF_SIZE;
+ const struct ath10k_fw_stats_pdev *pdev;
+ const struct ath10k_fw_stats_vdev_extd *vdev;
+ const struct ath10k_fw_stats_peer *peer;
+ size_t num_peers;
+ size_t num_vdevs;
+
+ spin_lock_bh(&ar->data_lock);
+
+ pdev = list_first_entry_or_null(&fw_stats->pdevs,
+ struct ath10k_fw_stats_pdev, list);
+ if (!pdev) {
+ ath10k_warn(ar, "failed to get pdev stats\n");
+ goto unlock;
+ }
+
+ num_peers = ath10k_wmi_fw_stats_num_peers(&fw_stats->peers);
+ num_vdevs = ath10k_wmi_fw_stats_num_vdevs(&fw_stats->vdevs);
+
+ ath10k_wmi_fw_pdev_base_stats_fill(pdev, buf, &len);
+ ath10k_wmi_fw_pdev_extra_stats_fill(pdev, buf, &len);
+ ath10k_wmi_fw_pdev_tx_stats_fill(pdev, buf, &len);
+
+ len += scnprintf(buf + len, buf_len - len, "%30s %10d\n",
+ "HW paused", pdev->hw_paused);
+ len += scnprintf(buf + len, buf_len - len, "%30s %10d\n",
+ "Seqs posted", pdev->seq_posted);
+ len += scnprintf(buf + len, buf_len - len, "%30s %10d\n",
+ "Seqs failed queueing", pdev->seq_failed_queueing);
+ len += scnprintf(buf + len, buf_len - len, "%30s %10d\n",
+ "Seqs completed", pdev->seq_completed);
+ len += scnprintf(buf + len, buf_len - len, "%30s %10d\n",
+ "Seqs restarted", pdev->seq_restarted);
+ len += scnprintf(buf + len, buf_len - len, "%30s %10d\n",
+ "MU Seqs posted", pdev->mu_seq_posted);
+ len += scnprintf(buf + len, buf_len - len, "%30s %10d\n",
+ "MPDUs SW flushed", pdev->mpdus_sw_flush);
+ len += scnprintf(buf + len, buf_len - len, "%30s %10d\n",
+ "MPDUs HW filtered", pdev->mpdus_hw_filter);
+ len += scnprintf(buf + len, buf_len - len, "%30s %10d\n",
+ "MPDUs truncated", pdev->mpdus_truncated);
+ len += scnprintf(buf + len, buf_len - len, "%30s %10d\n",
+ "MPDUs receive no ACK", pdev->mpdus_ack_failed);
+ len += scnprintf(buf + len, buf_len - len, "%30s %10d\n",
+ "MPDUs expired", pdev->mpdus_expired);
+
+ ath10k_wmi_fw_pdev_rx_stats_fill(pdev, buf, &len);
+ len += scnprintf(buf + len, buf_len - len, "%30s %10d\n",
+ "Num Rx Overflow errors", pdev->rx_ovfl_errs);
+
+ len += scnprintf(buf + len, buf_len - len, "\n");
+ len += scnprintf(buf + len, buf_len - len, "%30s (%zu)\n",
+ "ath10k VDEV stats", num_vdevs);
+ len += scnprintf(buf + len, buf_len - len, "%30s\n\n",
+ "=================");
+ list_for_each_entry(vdev, &fw_stats->vdevs, list) {
+ ath10k_wmi_fw_vdev_stats_extd_fill(vdev, buf, &len);
+ }
+
+ len += scnprintf(buf + len, buf_len - len, "\n");
+ len += scnprintf(buf + len, buf_len - len, "%30s (%zu)\n",
+ "ath10k PEER stats", num_peers);
+ len += scnprintf(buf + len, buf_len - len, "%30s\n\n",
+ "=================");
+
+ list_for_each_entry(peer, &fw_stats->peers, list) {
+ ath10k_wmi_fw_peer_stats_fill(peer, buf, &len);
+ }
+
+unlock:
+ spin_unlock_bh(&ar->data_lock);
+
+ if (len >= buf_len)
+ buf[len - 1] = 0;
+ else
+ buf[len] = 0;
+}
+
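+/*
+ * Translate the driver-generic vdev subtype into the value expected by
+ * a particular firmware branch; subtypes the branch does not implement
+ * are reported as unsupported.
+ */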
+int ath10k_wmi_op_get_vdev_subtype(struct ath10k *ar,
+ enum wmi_vdev_subtype subtype)
+{
+ switch (subtype) {
+ case WMI_VDEV_SUBTYPE_NONE:
+ return WMI_VDEV_SUBTYPE_LEGACY_NONE;
+ case WMI_VDEV_SUBTYPE_P2P_DEVICE:
+ return WMI_VDEV_SUBTYPE_LEGACY_P2P_DEV;
+ case WMI_VDEV_SUBTYPE_P2P_CLIENT:
+ return WMI_VDEV_SUBTYPE_LEGACY_P2P_CLI;
+ case WMI_VDEV_SUBTYPE_P2P_GO:
+ return WMI_VDEV_SUBTYPE_LEGACY_P2P_GO;
+ case WMI_VDEV_SUBTYPE_PROXY_STA:
+ return WMI_VDEV_SUBTYPE_LEGACY_PROXY_STA;
+ case WMI_VDEV_SUBTYPE_MESH_11S:
+ case WMI_VDEV_SUBTYPE_MESH_NON_11S:
+ return -ENOTSUPP;
+ }
+ return -ENOTSUPP;
+}
+
+static int ath10k_wmi_10_2_4_op_get_vdev_subtype(struct ath10k *ar,
+ enum wmi_vdev_subtype subtype)
+{
+ switch (subtype) {
+ case WMI_VDEV_SUBTYPE_NONE:
+ return WMI_VDEV_SUBTYPE_10_2_4_NONE;
+ case WMI_VDEV_SUBTYPE_P2P_DEVICE:
+ return WMI_VDEV_SUBTYPE_10_2_4_P2P_DEV;
+ case WMI_VDEV_SUBTYPE_P2P_CLIENT:
+ return WMI_VDEV_SUBTYPE_10_2_4_P2P_CLI;
+ case WMI_VDEV_SUBTYPE_P2P_GO:
+ return WMI_VDEV_SUBTYPE_10_2_4_P2P_GO;
+ case WMI_VDEV_SUBTYPE_PROXY_STA:
+ return WMI_VDEV_SUBTYPE_10_2_4_PROXY_STA;
+ case WMI_VDEV_SUBTYPE_MESH_11S:
+ return WMI_VDEV_SUBTYPE_10_2_4_MESH_11S;
+ case WMI_VDEV_SUBTYPE_MESH_NON_11S:
+ return -ENOTSUPP;
+ }
+ return -ENOTSUPP;
+}
+
+static int ath10k_wmi_10_4_op_get_vdev_subtype(struct ath10k *ar,
+ enum wmi_vdev_subtype subtype)
+{
+ switch (subtype) {
+ case WMI_VDEV_SUBTYPE_NONE:
+ return WMI_VDEV_SUBTYPE_10_4_NONE;
+ case WMI_VDEV_SUBTYPE_P2P_DEVICE:
+ return WMI_VDEV_SUBTYPE_10_4_P2P_DEV;
+ case WMI_VDEV_SUBTYPE_P2P_CLIENT:
+ return WMI_VDEV_SUBTYPE_10_4_P2P_CLI;
+ case WMI_VDEV_SUBTYPE_P2P_GO:
+ return WMI_VDEV_SUBTYPE_10_4_P2P_GO;
+ case WMI_VDEV_SUBTYPE_PROXY_STA:
+ return WMI_VDEV_SUBTYPE_10_4_PROXY_STA;
+ case WMI_VDEV_SUBTYPE_MESH_11S:
+ return WMI_VDEV_SUBTYPE_10_4_MESH_11S;
+ case WMI_VDEV_SUBTYPE_MESH_NON_11S:
+ return WMI_VDEV_SUBTYPE_10_4_MESH_NON_11S;
+ }
+ return -ENOTSUPP;
+}
+
+static struct sk_buff *
+ath10k_wmi_10_4_ext_resource_config(struct ath10k *ar,
+ enum wmi_host_platform_type type,
+ u32 fw_feature_bitmap)
+{
+ struct wmi_ext_resource_config_10_4_cmd *cmd;
+ struct sk_buff *skb;
+ u32 num_tdls_sleep_sta = 0;
+
+ skb = ath10k_wmi_alloc_skb(ar, sizeof(*cmd));
+ if (!skb)
+ return ERR_PTR(-ENOMEM);
+
+ if (test_bit(WMI_SERVICE_TDLS_UAPSD_SLEEP_STA, ar->wmi.svc_map))
+ num_tdls_sleep_sta = TARGET_10_4_NUM_TDLS_SLEEP_STA;
+
+ cmd = (struct wmi_ext_resource_config_10_4_cmd *)skb->data;
+ cmd->host_platform_config = __cpu_to_le32(type);
+ cmd->fw_feature_bitmap = __cpu_to_le32(fw_feature_bitmap);
+ cmd->wlan_gpio_priority = __cpu_to_le32(-1);
+ cmd->coex_version = __cpu_to_le32(WMI_NO_COEX_VERSION_SUPPORT);
+ cmd->coex_gpio_pin1 = __cpu_to_le32(-1);
+ cmd->coex_gpio_pin2 = __cpu_to_le32(-1);
+ cmd->coex_gpio_pin3 = __cpu_to_le32(-1);
+ cmd->num_tdls_vdevs = __cpu_to_le32(TARGET_10_4_NUM_TDLS_VDEVS);
+ cmd->num_tdls_conn_table_entries = __cpu_to_le32(20);
+ cmd->max_tdls_concurrent_sleep_sta = __cpu_to_le32(num_tdls_sleep_sta);
+ cmd->max_tdls_concurrent_buffer_sta =
+ __cpu_to_le32(TARGET_10_4_NUM_TDLS_BUFFER_STA);
+
+ ath10k_dbg(ar, ATH10K_DBG_WMI,
+ "wmi ext resource config host type %d firmware feature bitmap %08x\n",
+ type, fw_feature_bitmap);
+ return skb;
+}
+
+static struct sk_buff *
+ath10k_wmi_10_4_gen_update_fw_tdls_state(struct ath10k *ar, u32 vdev_id,
+ enum wmi_tdls_state state)
+{
+ struct wmi_10_4_tdls_set_state_cmd *cmd;
+ struct sk_buff *skb;
+ u32 options = 0;
+
+ skb = ath10k_wmi_alloc_skb(ar, sizeof(*cmd));
+ if (!skb)
+ return ERR_PTR(-ENOMEM);
+
+ if (test_bit(WMI_SERVICE_TDLS_EXPLICIT_MODE_ONLY, ar->wmi.svc_map) &&
+ state == WMI_TDLS_ENABLE_ACTIVE)
+ state = WMI_TDLS_ENABLE_PASSIVE;
+
+ if (test_bit(WMI_SERVICE_TDLS_UAPSD_BUFFER_STA, ar->wmi.svc_map))
+ options |= WMI_TDLS_BUFFER_STA_EN;
+
+ cmd = (struct wmi_10_4_tdls_set_state_cmd *)skb->data;
+ cmd->vdev_id = __cpu_to_le32(vdev_id);
+ cmd->state = __cpu_to_le32(state);
+ cmd->notification_interval_ms = __cpu_to_le32(5000);
+ cmd->tx_discovery_threshold = __cpu_to_le32(100);
+ cmd->tx_teardown_threshold = __cpu_to_le32(5);
+ cmd->rssi_teardown_threshold = __cpu_to_le32(-75);
+ cmd->rssi_delta = __cpu_to_le32(-20);
+ cmd->tdls_options = __cpu_to_le32(options);
+ cmd->tdls_peer_traffic_ind_window = __cpu_to_le32(2);
+ cmd->tdls_peer_traffic_response_timeout_ms = __cpu_to_le32(5000);
+ cmd->tdls_puapsd_mask = __cpu_to_le32(0xf);
+ cmd->tdls_puapsd_inactivity_time_ms = __cpu_to_le32(0);
+ cmd->tdls_puapsd_rx_frame_threshold = __cpu_to_le32(10);
+ cmd->teardown_notification_ms = __cpu_to_le32(10);
+ cmd->tdls_peer_kickout_threshold = __cpu_to_le32(96);
+
+ ath10k_dbg(ar, ATH10K_DBG_WMI, "wmi update fw tdls state %d for vdev %i\n",
+ state, vdev_id);
+ return skb;
+}
+
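+/*
+ * Translate the U-APSD queue flags and maximum service period from the
+ * WMM information element into the WMI TDLS peer_qos bitmap.
+ */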
+static u32 ath10k_wmi_prepare_peer_qos(u8 uapsd_queues, u8 sp)
+{
+ u32 peer_qos = 0;
+
+ if (uapsd_queues & IEEE80211_WMM_IE_STA_QOSINFO_AC_VO)
+ peer_qos |= WMI_TDLS_PEER_QOS_AC_VO;
+ if (uapsd_queues & IEEE80211_WMM_IE_STA_QOSINFO_AC_VI)
+ peer_qos |= WMI_TDLS_PEER_QOS_AC_VI;
+ if (uapsd_queues & IEEE80211_WMM_IE_STA_QOSINFO_AC_BK)
+ peer_qos |= WMI_TDLS_PEER_QOS_AC_BK;
+ if (uapsd_queues & IEEE80211_WMM_IE_STA_QOSINFO_AC_BE)
+ peer_qos |= WMI_TDLS_PEER_QOS_AC_BE;
+
+ peer_qos |= SM(sp, WMI_TDLS_PEER_SP);
+
+ return peer_qos;
+}
+
+static struct sk_buff *
+ath10k_wmi_10_4_op_gen_pdev_get_tpc_table_cmdid(struct ath10k *ar, u32 param)
+{
+ struct wmi_pdev_get_tpc_table_cmd *cmd;
+ struct sk_buff *skb;
+
+ skb = ath10k_wmi_alloc_skb(ar, sizeof(*cmd));
+ if (!skb)
+ return ERR_PTR(-ENOMEM);
+
+ cmd = (struct wmi_pdev_get_tpc_table_cmd *)skb->data;
+ cmd->param = __cpu_to_le32(param);
+
+ ath10k_dbg(ar, ATH10K_DBG_WMI,
+ "wmi pdev get tpc table param:%d\n", param);
+ return skb;
+}
+
+static struct sk_buff *
+ath10k_wmi_10_4_gen_tdls_peer_update(struct ath10k *ar,
+ const struct wmi_tdls_peer_update_cmd_arg *arg,
+ const struct wmi_tdls_peer_capab_arg *cap,
+ const struct wmi_channel_arg *chan_arg)
+{
+ struct wmi_10_4_tdls_peer_update_cmd *cmd;
+ struct wmi_tdls_peer_capabilities *peer_cap;
+ struct wmi_channel *chan;
+ struct sk_buff *skb;
+ u32 peer_qos;
+ int len, chan_len;
+ int i;
+
+	/* The TDLS peer update command already carries a placeholder for
+	 * one channel, so extra space is only needed for the remaining
+	 * (peer_chan_len - 1) channels.
+	 */
+ chan_len = cap->peer_chan_len ? (cap->peer_chan_len - 1) : 0;
+
+ len = sizeof(*cmd) + chan_len * sizeof(*chan);
+
+ skb = ath10k_wmi_alloc_skb(ar, len);
+ if (!skb)
+ return ERR_PTR(-ENOMEM);
+
+ memset(skb->data, 0, sizeof(*cmd));
+
+ cmd = (struct wmi_10_4_tdls_peer_update_cmd *)skb->data;
+ cmd->vdev_id = __cpu_to_le32(arg->vdev_id);
+ ether_addr_copy(cmd->peer_macaddr.addr, arg->addr);
+ cmd->peer_state = __cpu_to_le32(arg->peer_state);
+
+ peer_qos = ath10k_wmi_prepare_peer_qos(cap->peer_uapsd_queues,
+ cap->peer_max_sp);
+
+ peer_cap = &cmd->peer_capab;
+ peer_cap->peer_qos = __cpu_to_le32(peer_qos);
+ peer_cap->buff_sta_support = __cpu_to_le32(cap->buff_sta_support);
+ peer_cap->off_chan_support = __cpu_to_le32(cap->off_chan_support);
+ peer_cap->peer_curr_operclass = __cpu_to_le32(cap->peer_curr_operclass);
+ peer_cap->self_curr_operclass = __cpu_to_le32(cap->self_curr_operclass);
+ peer_cap->peer_chan_len = __cpu_to_le32(cap->peer_chan_len);
+ peer_cap->peer_operclass_len = __cpu_to_le32(cap->peer_operclass_len);
+
+ for (i = 0; i < WMI_TDLS_MAX_SUPP_OPER_CLASSES; i++)
+ peer_cap->peer_operclass[i] = cap->peer_operclass[i];
+
+ peer_cap->is_peer_responder = __cpu_to_le32(cap->is_peer_responder);
+ peer_cap->pref_offchan_num = __cpu_to_le32(cap->pref_offchan_num);
+ peer_cap->pref_offchan_bw = __cpu_to_le32(cap->pref_offchan_bw);
+
+ for (i = 0; i < cap->peer_chan_len; i++) {
+ chan = (struct wmi_channel *)&peer_cap->peer_chan_list[i];
+ ath10k_wmi_put_wmi_channel(chan, &chan_arg[i]);
+ }
+
+ ath10k_dbg(ar, ATH10K_DBG_WMI,
+ "wmi tdls peer update vdev %i state %d n_chans %u\n",
+ arg->vdev_id, arg->peer_state, cap->peer_chan_len);
+ return skb;
+}
+
+static struct sk_buff *
+ath10k_wmi_10_4_gen_radar_found(struct ath10k *ar,
+ const struct ath10k_radar_found_info *arg)
+{
+ struct wmi_radar_found_info *cmd;
+ struct sk_buff *skb;
+
+ skb = ath10k_wmi_alloc_skb(ar, sizeof(*cmd));
+ if (!skb)
+ return ERR_PTR(-ENOMEM);
+
+ cmd = (struct wmi_radar_found_info *)skb->data;
+ cmd->pri_min = __cpu_to_le32(arg->pri_min);
+ cmd->pri_max = __cpu_to_le32(arg->pri_max);
+ cmd->width_min = __cpu_to_le32(arg->width_min);
+ cmd->width_max = __cpu_to_le32(arg->width_max);
+ cmd->sidx_min = __cpu_to_le32(arg->sidx_min);
+ cmd->sidx_max = __cpu_to_le32(arg->sidx_max);
+
+ ath10k_dbg(ar, ATH10K_DBG_WMI,
+ "wmi radar found pri_min %d pri_max %d width_min %d width_max %d sidx_min %d sidx_max %d\n",
+ arg->pri_min, arg->pri_max, arg->width_min,
+ arg->width_max, arg->sidx_min, arg->sidx_max);
+ return skb;
+}
+
+static struct sk_buff *
+ath10k_wmi_op_gen_echo(struct ath10k *ar, u32 value)
+{
+ struct wmi_echo_cmd *cmd;
+ struct sk_buff *skb;
+
+ skb = ath10k_wmi_alloc_skb(ar, sizeof(*cmd));
+ if (!skb)
+ return ERR_PTR(-ENOMEM);
+
+ cmd = (struct wmi_echo_cmd *)skb->data;
+ cmd->value = cpu_to_le32(value);
+
+ ath10k_dbg(ar, ATH10K_DBG_WMI,
+ "wmi echo value 0x%08x\n", value);
+ return skb;
+}
+
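+/*
+ * Drain the WMI command path: submit an echo command and wait for the
+ * matching echo event. Since commands are processed in order, all
+ * commands submitted before the barrier have reached the firmware once
+ * the echo event arrives.
+ */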
+int
+ath10k_wmi_barrier(struct ath10k *ar)
+{
+ int ret;
+ int time_left;
+
+ spin_lock_bh(&ar->data_lock);
+ reinit_completion(&ar->wmi.barrier);
+ spin_unlock_bh(&ar->data_lock);
+
+ ret = ath10k_wmi_echo(ar, ATH10K_WMI_BARRIER_ECHO_ID);
+ if (ret) {
+ ath10k_warn(ar, "failed to submit wmi echo: %d\n", ret);
+ return ret;
+ }
+
+ time_left = wait_for_completion_timeout(&ar->wmi.barrier,
+ ATH10K_WMI_BARRIER_TIMEOUT_HZ);
+ if (!time_left)
+ return -ETIMEDOUT;
+
+ return 0;
+}
+
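+/*
+ * Per-firmware-branch op tables. Callbacks that are left unset (see
+ * the "not implemented" markers) are reported as unsupported by the
+ * wmi-ops wrappers.
+ */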
+static const struct wmi_ops wmi_ops = {
+ .rx = ath10k_wmi_op_rx,
+ .map_svc = wmi_main_svc_map,
+
+ .pull_scan = ath10k_wmi_op_pull_scan_ev,
+ .pull_mgmt_rx = ath10k_wmi_op_pull_mgmt_rx_ev,
+ .pull_ch_info = ath10k_wmi_op_pull_ch_info_ev,
+ .pull_vdev_start = ath10k_wmi_op_pull_vdev_start_ev,
+ .pull_peer_kick = ath10k_wmi_op_pull_peer_kick_ev,
+ .pull_swba = ath10k_wmi_op_pull_swba_ev,
+ .pull_phyerr_hdr = ath10k_wmi_op_pull_phyerr_ev_hdr,
+ .pull_phyerr = ath10k_wmi_op_pull_phyerr_ev,
+ .pull_svc_rdy = ath10k_wmi_main_op_pull_svc_rdy_ev,
+ .pull_rdy = ath10k_wmi_op_pull_rdy_ev,
+ .pull_fw_stats = ath10k_wmi_main_op_pull_fw_stats,
+ .pull_roam_ev = ath10k_wmi_op_pull_roam_ev,
+ .pull_echo_ev = ath10k_wmi_op_pull_echo_ev,
+
+ .gen_pdev_suspend = ath10k_wmi_op_gen_pdev_suspend,
+ .gen_pdev_resume = ath10k_wmi_op_gen_pdev_resume,
+ .gen_pdev_set_rd = ath10k_wmi_op_gen_pdev_set_rd,
+ .gen_pdev_set_param = ath10k_wmi_op_gen_pdev_set_param,
+ .gen_init = ath10k_wmi_op_gen_init,
+ .gen_start_scan = ath10k_wmi_op_gen_start_scan,
+ .gen_stop_scan = ath10k_wmi_op_gen_stop_scan,
+ .gen_vdev_create = ath10k_wmi_op_gen_vdev_create,
+ .gen_vdev_delete = ath10k_wmi_op_gen_vdev_delete,
+ .gen_vdev_start = ath10k_wmi_op_gen_vdev_start,
+ .gen_vdev_stop = ath10k_wmi_op_gen_vdev_stop,
+ .gen_vdev_up = ath10k_wmi_op_gen_vdev_up,
+ .gen_vdev_down = ath10k_wmi_op_gen_vdev_down,
+ .gen_vdev_set_param = ath10k_wmi_op_gen_vdev_set_param,
+ .gen_vdev_install_key = ath10k_wmi_op_gen_vdev_install_key,
+ .gen_vdev_spectral_conf = ath10k_wmi_op_gen_vdev_spectral_conf,
+ .gen_vdev_spectral_enable = ath10k_wmi_op_gen_vdev_spectral_enable,
+ /* .gen_vdev_wmm_conf not implemented */
+ .gen_peer_create = ath10k_wmi_op_gen_peer_create,
+ .gen_peer_delete = ath10k_wmi_op_gen_peer_delete,
+ .gen_peer_flush = ath10k_wmi_op_gen_peer_flush,
+ .gen_peer_set_param = ath10k_wmi_op_gen_peer_set_param,
+ .gen_peer_assoc = ath10k_wmi_op_gen_peer_assoc,
+ .gen_set_psmode = ath10k_wmi_op_gen_set_psmode,
+ .gen_set_sta_ps = ath10k_wmi_op_gen_set_sta_ps,
+ .gen_set_ap_ps = ath10k_wmi_op_gen_set_ap_ps,
+ .gen_scan_chan_list = ath10k_wmi_op_gen_scan_chan_list,
+ .gen_beacon_dma = ath10k_wmi_op_gen_beacon_dma,
+ .gen_pdev_set_wmm = ath10k_wmi_op_gen_pdev_set_wmm,
+ .gen_request_stats = ath10k_wmi_op_gen_request_stats,
+ .gen_force_fw_hang = ath10k_wmi_op_gen_force_fw_hang,
+ .gen_mgmt_tx = ath10k_wmi_op_gen_mgmt_tx,
+ .gen_dbglog_cfg = ath10k_wmi_op_gen_dbglog_cfg,
+ .gen_pktlog_enable = ath10k_wmi_op_gen_pktlog_enable,
+ .gen_pktlog_disable = ath10k_wmi_op_gen_pktlog_disable,
+ .gen_pdev_set_quiet_mode = ath10k_wmi_op_gen_pdev_set_quiet_mode,
+ /* .gen_pdev_get_temperature not implemented */
+ .gen_addba_clear_resp = ath10k_wmi_op_gen_addba_clear_resp,
+ .gen_addba_send = ath10k_wmi_op_gen_addba_send,
+ .gen_addba_set_resp = ath10k_wmi_op_gen_addba_set_resp,
+ .gen_delba_send = ath10k_wmi_op_gen_delba_send,
+ .fw_stats_fill = ath10k_wmi_main_op_fw_stats_fill,
+ .get_vdev_subtype = ath10k_wmi_op_get_vdev_subtype,
+ .gen_echo = ath10k_wmi_op_gen_echo,
+ /* .gen_bcn_tmpl not implemented */
+ /* .gen_prb_tmpl not implemented */
+ /* .gen_p2p_go_bcn_ie not implemented */
+ /* .gen_adaptive_qcs not implemented */
+ /* .gen_pdev_enable_adaptive_cca not implemented */
+};
+
+static const struct wmi_ops wmi_10_1_ops = {
+ .rx = ath10k_wmi_10_1_op_rx,
+ .map_svc = wmi_10x_svc_map,
+ .pull_svc_rdy = ath10k_wmi_10x_op_pull_svc_rdy_ev,
+ .pull_fw_stats = ath10k_wmi_10x_op_pull_fw_stats,
+ .gen_init = ath10k_wmi_10_1_op_gen_init,
+ .gen_pdev_set_rd = ath10k_wmi_10x_op_gen_pdev_set_rd,
+ .gen_start_scan = ath10k_wmi_10x_op_gen_start_scan,
+ .gen_peer_assoc = ath10k_wmi_10_1_op_gen_peer_assoc,
+ /* .gen_pdev_get_temperature not implemented */
+
+ /* shared with main branch */
+ .pull_scan = ath10k_wmi_op_pull_scan_ev,
+ .pull_mgmt_rx = ath10k_wmi_op_pull_mgmt_rx_ev,
+ .pull_ch_info = ath10k_wmi_op_pull_ch_info_ev,
+ .pull_vdev_start = ath10k_wmi_op_pull_vdev_start_ev,
+ .pull_peer_kick = ath10k_wmi_op_pull_peer_kick_ev,
+ .pull_swba = ath10k_wmi_op_pull_swba_ev,
+ .pull_phyerr_hdr = ath10k_wmi_op_pull_phyerr_ev_hdr,
+ .pull_phyerr = ath10k_wmi_op_pull_phyerr_ev,
+ .pull_rdy = ath10k_wmi_op_pull_rdy_ev,
+ .pull_roam_ev = ath10k_wmi_op_pull_roam_ev,
+ .pull_echo_ev = ath10k_wmi_op_pull_echo_ev,
+
+ .gen_pdev_suspend = ath10k_wmi_op_gen_pdev_suspend,
+ .gen_pdev_resume = ath10k_wmi_op_gen_pdev_resume,
+ .gen_pdev_set_param = ath10k_wmi_op_gen_pdev_set_param,
+ .gen_stop_scan = ath10k_wmi_op_gen_stop_scan,
+ .gen_vdev_create = ath10k_wmi_op_gen_vdev_create,
+ .gen_vdev_delete = ath10k_wmi_op_gen_vdev_delete,
+ .gen_vdev_start = ath10k_wmi_op_gen_vdev_start,
+ .gen_vdev_stop = ath10k_wmi_op_gen_vdev_stop,
+ .gen_vdev_up = ath10k_wmi_op_gen_vdev_up,
+ .gen_vdev_down = ath10k_wmi_op_gen_vdev_down,
+ .gen_vdev_set_param = ath10k_wmi_op_gen_vdev_set_param,
+ .gen_vdev_install_key = ath10k_wmi_op_gen_vdev_install_key,
+ .gen_vdev_spectral_conf = ath10k_wmi_op_gen_vdev_spectral_conf,
+ .gen_vdev_spectral_enable = ath10k_wmi_op_gen_vdev_spectral_enable,
+ /* .gen_vdev_wmm_conf not implemented */
+ .gen_peer_create = ath10k_wmi_op_gen_peer_create,
+ .gen_peer_delete = ath10k_wmi_op_gen_peer_delete,
+ .gen_peer_flush = ath10k_wmi_op_gen_peer_flush,
+ .gen_peer_set_param = ath10k_wmi_op_gen_peer_set_param,
+ .gen_set_psmode = ath10k_wmi_op_gen_set_psmode,
+ .gen_set_sta_ps = ath10k_wmi_op_gen_set_sta_ps,
+ .gen_set_ap_ps = ath10k_wmi_op_gen_set_ap_ps,
+ .gen_scan_chan_list = ath10k_wmi_op_gen_scan_chan_list,
+ .gen_beacon_dma = ath10k_wmi_op_gen_beacon_dma,
+ .gen_pdev_set_wmm = ath10k_wmi_op_gen_pdev_set_wmm,
+ .gen_request_stats = ath10k_wmi_op_gen_request_stats,
+ .gen_force_fw_hang = ath10k_wmi_op_gen_force_fw_hang,
+ .gen_mgmt_tx = ath10k_wmi_op_gen_mgmt_tx,
+ .gen_dbglog_cfg = ath10k_wmi_op_gen_dbglog_cfg,
+ .gen_pktlog_enable = ath10k_wmi_op_gen_pktlog_enable,
+ .gen_pktlog_disable = ath10k_wmi_op_gen_pktlog_disable,
+ .gen_pdev_set_quiet_mode = ath10k_wmi_op_gen_pdev_set_quiet_mode,
+ .gen_addba_clear_resp = ath10k_wmi_op_gen_addba_clear_resp,
+ .gen_addba_send = ath10k_wmi_op_gen_addba_send,
+ .gen_addba_set_resp = ath10k_wmi_op_gen_addba_set_resp,
+ .gen_delba_send = ath10k_wmi_op_gen_delba_send,
+ .fw_stats_fill = ath10k_wmi_10x_op_fw_stats_fill,
+ .get_vdev_subtype = ath10k_wmi_op_get_vdev_subtype,
+ .gen_echo = ath10k_wmi_op_gen_echo,
+ /* .gen_bcn_tmpl not implemented */
+ /* .gen_prb_tmpl not implemented */
+ /* .gen_p2p_go_bcn_ie not implemented */
+ /* .gen_adaptive_qcs not implemented */
+ /* .gen_pdev_enable_adaptive_cca not implemented */
+};
+
+static const struct wmi_ops wmi_10_2_ops = {
+ .rx = ath10k_wmi_10_2_op_rx,
+ .pull_fw_stats = ath10k_wmi_10_2_op_pull_fw_stats,
+ .gen_init = ath10k_wmi_10_2_op_gen_init,
+ .gen_peer_assoc = ath10k_wmi_10_2_op_gen_peer_assoc,
+ /* .gen_pdev_get_temperature not implemented */
+
+ /* shared with 10.1 */
+ .map_svc = wmi_10x_svc_map,
+ .pull_svc_rdy = ath10k_wmi_10x_op_pull_svc_rdy_ev,
+ .gen_pdev_set_rd = ath10k_wmi_10x_op_gen_pdev_set_rd,
+ .gen_start_scan = ath10k_wmi_10x_op_gen_start_scan,
+ .gen_echo = ath10k_wmi_op_gen_echo,
+
+ .pull_scan = ath10k_wmi_op_pull_scan_ev,
+ .pull_mgmt_rx = ath10k_wmi_op_pull_mgmt_rx_ev,
+ .pull_ch_info = ath10k_wmi_op_pull_ch_info_ev,
+ .pull_vdev_start = ath10k_wmi_op_pull_vdev_start_ev,
+ .pull_peer_kick = ath10k_wmi_op_pull_peer_kick_ev,
+ .pull_swba = ath10k_wmi_op_pull_swba_ev,
+ .pull_phyerr_hdr = ath10k_wmi_op_pull_phyerr_ev_hdr,
+ .pull_phyerr = ath10k_wmi_op_pull_phyerr_ev,
+ .pull_rdy = ath10k_wmi_op_pull_rdy_ev,
+ .pull_roam_ev = ath10k_wmi_op_pull_roam_ev,
+ .pull_echo_ev = ath10k_wmi_op_pull_echo_ev,
+
+ .gen_pdev_suspend = ath10k_wmi_op_gen_pdev_suspend,
+ .gen_pdev_resume = ath10k_wmi_op_gen_pdev_resume,
+ .gen_pdev_set_param = ath10k_wmi_op_gen_pdev_set_param,
+ .gen_stop_scan = ath10k_wmi_op_gen_stop_scan,
+ .gen_vdev_create = ath10k_wmi_op_gen_vdev_create,
+ .gen_vdev_delete = ath10k_wmi_op_gen_vdev_delete,
+ .gen_vdev_start = ath10k_wmi_op_gen_vdev_start,
+ .gen_vdev_stop = ath10k_wmi_op_gen_vdev_stop,
+ .gen_vdev_up = ath10k_wmi_op_gen_vdev_up,
+ .gen_vdev_down = ath10k_wmi_op_gen_vdev_down,
+ .gen_vdev_set_param = ath10k_wmi_op_gen_vdev_set_param,
+ .gen_vdev_install_key = ath10k_wmi_op_gen_vdev_install_key,
+ .gen_vdev_spectral_conf = ath10k_wmi_op_gen_vdev_spectral_conf,
+ .gen_vdev_spectral_enable = ath10k_wmi_op_gen_vdev_spectral_enable,
+ /* .gen_vdev_wmm_conf not implemented */
+ .gen_peer_create = ath10k_wmi_op_gen_peer_create,
+ .gen_peer_delete = ath10k_wmi_op_gen_peer_delete,
+ .gen_peer_flush = ath10k_wmi_op_gen_peer_flush,
+ .gen_peer_set_param = ath10k_wmi_op_gen_peer_set_param,
+ .gen_set_psmode = ath10k_wmi_op_gen_set_psmode,
+ .gen_set_sta_ps = ath10k_wmi_op_gen_set_sta_ps,
+ .gen_set_ap_ps = ath10k_wmi_op_gen_set_ap_ps,
+ .gen_scan_chan_list = ath10k_wmi_op_gen_scan_chan_list,
+ .gen_beacon_dma = ath10k_wmi_op_gen_beacon_dma,
+ .gen_pdev_set_wmm = ath10k_wmi_op_gen_pdev_set_wmm,
+ .gen_request_stats = ath10k_wmi_op_gen_request_stats,
+ .gen_force_fw_hang = ath10k_wmi_op_gen_force_fw_hang,
+ .gen_mgmt_tx = ath10k_wmi_op_gen_mgmt_tx,
+ .gen_dbglog_cfg = ath10k_wmi_op_gen_dbglog_cfg,
+ .gen_pktlog_enable = ath10k_wmi_op_gen_pktlog_enable,
+ .gen_pktlog_disable = ath10k_wmi_op_gen_pktlog_disable,
+ .gen_pdev_set_quiet_mode = ath10k_wmi_op_gen_pdev_set_quiet_mode,
+ .gen_addba_clear_resp = ath10k_wmi_op_gen_addba_clear_resp,
+ .gen_addba_send = ath10k_wmi_op_gen_addba_send,
+ .gen_addba_set_resp = ath10k_wmi_op_gen_addba_set_resp,
+ .gen_delba_send = ath10k_wmi_op_gen_delba_send,
+ .fw_stats_fill = ath10k_wmi_10x_op_fw_stats_fill,
+ .get_vdev_subtype = ath10k_wmi_op_get_vdev_subtype,
+ /* .gen_pdev_enable_adaptive_cca not implemented */
+};
+
+static const struct wmi_ops wmi_10_2_4_ops = {
+ .rx = ath10k_wmi_10_2_op_rx,
+ .pull_fw_stats = ath10k_wmi_10_2_4_op_pull_fw_stats,
+ .gen_init = ath10k_wmi_10_2_op_gen_init,
+ .gen_peer_assoc = ath10k_wmi_10_2_op_gen_peer_assoc,
+ .gen_pdev_get_temperature = ath10k_wmi_10_2_op_gen_pdev_get_temperature,
+ .gen_pdev_bss_chan_info_req = ath10k_wmi_10_2_op_gen_pdev_bss_chan_info,
+
+ /* shared with 10.1 */
+ .map_svc = wmi_10x_svc_map,
+ .pull_svc_rdy = ath10k_wmi_10x_op_pull_svc_rdy_ev,
+ .gen_pdev_set_rd = ath10k_wmi_10x_op_gen_pdev_set_rd,
+ .gen_start_scan = ath10k_wmi_10x_op_gen_start_scan,
+ .gen_echo = ath10k_wmi_op_gen_echo,
+
+ .pull_scan = ath10k_wmi_op_pull_scan_ev,
+ .pull_mgmt_rx = ath10k_wmi_op_pull_mgmt_rx_ev,
+ .pull_ch_info = ath10k_wmi_op_pull_ch_info_ev,
+ .pull_vdev_start = ath10k_wmi_op_pull_vdev_start_ev,
+ .pull_peer_kick = ath10k_wmi_op_pull_peer_kick_ev,
+ .pull_swba = ath10k_wmi_10_2_4_op_pull_swba_ev,
+ .pull_phyerr_hdr = ath10k_wmi_op_pull_phyerr_ev_hdr,
+ .pull_phyerr = ath10k_wmi_op_pull_phyerr_ev,
+ .pull_rdy = ath10k_wmi_op_pull_rdy_ev,
+ .pull_roam_ev = ath10k_wmi_op_pull_roam_ev,
+ .pull_echo_ev = ath10k_wmi_op_pull_echo_ev,
+
+ .gen_pdev_suspend = ath10k_wmi_op_gen_pdev_suspend,
+ .gen_pdev_resume = ath10k_wmi_op_gen_pdev_resume,
+ .gen_pdev_set_param = ath10k_wmi_op_gen_pdev_set_param,
+ .gen_stop_scan = ath10k_wmi_op_gen_stop_scan,
+ .gen_vdev_create = ath10k_wmi_op_gen_vdev_create,
+ .gen_vdev_delete = ath10k_wmi_op_gen_vdev_delete,
+ .gen_vdev_start = ath10k_wmi_op_gen_vdev_start,
+ .gen_vdev_stop = ath10k_wmi_op_gen_vdev_stop,
+ .gen_vdev_up = ath10k_wmi_op_gen_vdev_up,
+ .gen_vdev_down = ath10k_wmi_op_gen_vdev_down,
+ .gen_vdev_set_param = ath10k_wmi_op_gen_vdev_set_param,
+ .gen_vdev_install_key = ath10k_wmi_op_gen_vdev_install_key,
+ .gen_vdev_spectral_conf = ath10k_wmi_op_gen_vdev_spectral_conf,
+ .gen_vdev_spectral_enable = ath10k_wmi_op_gen_vdev_spectral_enable,
+ .gen_peer_create = ath10k_wmi_op_gen_peer_create,
+ .gen_peer_delete = ath10k_wmi_op_gen_peer_delete,
+ .gen_peer_flush = ath10k_wmi_op_gen_peer_flush,
+ .gen_peer_set_param = ath10k_wmi_op_gen_peer_set_param,
+ .gen_set_psmode = ath10k_wmi_op_gen_set_psmode,
+ .gen_set_sta_ps = ath10k_wmi_op_gen_set_sta_ps,
+ .gen_set_ap_ps = ath10k_wmi_op_gen_set_ap_ps,
+ .gen_scan_chan_list = ath10k_wmi_op_gen_scan_chan_list,
+ .gen_beacon_dma = ath10k_wmi_op_gen_beacon_dma,
+ .gen_pdev_set_wmm = ath10k_wmi_op_gen_pdev_set_wmm,
+ .gen_request_stats = ath10k_wmi_op_gen_request_stats,
+ .gen_force_fw_hang = ath10k_wmi_op_gen_force_fw_hang,
+ .gen_mgmt_tx = ath10k_wmi_op_gen_mgmt_tx,
+ .gen_dbglog_cfg = ath10k_wmi_op_gen_dbglog_cfg,
+ .gen_pktlog_enable = ath10k_wmi_op_gen_pktlog_enable,
+ .gen_pktlog_disable = ath10k_wmi_op_gen_pktlog_disable,
+ .gen_pdev_set_quiet_mode = ath10k_wmi_op_gen_pdev_set_quiet_mode,
+ .gen_addba_clear_resp = ath10k_wmi_op_gen_addba_clear_resp,
+ .gen_addba_send = ath10k_wmi_op_gen_addba_send,
+ .gen_addba_set_resp = ath10k_wmi_op_gen_addba_set_resp,
+ .gen_delba_send = ath10k_wmi_op_gen_delba_send,
+ .gen_pdev_get_tpc_config = ath10k_wmi_10_2_4_op_gen_pdev_get_tpc_config,
+ .fw_stats_fill = ath10k_wmi_10x_op_fw_stats_fill,
+ .gen_pdev_enable_adaptive_cca =
+ ath10k_wmi_op_gen_pdev_enable_adaptive_cca,
+ .get_vdev_subtype = ath10k_wmi_10_2_4_op_get_vdev_subtype,
+ /* .gen_bcn_tmpl not implemented */
+ /* .gen_prb_tmpl not implemented */
+ /* .gen_p2p_go_bcn_ie not implemented */
+ /* .gen_adaptive_qcs not implemented */
+};
+
+static const struct wmi_ops wmi_10_4_ops = {
+ .rx = ath10k_wmi_10_4_op_rx,
+ .map_svc = wmi_10_4_svc_map,
+
+ .pull_fw_stats = ath10k_wmi_10_4_op_pull_fw_stats,
+ .pull_scan = ath10k_wmi_op_pull_scan_ev,
+ .pull_mgmt_rx = ath10k_wmi_10_4_op_pull_mgmt_rx_ev,
+ .pull_ch_info = ath10k_wmi_10_4_op_pull_ch_info_ev,
+ .pull_vdev_start = ath10k_wmi_op_pull_vdev_start_ev,
+ .pull_peer_kick = ath10k_wmi_op_pull_peer_kick_ev,
+ .pull_swba = ath10k_wmi_10_4_op_pull_swba_ev,
+ .pull_phyerr_hdr = ath10k_wmi_10_4_op_pull_phyerr_ev_hdr,
+ .pull_phyerr = ath10k_wmi_10_4_op_pull_phyerr_ev,
+ .pull_svc_rdy = ath10k_wmi_main_op_pull_svc_rdy_ev,
+ .pull_rdy = ath10k_wmi_op_pull_rdy_ev,
+ .pull_roam_ev = ath10k_wmi_op_pull_roam_ev,
+ .pull_dfs_status_ev = ath10k_wmi_10_4_op_pull_dfs_status_ev,
+ .get_txbf_conf_scheme = ath10k_wmi_10_4_txbf_conf_scheme,
+
+ .gen_pdev_suspend = ath10k_wmi_op_gen_pdev_suspend,
+ .gen_pdev_resume = ath10k_wmi_op_gen_pdev_resume,
+ .gen_pdev_set_rd = ath10k_wmi_10x_op_gen_pdev_set_rd,
+ .gen_pdev_set_param = ath10k_wmi_op_gen_pdev_set_param,
+ .gen_init = ath10k_wmi_10_4_op_gen_init,
+ .gen_start_scan = ath10k_wmi_op_gen_start_scan,
+ .gen_stop_scan = ath10k_wmi_op_gen_stop_scan,
+ .gen_vdev_create = ath10k_wmi_op_gen_vdev_create,
+ .gen_vdev_delete = ath10k_wmi_op_gen_vdev_delete,
+ .gen_vdev_start = ath10k_wmi_op_gen_vdev_start,
+ .gen_vdev_stop = ath10k_wmi_op_gen_vdev_stop,
+ .gen_vdev_up = ath10k_wmi_op_gen_vdev_up,
+ .gen_vdev_down = ath10k_wmi_op_gen_vdev_down,
+ .gen_vdev_set_param = ath10k_wmi_op_gen_vdev_set_param,
+ .gen_vdev_install_key = ath10k_wmi_op_gen_vdev_install_key,
+ .gen_vdev_spectral_conf = ath10k_wmi_op_gen_vdev_spectral_conf,
+ .gen_vdev_spectral_enable = ath10k_wmi_op_gen_vdev_spectral_enable,
+ .gen_peer_create = ath10k_wmi_op_gen_peer_create,
+ .gen_peer_delete = ath10k_wmi_op_gen_peer_delete,
+ .gen_peer_flush = ath10k_wmi_op_gen_peer_flush,
+ .gen_peer_set_param = ath10k_wmi_op_gen_peer_set_param,
+ .gen_peer_assoc = ath10k_wmi_10_4_op_gen_peer_assoc,
+ .gen_set_psmode = ath10k_wmi_op_gen_set_psmode,
+ .gen_set_sta_ps = ath10k_wmi_op_gen_set_sta_ps,
+ .gen_set_ap_ps = ath10k_wmi_op_gen_set_ap_ps,
+ .gen_scan_chan_list = ath10k_wmi_op_gen_scan_chan_list,
+ .gen_beacon_dma = ath10k_wmi_op_gen_beacon_dma,
+ .gen_pdev_set_wmm = ath10k_wmi_op_gen_pdev_set_wmm,
+ .gen_force_fw_hang = ath10k_wmi_op_gen_force_fw_hang,
+ .gen_mgmt_tx = ath10k_wmi_op_gen_mgmt_tx,
+ .gen_dbglog_cfg = ath10k_wmi_10_4_op_gen_dbglog_cfg,
+ .gen_pktlog_enable = ath10k_wmi_op_gen_pktlog_enable,
+ .gen_pktlog_disable = ath10k_wmi_op_gen_pktlog_disable,
+ .gen_pdev_set_quiet_mode = ath10k_wmi_op_gen_pdev_set_quiet_mode,
+ .gen_addba_clear_resp = ath10k_wmi_op_gen_addba_clear_resp,
+ .gen_addba_send = ath10k_wmi_op_gen_addba_send,
+ .gen_addba_set_resp = ath10k_wmi_op_gen_addba_set_resp,
+ .gen_delba_send = ath10k_wmi_op_gen_delba_send,
+ .fw_stats_fill = ath10k_wmi_10_4_op_fw_stats_fill,
+ .ext_resource_config = ath10k_wmi_10_4_ext_resource_config,
+ .gen_update_fw_tdls_state = ath10k_wmi_10_4_gen_update_fw_tdls_state,
+ .gen_tdls_peer_update = ath10k_wmi_10_4_gen_tdls_peer_update,
+ .gen_pdev_get_tpc_table_cmdid =
+ ath10k_wmi_10_4_op_gen_pdev_get_tpc_table_cmdid,
+ .gen_radar_found = ath10k_wmi_10_4_gen_radar_found,
+
+ /* shared with 10.2 */
+ .pull_echo_ev = ath10k_wmi_op_pull_echo_ev,
+ .gen_request_stats = ath10k_wmi_op_gen_request_stats,
+ .gen_pdev_get_temperature = ath10k_wmi_10_2_op_gen_pdev_get_temperature,
+ .get_vdev_subtype = ath10k_wmi_10_4_op_get_vdev_subtype,
+ .gen_pdev_bss_chan_info_req = ath10k_wmi_10_2_op_gen_pdev_bss_chan_info,
+ .gen_echo = ath10k_wmi_op_gen_echo,
+ .gen_pdev_get_tpc_config = ath10k_wmi_10_2_4_op_gen_pdev_get_tpc_config,
+};
+
+int ath10k_wmi_attach(struct ath10k *ar)
+{
+ switch (ar->running_fw->fw_file.wmi_op_version) {
+ case ATH10K_FW_WMI_OP_VERSION_10_4:
+ ar->wmi.ops = &wmi_10_4_ops;
+ ar->wmi.cmd = &wmi_10_4_cmd_map;
+ ar->wmi.vdev_param = &wmi_10_4_vdev_param_map;
+ ar->wmi.pdev_param = &wmi_10_4_pdev_param_map;
+ ar->wmi.peer_flags = &wmi_10_2_peer_flags_map;
+ break;
+ case ATH10K_FW_WMI_OP_VERSION_10_2_4:
+ ar->wmi.cmd = &wmi_10_2_4_cmd_map;
+ ar->wmi.ops = &wmi_10_2_4_ops;
+ ar->wmi.vdev_param = &wmi_10_2_4_vdev_param_map;
+ ar->wmi.pdev_param = &wmi_10_2_4_pdev_param_map;
+ ar->wmi.peer_flags = &wmi_10_2_peer_flags_map;
+ break;
+ case ATH10K_FW_WMI_OP_VERSION_10_2:
+ ar->wmi.cmd = &wmi_10_2_cmd_map;
+ ar->wmi.ops = &wmi_10_2_ops;
+ ar->wmi.vdev_param = &wmi_10x_vdev_param_map;
+ ar->wmi.pdev_param = &wmi_10x_pdev_param_map;
+ ar->wmi.peer_flags = &wmi_10_2_peer_flags_map;
+ break;
+ case ATH10K_FW_WMI_OP_VERSION_10_1:
+ ar->wmi.cmd = &wmi_10x_cmd_map;
+ ar->wmi.ops = &wmi_10_1_ops;
+ ar->wmi.vdev_param = &wmi_10x_vdev_param_map;
+ ar->wmi.pdev_param = &wmi_10x_pdev_param_map;
+ ar->wmi.peer_flags = &wmi_10x_peer_flags_map;
+ break;
+ case ATH10K_FW_WMI_OP_VERSION_MAIN:
+ ar->wmi.cmd = &wmi_cmd_map;
+ ar->wmi.ops = &wmi_ops;
+ ar->wmi.vdev_param = &wmi_vdev_param_map;
+ ar->wmi.pdev_param = &wmi_pdev_param_map;
+ ar->wmi.peer_flags = &wmi_peer_flags_map;
+ break;
+ case ATH10K_FW_WMI_OP_VERSION_TLV:
+ ath10k_wmi_tlv_attach(ar);
+ break;
+ case ATH10K_FW_WMI_OP_VERSION_UNSET:
+ case ATH10K_FW_WMI_OP_VERSION_MAX:
+ ath10k_err(ar, "unsupported WMI op version: %d\n",
+ ar->running_fw->fw_file.wmi_op_version);
+ return -EINVAL;
+ }
+
+ init_completion(&ar->wmi.service_ready);
+ init_completion(&ar->wmi.unified_ready);
+ init_completion(&ar->wmi.barrier);
+ init_completion(&ar->wmi.radar_confirm);
+
+ INIT_WORK(&ar->svc_rdy_work, ath10k_wmi_event_service_ready_work);
+ INIT_WORK(&ar->radar_confirmation_work,
+ ath10k_radar_confirmation_work);
+
+	if (test_bit(ATH10K_FW_FEATURE_MGMT_TX_BY_REF,
+		     ar->running_fw->fw_file.fw_features))
+		idr_init(&ar->wmi.mgmt_pending_tx);
+
+ return 0;
+}
+
+void ath10k_wmi_free_host_mem(struct ath10k *ar)
+{
+ int i;
+
+ /* free the host memory chunks requested by firmware */
+ for (i = 0; i < ar->wmi.num_mem_chunks; i++) {
+ dma_free_coherent(ar->dev,
+ ar->wmi.mem_chunks[i].len,
+ ar->wmi.mem_chunks[i].vaddr,
+ ar->wmi.mem_chunks[i].paddr);
+ }
+
+ ar->wmi.num_mem_chunks = 0;
+}
+
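+/* idr_for_each() callback: unmap and free one management frame that is
+ * still awaiting a TX completion at teardown time.
+ */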
+static int ath10k_wmi_mgmt_tx_clean_up_pending(int msdu_id, void *ptr,
+ void *ctx)
+{
+ struct ath10k_mgmt_tx_pkt_addr *pkt_addr = ptr;
+ struct ath10k *ar = ctx;
+ struct sk_buff *msdu;
+
+ ath10k_dbg(ar, ATH10K_DBG_WMI,
+		   "force cleanup mgmt msdu_id %d\n", msdu_id);
+
+ msdu = pkt_addr->vaddr;
+ dma_unmap_single(ar->dev, pkt_addr->paddr,
+ msdu->len, DMA_TO_DEVICE);
+ ieee80211_free_txskb(ar->hw, msdu);
+
+ return 0;
+}
+
+void ath10k_wmi_detach(struct ath10k *ar)
+{
+ if (test_bit(ATH10K_FW_FEATURE_MGMT_TX_BY_REF,
+ ar->running_fw->fw_file.fw_features)) {
+ spin_lock_bh(&ar->data_lock);
+ idr_for_each(&ar->wmi.mgmt_pending_tx,
+ ath10k_wmi_mgmt_tx_clean_up_pending, ar);
+ idr_destroy(&ar->wmi.mgmt_pending_tx);
+ spin_unlock_bh(&ar->data_lock);
+ }
+
+ cancel_work_sync(&ar->svc_rdy_work);
+
+ if (ar->svc_rdy_skb)
+ dev_kfree_skb(ar->svc_rdy_skb);
+}
diff --git a/drivers/net/wireless/ath/ath10k/wmi.h b/drivers/net/wireless/ath/ath10k/wmi.h
new file mode 100644
index 000000000..1292f3235
--- /dev/null
+++ b/drivers/net/wireless/ath/ath10k/wmi.h
@@ -0,0 +1,7167 @@
+/*
+ * Copyright (c) 2005-2011 Atheros Communications Inc.
+ * Copyright (c) 2011-2017 Qualcomm Atheros, Inc.
+ * Copyright (c) 2018, The Linux Foundation. All rights reserved.
+ *
+ * Permission to use, copy, modify, and/or distribute this software for any
+ * purpose with or without fee is hereby granted, provided that the above
+ * copyright notice and this permission notice appear in all copies.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
+ * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
+ * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+ * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+ * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
+ * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+ */
+
+#ifndef _WMI_H_
+#define _WMI_H_
+
+#include <linux/types.h>
+#include <net/mac80211.h>
+
+/*
+ * This file specifies the WMI interface for the Unified Software
+ * Architecture.
+ *
+ * It includes definitions of all the commands and events. Commands are
+ * messages from the host to the target. Events and Replies are messages
+ * from the target to the host.
+ *
+ * Ownership of correctness with regard to WMI commands belongs to the
+ * host driver; the target is not required to validate parameters for
+ * value, proper range, or anything else.
+ *
+ * Guidelines for extending this interface are below.
+ *
+ * 1. Add new WMI commands ONLY within the specified range, 0x9000 to 0x9fff.
+ *
+ * 2. Use ONLY u32 type for defining member variables within WMI
+ * command/event structures. Do not use u8, u16, bool or
+ * enum types within these structures.
+ *
+ * 3. DO NOT define bit fields within structures. Implement bit fields
+ * using masks if necessary. Do not use the programming language's bit
+ * field definition.
+ *
+ * 4. Define macros for encoding/decoding u8 and u16 fields within the
+ * u32 variables, and use those macros for all set/get accesses to such
+ * fields. This keeps structures compact instead of bloating them with
+ * a full u32 for every smaller field.
+ *
+ * 5. Do not use PACK/UNPACK attributes for the structures as each member
+ * variable is already 4-byte aligned by virtue of being a u32
+ * type.
+ *
+ * 6. Document each parameter of a WMI command/event structure with a
+ * comment that opens with two stars ("/**") instead of one, so that
+ * HTML documentation can be generated with Doxygen.
+ *
+ */
+
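+/*
+ * Guideline 4 in practice (illustrative only, not part of the ABI):
+ * a hypothetical 8-bit "count" field packed into a u32 member would be
+ * given mask/LSB macros and accessed via the driver's generic MS()/SM()
+ * helpers:
+ *
+ *	#define WMI_EXAMPLE_COUNT_MASK	0x000000ff
+ *	#define WMI_EXAMPLE_COUNT_LSB	0
+ *
+ *	count = MS(val, WMI_EXAMPLE_COUNT);	decode
+ *	val |= SM(count, WMI_EXAMPLE_COUNT);	encode
+ *
+ * The WMI_EXAMPLE_* names are made up for this example.
+ */
+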
+/* Control Path */
+struct wmi_cmd_hdr {
+ __le32 cmd_id;
+} __packed;
+
+#define WMI_CMD_HDR_CMD_ID_MASK 0x00FFFFFF
+#define WMI_CMD_HDR_CMD_ID_LSB 0
+#define WMI_CMD_HDR_PLT_PRIV_MASK 0xFF000000
+#define WMI_CMD_HDR_PLT_PRIV_LSB 24
+
+#define HTC_PROTOCOL_VERSION 0x0002
+#define WMI_PROTOCOL_VERSION 0x0002
+
+/*
+ * There is no signed version of __le32, so as a temporary solution we
+ * define our own. The idea is borrowed from fs/ntfs/endian.h.
+ *
+ * The a_ prefix is used so that it doesn't conflict if proper support
+ * is ever added to linux/types.h.
+ */
+typedef __s32 __bitwise a_sle32;
+
+static inline a_sle32 a_cpu_to_sle32(s32 val)
+{
+ return (__force a_sle32)cpu_to_le32(val);
+}
+
+static inline s32 a_sle32_to_cpu(a_sle32 val)
+{
+ return le32_to_cpu((__force __le32)val);
+}
+
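+/*
+ * Example (illustrative): a signed value such as an RSSI threshold
+ * round-trips through the wire format as
+ *
+ *	a_sle32 wire = a_cpu_to_sle32(-75);
+ *	s32 val = a_sle32_to_cpu(wire);		now val == -75
+ *
+ * regardless of host endianness.
+ */
+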
+enum wmi_service {
+ WMI_SERVICE_BEACON_OFFLOAD = 0,
+ WMI_SERVICE_SCAN_OFFLOAD,
+ WMI_SERVICE_ROAM_OFFLOAD,
+ WMI_SERVICE_BCN_MISS_OFFLOAD,
+ WMI_SERVICE_STA_PWRSAVE,
+ WMI_SERVICE_STA_ADVANCED_PWRSAVE,
+ WMI_SERVICE_AP_UAPSD,
+ WMI_SERVICE_AP_DFS,
+ WMI_SERVICE_11AC,
+ WMI_SERVICE_BLOCKACK,
+ WMI_SERVICE_PHYERR,
+ WMI_SERVICE_BCN_FILTER,
+ WMI_SERVICE_RTT,
+ WMI_SERVICE_RATECTRL,
+ WMI_SERVICE_WOW,
+ WMI_SERVICE_RATECTRL_CACHE,
+ WMI_SERVICE_IRAM_TIDS,
+ WMI_SERVICE_ARPNS_OFFLOAD,
+ WMI_SERVICE_NLO,
+ WMI_SERVICE_GTK_OFFLOAD,
+ WMI_SERVICE_SCAN_SCH,
+ WMI_SERVICE_CSA_OFFLOAD,
+ WMI_SERVICE_CHATTER,
+ WMI_SERVICE_COEX_FREQAVOID,
+ WMI_SERVICE_PACKET_POWER_SAVE,
+ WMI_SERVICE_FORCE_FW_HANG,
+ WMI_SERVICE_GPIO,
+ WMI_SERVICE_STA_DTIM_PS_MODULATED_DTIM,
+ WMI_SERVICE_STA_UAPSD_BASIC_AUTO_TRIG,
+ WMI_SERVICE_STA_UAPSD_VAR_AUTO_TRIG,
+ WMI_SERVICE_STA_KEEP_ALIVE,
+ WMI_SERVICE_TX_ENCAP,
+ WMI_SERVICE_BURST,
+ WMI_SERVICE_SMART_ANTENNA_SW_SUPPORT,
+ WMI_SERVICE_SMART_ANTENNA_HW_SUPPORT,
+ WMI_SERVICE_ROAM_SCAN_OFFLOAD,
+ WMI_SERVICE_AP_PS_DETECT_OUT_OF_SYNC,
+ WMI_SERVICE_EARLY_RX,
+ WMI_SERVICE_STA_SMPS,
+ WMI_SERVICE_FWTEST,
+ WMI_SERVICE_STA_WMMAC,
+ WMI_SERVICE_TDLS,
+ WMI_SERVICE_MCC_BCN_INTERVAL_CHANGE,
+ WMI_SERVICE_ADAPTIVE_OCS,
+ WMI_SERVICE_BA_SSN_SUPPORT,
+ WMI_SERVICE_FILTER_IPSEC_NATKEEPALIVE,
+ WMI_SERVICE_WLAN_HB,
+ WMI_SERVICE_LTE_ANT_SHARE_SUPPORT,
+ WMI_SERVICE_BATCH_SCAN,
+ WMI_SERVICE_QPOWER,
+ WMI_SERVICE_PLMREQ,
+ WMI_SERVICE_THERMAL_MGMT,
+ WMI_SERVICE_RMC,
+ WMI_SERVICE_MHF_OFFLOAD,
+ WMI_SERVICE_COEX_SAR,
+ WMI_SERVICE_BCN_TXRATE_OVERRIDE,
+ WMI_SERVICE_NAN,
+ WMI_SERVICE_L1SS_STAT,
+ WMI_SERVICE_ESTIMATE_LINKSPEED,
+ WMI_SERVICE_OBSS_SCAN,
+ WMI_SERVICE_TDLS_OFFCHAN,
+ WMI_SERVICE_TDLS_UAPSD_BUFFER_STA,
+ WMI_SERVICE_TDLS_UAPSD_SLEEP_STA,
+ WMI_SERVICE_IBSS_PWRSAVE,
+ WMI_SERVICE_LPASS,
+ WMI_SERVICE_EXTSCAN,
+ WMI_SERVICE_D0WOW,
+ WMI_SERVICE_HSOFFLOAD,
+ WMI_SERVICE_ROAM_HO_OFFLOAD,
+ WMI_SERVICE_RX_FULL_REORDER,
+ WMI_SERVICE_DHCP_OFFLOAD,
+ WMI_SERVICE_STA_RX_IPA_OFFLOAD_SUPPORT,
+ WMI_SERVICE_MDNS_OFFLOAD,
+ WMI_SERVICE_SAP_AUTH_OFFLOAD,
+ WMI_SERVICE_ATF,
+ WMI_SERVICE_COEX_GPIO,
+ WMI_SERVICE_ENHANCED_PROXY_STA,
+ WMI_SERVICE_TT,
+ WMI_SERVICE_PEER_CACHING,
+ WMI_SERVICE_AUX_SPECTRAL_INTF,
+ WMI_SERVICE_AUX_CHAN_LOAD_INTF,
+ WMI_SERVICE_BSS_CHANNEL_INFO_64,
+ WMI_SERVICE_EXT_RES_CFG_SUPPORT,
+ WMI_SERVICE_MESH_11S,
+ WMI_SERVICE_MESH_NON_11S,
+ WMI_SERVICE_PEER_STATS,
+ WMI_SERVICE_RESTRT_CHNL_SUPPORT,
+ WMI_SERVICE_PERIODIC_CHAN_STAT_SUPPORT,
+ WMI_SERVICE_TX_MODE_PUSH_ONLY,
+ WMI_SERVICE_TX_MODE_PUSH_PULL,
+ WMI_SERVICE_TX_MODE_DYNAMIC,
+ WMI_SERVICE_VDEV_RX_FILTER,
+ WMI_SERVICE_BTCOEX,
+ WMI_SERVICE_CHECK_CAL_VERSION,
+ WMI_SERVICE_DBGLOG_WARN2,
+ WMI_SERVICE_BTCOEX_DUTY_CYCLE,
+ WMI_SERVICE_4_WIRE_COEX_SUPPORT,
+ WMI_SERVICE_EXTENDED_NSS_SUPPORT,
+ WMI_SERVICE_PROG_GPIO_BAND_SELECT,
+ WMI_SERVICE_SMART_LOGGING_SUPPORT,
+ WMI_SERVICE_TDLS_CONN_TRACKER_IN_HOST_MODE,
+ WMI_SERVICE_TDLS_EXPLICIT_MODE_ONLY,
+ WMI_SERVICE_MGMT_TX_WMI,
+ WMI_SERVICE_TDLS_WIDER_BANDWIDTH,
+ WMI_SERVICE_HTT_MGMT_TX_COMP_VALID_FLAGS,
+ WMI_SERVICE_HOST_DFS_CHECK_SUPPORT,
+ WMI_SERVICE_TPC_STATS_FINAL,
+ WMI_SERVICE_RESET_CHIP,
+ WMI_SERVICE_SPOOF_MAC_SUPPORT,
+
+ /* keep last */
+ WMI_SERVICE_MAX,
+};
+
+enum wmi_10x_service {
+ WMI_10X_SERVICE_BEACON_OFFLOAD = 0,
+ WMI_10X_SERVICE_SCAN_OFFLOAD,
+ WMI_10X_SERVICE_ROAM_OFFLOAD,
+ WMI_10X_SERVICE_BCN_MISS_OFFLOAD,
+ WMI_10X_SERVICE_STA_PWRSAVE,
+ WMI_10X_SERVICE_STA_ADVANCED_PWRSAVE,
+ WMI_10X_SERVICE_AP_UAPSD,
+ WMI_10X_SERVICE_AP_DFS,
+ WMI_10X_SERVICE_11AC,
+ WMI_10X_SERVICE_BLOCKACK,
+ WMI_10X_SERVICE_PHYERR,
+ WMI_10X_SERVICE_BCN_FILTER,
+ WMI_10X_SERVICE_RTT,
+ WMI_10X_SERVICE_RATECTRL,
+ WMI_10X_SERVICE_WOW,
+ WMI_10X_SERVICE_RATECTRL_CACHE,
+ WMI_10X_SERVICE_IRAM_TIDS,
+ WMI_10X_SERVICE_BURST,
+
+ /* introduced in 10.2 */
+ WMI_10X_SERVICE_SMART_ANTENNA_SW_SUPPORT,
+ WMI_10X_SERVICE_FORCE_FW_HANG,
+ WMI_10X_SERVICE_SMART_ANTENNA_HW_SUPPORT,
+ WMI_10X_SERVICE_ATF,
+ WMI_10X_SERVICE_COEX_GPIO,
+ WMI_10X_SERVICE_AUX_SPECTRAL_INTF,
+ WMI_10X_SERVICE_AUX_CHAN_LOAD_INTF,
+ WMI_10X_SERVICE_BSS_CHANNEL_INFO_64,
+ WMI_10X_SERVICE_MESH,
+ WMI_10X_SERVICE_EXT_RES_CFG_SUPPORT,
+ WMI_10X_SERVICE_PEER_STATS,
+ WMI_10X_SERVICE_RESET_CHIP,
+ WMI_10X_SERVICE_HTT_MGMT_TX_COMP_VALID_FLAGS,
+};
+
+enum wmi_main_service {
+ WMI_MAIN_SERVICE_BEACON_OFFLOAD = 0,
+ WMI_MAIN_SERVICE_SCAN_OFFLOAD,
+ WMI_MAIN_SERVICE_ROAM_OFFLOAD,
+ WMI_MAIN_SERVICE_BCN_MISS_OFFLOAD,
+ WMI_MAIN_SERVICE_STA_PWRSAVE,
+ WMI_MAIN_SERVICE_STA_ADVANCED_PWRSAVE,
+ WMI_MAIN_SERVICE_AP_UAPSD,
+ WMI_MAIN_SERVICE_AP_DFS,
+ WMI_MAIN_SERVICE_11AC,
+ WMI_MAIN_SERVICE_BLOCKACK,
+ WMI_MAIN_SERVICE_PHYERR,
+ WMI_MAIN_SERVICE_BCN_FILTER,
+ WMI_MAIN_SERVICE_RTT,
+ WMI_MAIN_SERVICE_RATECTRL,
+ WMI_MAIN_SERVICE_WOW,
+ WMI_MAIN_SERVICE_RATECTRL_CACHE,
+ WMI_MAIN_SERVICE_IRAM_TIDS,
+ WMI_MAIN_SERVICE_ARPNS_OFFLOAD,
+ WMI_MAIN_SERVICE_NLO,
+ WMI_MAIN_SERVICE_GTK_OFFLOAD,
+ WMI_MAIN_SERVICE_SCAN_SCH,
+ WMI_MAIN_SERVICE_CSA_OFFLOAD,
+ WMI_MAIN_SERVICE_CHATTER,
+ WMI_MAIN_SERVICE_COEX_FREQAVOID,
+ WMI_MAIN_SERVICE_PACKET_POWER_SAVE,
+ WMI_MAIN_SERVICE_FORCE_FW_HANG,
+ WMI_MAIN_SERVICE_GPIO,
+ WMI_MAIN_SERVICE_STA_DTIM_PS_MODULATED_DTIM,
+ WMI_MAIN_SERVICE_STA_UAPSD_BASIC_AUTO_TRIG,
+ WMI_MAIN_SERVICE_STA_UAPSD_VAR_AUTO_TRIG,
+ WMI_MAIN_SERVICE_STA_KEEP_ALIVE,
+ WMI_MAIN_SERVICE_TX_ENCAP,
+};
+
+enum wmi_10_4_service {
+ WMI_10_4_SERVICE_BEACON_OFFLOAD = 0,
+ WMI_10_4_SERVICE_SCAN_OFFLOAD,
+ WMI_10_4_SERVICE_ROAM_OFFLOAD,
+ WMI_10_4_SERVICE_BCN_MISS_OFFLOAD,
+ WMI_10_4_SERVICE_STA_PWRSAVE,
+ WMI_10_4_SERVICE_STA_ADVANCED_PWRSAVE,
+ WMI_10_4_SERVICE_AP_UAPSD,
+ WMI_10_4_SERVICE_AP_DFS,
+ WMI_10_4_SERVICE_11AC,
+ WMI_10_4_SERVICE_BLOCKACK,
+ WMI_10_4_SERVICE_PHYERR,
+ WMI_10_4_SERVICE_BCN_FILTER,
+ WMI_10_4_SERVICE_RTT,
+ WMI_10_4_SERVICE_RATECTRL,
+ WMI_10_4_SERVICE_WOW,
+ WMI_10_4_SERVICE_RATECTRL_CACHE,
+ WMI_10_4_SERVICE_IRAM_TIDS,
+ WMI_10_4_SERVICE_BURST,
+ WMI_10_4_SERVICE_SMART_ANTENNA_SW_SUPPORT,
+ WMI_10_4_SERVICE_GTK_OFFLOAD,
+ WMI_10_4_SERVICE_SCAN_SCH,
+ WMI_10_4_SERVICE_CSA_OFFLOAD,
+ WMI_10_4_SERVICE_CHATTER,
+ WMI_10_4_SERVICE_COEX_FREQAVOID,
+ WMI_10_4_SERVICE_PACKET_POWER_SAVE,
+ WMI_10_4_SERVICE_FORCE_FW_HANG,
+ WMI_10_4_SERVICE_SMART_ANTENNA_HW_SUPPORT,
+ WMI_10_4_SERVICE_GPIO,
+ WMI_10_4_SERVICE_STA_UAPSD_BASIC_AUTO_TRIG,
+ WMI_10_4_SERVICE_STA_UAPSD_VAR_AUTO_TRIG,
+ WMI_10_4_SERVICE_STA_KEEP_ALIVE,
+ WMI_10_4_SERVICE_TX_ENCAP,
+ WMI_10_4_SERVICE_AP_PS_DETECT_OUT_OF_SYNC,
+ WMI_10_4_SERVICE_EARLY_RX,
+ WMI_10_4_SERVICE_ENHANCED_PROXY_STA,
+ WMI_10_4_SERVICE_TT,
+ WMI_10_4_SERVICE_ATF,
+ WMI_10_4_SERVICE_PEER_CACHING,
+ WMI_10_4_SERVICE_COEX_GPIO,
+ WMI_10_4_SERVICE_AUX_SPECTRAL_INTF,
+ WMI_10_4_SERVICE_AUX_CHAN_LOAD_INTF,
+ WMI_10_4_SERVICE_BSS_CHANNEL_INFO_64,
+ WMI_10_4_SERVICE_EXT_RES_CFG_SUPPORT,
+ WMI_10_4_SERVICE_MESH_NON_11S,
+ WMI_10_4_SERVICE_RESTRT_CHNL_SUPPORT,
+ WMI_10_4_SERVICE_PEER_STATS,
+ WMI_10_4_SERVICE_MESH_11S,
+ WMI_10_4_SERVICE_PERIODIC_CHAN_STAT_SUPPORT,
+ WMI_10_4_SERVICE_TX_MODE_PUSH_ONLY,
+ WMI_10_4_SERVICE_TX_MODE_PUSH_PULL,
+ WMI_10_4_SERVICE_TX_MODE_DYNAMIC,
+ WMI_10_4_SERVICE_VDEV_RX_FILTER,
+ WMI_10_4_SERVICE_BTCOEX,
+ WMI_10_4_SERVICE_CHECK_CAL_VERSION,
+ WMI_10_4_SERVICE_DBGLOG_WARN2,
+ WMI_10_4_SERVICE_BTCOEX_DUTY_CYCLE,
+ WMI_10_4_SERVICE_4_WIRE_COEX_SUPPORT,
+ WMI_10_4_SERVICE_EXTENDED_NSS_SUPPORT,
+ WMI_10_4_SERVICE_PROG_GPIO_BAND_SELECT,
+ WMI_10_4_SERVICE_SMART_LOGGING_SUPPORT,
+ WMI_10_4_SERVICE_TDLS,
+ WMI_10_4_SERVICE_TDLS_OFFCHAN,
+ WMI_10_4_SERVICE_TDLS_UAPSD_BUFFER_STA,
+ WMI_10_4_SERVICE_TDLS_UAPSD_SLEEP_STA,
+ WMI_10_4_SERVICE_TDLS_CONN_TRACKER_IN_HOST_MODE,
+ WMI_10_4_SERVICE_TDLS_EXPLICIT_MODE_ONLY,
+ WMI_10_4_SERVICE_TDLS_WIDER_BANDWIDTH,
+ WMI_10_4_SERVICE_HTT_MGMT_TX_COMP_VALID_FLAGS,
+ WMI_10_4_SERVICE_HOST_DFS_CHECK_SUPPORT,
+ WMI_10_4_SERVICE_TPC_STATS_FINAL,
+};
+
+static inline char *wmi_service_name(int service_id)
+{
+#define SVCSTR(x) case x: return #x
+
+ switch (service_id) {
+ SVCSTR(WMI_SERVICE_BEACON_OFFLOAD);
+ SVCSTR(WMI_SERVICE_SCAN_OFFLOAD);
+ SVCSTR(WMI_SERVICE_ROAM_OFFLOAD);
+ SVCSTR(WMI_SERVICE_BCN_MISS_OFFLOAD);
+ SVCSTR(WMI_SERVICE_STA_PWRSAVE);
+ SVCSTR(WMI_SERVICE_STA_ADVANCED_PWRSAVE);
+ SVCSTR(WMI_SERVICE_AP_UAPSD);
+ SVCSTR(WMI_SERVICE_AP_DFS);
+ SVCSTR(WMI_SERVICE_11AC);
+ SVCSTR(WMI_SERVICE_BLOCKACK);
+ SVCSTR(WMI_SERVICE_PHYERR);
+ SVCSTR(WMI_SERVICE_BCN_FILTER);
+ SVCSTR(WMI_SERVICE_RTT);
+ SVCSTR(WMI_SERVICE_RATECTRL);
+ SVCSTR(WMI_SERVICE_WOW);
+ SVCSTR(WMI_SERVICE_RATECTRL_CACHE);
+ SVCSTR(WMI_SERVICE_IRAM_TIDS);
+ SVCSTR(WMI_SERVICE_ARPNS_OFFLOAD);
+ SVCSTR(WMI_SERVICE_NLO);
+ SVCSTR(WMI_SERVICE_GTK_OFFLOAD);
+ SVCSTR(WMI_SERVICE_SCAN_SCH);
+ SVCSTR(WMI_SERVICE_CSA_OFFLOAD);
+ SVCSTR(WMI_SERVICE_CHATTER);
+ SVCSTR(WMI_SERVICE_COEX_FREQAVOID);
+ SVCSTR(WMI_SERVICE_PACKET_POWER_SAVE);
+ SVCSTR(WMI_SERVICE_FORCE_FW_HANG);
+ SVCSTR(WMI_SERVICE_GPIO);
+ SVCSTR(WMI_SERVICE_STA_DTIM_PS_MODULATED_DTIM);
+ SVCSTR(WMI_SERVICE_STA_UAPSD_BASIC_AUTO_TRIG);
+ SVCSTR(WMI_SERVICE_STA_UAPSD_VAR_AUTO_TRIG);
+ SVCSTR(WMI_SERVICE_STA_KEEP_ALIVE);
+ SVCSTR(WMI_SERVICE_TX_ENCAP);
+ SVCSTR(WMI_SERVICE_BURST);
+ SVCSTR(WMI_SERVICE_SMART_ANTENNA_SW_SUPPORT);
+ SVCSTR(WMI_SERVICE_SMART_ANTENNA_HW_SUPPORT);
+ SVCSTR(WMI_SERVICE_ROAM_SCAN_OFFLOAD);
+ SVCSTR(WMI_SERVICE_AP_PS_DETECT_OUT_OF_SYNC);
+ SVCSTR(WMI_SERVICE_EARLY_RX);
+ SVCSTR(WMI_SERVICE_STA_SMPS);
+ SVCSTR(WMI_SERVICE_FWTEST);
+ SVCSTR(WMI_SERVICE_STA_WMMAC);
+ SVCSTR(WMI_SERVICE_TDLS);
+ SVCSTR(WMI_SERVICE_MCC_BCN_INTERVAL_CHANGE);
+ SVCSTR(WMI_SERVICE_ADAPTIVE_OCS);
+ SVCSTR(WMI_SERVICE_BA_SSN_SUPPORT);
+ SVCSTR(WMI_SERVICE_FILTER_IPSEC_NATKEEPALIVE);
+ SVCSTR(WMI_SERVICE_WLAN_HB);
+ SVCSTR(WMI_SERVICE_LTE_ANT_SHARE_SUPPORT);
+ SVCSTR(WMI_SERVICE_BATCH_SCAN);
+ SVCSTR(WMI_SERVICE_QPOWER);
+ SVCSTR(WMI_SERVICE_PLMREQ);
+ SVCSTR(WMI_SERVICE_THERMAL_MGMT);
+ SVCSTR(WMI_SERVICE_RMC);
+ SVCSTR(WMI_SERVICE_MHF_OFFLOAD);
+ SVCSTR(WMI_SERVICE_COEX_SAR);
+ SVCSTR(WMI_SERVICE_BCN_TXRATE_OVERRIDE);
+ SVCSTR(WMI_SERVICE_NAN);
+ SVCSTR(WMI_SERVICE_L1SS_STAT);
+ SVCSTR(WMI_SERVICE_ESTIMATE_LINKSPEED);
+ SVCSTR(WMI_SERVICE_OBSS_SCAN);
+ SVCSTR(WMI_SERVICE_TDLS_OFFCHAN);
+ SVCSTR(WMI_SERVICE_TDLS_UAPSD_BUFFER_STA);
+ SVCSTR(WMI_SERVICE_TDLS_UAPSD_SLEEP_STA);
+ SVCSTR(WMI_SERVICE_IBSS_PWRSAVE);
+ SVCSTR(WMI_SERVICE_LPASS);
+ SVCSTR(WMI_SERVICE_EXTSCAN);
+ SVCSTR(WMI_SERVICE_D0WOW);
+ SVCSTR(WMI_SERVICE_HSOFFLOAD);
+ SVCSTR(WMI_SERVICE_ROAM_HO_OFFLOAD);
+ SVCSTR(WMI_SERVICE_RX_FULL_REORDER);
+ SVCSTR(WMI_SERVICE_DHCP_OFFLOAD);
+ SVCSTR(WMI_SERVICE_STA_RX_IPA_OFFLOAD_SUPPORT);
+ SVCSTR(WMI_SERVICE_MDNS_OFFLOAD);
+ SVCSTR(WMI_SERVICE_SAP_AUTH_OFFLOAD);
+ SVCSTR(WMI_SERVICE_ATF);
+ SVCSTR(WMI_SERVICE_COEX_GPIO);
+ SVCSTR(WMI_SERVICE_ENHANCED_PROXY_STA);
+ SVCSTR(WMI_SERVICE_TT);
+ SVCSTR(WMI_SERVICE_PEER_CACHING);
+ SVCSTR(WMI_SERVICE_AUX_SPECTRAL_INTF);
+ SVCSTR(WMI_SERVICE_AUX_CHAN_LOAD_INTF);
+ SVCSTR(WMI_SERVICE_BSS_CHANNEL_INFO_64);
+ SVCSTR(WMI_SERVICE_EXT_RES_CFG_SUPPORT);
+ SVCSTR(WMI_SERVICE_MESH_11S);
+ SVCSTR(WMI_SERVICE_MESH_NON_11S);
+ SVCSTR(WMI_SERVICE_PEER_STATS);
+ SVCSTR(WMI_SERVICE_RESTRT_CHNL_SUPPORT);
+ SVCSTR(WMI_SERVICE_PERIODIC_CHAN_STAT_SUPPORT);
+ SVCSTR(WMI_SERVICE_TX_MODE_PUSH_ONLY);
+ SVCSTR(WMI_SERVICE_TX_MODE_PUSH_PULL);
+ SVCSTR(WMI_SERVICE_TX_MODE_DYNAMIC);
+ SVCSTR(WMI_SERVICE_VDEV_RX_FILTER);
+ SVCSTR(WMI_SERVICE_CHECK_CAL_VERSION);
+ SVCSTR(WMI_SERVICE_DBGLOG_WARN2);
+ SVCSTR(WMI_SERVICE_BTCOEX_DUTY_CYCLE);
+ SVCSTR(WMI_SERVICE_4_WIRE_COEX_SUPPORT);
+ SVCSTR(WMI_SERVICE_EXTENDED_NSS_SUPPORT);
+ SVCSTR(WMI_SERVICE_PROG_GPIO_BAND_SELECT);
+ SVCSTR(WMI_SERVICE_SMART_LOGGING_SUPPORT);
+ SVCSTR(WMI_SERVICE_TDLS_CONN_TRACKER_IN_HOST_MODE);
+ SVCSTR(WMI_SERVICE_TDLS_EXPLICIT_MODE_ONLY);
+ SVCSTR(WMI_SERVICE_TDLS_WIDER_BANDWIDTH);
+ SVCSTR(WMI_SERVICE_HTT_MGMT_TX_COMP_VALID_FLAGS);
+ SVCSTR(WMI_SERVICE_HOST_DFS_CHECK_SUPPORT);
+ SVCSTR(WMI_SERVICE_TPC_STATS_FINAL);
+ SVCSTR(WMI_SERVICE_RESET_CHIP);
+ default:
+ return NULL;
+ }
+
+#undef SVCSTR
+}
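+
+/* Illustrative usage, not a call site from this file: wmi_service_name()
+ * returns NULL for IDs it does not know, so debug code dumping a service
+ * bitmap has to skip those entries, e.g.
+ *
+ *	name = wmi_service_name(i);
+ *	if (!name)
+ *		continue;
+ */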
+
+#define WMI_SERVICE_IS_ENABLED(wmi_svc_bmap, svc_id, len) \
+ ((svc_id) < (len) && \
+ __le32_to_cpu((wmi_svc_bmap)[(svc_id) / (sizeof(u32))]) & \
+ BIT((svc_id) % (sizeof(u32))))
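+
+/* Worked example: with len = 64 and svc_id = 10, the test above reads
+ * bit (10 % 4) = 2 of word (10 / 4) = 2; only the four least
+ * significant bits of each 32-bit word carry base services, which is
+ * what the extension comment below refers to.
+ */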
+
+/* This extension is required to accommodate new services: the current
+ * limit for wmi_services is 64, since the target uses only 4 bits of
+ * each 32-bit wmi_service word. The extension makes use of the
+ * remaining unused bits for new services.
+ */
+#define WMI_EXT_SERVICE_IS_ENABLED(wmi_svc_bmap, svc_id, len) \
+ ((svc_id) >= (len) && \
+ __le32_to_cpu((wmi_svc_bmap)[((svc_id) - (len)) / 28]) & \
+ BIT(((((svc_id) - (len)) % 28) & 0x1f) + 4))
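+
+/* Worked example: with len = 64 and svc_id = 70 the delta is 6, so the
+ * test above reads bit (6 % 28) + 4 = 10 of word (6 / 28) = 0; the
+ * extension packs 28 services per word into bits 4..31, above the four
+ * low bits used by the base scheme.
+ */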
+
+#define SVCMAP(x, y, len) \
+ do { \
+ if ((WMI_SERVICE_IS_ENABLED((in), (x), (len))) || \
+ (WMI_EXT_SERVICE_IS_ENABLED((in), (x), (len)))) \
+ __set_bit(y, out); \
+ } while (0)
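+
+/* Illustrative expansion: SVCMAP(WMI_10X_SERVICE_11AC, WMI_SERVICE_11AC,
+ * len) sets the abstract WMI_SERVICE_11AC bit in 'out' whenever the
+ * firmware flavour's own 11AC bit is set in the raw bitmap 'in'; the
+ * per-flavour mapping helpers below are long lists of exactly such
+ * translations.
+ */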
+
+static inline void wmi_10x_svc_map(const __le32 *in, unsigned long *out,
+ size_t len)
+{
+ SVCMAP(WMI_10X_SERVICE_BEACON_OFFLOAD,
+ WMI_SERVICE_BEACON_OFFLOAD, len);
+ SVCMAP(WMI_10X_SERVICE_SCAN_OFFLOAD,
+ WMI_SERVICE_SCAN_OFFLOAD, len);
+ SVCMAP(WMI_10X_SERVICE_ROAM_OFFLOAD,
+ WMI_SERVICE_ROAM_OFFLOAD, len);
+ SVCMAP(WMI_10X_SERVICE_BCN_MISS_OFFLOAD,
+ WMI_SERVICE_BCN_MISS_OFFLOAD, len);
+ SVCMAP(WMI_10X_SERVICE_STA_PWRSAVE,
+ WMI_SERVICE_STA_PWRSAVE, len);
+ SVCMAP(WMI_10X_SERVICE_STA_ADVANCED_PWRSAVE,
+ WMI_SERVICE_STA_ADVANCED_PWRSAVE, len);
+ SVCMAP(WMI_10X_SERVICE_AP_UAPSD,
+ WMI_SERVICE_AP_UAPSD, len);
+ SVCMAP(WMI_10X_SERVICE_AP_DFS,
+ WMI_SERVICE_AP_DFS, len);
+ SVCMAP(WMI_10X_SERVICE_11AC,
+ WMI_SERVICE_11AC, len);
+ SVCMAP(WMI_10X_SERVICE_BLOCKACK,
+ WMI_SERVICE_BLOCKACK, len);
+ SVCMAP(WMI_10X_SERVICE_PHYERR,
+ WMI_SERVICE_PHYERR, len);
+ SVCMAP(WMI_10X_SERVICE_BCN_FILTER,
+ WMI_SERVICE_BCN_FILTER, len);
+ SVCMAP(WMI_10X_SERVICE_RTT,
+ WMI_SERVICE_RTT, len);
+ SVCMAP(WMI_10X_SERVICE_RATECTRL,
+ WMI_SERVICE_RATECTRL, len);
+ SVCMAP(WMI_10X_SERVICE_WOW,
+ WMI_SERVICE_WOW, len);
+ SVCMAP(WMI_10X_SERVICE_RATECTRL_CACHE,
+ WMI_SERVICE_RATECTRL_CACHE, len);
+ SVCMAP(WMI_10X_SERVICE_IRAM_TIDS,
+ WMI_SERVICE_IRAM_TIDS, len);
+ SVCMAP(WMI_10X_SERVICE_BURST,
+ WMI_SERVICE_BURST, len);
+ SVCMAP(WMI_10X_SERVICE_SMART_ANTENNA_SW_SUPPORT,
+ WMI_SERVICE_SMART_ANTENNA_SW_SUPPORT, len);
+ SVCMAP(WMI_10X_SERVICE_FORCE_FW_HANG,
+ WMI_SERVICE_FORCE_FW_HANG, len);
+ SVCMAP(WMI_10X_SERVICE_SMART_ANTENNA_HW_SUPPORT,
+ WMI_SERVICE_SMART_ANTENNA_HW_SUPPORT, len);
+ SVCMAP(WMI_10X_SERVICE_ATF,
+ WMI_SERVICE_ATF, len);
+ SVCMAP(WMI_10X_SERVICE_COEX_GPIO,
+ WMI_SERVICE_COEX_GPIO, len);
+ SVCMAP(WMI_10X_SERVICE_AUX_SPECTRAL_INTF,
+ WMI_SERVICE_AUX_SPECTRAL_INTF, len);
+ SVCMAP(WMI_10X_SERVICE_AUX_CHAN_LOAD_INTF,
+ WMI_SERVICE_AUX_CHAN_LOAD_INTF, len);
+ SVCMAP(WMI_10X_SERVICE_BSS_CHANNEL_INFO_64,
+ WMI_SERVICE_BSS_CHANNEL_INFO_64, len);
+ SVCMAP(WMI_10X_SERVICE_MESH,
+ WMI_SERVICE_MESH_11S, len);
+ SVCMAP(WMI_10X_SERVICE_EXT_RES_CFG_SUPPORT,
+ WMI_SERVICE_EXT_RES_CFG_SUPPORT, len);
+ SVCMAP(WMI_10X_SERVICE_PEER_STATS,
+ WMI_SERVICE_PEER_STATS, len);
+ SVCMAP(WMI_10X_SERVICE_RESET_CHIP,
+ WMI_SERVICE_RESET_CHIP, len);
+ SVCMAP(WMI_10X_SERVICE_HTT_MGMT_TX_COMP_VALID_FLAGS,
+ WMI_SERVICE_HTT_MGMT_TX_COMP_VALID_FLAGS, len);
+}
+
+static inline void wmi_main_svc_map(const __le32 *in, unsigned long *out,
+ size_t len)
+{
+ SVCMAP(WMI_MAIN_SERVICE_BEACON_OFFLOAD,
+ WMI_SERVICE_BEACON_OFFLOAD, len);
+ SVCMAP(WMI_MAIN_SERVICE_SCAN_OFFLOAD,
+ WMI_SERVICE_SCAN_OFFLOAD, len);
+ SVCMAP(WMI_MAIN_SERVICE_ROAM_OFFLOAD,
+ WMI_SERVICE_ROAM_OFFLOAD, len);
+ SVCMAP(WMI_MAIN_SERVICE_BCN_MISS_OFFLOAD,
+ WMI_SERVICE_BCN_MISS_OFFLOAD, len);
+ SVCMAP(WMI_MAIN_SERVICE_STA_PWRSAVE,
+ WMI_SERVICE_STA_PWRSAVE, len);
+ SVCMAP(WMI_MAIN_SERVICE_STA_ADVANCED_PWRSAVE,
+ WMI_SERVICE_STA_ADVANCED_PWRSAVE, len);
+ SVCMAP(WMI_MAIN_SERVICE_AP_UAPSD,
+ WMI_SERVICE_AP_UAPSD, len);
+ SVCMAP(WMI_MAIN_SERVICE_AP_DFS,
+ WMI_SERVICE_AP_DFS, len);
+ SVCMAP(WMI_MAIN_SERVICE_11AC,
+ WMI_SERVICE_11AC, len);
+ SVCMAP(WMI_MAIN_SERVICE_BLOCKACK,
+ WMI_SERVICE_BLOCKACK, len);
+ SVCMAP(WMI_MAIN_SERVICE_PHYERR,
+ WMI_SERVICE_PHYERR, len);
+ SVCMAP(WMI_MAIN_SERVICE_BCN_FILTER,
+ WMI_SERVICE_BCN_FILTER, len);
+ SVCMAP(WMI_MAIN_SERVICE_RTT,
+ WMI_SERVICE_RTT, len);
+ SVCMAP(WMI_MAIN_SERVICE_RATECTRL,
+ WMI_SERVICE_RATECTRL, len);
+ SVCMAP(WMI_MAIN_SERVICE_WOW,
+ WMI_SERVICE_WOW, len);
+ SVCMAP(WMI_MAIN_SERVICE_RATECTRL_CACHE,
+ WMI_SERVICE_RATECTRL_CACHE, len);
+ SVCMAP(WMI_MAIN_SERVICE_IRAM_TIDS,
+ WMI_SERVICE_IRAM_TIDS, len);
+ SVCMAP(WMI_MAIN_SERVICE_ARPNS_OFFLOAD,
+ WMI_SERVICE_ARPNS_OFFLOAD, len);
+ SVCMAP(WMI_MAIN_SERVICE_NLO,
+ WMI_SERVICE_NLO, len);
+ SVCMAP(WMI_MAIN_SERVICE_GTK_OFFLOAD,
+ WMI_SERVICE_GTK_OFFLOAD, len);
+ SVCMAP(WMI_MAIN_SERVICE_SCAN_SCH,
+ WMI_SERVICE_SCAN_SCH, len);
+ SVCMAP(WMI_MAIN_SERVICE_CSA_OFFLOAD,
+ WMI_SERVICE_CSA_OFFLOAD, len);
+ SVCMAP(WMI_MAIN_SERVICE_CHATTER,
+ WMI_SERVICE_CHATTER, len);
+ SVCMAP(WMI_MAIN_SERVICE_COEX_FREQAVOID,
+ WMI_SERVICE_COEX_FREQAVOID, len);
+ SVCMAP(WMI_MAIN_SERVICE_PACKET_POWER_SAVE,
+ WMI_SERVICE_PACKET_POWER_SAVE, len);
+ SVCMAP(WMI_MAIN_SERVICE_FORCE_FW_HANG,
+ WMI_SERVICE_FORCE_FW_HANG, len);
+ SVCMAP(WMI_MAIN_SERVICE_GPIO,
+ WMI_SERVICE_GPIO, len);
+ SVCMAP(WMI_MAIN_SERVICE_STA_DTIM_PS_MODULATED_DTIM,
+ WMI_SERVICE_STA_DTIM_PS_MODULATED_DTIM, len);
+ SVCMAP(WMI_MAIN_SERVICE_STA_UAPSD_BASIC_AUTO_TRIG,
+ WMI_SERVICE_STA_UAPSD_BASIC_AUTO_TRIG, len);
+ SVCMAP(WMI_MAIN_SERVICE_STA_UAPSD_VAR_AUTO_TRIG,
+ WMI_SERVICE_STA_UAPSD_VAR_AUTO_TRIG, len);
+ SVCMAP(WMI_MAIN_SERVICE_STA_KEEP_ALIVE,
+ WMI_SERVICE_STA_KEEP_ALIVE, len);
+ SVCMAP(WMI_MAIN_SERVICE_TX_ENCAP,
+ WMI_SERVICE_TX_ENCAP, len);
+}
+
+static inline void wmi_10_4_svc_map(const __le32 *in, unsigned long *out,
+ size_t len)
+{
+ SVCMAP(WMI_10_4_SERVICE_BEACON_OFFLOAD,
+ WMI_SERVICE_BEACON_OFFLOAD, len);
+ SVCMAP(WMI_10_4_SERVICE_SCAN_OFFLOAD,
+ WMI_SERVICE_SCAN_OFFLOAD, len);
+ SVCMAP(WMI_10_4_SERVICE_ROAM_OFFLOAD,
+ WMI_SERVICE_ROAM_OFFLOAD, len);
+ SVCMAP(WMI_10_4_SERVICE_BCN_MISS_OFFLOAD,
+ WMI_SERVICE_BCN_MISS_OFFLOAD, len);
+ SVCMAP(WMI_10_4_SERVICE_STA_PWRSAVE,
+ WMI_SERVICE_STA_PWRSAVE, len);
+ SVCMAP(WMI_10_4_SERVICE_STA_ADVANCED_PWRSAVE,
+ WMI_SERVICE_STA_ADVANCED_PWRSAVE, len);
+ SVCMAP(WMI_10_4_SERVICE_AP_UAPSD,
+ WMI_SERVICE_AP_UAPSD, len);
+ SVCMAP(WMI_10_4_SERVICE_AP_DFS,
+ WMI_SERVICE_AP_DFS, len);
+ SVCMAP(WMI_10_4_SERVICE_11AC,
+ WMI_SERVICE_11AC, len);
+ SVCMAP(WMI_10_4_SERVICE_BLOCKACK,
+ WMI_SERVICE_BLOCKACK, len);
+ SVCMAP(WMI_10_4_SERVICE_PHYERR,
+ WMI_SERVICE_PHYERR, len);
+ SVCMAP(WMI_10_4_SERVICE_BCN_FILTER,
+ WMI_SERVICE_BCN_FILTER, len);
+ SVCMAP(WMI_10_4_SERVICE_RTT,
+ WMI_SERVICE_RTT, len);
+ SVCMAP(WMI_10_4_SERVICE_RATECTRL,
+ WMI_SERVICE_RATECTRL, len);
+ SVCMAP(WMI_10_4_SERVICE_WOW,
+ WMI_SERVICE_WOW, len);
+ SVCMAP(WMI_10_4_SERVICE_RATECTRL_CACHE,
+ WMI_SERVICE_RATECTRL_CACHE, len);
+ SVCMAP(WMI_10_4_SERVICE_IRAM_TIDS,
+ WMI_SERVICE_IRAM_TIDS, len);
+ SVCMAP(WMI_10_4_SERVICE_BURST,
+ WMI_SERVICE_BURST, len);
+ SVCMAP(WMI_10_4_SERVICE_SMART_ANTENNA_SW_SUPPORT,
+ WMI_SERVICE_SMART_ANTENNA_SW_SUPPORT, len);
+ SVCMAP(WMI_10_4_SERVICE_GTK_OFFLOAD,
+ WMI_SERVICE_GTK_OFFLOAD, len);
+ SVCMAP(WMI_10_4_SERVICE_SCAN_SCH,
+ WMI_SERVICE_SCAN_SCH, len);
+ SVCMAP(WMI_10_4_SERVICE_CSA_OFFLOAD,
+ WMI_SERVICE_CSA_OFFLOAD, len);
+ SVCMAP(WMI_10_4_SERVICE_CHATTER,
+ WMI_SERVICE_CHATTER, len);
+ SVCMAP(WMI_10_4_SERVICE_COEX_FREQAVOID,
+ WMI_SERVICE_COEX_FREQAVOID, len);
+ SVCMAP(WMI_10_4_SERVICE_PACKET_POWER_SAVE,
+ WMI_SERVICE_PACKET_POWER_SAVE, len);
+ SVCMAP(WMI_10_4_SERVICE_FORCE_FW_HANG,
+ WMI_SERVICE_FORCE_FW_HANG, len);
+ SVCMAP(WMI_10_4_SERVICE_SMART_ANTENNA_HW_SUPPORT,
+ WMI_SERVICE_SMART_ANTENNA_HW_SUPPORT, len);
+ SVCMAP(WMI_10_4_SERVICE_GPIO,
+ WMI_SERVICE_GPIO, len);
+ SVCMAP(WMI_10_4_SERVICE_STA_UAPSD_BASIC_AUTO_TRIG,
+ WMI_SERVICE_STA_UAPSD_BASIC_AUTO_TRIG, len);
+ SVCMAP(WMI_10_4_SERVICE_STA_UAPSD_VAR_AUTO_TRIG,
+ WMI_SERVICE_STA_UAPSD_VAR_AUTO_TRIG, len);
+ SVCMAP(WMI_10_4_SERVICE_STA_KEEP_ALIVE,
+ WMI_SERVICE_STA_KEEP_ALIVE, len);
+ SVCMAP(WMI_10_4_SERVICE_TX_ENCAP,
+ WMI_SERVICE_TX_ENCAP, len);
+ SVCMAP(WMI_10_4_SERVICE_AP_PS_DETECT_OUT_OF_SYNC,
+ WMI_SERVICE_AP_PS_DETECT_OUT_OF_SYNC, len);
+ SVCMAP(WMI_10_4_SERVICE_EARLY_RX,
+ WMI_SERVICE_EARLY_RX, len);
+ SVCMAP(WMI_10_4_SERVICE_ENHANCED_PROXY_STA,
+ WMI_SERVICE_ENHANCED_PROXY_STA, len);
+ SVCMAP(WMI_10_4_SERVICE_TT,
+ WMI_SERVICE_TT, len);
+ SVCMAP(WMI_10_4_SERVICE_ATF,
+ WMI_SERVICE_ATF, len);
+ SVCMAP(WMI_10_4_SERVICE_PEER_CACHING,
+ WMI_SERVICE_PEER_CACHING, len);
+ SVCMAP(WMI_10_4_SERVICE_COEX_GPIO,
+ WMI_SERVICE_COEX_GPIO, len);
+ SVCMAP(WMI_10_4_SERVICE_AUX_SPECTRAL_INTF,
+ WMI_SERVICE_AUX_SPECTRAL_INTF, len);
+ SVCMAP(WMI_10_4_SERVICE_AUX_CHAN_LOAD_INTF,
+ WMI_SERVICE_AUX_CHAN_LOAD_INTF, len);
+ SVCMAP(WMI_10_4_SERVICE_BSS_CHANNEL_INFO_64,
+ WMI_SERVICE_BSS_CHANNEL_INFO_64, len);
+ SVCMAP(WMI_10_4_SERVICE_EXT_RES_CFG_SUPPORT,
+ WMI_SERVICE_EXT_RES_CFG_SUPPORT, len);
+ SVCMAP(WMI_10_4_SERVICE_MESH_NON_11S,
+ WMI_SERVICE_MESH_NON_11S, len);
+ SVCMAP(WMI_10_4_SERVICE_RESTRT_CHNL_SUPPORT,
+ WMI_SERVICE_RESTRT_CHNL_SUPPORT, len);
+ SVCMAP(WMI_10_4_SERVICE_PEER_STATS,
+ WMI_SERVICE_PEER_STATS, len);
+ SVCMAP(WMI_10_4_SERVICE_MESH_11S,
+ WMI_SERVICE_MESH_11S, len);
+ SVCMAP(WMI_10_4_SERVICE_PERIODIC_CHAN_STAT_SUPPORT,
+ WMI_SERVICE_PERIODIC_CHAN_STAT_SUPPORT, len);
+ SVCMAP(WMI_10_4_SERVICE_TX_MODE_PUSH_ONLY,
+ WMI_SERVICE_TX_MODE_PUSH_ONLY, len);
+ SVCMAP(WMI_10_4_SERVICE_TX_MODE_PUSH_PULL,
+ WMI_SERVICE_TX_MODE_PUSH_PULL, len);
+ SVCMAP(WMI_10_4_SERVICE_TX_MODE_DYNAMIC,
+ WMI_SERVICE_TX_MODE_DYNAMIC, len);
+ SVCMAP(WMI_10_4_SERVICE_VDEV_RX_FILTER,
+ WMI_SERVICE_VDEV_RX_FILTER, len);
+ SVCMAP(WMI_10_4_SERVICE_BTCOEX,
+ WMI_SERVICE_BTCOEX, len);
+ SVCMAP(WMI_10_4_SERVICE_CHECK_CAL_VERSION,
+ WMI_SERVICE_CHECK_CAL_VERSION, len);
+ SVCMAP(WMI_10_4_SERVICE_DBGLOG_WARN2,
+ WMI_SERVICE_DBGLOG_WARN2, len);
+ SVCMAP(WMI_10_4_SERVICE_BTCOEX_DUTY_CYCLE,
+ WMI_SERVICE_BTCOEX_DUTY_CYCLE, len);
+ SVCMAP(WMI_10_4_SERVICE_4_WIRE_COEX_SUPPORT,
+ WMI_SERVICE_4_WIRE_COEX_SUPPORT, len);
+ SVCMAP(WMI_10_4_SERVICE_EXTENDED_NSS_SUPPORT,
+ WMI_SERVICE_EXTENDED_NSS_SUPPORT, len);
+ SVCMAP(WMI_10_4_SERVICE_PROG_GPIO_BAND_SELECT,
+ WMI_SERVICE_PROG_GPIO_BAND_SELECT, len);
+ SVCMAP(WMI_10_4_SERVICE_SMART_LOGGING_SUPPORT,
+ WMI_SERVICE_SMART_LOGGING_SUPPORT, len);
+ SVCMAP(WMI_10_4_SERVICE_TDLS,
+ WMI_SERVICE_TDLS, len);
+ SVCMAP(WMI_10_4_SERVICE_TDLS_OFFCHAN,
+ WMI_SERVICE_TDLS_OFFCHAN, len);
+ SVCMAP(WMI_10_4_SERVICE_TDLS_UAPSD_BUFFER_STA,
+ WMI_SERVICE_TDLS_UAPSD_BUFFER_STA, len);
+ SVCMAP(WMI_10_4_SERVICE_TDLS_UAPSD_SLEEP_STA,
+ WMI_SERVICE_TDLS_UAPSD_SLEEP_STA, len);
+ SVCMAP(WMI_10_4_SERVICE_TDLS_CONN_TRACKER_IN_HOST_MODE,
+ WMI_SERVICE_TDLS_CONN_TRACKER_IN_HOST_MODE, len);
+ SVCMAP(WMI_10_4_SERVICE_TDLS_EXPLICIT_MODE_ONLY,
+ WMI_SERVICE_TDLS_EXPLICIT_MODE_ONLY, len);
+ SVCMAP(WMI_10_4_SERVICE_TDLS_WIDER_BANDWIDTH,
+ WMI_SERVICE_TDLS_WIDER_BANDWIDTH, len);
+ SVCMAP(WMI_10_4_SERVICE_HTT_MGMT_TX_COMP_VALID_FLAGS,
+ WMI_SERVICE_HTT_MGMT_TX_COMP_VALID_FLAGS, len);
+ SVCMAP(WMI_10_4_SERVICE_HOST_DFS_CHECK_SUPPORT,
+ WMI_SERVICE_HOST_DFS_CHECK_SUPPORT, len);
+ SVCMAP(WMI_10_4_SERVICE_TPC_STATS_FINAL,
+ WMI_SERVICE_TPC_STATS_FINAL, len);
+}
+
+#undef SVCMAP
+
+/* 2-word representation of a MAC address */
+struct wmi_mac_addr {
+ union {
+ u8 addr[6];
+ struct {
+ u32 word0;
+ u32 word1;
+ } __packed;
+ } __packed;
+} __packed;
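+
+/* Illustrative layout, assuming a little-endian host as the __le32
+ * fields elsewhere in this interface do: addr[0..3] overlay word0 and
+ * addr[4..5] the low 16 bits of word1, so 00:11:22:33:44:55 yields
+ * word0 = 0x33221100 and word1 = 0x00005544.
+ */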
+
+struct wmi_cmd_map {
+ u32 init_cmdid;
+ u32 start_scan_cmdid;
+ u32 stop_scan_cmdid;
+ u32 scan_chan_list_cmdid;
+ u32 scan_sch_prio_tbl_cmdid;
+ u32 scan_prob_req_oui_cmdid;
+ u32 pdev_set_regdomain_cmdid;
+ u32 pdev_set_channel_cmdid;
+ u32 pdev_set_param_cmdid;
+ u32 pdev_pktlog_enable_cmdid;
+ u32 pdev_pktlog_disable_cmdid;
+ u32 pdev_set_wmm_params_cmdid;
+ u32 pdev_set_ht_cap_ie_cmdid;
+ u32 pdev_set_vht_cap_ie_cmdid;
+ u32 pdev_set_dscp_tid_map_cmdid;
+ u32 pdev_set_quiet_mode_cmdid;
+ u32 pdev_green_ap_ps_enable_cmdid;
+ u32 pdev_get_tpc_config_cmdid;
+ u32 pdev_set_base_macaddr_cmdid;
+ u32 vdev_create_cmdid;
+ u32 vdev_delete_cmdid;
+ u32 vdev_start_request_cmdid;
+ u32 vdev_restart_request_cmdid;
+ u32 vdev_up_cmdid;
+ u32 vdev_stop_cmdid;
+ u32 vdev_down_cmdid;
+ u32 vdev_set_param_cmdid;
+ u32 vdev_install_key_cmdid;
+ u32 peer_create_cmdid;
+ u32 peer_delete_cmdid;
+ u32 peer_flush_tids_cmdid;
+ u32 peer_set_param_cmdid;
+ u32 peer_assoc_cmdid;
+ u32 peer_add_wds_entry_cmdid;
+ u32 peer_remove_wds_entry_cmdid;
+ u32 peer_mcast_group_cmdid;
+ u32 bcn_tx_cmdid;
+ u32 pdev_send_bcn_cmdid;
+ u32 bcn_tmpl_cmdid;
+ u32 bcn_filter_rx_cmdid;
+ u32 prb_req_filter_rx_cmdid;
+ u32 mgmt_tx_cmdid;
+ u32 mgmt_tx_send_cmdid;
+ u32 prb_tmpl_cmdid;
+ u32 addba_clear_resp_cmdid;
+ u32 addba_send_cmdid;
+ u32 addba_status_cmdid;
+ u32 delba_send_cmdid;
+ u32 addba_set_resp_cmdid;
+ u32 send_singleamsdu_cmdid;
+ u32 sta_powersave_mode_cmdid;
+ u32 sta_powersave_param_cmdid;
+ u32 sta_mimo_ps_mode_cmdid;
+ u32 pdev_dfs_enable_cmdid;
+ u32 pdev_dfs_disable_cmdid;
+ u32 roam_scan_mode;
+ u32 roam_scan_rssi_threshold;
+ u32 roam_scan_period;
+ u32 roam_scan_rssi_change_threshold;
+ u32 roam_ap_profile;
+ u32 ofl_scan_add_ap_profile;
+ u32 ofl_scan_remove_ap_profile;
+ u32 ofl_scan_period;
+ u32 p2p_dev_set_device_info;
+ u32 p2p_dev_set_discoverability;
+ u32 p2p_go_set_beacon_ie;
+ u32 p2p_go_set_probe_resp_ie;
+ u32 p2p_set_vendor_ie_data_cmdid;
+ u32 ap_ps_peer_param_cmdid;
+ u32 ap_ps_peer_uapsd_coex_cmdid;
+ u32 peer_rate_retry_sched_cmdid;
+ u32 wlan_profile_trigger_cmdid;
+ u32 wlan_profile_set_hist_intvl_cmdid;
+ u32 wlan_profile_get_profile_data_cmdid;
+ u32 wlan_profile_enable_profile_id_cmdid;
+ u32 wlan_profile_list_profile_id_cmdid;
+ u32 pdev_suspend_cmdid;
+ u32 pdev_resume_cmdid;
+ u32 add_bcn_filter_cmdid;
+ u32 rmv_bcn_filter_cmdid;
+ u32 wow_add_wake_pattern_cmdid;
+ u32 wow_del_wake_pattern_cmdid;
+ u32 wow_enable_disable_wake_event_cmdid;
+ u32 wow_enable_cmdid;
+ u32 wow_hostwakeup_from_sleep_cmdid;
+ u32 rtt_measreq_cmdid;
+ u32 rtt_tsf_cmdid;
+ u32 vdev_spectral_scan_configure_cmdid;
+ u32 vdev_spectral_scan_enable_cmdid;
+ u32 request_stats_cmdid;
+ u32 set_arp_ns_offload_cmdid;
+ u32 network_list_offload_config_cmdid;
+ u32 gtk_offload_cmdid;
+ u32 csa_offload_enable_cmdid;
+ u32 csa_offload_chanswitch_cmdid;
+ u32 chatter_set_mode_cmdid;
+ u32 peer_tid_addba_cmdid;
+ u32 peer_tid_delba_cmdid;
+ u32 sta_dtim_ps_method_cmdid;
+ u32 sta_uapsd_auto_trig_cmdid;
+ u32 sta_keepalive_cmd;
+ u32 echo_cmdid;
+ u32 pdev_utf_cmdid;
+ u32 dbglog_cfg_cmdid;
+ u32 pdev_qvit_cmdid;
+ u32 pdev_ftm_intg_cmdid;
+ u32 vdev_set_keepalive_cmdid;
+ u32 vdev_get_keepalive_cmdid;
+ u32 force_fw_hang_cmdid;
+ u32 gpio_config_cmdid;
+ u32 gpio_output_cmdid;
+ u32 pdev_get_temperature_cmdid;
+ u32 vdev_set_wmm_params_cmdid;
+ u32 tdls_set_state_cmdid;
+ u32 tdls_peer_update_cmdid;
+ u32 adaptive_qcs_cmdid;
+ u32 scan_update_request_cmdid;
+ u32 vdev_standby_response_cmdid;
+ u32 vdev_resume_response_cmdid;
+ u32 wlan_peer_caching_add_peer_cmdid;
+ u32 wlan_peer_caching_evict_peer_cmdid;
+ u32 wlan_peer_caching_restore_peer_cmdid;
+ u32 wlan_peer_caching_print_all_peers_info_cmdid;
+ u32 peer_update_wds_entry_cmdid;
+ u32 peer_add_proxy_sta_entry_cmdid;
+ u32 rtt_keepalive_cmdid;
+ u32 oem_req_cmdid;
+ u32 nan_cmdid;
+ u32 vdev_ratemask_cmdid;
+ u32 qboost_cfg_cmdid;
+ u32 pdev_smart_ant_enable_cmdid;
+ u32 pdev_smart_ant_set_rx_antenna_cmdid;
+ u32 peer_smart_ant_set_tx_antenna_cmdid;
+ u32 peer_smart_ant_set_train_info_cmdid;
+ u32 peer_smart_ant_set_node_config_ops_cmdid;
+ u32 pdev_set_antenna_switch_table_cmdid;
+ u32 pdev_set_ctl_table_cmdid;
+ u32 pdev_set_mimogain_table_cmdid;
+ u32 pdev_ratepwr_table_cmdid;
+ u32 pdev_ratepwr_chainmsk_table_cmdid;
+ u32 pdev_fips_cmdid;
+ u32 tt_set_conf_cmdid;
+ u32 fwtest_cmdid;
+ u32 vdev_atf_request_cmdid;
+ u32 peer_atf_request_cmdid;
+ u32 pdev_get_ani_cck_config_cmdid;
+ u32 pdev_get_ani_ofdm_config_cmdid;
+ u32 pdev_reserve_ast_entry_cmdid;
+ u32 pdev_get_nfcal_power_cmdid;
+ u32 pdev_get_tpc_cmdid;
+ u32 pdev_get_ast_info_cmdid;
+ u32 vdev_set_dscp_tid_map_cmdid;
+ u32 pdev_get_info_cmdid;
+ u32 vdev_get_info_cmdid;
+ u32 vdev_filter_neighbor_rx_packets_cmdid;
+ u32 mu_cal_start_cmdid;
+ u32 set_cca_params_cmdid;
+ u32 pdev_bss_chan_info_request_cmdid;
+ u32 pdev_enable_adaptive_cca_cmdid;
+ u32 ext_resource_cfg_cmdid;
+ u32 vdev_set_ie_cmdid;
+ u32 set_lteu_config_cmdid;
+ u32 atf_ssid_grouping_request_cmdid;
+ u32 peer_atf_ext_request_cmdid;
+ u32 set_periodic_channel_stats_cfg_cmdid;
+ u32 peer_bwf_request_cmdid;
+ u32 btcoex_cfg_cmdid;
+ u32 peer_tx_mu_txmit_count_cmdid;
+ u32 peer_tx_mu_txmit_rstcnt_cmdid;
+ u32 peer_gid_userpos_list_cmdid;
+ u32 pdev_check_cal_version_cmdid;
+ u32 coex_version_cfg_cmid;
+ u32 pdev_get_rx_filter_cmdid;
+ u32 pdev_extended_nss_cfg_cmdid;
+ u32 vdev_set_scan_nac_rssi_cmdid;
+ u32 prog_gpio_band_select_cmdid;
+ u32 config_smart_logging_cmdid;
+ u32 debug_fatal_condition_cmdid;
+ u32 get_tsf_timer_cmdid;
+ u32 pdev_get_tpc_table_cmdid;
+ u32 vdev_sifs_trigger_time_cmdid;
+ u32 pdev_wds_entry_list_cmdid;
+ u32 tdls_set_offchan_mode_cmdid;
+ u32 radar_found_cmdid;
+};
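+
+/* A minimal usage sketch (illustrative, not a definitive call site):
+ * each firmware flavour fills in one wmi_cmd_map with its own numeric
+ * IDs, using 0 (WMI_CMD_UNSUPPORTED below) for commands it lacks, and
+ * senders resolve the ID through the map instead of hard-coding it:
+ *
+ *	skb = ath10k_wmi_alloc_skb(ar, sizeof(*cmd));
+ *	...
+ *	ath10k_wmi_cmd_send(ar, skb, ar->wmi.cmd->init_cmdid);
+ */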
+
+/*
+ * WMI command groups.
+ */
+enum wmi_cmd_group {
+ /* 0 to 2 are reserved */
+ WMI_GRP_START = 0x3,
+ WMI_GRP_SCAN = WMI_GRP_START,
+ WMI_GRP_PDEV,
+ WMI_GRP_VDEV,
+ WMI_GRP_PEER,
+ WMI_GRP_MGMT,
+ WMI_GRP_BA_NEG,
+ WMI_GRP_STA_PS,
+ WMI_GRP_DFS,
+ WMI_GRP_ROAM,
+ WMI_GRP_OFL_SCAN,
+ WMI_GRP_P2P,
+ WMI_GRP_AP_PS,
+ WMI_GRP_RATE_CTRL,
+ WMI_GRP_PROFILE,
+ WMI_GRP_SUSPEND,
+ WMI_GRP_BCN_FILTER,
+ WMI_GRP_WOW,
+ WMI_GRP_RTT,
+ WMI_GRP_SPECTRAL,
+ WMI_GRP_STATS,
+ WMI_GRP_ARP_NS_OFL,
+ WMI_GRP_NLO_OFL,
+ WMI_GRP_GTK_OFL,
+ WMI_GRP_CSA_OFL,
+ WMI_GRP_CHATTER,
+ WMI_GRP_TID_ADDBA,
+ WMI_GRP_MISC,
+ WMI_GRP_GPIO,
+};
+
+#define WMI_CMD_GRP(grp_id) (((grp_id) << 12) | 0x1)
+#define WMI_EVT_GRP_START_ID(grp_id) (((grp_id) << 12) | 0x1)
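+
+/* Worked example: WMI_GRP_SCAN is 0x3, so WMI_CMD_GRP(WMI_GRP_SCAN)
+ * evaluates to (0x3 << 12) | 0x1 = 0x3001; each group owns a 4096-wide
+ * ID range starting at offset 1, which is why WMI_START_SCAN_CMDID
+ * below is 0x3001 and WMI_STOP_SCAN_CMDID auto-increments to 0x3002.
+ */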
+
+#define WMI_CMD_UNSUPPORTED 0
+
+/* Command IDs and event IDs for MAIN firmware. */
+enum wmi_cmd_id {
+ WMI_INIT_CMDID = 0x1,
+
+ /* Scan specific commands */
+ WMI_START_SCAN_CMDID = WMI_CMD_GRP(WMI_GRP_SCAN),
+ WMI_STOP_SCAN_CMDID,
+ WMI_SCAN_CHAN_LIST_CMDID,
+ WMI_SCAN_SCH_PRIO_TBL_CMDID,
+
+ /* PDEV (physical device) specific commands */
+ WMI_PDEV_SET_REGDOMAIN_CMDID = WMI_CMD_GRP(WMI_GRP_PDEV),
+ WMI_PDEV_SET_CHANNEL_CMDID,
+ WMI_PDEV_SET_PARAM_CMDID,
+ WMI_PDEV_PKTLOG_ENABLE_CMDID,
+ WMI_PDEV_PKTLOG_DISABLE_CMDID,
+ WMI_PDEV_SET_WMM_PARAMS_CMDID,
+ WMI_PDEV_SET_HT_CAP_IE_CMDID,
+ WMI_PDEV_SET_VHT_CAP_IE_CMDID,
+ WMI_PDEV_SET_DSCP_TID_MAP_CMDID,
+ WMI_PDEV_SET_QUIET_MODE_CMDID,
+ WMI_PDEV_GREEN_AP_PS_ENABLE_CMDID,
+ WMI_PDEV_GET_TPC_CONFIG_CMDID,
+ WMI_PDEV_SET_BASE_MACADDR_CMDID,
+
+ /* VDEV (virtual device) specific commands */
+ WMI_VDEV_CREATE_CMDID = WMI_CMD_GRP(WMI_GRP_VDEV),
+ WMI_VDEV_DELETE_CMDID,
+ WMI_VDEV_START_REQUEST_CMDID,
+ WMI_VDEV_RESTART_REQUEST_CMDID,
+ WMI_VDEV_UP_CMDID,
+ WMI_VDEV_STOP_CMDID,
+ WMI_VDEV_DOWN_CMDID,
+ WMI_VDEV_SET_PARAM_CMDID,
+ WMI_VDEV_INSTALL_KEY_CMDID,
+
+ /* peer specific commands */
+ WMI_PEER_CREATE_CMDID = WMI_CMD_GRP(WMI_GRP_PEER),
+ WMI_PEER_DELETE_CMDID,
+ WMI_PEER_FLUSH_TIDS_CMDID,
+ WMI_PEER_SET_PARAM_CMDID,
+ WMI_PEER_ASSOC_CMDID,
+ WMI_PEER_ADD_WDS_ENTRY_CMDID,
+ WMI_PEER_REMOVE_WDS_ENTRY_CMDID,
+ WMI_PEER_MCAST_GROUP_CMDID,
+
+ /* beacon/management specific commands */
+ WMI_BCN_TX_CMDID = WMI_CMD_GRP(WMI_GRP_MGMT),
+ WMI_PDEV_SEND_BCN_CMDID,
+ WMI_BCN_TMPL_CMDID,
+ WMI_BCN_FILTER_RX_CMDID,
+ WMI_PRB_REQ_FILTER_RX_CMDID,
+ WMI_MGMT_TX_CMDID,
+ WMI_PRB_TMPL_CMDID,
+
+	/* commands to control BA negotiation directly from the host */
+ WMI_ADDBA_CLEAR_RESP_CMDID = WMI_CMD_GRP(WMI_GRP_BA_NEG),
+ WMI_ADDBA_SEND_CMDID,
+ WMI_ADDBA_STATUS_CMDID,
+ WMI_DELBA_SEND_CMDID,
+ WMI_ADDBA_SET_RESP_CMDID,
+ WMI_SEND_SINGLEAMSDU_CMDID,
+
+ /* Station power save specific config */
+ WMI_STA_POWERSAVE_MODE_CMDID = WMI_CMD_GRP(WMI_GRP_STA_PS),
+ WMI_STA_POWERSAVE_PARAM_CMDID,
+ WMI_STA_MIMO_PS_MODE_CMDID,
+
+	/* DFS-specific commands */
+ WMI_PDEV_DFS_ENABLE_CMDID = WMI_CMD_GRP(WMI_GRP_DFS),
+ WMI_PDEV_DFS_DISABLE_CMDID,
+
+ /* Roaming specific commands */
+ WMI_ROAM_SCAN_MODE = WMI_CMD_GRP(WMI_GRP_ROAM),
+ WMI_ROAM_SCAN_RSSI_THRESHOLD,
+ WMI_ROAM_SCAN_PERIOD,
+ WMI_ROAM_SCAN_RSSI_CHANGE_THRESHOLD,
+ WMI_ROAM_AP_PROFILE,
+
+ /* offload scan specific commands */
+ WMI_OFL_SCAN_ADD_AP_PROFILE = WMI_CMD_GRP(WMI_GRP_OFL_SCAN),
+ WMI_OFL_SCAN_REMOVE_AP_PROFILE,
+ WMI_OFL_SCAN_PERIOD,
+
+ /* P2P specific commands */
+ WMI_P2P_DEV_SET_DEVICE_INFO = WMI_CMD_GRP(WMI_GRP_P2P),
+ WMI_P2P_DEV_SET_DISCOVERABILITY,
+ WMI_P2P_GO_SET_BEACON_IE,
+ WMI_P2P_GO_SET_PROBE_RESP_IE,
+ WMI_P2P_SET_VENDOR_IE_DATA_CMDID,
+
+ /* AP power save specific config */
+ WMI_AP_PS_PEER_PARAM_CMDID = WMI_CMD_GRP(WMI_GRP_AP_PS),
+ WMI_AP_PS_PEER_UAPSD_COEX_CMDID,
+
+ /* Rate-control specific commands */
+ WMI_PEER_RATE_RETRY_SCHED_CMDID =
+ WMI_CMD_GRP(WMI_GRP_RATE_CTRL),
+
+ /* WLAN Profiling commands. */
+ WMI_WLAN_PROFILE_TRIGGER_CMDID = WMI_CMD_GRP(WMI_GRP_PROFILE),
+ WMI_WLAN_PROFILE_SET_HIST_INTVL_CMDID,
+ WMI_WLAN_PROFILE_GET_PROFILE_DATA_CMDID,
+ WMI_WLAN_PROFILE_ENABLE_PROFILE_ID_CMDID,
+ WMI_WLAN_PROFILE_LIST_PROFILE_ID_CMDID,
+
+	/* Suspend/resume command IDs */
+ WMI_PDEV_SUSPEND_CMDID = WMI_CMD_GRP(WMI_GRP_SUSPEND),
+ WMI_PDEV_RESUME_CMDID,
+
+ /* Beacon filter commands */
+ WMI_ADD_BCN_FILTER_CMDID = WMI_CMD_GRP(WMI_GRP_BCN_FILTER),
+ WMI_RMV_BCN_FILTER_CMDID,
+
+	/* WOW-specific WMI commands */
+ WMI_WOW_ADD_WAKE_PATTERN_CMDID = WMI_CMD_GRP(WMI_GRP_WOW),
+ WMI_WOW_DEL_WAKE_PATTERN_CMDID,
+ WMI_WOW_ENABLE_DISABLE_WAKE_EVENT_CMDID,
+ WMI_WOW_ENABLE_CMDID,
+ WMI_WOW_HOSTWAKEUP_FROM_SLEEP_CMDID,
+
+ /* RTT measurement related cmd */
+ WMI_RTT_MEASREQ_CMDID = WMI_CMD_GRP(WMI_GRP_RTT),
+ WMI_RTT_TSF_CMDID,
+
+ /* spectral scan commands */
+ WMI_VDEV_SPECTRAL_SCAN_CONFIGURE_CMDID = WMI_CMD_GRP(WMI_GRP_SPECTRAL),
+ WMI_VDEV_SPECTRAL_SCAN_ENABLE_CMDID,
+
+ /* F/W stats */
+ WMI_REQUEST_STATS_CMDID = WMI_CMD_GRP(WMI_GRP_STATS),
+
+	/* ARP offload request */
+ WMI_SET_ARP_NS_OFFLOAD_CMDID = WMI_CMD_GRP(WMI_GRP_ARP_NS_OFL),
+
+	/* NS offload config */
+ WMI_NETWORK_LIST_OFFLOAD_CONFIG_CMDID = WMI_CMD_GRP(WMI_GRP_NLO_OFL),
+
+	/* GTK offload specific WMI commands */
+ WMI_GTK_OFFLOAD_CMDID = WMI_CMD_GRP(WMI_GRP_GTK_OFL),
+
+	/* CSA offload specific WMI commands */
+ WMI_CSA_OFFLOAD_ENABLE_CMDID = WMI_CMD_GRP(WMI_GRP_CSA_OFL),
+ WMI_CSA_OFFLOAD_CHANSWITCH_CMDID,
+
+	/* Chatter commands */
+ WMI_CHATTER_SET_MODE_CMDID = WMI_CMD_GRP(WMI_GRP_CHATTER),
+
+ /* addba specific commands */
+ WMI_PEER_TID_ADDBA_CMDID = WMI_CMD_GRP(WMI_GRP_TID_ADDBA),
+ WMI_PEER_TID_DELBA_CMDID,
+
+	/* set station DTIM powersave method */
+ WMI_STA_DTIM_PS_METHOD_CMDID,
+ /* Configure the Station UAPSD AC Auto Trigger Parameters */
+ WMI_STA_UAPSD_AUTO_TRIG_CMDID,
+
+	/* STA keepalive parameter configuration;
+	 * requires WMI_SERVICE_STA_KEEP_ALIVE
+	 */
+ WMI_STA_KEEPALIVE_CMD,
+
+ /* misc command group */
+ WMI_ECHO_CMDID = WMI_CMD_GRP(WMI_GRP_MISC),
+ WMI_PDEV_UTF_CMDID,
+ WMI_DBGLOG_CFG_CMDID,
+ WMI_PDEV_QVIT_CMDID,
+ WMI_PDEV_FTM_INTG_CMDID,
+ WMI_VDEV_SET_KEEPALIVE_CMDID,
+ WMI_VDEV_GET_KEEPALIVE_CMDID,
+ WMI_FORCE_FW_HANG_CMDID,
+
+ /* GPIO Configuration */
+ WMI_GPIO_CONFIG_CMDID = WMI_CMD_GRP(WMI_GRP_GPIO),
+ WMI_GPIO_OUTPUT_CMDID,
+};
+
+enum wmi_event_id {
+ WMI_SERVICE_READY_EVENTID = 0x1,
+ WMI_READY_EVENTID,
+ WMI_SERVICE_AVAILABLE_EVENTID,
+
+ /* Scan specific events */
+ WMI_SCAN_EVENTID = WMI_EVT_GRP_START_ID(WMI_GRP_SCAN),
+
+ /* PDEV specific events */
+ WMI_PDEV_TPC_CONFIG_EVENTID = WMI_EVT_GRP_START_ID(WMI_GRP_PDEV),
+ WMI_CHAN_INFO_EVENTID,
+ WMI_PHYERR_EVENTID,
+
+ /* VDEV specific events */
+ WMI_VDEV_START_RESP_EVENTID = WMI_EVT_GRP_START_ID(WMI_GRP_VDEV),
+ WMI_VDEV_STOPPED_EVENTID,
+ WMI_VDEV_INSTALL_KEY_COMPLETE_EVENTID,
+
+ /* peer specific events */
+ WMI_PEER_STA_KICKOUT_EVENTID = WMI_EVT_GRP_START_ID(WMI_GRP_PEER),
+
+ /* beacon/mgmt specific events */
+ WMI_MGMT_RX_EVENTID = WMI_EVT_GRP_START_ID(WMI_GRP_MGMT),
+ WMI_HOST_SWBA_EVENTID,
+ WMI_TBTTOFFSET_UPDATE_EVENTID,
+
+ /* ADDBA Related WMI Events*/
+ WMI_TX_DELBA_COMPLETE_EVENTID = WMI_EVT_GRP_START_ID(WMI_GRP_BA_NEG),
+ WMI_TX_ADDBA_COMPLETE_EVENTID,
+
+ /* Roam event to trigger roaming on host */
+ WMI_ROAM_EVENTID = WMI_EVT_GRP_START_ID(WMI_GRP_ROAM),
+ WMI_PROFILE_MATCH,
+
+ /* WoW */
+ WMI_WOW_WAKEUP_HOST_EVENTID = WMI_EVT_GRP_START_ID(WMI_GRP_WOW),
+
+ /* RTT */
+ WMI_RTT_MEASUREMENT_REPORT_EVENTID = WMI_EVT_GRP_START_ID(WMI_GRP_RTT),
+ WMI_TSF_MEASUREMENT_REPORT_EVENTID,
+ WMI_RTT_ERROR_REPORT_EVENTID,
+
+ /* GTK offload */
+ WMI_GTK_OFFLOAD_STATUS_EVENTID = WMI_EVT_GRP_START_ID(WMI_GRP_GTK_OFL),
+ WMI_GTK_REKEY_FAIL_EVENTID,
+
+ /* CSA IE received event */
+ WMI_CSA_HANDLING_EVENTID = WMI_EVT_GRP_START_ID(WMI_GRP_CSA_OFL),
+
+ /* Misc events */
+ WMI_ECHO_EVENTID = WMI_EVT_GRP_START_ID(WMI_GRP_MISC),
+ WMI_PDEV_UTF_EVENTID,
+ WMI_DEBUG_MESG_EVENTID,
+ WMI_UPDATE_STATS_EVENTID,
+ WMI_DEBUG_PRINT_EVENTID,
+ WMI_DCS_INTERFERENCE_EVENTID,
+ WMI_PDEV_QVIT_EVENTID,
+ WMI_WLAN_PROFILE_DATA_EVENTID,
+ WMI_PDEV_FTM_INTG_EVENTID,
+ WMI_WLAN_FREQ_AVOID_EVENTID,
+ WMI_VDEV_GET_KEEPALIVE_EVENTID,
+
+ /* GPIO Event */
+ WMI_GPIO_INPUT_EVENTID = WMI_EVT_GRP_START_ID(WMI_GRP_GPIO),
+};
+
+/* Command IDs and event IDs for 10.X firmware */
+enum wmi_10x_cmd_id {
+ WMI_10X_START_CMDID = 0x9000,
+ WMI_10X_END_CMDID = 0x9FFF,
+
+	/* initialize the WLAN subsystem */
+ WMI_10X_INIT_CMDID,
+
+ /* Scan specific commands */
+
+ WMI_10X_START_SCAN_CMDID = WMI_10X_START_CMDID,
+ WMI_10X_STOP_SCAN_CMDID,
+ WMI_10X_SCAN_CHAN_LIST_CMDID,
+ WMI_10X_ECHO_CMDID,
+
+	/* PDEV (physical device) specific commands */
+ WMI_10X_PDEV_SET_REGDOMAIN_CMDID,
+ WMI_10X_PDEV_SET_CHANNEL_CMDID,
+ WMI_10X_PDEV_SET_PARAM_CMDID,
+ WMI_10X_PDEV_PKTLOG_ENABLE_CMDID,
+ WMI_10X_PDEV_PKTLOG_DISABLE_CMDID,
+ WMI_10X_PDEV_SET_WMM_PARAMS_CMDID,
+ WMI_10X_PDEV_SET_HT_CAP_IE_CMDID,
+ WMI_10X_PDEV_SET_VHT_CAP_IE_CMDID,
+ WMI_10X_PDEV_SET_BASE_MACADDR_CMDID,
+ WMI_10X_PDEV_SET_DSCP_TID_MAP_CMDID,
+ WMI_10X_PDEV_SET_QUIET_MODE_CMDID,
+ WMI_10X_PDEV_GREEN_AP_PS_ENABLE_CMDID,
+ WMI_10X_PDEV_GET_TPC_CONFIG_CMDID,
+
+	/* VDEV (virtual device) specific commands */
+ WMI_10X_VDEV_CREATE_CMDID,
+ WMI_10X_VDEV_DELETE_CMDID,
+ WMI_10X_VDEV_START_REQUEST_CMDID,
+ WMI_10X_VDEV_RESTART_REQUEST_CMDID,
+ WMI_10X_VDEV_UP_CMDID,
+ WMI_10X_VDEV_STOP_CMDID,
+ WMI_10X_VDEV_DOWN_CMDID,
+ WMI_10X_VDEV_STANDBY_RESPONSE_CMDID,
+ WMI_10X_VDEV_RESUME_RESPONSE_CMDID,
+ WMI_10X_VDEV_SET_PARAM_CMDID,
+ WMI_10X_VDEV_INSTALL_KEY_CMDID,
+
+ /* peer specific commands */
+ WMI_10X_PEER_CREATE_CMDID,
+ WMI_10X_PEER_DELETE_CMDID,
+ WMI_10X_PEER_FLUSH_TIDS_CMDID,
+ WMI_10X_PEER_SET_PARAM_CMDID,
+ WMI_10X_PEER_ASSOC_CMDID,
+ WMI_10X_PEER_ADD_WDS_ENTRY_CMDID,
+ WMI_10X_PEER_REMOVE_WDS_ENTRY_CMDID,
+ WMI_10X_PEER_MCAST_GROUP_CMDID,
+
+ /* beacon/management specific commands */
+
+ WMI_10X_BCN_TX_CMDID,
+ WMI_10X_BCN_PRB_TMPL_CMDID,
+ WMI_10X_BCN_FILTER_RX_CMDID,
+ WMI_10X_PRB_REQ_FILTER_RX_CMDID,
+ WMI_10X_MGMT_TX_CMDID,
+
+	/* commands to control BA negotiation directly from the host */
+ WMI_10X_ADDBA_CLEAR_RESP_CMDID,
+ WMI_10X_ADDBA_SEND_CMDID,
+ WMI_10X_ADDBA_STATUS_CMDID,
+ WMI_10X_DELBA_SEND_CMDID,
+ WMI_10X_ADDBA_SET_RESP_CMDID,
+ WMI_10X_SEND_SINGLEAMSDU_CMDID,
+
+ /* Station power save specific config */
+ WMI_10X_STA_POWERSAVE_MODE_CMDID,
+ WMI_10X_STA_POWERSAVE_PARAM_CMDID,
+ WMI_10X_STA_MIMO_PS_MODE_CMDID,
+
+ /* set debug log config */
+ WMI_10X_DBGLOG_CFG_CMDID,
+
+ /* DFS-specific commands */
+ WMI_10X_PDEV_DFS_ENABLE_CMDID,
+ WMI_10X_PDEV_DFS_DISABLE_CMDID,
+
+ /* QVIT specific command id */
+ WMI_10X_PDEV_QVIT_CMDID,
+
+ /* Offload Scan and Roaming related commands */
+ WMI_10X_ROAM_SCAN_MODE,
+ WMI_10X_ROAM_SCAN_RSSI_THRESHOLD,
+ WMI_10X_ROAM_SCAN_PERIOD,
+ WMI_10X_ROAM_SCAN_RSSI_CHANGE_THRESHOLD,
+ WMI_10X_ROAM_AP_PROFILE,
+ WMI_10X_OFL_SCAN_ADD_AP_PROFILE,
+ WMI_10X_OFL_SCAN_REMOVE_AP_PROFILE,
+ WMI_10X_OFL_SCAN_PERIOD,
+
+ /* P2P specific commands */
+ WMI_10X_P2P_DEV_SET_DEVICE_INFO,
+ WMI_10X_P2P_DEV_SET_DISCOVERABILITY,
+ WMI_10X_P2P_GO_SET_BEACON_IE,
+ WMI_10X_P2P_GO_SET_PROBE_RESP_IE,
+
+ /* AP power save specific config */
+ WMI_10X_AP_PS_PEER_PARAM_CMDID,
+ WMI_10X_AP_PS_PEER_UAPSD_COEX_CMDID,
+
+ /* Rate-control specific commands */
+ WMI_10X_PEER_RATE_RETRY_SCHED_CMDID,
+
+ /* WLAN Profiling commands. */
+ WMI_10X_WLAN_PROFILE_TRIGGER_CMDID,
+ WMI_10X_WLAN_PROFILE_SET_HIST_INTVL_CMDID,
+ WMI_10X_WLAN_PROFILE_GET_PROFILE_DATA_CMDID,
+ WMI_10X_WLAN_PROFILE_ENABLE_PROFILE_ID_CMDID,
+ WMI_10X_WLAN_PROFILE_LIST_PROFILE_ID_CMDID,
+
+	/* Suspend/resume command IDs */
+ WMI_10X_PDEV_SUSPEND_CMDID,
+ WMI_10X_PDEV_RESUME_CMDID,
+
+ /* Beacon filter commands */
+ WMI_10X_ADD_BCN_FILTER_CMDID,
+ WMI_10X_RMV_BCN_FILTER_CMDID,
+
+	/* WOW-specific WMI commands */
+ WMI_10X_WOW_ADD_WAKE_PATTERN_CMDID,
+ WMI_10X_WOW_DEL_WAKE_PATTERN_CMDID,
+ WMI_10X_WOW_ENABLE_DISABLE_WAKE_EVENT_CMDID,
+ WMI_10X_WOW_ENABLE_CMDID,
+ WMI_10X_WOW_HOSTWAKEUP_FROM_SLEEP_CMDID,
+
+ /* RTT measurement related cmd */
+ WMI_10X_RTT_MEASREQ_CMDID,
+ WMI_10X_RTT_TSF_CMDID,
+
+ /* transmit beacon by value */
+ WMI_10X_PDEV_SEND_BCN_CMDID,
+
+ /* F/W stats */
+ WMI_10X_VDEV_SPECTRAL_SCAN_CONFIGURE_CMDID,
+ WMI_10X_VDEV_SPECTRAL_SCAN_ENABLE_CMDID,
+ WMI_10X_REQUEST_STATS_CMDID,
+
+ /* GPIO Configuration */
+ WMI_10X_GPIO_CONFIG_CMDID,
+ WMI_10X_GPIO_OUTPUT_CMDID,
+
+ WMI_10X_PDEV_UTF_CMDID = WMI_10X_END_CMDID - 1,
+};
+
+enum wmi_10x_event_id {
+ WMI_10X_SERVICE_READY_EVENTID = 0x8000,
+ WMI_10X_READY_EVENTID,
+ WMI_10X_START_EVENTID = 0x9000,
+ WMI_10X_END_EVENTID = 0x9FFF,
+
+ /* Scan specific events */
+ WMI_10X_SCAN_EVENTID = WMI_10X_START_EVENTID,
+ WMI_10X_ECHO_EVENTID,
+ WMI_10X_DEBUG_MESG_EVENTID,
+ WMI_10X_UPDATE_STATS_EVENTID,
+
+ /* Instantaneous RSSI event */
+ WMI_10X_INST_RSSI_STATS_EVENTID,
+
+ /* VDEV specific events */
+ WMI_10X_VDEV_START_RESP_EVENTID,
+ WMI_10X_VDEV_STANDBY_REQ_EVENTID,
+ WMI_10X_VDEV_RESUME_REQ_EVENTID,
+ WMI_10X_VDEV_STOPPED_EVENTID,
+
+ /* peer specific events */
+ WMI_10X_PEER_STA_KICKOUT_EVENTID,
+
+ /* beacon/mgmt specific events */
+ WMI_10X_HOST_SWBA_EVENTID,
+ WMI_10X_TBTTOFFSET_UPDATE_EVENTID,
+ WMI_10X_MGMT_RX_EVENTID,
+
+ /* Channel stats event */
+ WMI_10X_CHAN_INFO_EVENTID,
+
+ /* PHY Error specific WMI event */
+ WMI_10X_PHYERR_EVENTID,
+
+ /* Roam event to trigger roaming on host */
+ WMI_10X_ROAM_EVENTID,
+
+ /* matching AP found from list of profiles */
+ WMI_10X_PROFILE_MATCH,
+
+ /* debug print message used for tracing FW code while debugging */
+ WMI_10X_DEBUG_PRINT_EVENTID,
+	/* QVIT specific event */
+ WMI_10X_PDEV_QVIT_EVENTID,
+ /* FW code profile data in response to profile request */
+ WMI_10X_WLAN_PROFILE_DATA_EVENTID,
+
+	/* RTT related event IDs */
+ WMI_10X_RTT_MEASUREMENT_REPORT_EVENTID,
+ WMI_10X_TSF_MEASUREMENT_REPORT_EVENTID,
+ WMI_10X_RTT_ERROR_REPORT_EVENTID,
+
+ WMI_10X_WOW_WAKEUP_HOST_EVENTID,
+ WMI_10X_DCS_INTERFERENCE_EVENTID,
+
+ /* TPC config for the current operating channel */
+ WMI_10X_PDEV_TPC_CONFIG_EVENTID,
+
+ WMI_10X_GPIO_INPUT_EVENTID,
+ WMI_10X_PDEV_UTF_EVENTID = WMI_10X_END_EVENTID - 1,
+};
+
+enum wmi_10_2_cmd_id {
+ WMI_10_2_START_CMDID = 0x9000,
+ WMI_10_2_END_CMDID = 0x9FFF,
+ WMI_10_2_INIT_CMDID,
+ WMI_10_2_START_SCAN_CMDID = WMI_10_2_START_CMDID,
+ WMI_10_2_STOP_SCAN_CMDID,
+ WMI_10_2_SCAN_CHAN_LIST_CMDID,
+ WMI_10_2_ECHO_CMDID,
+ WMI_10_2_PDEV_SET_REGDOMAIN_CMDID,
+ WMI_10_2_PDEV_SET_CHANNEL_CMDID,
+ WMI_10_2_PDEV_SET_PARAM_CMDID,
+ WMI_10_2_PDEV_PKTLOG_ENABLE_CMDID,
+ WMI_10_2_PDEV_PKTLOG_DISABLE_CMDID,
+ WMI_10_2_PDEV_SET_WMM_PARAMS_CMDID,
+ WMI_10_2_PDEV_SET_HT_CAP_IE_CMDID,
+ WMI_10_2_PDEV_SET_VHT_CAP_IE_CMDID,
+ WMI_10_2_PDEV_SET_BASE_MACADDR_CMDID,
+ WMI_10_2_PDEV_SET_QUIET_MODE_CMDID,
+ WMI_10_2_PDEV_GREEN_AP_PS_ENABLE_CMDID,
+ WMI_10_2_PDEV_GET_TPC_CONFIG_CMDID,
+ WMI_10_2_VDEV_CREATE_CMDID,
+ WMI_10_2_VDEV_DELETE_CMDID,
+ WMI_10_2_VDEV_START_REQUEST_CMDID,
+ WMI_10_2_VDEV_RESTART_REQUEST_CMDID,
+ WMI_10_2_VDEV_UP_CMDID,
+ WMI_10_2_VDEV_STOP_CMDID,
+ WMI_10_2_VDEV_DOWN_CMDID,
+ WMI_10_2_VDEV_STANDBY_RESPONSE_CMDID,
+ WMI_10_2_VDEV_RESUME_RESPONSE_CMDID,
+ WMI_10_2_VDEV_SET_PARAM_CMDID,
+ WMI_10_2_VDEV_INSTALL_KEY_CMDID,
+ WMI_10_2_VDEV_SET_DSCP_TID_MAP_CMDID,
+ WMI_10_2_PEER_CREATE_CMDID,
+ WMI_10_2_PEER_DELETE_CMDID,
+ WMI_10_2_PEER_FLUSH_TIDS_CMDID,
+ WMI_10_2_PEER_SET_PARAM_CMDID,
+ WMI_10_2_PEER_ASSOC_CMDID,
+ WMI_10_2_PEER_ADD_WDS_ENTRY_CMDID,
+ WMI_10_2_PEER_UPDATE_WDS_ENTRY_CMDID,
+ WMI_10_2_PEER_REMOVE_WDS_ENTRY_CMDID,
+ WMI_10_2_PEER_MCAST_GROUP_CMDID,
+ WMI_10_2_BCN_TX_CMDID,
+ WMI_10_2_BCN_PRB_TMPL_CMDID,
+ WMI_10_2_BCN_FILTER_RX_CMDID,
+ WMI_10_2_PRB_REQ_FILTER_RX_CMDID,
+ WMI_10_2_MGMT_TX_CMDID,
+ WMI_10_2_ADDBA_CLEAR_RESP_CMDID,
+ WMI_10_2_ADDBA_SEND_CMDID,
+ WMI_10_2_ADDBA_STATUS_CMDID,
+ WMI_10_2_DELBA_SEND_CMDID,
+ WMI_10_2_ADDBA_SET_RESP_CMDID,
+ WMI_10_2_SEND_SINGLEAMSDU_CMDID,
+ WMI_10_2_STA_POWERSAVE_MODE_CMDID,
+ WMI_10_2_STA_POWERSAVE_PARAM_CMDID,
+ WMI_10_2_STA_MIMO_PS_MODE_CMDID,
+ WMI_10_2_DBGLOG_CFG_CMDID,
+ WMI_10_2_PDEV_DFS_ENABLE_CMDID,
+ WMI_10_2_PDEV_DFS_DISABLE_CMDID,
+ WMI_10_2_PDEV_QVIT_CMDID,
+ WMI_10_2_ROAM_SCAN_MODE,
+ WMI_10_2_ROAM_SCAN_RSSI_THRESHOLD,
+ WMI_10_2_ROAM_SCAN_PERIOD,
+ WMI_10_2_ROAM_SCAN_RSSI_CHANGE_THRESHOLD,
+ WMI_10_2_ROAM_AP_PROFILE,
+ WMI_10_2_OFL_SCAN_ADD_AP_PROFILE,
+ WMI_10_2_OFL_SCAN_REMOVE_AP_PROFILE,
+ WMI_10_2_OFL_SCAN_PERIOD,
+ WMI_10_2_P2P_DEV_SET_DEVICE_INFO,
+ WMI_10_2_P2P_DEV_SET_DISCOVERABILITY,
+ WMI_10_2_P2P_GO_SET_BEACON_IE,
+ WMI_10_2_P2P_GO_SET_PROBE_RESP_IE,
+ WMI_10_2_AP_PS_PEER_PARAM_CMDID,
+ WMI_10_2_AP_PS_PEER_UAPSD_COEX_CMDID,
+ WMI_10_2_PEER_RATE_RETRY_SCHED_CMDID,
+ WMI_10_2_WLAN_PROFILE_TRIGGER_CMDID,
+ WMI_10_2_WLAN_PROFILE_SET_HIST_INTVL_CMDID,
+ WMI_10_2_WLAN_PROFILE_GET_PROFILE_DATA_CMDID,
+ WMI_10_2_WLAN_PROFILE_ENABLE_PROFILE_ID_CMDID,
+ WMI_10_2_WLAN_PROFILE_LIST_PROFILE_ID_CMDID,
+ WMI_10_2_PDEV_SUSPEND_CMDID,
+ WMI_10_2_PDEV_RESUME_CMDID,
+ WMI_10_2_ADD_BCN_FILTER_CMDID,
+ WMI_10_2_RMV_BCN_FILTER_CMDID,
+ WMI_10_2_WOW_ADD_WAKE_PATTERN_CMDID,
+ WMI_10_2_WOW_DEL_WAKE_PATTERN_CMDID,
+ WMI_10_2_WOW_ENABLE_DISABLE_WAKE_EVENT_CMDID,
+ WMI_10_2_WOW_ENABLE_CMDID,
+ WMI_10_2_WOW_HOSTWAKEUP_FROM_SLEEP_CMDID,
+ WMI_10_2_RTT_MEASREQ_CMDID,
+ WMI_10_2_RTT_TSF_CMDID,
+ WMI_10_2_RTT_KEEPALIVE_CMDID,
+ WMI_10_2_PDEV_SEND_BCN_CMDID,
+ WMI_10_2_VDEV_SPECTRAL_SCAN_CONFIGURE_CMDID,
+ WMI_10_2_VDEV_SPECTRAL_SCAN_ENABLE_CMDID,
+ WMI_10_2_REQUEST_STATS_CMDID,
+ WMI_10_2_GPIO_CONFIG_CMDID,
+ WMI_10_2_GPIO_OUTPUT_CMDID,
+ WMI_10_2_VDEV_RATEMASK_CMDID,
+ WMI_10_2_PDEV_SMART_ANT_ENABLE_CMDID,
+ WMI_10_2_PDEV_SMART_ANT_SET_RX_ANTENNA_CMDID,
+ WMI_10_2_PEER_SMART_ANT_SET_TX_ANTENNA_CMDID,
+ WMI_10_2_PEER_SMART_ANT_SET_TRAIN_INFO_CMDID,
+ WMI_10_2_PEER_SMART_ANT_SET_NODE_CONFIG_OPS_CMDID,
+ WMI_10_2_FORCE_FW_HANG_CMDID,
+ WMI_10_2_PDEV_SET_ANTENNA_SWITCH_TABLE_CMDID,
+ WMI_10_2_PDEV_SET_CTL_TABLE_CMDID,
+ WMI_10_2_PDEV_SET_MIMOGAIN_TABLE_CMDID,
+ WMI_10_2_PDEV_RATEPWR_TABLE_CMDID,
+ WMI_10_2_PDEV_RATEPWR_CHAINMSK_TABLE_CMDID,
+ WMI_10_2_PDEV_GET_INFO,
+ WMI_10_2_VDEV_GET_INFO,
+ WMI_10_2_VDEV_ATF_REQUEST_CMDID,
+ WMI_10_2_PEER_ATF_REQUEST_CMDID,
+ WMI_10_2_PDEV_GET_TEMPERATURE_CMDID,
+ WMI_10_2_MU_CAL_START_CMDID,
+ WMI_10_2_SET_LTEU_CONFIG_CMDID,
+ WMI_10_2_SET_CCA_PARAMS,
+ WMI_10_2_PDEV_BSS_CHAN_INFO_REQUEST_CMDID,
+ WMI_10_2_PDEV_UTF_CMDID = WMI_10_2_END_CMDID - 1,
+};
+
+enum wmi_10_2_event_id {
+ WMI_10_2_SERVICE_READY_EVENTID = 0x8000,
+ WMI_10_2_READY_EVENTID,
+ WMI_10_2_DEBUG_MESG_EVENTID,
+ WMI_10_2_START_EVENTID = 0x9000,
+ WMI_10_2_END_EVENTID = 0x9FFF,
+ WMI_10_2_SCAN_EVENTID = WMI_10_2_START_EVENTID,
+ WMI_10_2_ECHO_EVENTID,
+ WMI_10_2_UPDATE_STATS_EVENTID,
+ WMI_10_2_INST_RSSI_STATS_EVENTID,
+ WMI_10_2_VDEV_START_RESP_EVENTID,
+ WMI_10_2_VDEV_STANDBY_REQ_EVENTID,
+ WMI_10_2_VDEV_RESUME_REQ_EVENTID,
+ WMI_10_2_VDEV_STOPPED_EVENTID,
+ WMI_10_2_PEER_STA_KICKOUT_EVENTID,
+ WMI_10_2_HOST_SWBA_EVENTID,
+ WMI_10_2_TBTTOFFSET_UPDATE_EVENTID,
+ WMI_10_2_MGMT_RX_EVENTID,
+ WMI_10_2_CHAN_INFO_EVENTID,
+ WMI_10_2_PHYERR_EVENTID,
+ WMI_10_2_ROAM_EVENTID,
+ WMI_10_2_PROFILE_MATCH,
+ WMI_10_2_DEBUG_PRINT_EVENTID,
+ WMI_10_2_PDEV_QVIT_EVENTID,
+ WMI_10_2_WLAN_PROFILE_DATA_EVENTID,
+ WMI_10_2_RTT_MEASUREMENT_REPORT_EVENTID,
+ WMI_10_2_TSF_MEASUREMENT_REPORT_EVENTID,
+ WMI_10_2_RTT_ERROR_REPORT_EVENTID,
+ WMI_10_2_RTT_KEEPALIVE_EVENTID,
+ WMI_10_2_WOW_WAKEUP_HOST_EVENTID,
+ WMI_10_2_DCS_INTERFERENCE_EVENTID,
+ WMI_10_2_PDEV_TPC_CONFIG_EVENTID,
+ WMI_10_2_GPIO_INPUT_EVENTID,
+ WMI_10_2_PEER_RATECODE_LIST_EVENTID,
+ WMI_10_2_GENERIC_BUFFER_EVENTID,
+ WMI_10_2_MCAST_BUF_RELEASE_EVENTID,
+ WMI_10_2_MCAST_LIST_AGEOUT_EVENTID,
+ WMI_10_2_WDS_PEER_EVENTID,
+ WMI_10_2_PEER_STA_PS_STATECHG_EVENTID,
+ WMI_10_2_PDEV_TEMPERATURE_EVENTID,
+ WMI_10_2_MU_REPORT_EVENTID,
+ WMI_10_2_PDEV_BSS_CHAN_INFO_EVENTID,
+ WMI_10_2_PDEV_UTF_EVENTID = WMI_10_2_END_EVENTID - 1,
+};
+
+enum wmi_10_4_cmd_id {
+ WMI_10_4_START_CMDID = 0x9000,
+ WMI_10_4_END_CMDID = 0x9FFF,
+ WMI_10_4_INIT_CMDID,
+ WMI_10_4_START_SCAN_CMDID = WMI_10_4_START_CMDID,
+ WMI_10_4_STOP_SCAN_CMDID,
+ WMI_10_4_SCAN_CHAN_LIST_CMDID,
+ WMI_10_4_SCAN_SCH_PRIO_TBL_CMDID,
+ WMI_10_4_SCAN_UPDATE_REQUEST_CMDID,
+ WMI_10_4_ECHO_CMDID,
+ WMI_10_4_PDEV_SET_REGDOMAIN_CMDID,
+ WMI_10_4_PDEV_SET_CHANNEL_CMDID,
+ WMI_10_4_PDEV_SET_PARAM_CMDID,
+ WMI_10_4_PDEV_PKTLOG_ENABLE_CMDID,
+ WMI_10_4_PDEV_PKTLOG_DISABLE_CMDID,
+ WMI_10_4_PDEV_SET_WMM_PARAMS_CMDID,
+ WMI_10_4_PDEV_SET_HT_CAP_IE_CMDID,
+ WMI_10_4_PDEV_SET_VHT_CAP_IE_CMDID,
+ WMI_10_4_PDEV_SET_BASE_MACADDR_CMDID,
+ WMI_10_4_PDEV_SET_DSCP_TID_MAP_CMDID,
+ WMI_10_4_PDEV_SET_QUIET_MODE_CMDID,
+ WMI_10_4_PDEV_GREEN_AP_PS_ENABLE_CMDID,
+ WMI_10_4_PDEV_GET_TPC_CONFIG_CMDID,
+ WMI_10_4_VDEV_CREATE_CMDID,
+ WMI_10_4_VDEV_DELETE_CMDID,
+ WMI_10_4_VDEV_START_REQUEST_CMDID,
+ WMI_10_4_VDEV_RESTART_REQUEST_CMDID,
+ WMI_10_4_VDEV_UP_CMDID,
+ WMI_10_4_VDEV_STOP_CMDID,
+ WMI_10_4_VDEV_DOWN_CMDID,
+ WMI_10_4_VDEV_STANDBY_RESPONSE_CMDID,
+ WMI_10_4_VDEV_RESUME_RESPONSE_CMDID,
+ WMI_10_4_VDEV_SET_PARAM_CMDID,
+ WMI_10_4_VDEV_INSTALL_KEY_CMDID,
+ WMI_10_4_WLAN_PEER_CACHING_ADD_PEER_CMDID,
+ WMI_10_4_WLAN_PEER_CACHING_EVICT_PEER_CMDID,
+ WMI_10_4_WLAN_PEER_CACHING_RESTORE_PEER_CMDID,
+ WMI_10_4_WLAN_PEER_CACHING_PRINT_ALL_PEERS_INFO_CMDID,
+ WMI_10_4_PEER_CREATE_CMDID,
+ WMI_10_4_PEER_DELETE_CMDID,
+ WMI_10_4_PEER_FLUSH_TIDS_CMDID,
+ WMI_10_4_PEER_SET_PARAM_CMDID,
+ WMI_10_4_PEER_ASSOC_CMDID,
+ WMI_10_4_PEER_ADD_WDS_ENTRY_CMDID,
+ WMI_10_4_PEER_UPDATE_WDS_ENTRY_CMDID,
+ WMI_10_4_PEER_REMOVE_WDS_ENTRY_CMDID,
+ WMI_10_4_PEER_ADD_PROXY_STA_ENTRY_CMDID,
+ WMI_10_4_PEER_MCAST_GROUP_CMDID,
+ WMI_10_4_BCN_TX_CMDID,
+ WMI_10_4_PDEV_SEND_BCN_CMDID,
+ WMI_10_4_BCN_PRB_TMPL_CMDID,
+ WMI_10_4_BCN_FILTER_RX_CMDID,
+ WMI_10_4_PRB_REQ_FILTER_RX_CMDID,
+ WMI_10_4_MGMT_TX_CMDID,
+ WMI_10_4_PRB_TMPL_CMDID,
+ WMI_10_4_ADDBA_CLEAR_RESP_CMDID,
+ WMI_10_4_ADDBA_SEND_CMDID,
+ WMI_10_4_ADDBA_STATUS_CMDID,
+ WMI_10_4_DELBA_SEND_CMDID,
+ WMI_10_4_ADDBA_SET_RESP_CMDID,
+ WMI_10_4_SEND_SINGLEAMSDU_CMDID,
+ WMI_10_4_STA_POWERSAVE_MODE_CMDID,
+ WMI_10_4_STA_POWERSAVE_PARAM_CMDID,
+ WMI_10_4_STA_MIMO_PS_MODE_CMDID,
+ WMI_10_4_DBGLOG_CFG_CMDID,
+ WMI_10_4_PDEV_DFS_ENABLE_CMDID,
+ WMI_10_4_PDEV_DFS_DISABLE_CMDID,
+ WMI_10_4_PDEV_QVIT_CMDID,
+ WMI_10_4_ROAM_SCAN_MODE,
+ WMI_10_4_ROAM_SCAN_RSSI_THRESHOLD,
+ WMI_10_4_ROAM_SCAN_PERIOD,
+ WMI_10_4_ROAM_SCAN_RSSI_CHANGE_THRESHOLD,
+ WMI_10_4_ROAM_AP_PROFILE,
+ WMI_10_4_OFL_SCAN_ADD_AP_PROFILE,
+ WMI_10_4_OFL_SCAN_REMOVE_AP_PROFILE,
+ WMI_10_4_OFL_SCAN_PERIOD,
+ WMI_10_4_P2P_DEV_SET_DEVICE_INFO,
+ WMI_10_4_P2P_DEV_SET_DISCOVERABILITY,
+ WMI_10_4_P2P_GO_SET_BEACON_IE,
+ WMI_10_4_P2P_GO_SET_PROBE_RESP_IE,
+ WMI_10_4_P2P_SET_VENDOR_IE_DATA_CMDID,
+ WMI_10_4_AP_PS_PEER_PARAM_CMDID,
+ WMI_10_4_AP_PS_PEER_UAPSD_COEX_CMDID,
+ WMI_10_4_PEER_RATE_RETRY_SCHED_CMDID,
+ WMI_10_4_WLAN_PROFILE_TRIGGER_CMDID,
+ WMI_10_4_WLAN_PROFILE_SET_HIST_INTVL_CMDID,
+ WMI_10_4_WLAN_PROFILE_GET_PROFILE_DATA_CMDID,
+ WMI_10_4_WLAN_PROFILE_ENABLE_PROFILE_ID_CMDID,
+ WMI_10_4_WLAN_PROFILE_LIST_PROFILE_ID_CMDID,
+ WMI_10_4_PDEV_SUSPEND_CMDID,
+ WMI_10_4_PDEV_RESUME_CMDID,
+ WMI_10_4_ADD_BCN_FILTER_CMDID,
+ WMI_10_4_RMV_BCN_FILTER_CMDID,
+ WMI_10_4_WOW_ADD_WAKE_PATTERN_CMDID,
+ WMI_10_4_WOW_DEL_WAKE_PATTERN_CMDID,
+ WMI_10_4_WOW_ENABLE_DISABLE_WAKE_EVENT_CMDID,
+ WMI_10_4_WOW_ENABLE_CMDID,
+ WMI_10_4_WOW_HOSTWAKEUP_FROM_SLEEP_CMDID,
+ WMI_10_4_RTT_MEASREQ_CMDID,
+ WMI_10_4_RTT_TSF_CMDID,
+ WMI_10_4_RTT_KEEPALIVE_CMDID,
+ WMI_10_4_OEM_REQ_CMDID,
+ WMI_10_4_NAN_CMDID,
+ WMI_10_4_VDEV_SPECTRAL_SCAN_CONFIGURE_CMDID,
+ WMI_10_4_VDEV_SPECTRAL_SCAN_ENABLE_CMDID,
+ WMI_10_4_REQUEST_STATS_CMDID,
+ WMI_10_4_GPIO_CONFIG_CMDID,
+ WMI_10_4_GPIO_OUTPUT_CMDID,
+ WMI_10_4_VDEV_RATEMASK_CMDID,
+ WMI_10_4_CSA_OFFLOAD_ENABLE_CMDID,
+ WMI_10_4_GTK_OFFLOAD_CMDID,
+ WMI_10_4_QBOOST_CFG_CMDID,
+ WMI_10_4_CSA_OFFLOAD_CHANSWITCH_CMDID,
+ WMI_10_4_PDEV_SMART_ANT_ENABLE_CMDID,
+ WMI_10_4_PDEV_SMART_ANT_SET_RX_ANTENNA_CMDID,
+ WMI_10_4_PEER_SMART_ANT_SET_TX_ANTENNA_CMDID,
+ WMI_10_4_PEER_SMART_ANT_SET_TRAIN_INFO_CMDID,
+ WMI_10_4_PEER_SMART_ANT_SET_NODE_CONFIG_OPS_CMDID,
+ WMI_10_4_VDEV_SET_KEEPALIVE_CMDID,
+ WMI_10_4_VDEV_GET_KEEPALIVE_CMDID,
+ WMI_10_4_FORCE_FW_HANG_CMDID,
+ WMI_10_4_PDEV_SET_ANTENNA_SWITCH_TABLE_CMDID,
+ WMI_10_4_PDEV_SET_CTL_TABLE_CMDID,
+ WMI_10_4_PDEV_SET_MIMOGAIN_TABLE_CMDID,
+ WMI_10_4_PDEV_RATEPWR_TABLE_CMDID,
+ WMI_10_4_PDEV_RATEPWR_CHAINMSK_TABLE_CMDID,
+ WMI_10_4_PDEV_FIPS_CMDID,
+ WMI_10_4_TT_SET_CONF_CMDID,
+ WMI_10_4_FWTEST_CMDID,
+ WMI_10_4_VDEV_ATF_REQUEST_CMDID,
+ WMI_10_4_PEER_ATF_REQUEST_CMDID,
+ WMI_10_4_PDEV_GET_ANI_CCK_CONFIG_CMDID,
+ WMI_10_4_PDEV_GET_ANI_OFDM_CONFIG_CMDID,
+ WMI_10_4_PDEV_RESERVE_AST_ENTRY_CMDID,
+ WMI_10_4_PDEV_GET_NFCAL_POWER_CMDID,
+ WMI_10_4_PDEV_GET_TPC_CMDID,
+ WMI_10_4_PDEV_GET_AST_INFO_CMDID,
+ WMI_10_4_VDEV_SET_DSCP_TID_MAP_CMDID,
+ WMI_10_4_PDEV_GET_TEMPERATURE_CMDID,
+ WMI_10_4_PDEV_GET_INFO_CMDID,
+ WMI_10_4_VDEV_GET_INFO_CMDID,
+ WMI_10_4_VDEV_FILTER_NEIGHBOR_RX_PACKETS_CMDID,
+ WMI_10_4_MU_CAL_START_CMDID,
+ WMI_10_4_SET_CCA_PARAMS_CMDID,
+ WMI_10_4_PDEV_BSS_CHAN_INFO_REQUEST_CMDID,
+ WMI_10_4_EXT_RESOURCE_CFG_CMDID,
+ WMI_10_4_VDEV_SET_IE_CMDID,
+ WMI_10_4_SET_LTEU_CONFIG_CMDID,
+ WMI_10_4_ATF_SSID_GROUPING_REQUEST_CMDID,
+ WMI_10_4_PEER_ATF_EXT_REQUEST_CMDID,
+ WMI_10_4_SET_PERIODIC_CHANNEL_STATS_CONFIG,
+ WMI_10_4_PEER_BWF_REQUEST_CMDID,
+ WMI_10_4_BTCOEX_CFG_CMDID,
+ WMI_10_4_PEER_TX_MU_TXMIT_COUNT_CMDID,
+ WMI_10_4_PEER_TX_MU_TXMIT_RSTCNT_CMDID,
+ WMI_10_4_PEER_GID_USERPOS_LIST_CMDID,
+ WMI_10_4_PDEV_CHECK_CAL_VERSION_CMDID,
+ WMI_10_4_COEX_VERSION_CFG_CMID,
+ WMI_10_4_PDEV_GET_RX_FILTER_CMDID,
+ WMI_10_4_PDEV_EXTENDED_NSS_CFG_CMDID,
+ WMI_10_4_VDEV_SET_SCAN_NAC_RSSI_CMDID,
+ WMI_10_4_PROG_GPIO_BAND_SELECT_CMDID,
+ WMI_10_4_CONFIG_SMART_LOGGING_CMDID,
+ WMI_10_4_DEBUG_FATAL_CONDITION_CMDID,
+ WMI_10_4_GET_TSF_TIMER_CMDID,
+ WMI_10_4_PDEV_GET_TPC_TABLE_CMDID,
+ WMI_10_4_VDEV_SIFS_TRIGGER_TIME_CMDID,
+ WMI_10_4_PDEV_WDS_ENTRY_LIST_CMDID,
+ WMI_10_4_TDLS_SET_STATE_CMDID,
+ WMI_10_4_TDLS_PEER_UPDATE_CMDID,
+ WMI_10_4_TDLS_SET_OFFCHAN_MODE_CMDID,
+ WMI_10_4_PDEV_SEND_FD_CMDID,
+ WMI_10_4_ENABLE_FILS_CMDID,
+ WMI_10_4_PDEV_SET_BRIDGE_MACADDR_CMDID,
+ WMI_10_4_ATF_GROUP_WMM_AC_CONFIG_REQUEST_CMDID,
+ WMI_10_4_RADAR_FOUND_CMDID,
+ WMI_10_4_PDEV_UTF_CMDID = WMI_10_4_END_CMDID - 1,
+};
+
+enum wmi_10_4_event_id {
+ WMI_10_4_SERVICE_READY_EVENTID = 0x8000,
+ WMI_10_4_READY_EVENTID,
+ WMI_10_4_DEBUG_MESG_EVENTID,
+ WMI_10_4_START_EVENTID = 0x9000,
+ WMI_10_4_END_EVENTID = 0x9FFF,
+ WMI_10_4_SCAN_EVENTID = WMI_10_4_START_EVENTID,
+ WMI_10_4_ECHO_EVENTID,
+ WMI_10_4_UPDATE_STATS_EVENTID,
+ WMI_10_4_INST_RSSI_STATS_EVENTID,
+ WMI_10_4_VDEV_START_RESP_EVENTID,
+ WMI_10_4_VDEV_STANDBY_REQ_EVENTID,
+ WMI_10_4_VDEV_RESUME_REQ_EVENTID,
+ WMI_10_4_VDEV_STOPPED_EVENTID,
+ WMI_10_4_PEER_STA_KICKOUT_EVENTID,
+ WMI_10_4_HOST_SWBA_EVENTID,
+ WMI_10_4_TBTTOFFSET_UPDATE_EVENTID,
+ WMI_10_4_MGMT_RX_EVENTID,
+ WMI_10_4_CHAN_INFO_EVENTID,
+ WMI_10_4_PHYERR_EVENTID,
+ WMI_10_4_ROAM_EVENTID,
+ WMI_10_4_PROFILE_MATCH,
+ WMI_10_4_DEBUG_PRINT_EVENTID,
+ WMI_10_4_PDEV_QVIT_EVENTID,
+ WMI_10_4_WLAN_PROFILE_DATA_EVENTID,
+ WMI_10_4_RTT_MEASUREMENT_REPORT_EVENTID,
+ WMI_10_4_TSF_MEASUREMENT_REPORT_EVENTID,
+ WMI_10_4_RTT_ERROR_REPORT_EVENTID,
+ WMI_10_4_RTT_KEEPALIVE_EVENTID,
+ WMI_10_4_OEM_CAPABILITY_EVENTID,
+ WMI_10_4_OEM_MEASUREMENT_REPORT_EVENTID,
+ WMI_10_4_OEM_ERROR_REPORT_EVENTID,
+ WMI_10_4_NAN_EVENTID,
+ WMI_10_4_WOW_WAKEUP_HOST_EVENTID,
+ WMI_10_4_GTK_OFFLOAD_STATUS_EVENTID,
+ WMI_10_4_GTK_REKEY_FAIL_EVENTID,
+ WMI_10_4_DCS_INTERFERENCE_EVENTID,
+ WMI_10_4_PDEV_TPC_CONFIG_EVENTID,
+ WMI_10_4_CSA_HANDLING_EVENTID,
+ WMI_10_4_GPIO_INPUT_EVENTID,
+ WMI_10_4_PEER_RATECODE_LIST_EVENTID,
+ WMI_10_4_GENERIC_BUFFER_EVENTID,
+ WMI_10_4_MCAST_BUF_RELEASE_EVENTID,
+ WMI_10_4_MCAST_LIST_AGEOUT_EVENTID,
+ WMI_10_4_VDEV_GET_KEEPALIVE_EVENTID,
+ WMI_10_4_WDS_PEER_EVENTID,
+ WMI_10_4_PEER_STA_PS_STATECHG_EVENTID,
+ WMI_10_4_PDEV_FIPS_EVENTID,
+ WMI_10_4_TT_STATS_EVENTID,
+ WMI_10_4_PDEV_CHANNEL_HOPPING_EVENTID,
+ WMI_10_4_PDEV_ANI_CCK_LEVEL_EVENTID,
+ WMI_10_4_PDEV_ANI_OFDM_LEVEL_EVENTID,
+ WMI_10_4_PDEV_RESERVE_AST_ENTRY_EVENTID,
+ WMI_10_4_PDEV_NFCAL_POWER_EVENTID,
+ WMI_10_4_PDEV_TPC_EVENTID,
+ WMI_10_4_PDEV_GET_AST_INFO_EVENTID,
+ WMI_10_4_PDEV_TEMPERATURE_EVENTID,
+ WMI_10_4_PDEV_NFCAL_POWER_ALL_CHANNELS_EVENTID,
+ WMI_10_4_PDEV_BSS_CHAN_INFO_EVENTID,
+ WMI_10_4_MU_REPORT_EVENTID,
+ WMI_10_4_TX_DATA_TRAFFIC_CTRL_EVENTID,
+ WMI_10_4_PEER_TX_MU_TXMIT_COUNT_EVENTID,
+ WMI_10_4_PEER_GID_USERPOS_LIST_EVENTID,
+ WMI_10_4_PDEV_CHECK_CAL_VERSION_EVENTID,
+ WMI_10_4_ATF_PEER_STATS_EVENTID,
+ WMI_10_4_PDEV_GET_RX_FILTER_EVENTID,
+ WMI_10_4_NAC_RSSI_EVENTID,
+ WMI_10_4_DEBUG_FATAL_CONDITION_EVENTID,
+ WMI_10_4_GET_TSF_TIMER_RESP_EVENTID,
+ WMI_10_4_PDEV_TPC_TABLE_EVENTID,
+ WMI_10_4_PDEV_WDS_ENTRY_LIST_EVENTID,
+ WMI_10_4_TDLS_PEER_EVENTID,
+ WMI_10_4_HOST_SWFDA_EVENTID,
+ WMI_10_4_ESP_ESTIMATE_EVENTID,
+ WMI_10_4_DFS_STATUS_CHECK_EVENTID,
+ WMI_10_4_PDEV_UTF_EVENTID = WMI_10_4_END_EVENTID - 1,
+};
+
+enum wmi_phy_mode {
+ MODE_11A = 0, /* 11a Mode */
+ MODE_11G = 1, /* 11b/g Mode */
+ MODE_11B = 2, /* 11b Mode */
+ MODE_11GONLY = 3, /* 11g only Mode */
+ MODE_11NA_HT20 = 4, /* 11a HT20 mode */
+ MODE_11NG_HT20 = 5, /* 11g HT20 mode */
+ MODE_11NA_HT40 = 6, /* 11a HT40 mode */
+ MODE_11NG_HT40 = 7, /* 11g HT40 mode */
+ MODE_11AC_VHT20 = 8,
+ MODE_11AC_VHT40 = 9,
+ MODE_11AC_VHT80 = 10,
+ /* MODE_11AC_VHT160 = 11, */
+ MODE_11AC_VHT20_2G = 11,
+ MODE_11AC_VHT40_2G = 12,
+ MODE_11AC_VHT80_2G = 13,
+ MODE_11AC_VHT80_80 = 14,
+ MODE_11AC_VHT160 = 15,
+ MODE_UNKNOWN = 16,
+ MODE_MAX = 16
+};
+
+static inline const char *ath10k_wmi_phymode_str(enum wmi_phy_mode mode)
+{
+ switch (mode) {
+ case MODE_11A:
+ return "11a";
+ case MODE_11G:
+ return "11g";
+ case MODE_11B:
+ return "11b";
+ case MODE_11GONLY:
+ return "11gonly";
+ case MODE_11NA_HT20:
+ return "11na-ht20";
+ case MODE_11NG_HT20:
+ return "11ng-ht20";
+ case MODE_11NA_HT40:
+ return "11na-ht40";
+ case MODE_11NG_HT40:
+ return "11ng-ht40";
+ case MODE_11AC_VHT20:
+ return "11ac-vht20";
+ case MODE_11AC_VHT40:
+ return "11ac-vht40";
+ case MODE_11AC_VHT80:
+ return "11ac-vht80";
+ case MODE_11AC_VHT160:
+ return "11ac-vht160";
+ case MODE_11AC_VHT80_80:
+ return "11ac-vht80+80";
+ case MODE_11AC_VHT20_2G:
+ return "11ac-vht20-2g";
+ case MODE_11AC_VHT40_2G:
+ return "11ac-vht40-2g";
+ case MODE_11AC_VHT80_2G:
+ return "11ac-vht80-2g";
+ case MODE_UNKNOWN:
+ /* skip */
+ break;
+
+ /* no default handler to allow compiler to check that the
+ * enum is fully handled
+ */
+	}
+
+ return "<unknown>";
+}
+
+#define WMI_CHAN_LIST_TAG 0x1
+#define WMI_SSID_LIST_TAG 0x2
+#define WMI_BSSID_LIST_TAG 0x3
+#define WMI_IE_TAG 0x4
+
+struct wmi_channel {
+ __le32 mhz;
+ __le32 band_center_freq1;
+ __le32 band_center_freq2; /* valid for 11ac, 80plus80 */
+ union {
+ __le32 flags; /* WMI_CHAN_FLAG_ */
+ struct {
+ u8 mode; /* only 6 LSBs */
+ } __packed;
+ } __packed;
+ union {
+ __le32 reginfo0;
+ struct {
+ /* note: power unit is 0.5 dBm */
+ u8 min_power;
+ u8 max_power;
+ u8 reg_power;
+ u8 reg_classid;
+ } __packed;
+ } __packed;
+ union {
+ __le32 reginfo1;
+ struct {
+ /* note: power unit is 1 dBm */
+ u8 antenna_max;
+ /* note: power unit is 0.5 dBm */
+ u8 max_tx_power;
+ } __packed;
+ } __packed;
+} __packed;
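+
+/* Illustrative fill for a 5 GHz HT40+ channel (values hypothetical;
+ * flag masks are defined further below): primary channel 36 at
+ * 5180 MHz bonded with channel 40 gives a 5190 MHz segment centre,
+ * and 'mode' occupies the six LSBs of the 'flags' word:
+ *
+ *	ch.mhz = __cpu_to_le32(5180);
+ *	ch.band_center_freq1 = __cpu_to_le32(5190);
+ *	ch.band_center_freq2 = 0;
+ *	ch.mode = MODE_11NA_HT40;
+ *	ch.flags |= __cpu_to_le32(WMI_CHAN_FLAG_HT40_PLUS |
+ *				  WMI_CHAN_FLAG_ALLOW_HT);
+ */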
+
+struct wmi_channel_arg {
+ u32 freq;
+ u32 band_center_freq1;
+ u32 band_center_freq2;
+ bool passive;
+ bool allow_ibss;
+ bool allow_ht;
+ bool allow_vht;
+ bool ht40plus;
+ bool chan_radar;
+ /* note: power unit is 0.5 dBm */
+ u32 min_power;
+ u32 max_power;
+ u32 max_reg_power;
+ /* note: power unit is 1 dBm */
+ u32 max_antenna_gain;
+ u32 reg_class_id;
+ enum wmi_phy_mode mode;
+};
+
+enum wmi_channel_change_cause {
+ WMI_CHANNEL_CHANGE_CAUSE_NONE = 0,
+ WMI_CHANNEL_CHANGE_CAUSE_CSA,
+};
+
+#define WMI_CHAN_FLAG_HT40_PLUS (1 << 6)
+#define WMI_CHAN_FLAG_PASSIVE (1 << 7)
+#define WMI_CHAN_FLAG_ADHOC_ALLOWED (1 << 8)
+#define WMI_CHAN_FLAG_AP_DISABLED (1 << 9)
+#define WMI_CHAN_FLAG_DFS (1 << 10)
+#define WMI_CHAN_FLAG_ALLOW_HT (1 << 11)
+#define WMI_CHAN_FLAG_ALLOW_VHT (1 << 12)
+
+/* Indicate reason for channel switch */
+#define WMI_CHANNEL_CHANGE_CAUSE_CSA (1 << 13)
+
+#define WMI_MAX_SPATIAL_STREAM 3 /* default max ss */
+
+/* HT Capabilities */
+#define WMI_HT_CAP_ENABLED 0x0001 /* HT Enabled/ disabled */
+#define WMI_HT_CAP_HT20_SGI 0x0002 /* Short Guard Interval with HT20 */
+#define WMI_HT_CAP_DYNAMIC_SMPS 0x0004 /* Dynamic MIMO powersave */
+#define WMI_HT_CAP_TX_STBC 0x0008 /* B3 TX STBC */
+#define WMI_HT_CAP_TX_STBC_MASK_SHIFT 3
+#define WMI_HT_CAP_RX_STBC 0x0030 /* B4-B5 RX STBC */
+#define WMI_HT_CAP_RX_STBC_MASK_SHIFT 4
+#define WMI_HT_CAP_LDPC 0x0040 /* LDPC supported */
+#define WMI_HT_CAP_L_SIG_TXOP_PROT 0x0080 /* L-SIG TXOP Protection */
+#define WMI_HT_CAP_MPDU_DENSITY 0x0700 /* MPDU Density */
+#define WMI_HT_CAP_MPDU_DENSITY_MASK_SHIFT 8
+#define WMI_HT_CAP_HT40_SGI 0x0800
+
+#define WMI_HT_CAP_DEFAULT_ALL (WMI_HT_CAP_ENABLED | \
+ WMI_HT_CAP_HT20_SGI | \
+ WMI_HT_CAP_HT40_SGI | \
+ WMI_HT_CAP_TX_STBC | \
+ WMI_HT_CAP_RX_STBC | \
+ WMI_HT_CAP_LDPC)
+
+/*
+ * WMI_VHT_CAP_* flags map to the IEEE 802.11ac VHT capability
+ * information field. The fields not defined here are not supported, or
+ * reserved. Do not change these masks, and if you have to add a new
+ * one, follow the bit positions specified by the 802.11ac draft.
+ */
+
+#define WMI_VHT_CAP_MAX_MPDU_LEN_MASK 0x00000003
+#define WMI_VHT_CAP_RX_LDPC 0x00000010
+#define WMI_VHT_CAP_SGI_80MHZ 0x00000020
+#define WMI_VHT_CAP_SGI_160MHZ 0x00000040
+#define WMI_VHT_CAP_TX_STBC 0x00000080
+#define WMI_VHT_CAP_RX_STBC_MASK 0x00000300
+#define WMI_VHT_CAP_RX_STBC_MASK_SHIFT 8
+#define WMI_VHT_CAP_SU_BFER 0x00000800
+#define WMI_VHT_CAP_SU_BFEE 0x00001000
+#define WMI_VHT_CAP_MAX_CS_ANT_MASK 0x0000E000
+#define WMI_VHT_CAP_MAX_CS_ANT_MASK_SHIFT 13
+#define WMI_VHT_CAP_MAX_SND_DIM_MASK 0x00070000
+#define WMI_VHT_CAP_MAX_SND_DIM_MASK_SHIFT 16
+#define WMI_VHT_CAP_MU_BFER 0x00080000
+#define WMI_VHT_CAP_MU_BFEE 0x00100000
+#define WMI_VHT_CAP_MAX_AMPDU_LEN_EXP 0x03800000
+#define WMI_VHT_CAP_MAX_AMPDU_LEN_EXP_SHIFT 23
+#define WMI_VHT_CAP_RX_FIXED_ANT 0x10000000
+#define WMI_VHT_CAP_TX_FIXED_ANT 0x20000000
+
+/* The following values also apply to the max HT A-MSDU */
+#define WMI_VHT_CAP_MAX_MPDU_LEN_3839 0x00000000
+#define WMI_VHT_CAP_MAX_MPDU_LEN_7935 0x00000001
+#define WMI_VHT_CAP_MAX_MPDU_LEN_11454 0x00000002
+
+#define WMI_VHT_CAP_DEFAULT_ALL (WMI_VHT_CAP_MAX_MPDU_LEN_11454 | \
+ WMI_VHT_CAP_RX_LDPC | \
+ WMI_VHT_CAP_SGI_80MHZ | \
+ WMI_VHT_CAP_TX_STBC | \
+ WMI_VHT_CAP_RX_STBC_MASK | \
+ WMI_VHT_CAP_MAX_AMPDU_LEN_EXP | \
+ WMI_VHT_CAP_RX_FIXED_ANT | \
+ WMI_VHT_CAP_TX_FIXED_ANT)
+
+/*
+ * Interested readers should refer to the Rx/Tx MCS Map definition in
+ * 802.11ac.
+ */
+#define WMI_VHT_MAX_MCS_4_SS_MASK(r, ss) ((3 & (r)) << (((ss) - 1) << 1))
+#define WMI_VHT_MAX_SUPP_RATE_MASK 0x1fff0000
+#define WMI_VHT_MAX_SUPP_RATE_MASK_SHIFT 16
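+
+/*
+ * Illustrative sketch (editor's example): WMI_VHT_MAX_MCS_4_SS_MASK()
+ * packs a 2-bit max-MCS value per spatial stream, matching the
+ * 802.11ac Rx/Tx MCS Map layout. E.g. advertising MCS 0-9 (value 2)
+ * on two streams and "not supported" (value 3) on the third:
+ *
+ *	u32 mcs_map = WMI_VHT_MAX_MCS_4_SS_MASK(2, 1) |
+ *		      WMI_VHT_MAX_MCS_4_SS_MASK(2, 2) |
+ *		      WMI_VHT_MAX_MCS_4_SS_MASK(3, 3);
+ */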
+
+enum {
+ REGDMN_MODE_11A = 0x00001, /* 11a channels */
+ REGDMN_MODE_TURBO = 0x00002, /* 11a turbo-only channels */
+ REGDMN_MODE_11B = 0x00004, /* 11b channels */
+ REGDMN_MODE_PUREG = 0x00008, /* 11g channels (OFDM only) */
+ REGDMN_MODE_11G = 0x00008, /* XXX historical */
+ REGDMN_MODE_108G = 0x00020, /* 11a+Turbo channels */
+ REGDMN_MODE_108A = 0x00040, /* 11g+Turbo channels */
+ REGDMN_MODE_XR = 0x00100, /* XR channels */
+ REGDMN_MODE_11A_HALF_RATE = 0x00200, /* 11A half rate channels */
+ REGDMN_MODE_11A_QUARTER_RATE = 0x00400, /* 11A quarter rate channels */
+ REGDMN_MODE_11NG_HT20 = 0x00800, /* 11N-G HT20 channels */
+ REGDMN_MODE_11NA_HT20 = 0x01000, /* 11N-A HT20 channels */
+ REGDMN_MODE_11NG_HT40PLUS = 0x02000, /* 11N-G HT40 + channels */
+ REGDMN_MODE_11NG_HT40MINUS = 0x04000, /* 11N-G HT40 - channels */
+ REGDMN_MODE_11NA_HT40PLUS = 0x08000, /* 11N-A HT40 + channels */
+ REGDMN_MODE_11NA_HT40MINUS = 0x10000, /* 11N-A HT40 - channels */
+ REGDMN_MODE_11AC_VHT20 = 0x20000, /* 5Ghz, VHT20 */
+ REGDMN_MODE_11AC_VHT40PLUS = 0x40000, /* 5Ghz, VHT40 + channels */
+ REGDMN_MODE_11AC_VHT40MINUS = 0x80000, /* 5Ghz VHT40 - channels */
+ REGDMN_MODE_11AC_VHT80 = 0x100000, /* 5Ghz, VHT80 channels */
+ REGDMN_MODE_11AC_VHT160 = 0x200000, /* 5Ghz, VHT160 channels */
+ REGDMN_MODE_11AC_VHT80_80 = 0x400000, /* 5Ghz, VHT80+80 channels */
+ REGDMN_MODE_ALL = 0xffffffff
+};
+
+#define REGDMN_CAP1_CHAN_HALF_RATE 0x00000001
+#define REGDMN_CAP1_CHAN_QUARTER_RATE 0x00000002
+#define REGDMN_CAP1_CHAN_HAL49GHZ 0x00000004
+
+/* regulatory capabilities */
+#define REGDMN_EEPROM_EEREGCAP_EN_FCC_MIDBAND 0x0040
+#define REGDMN_EEPROM_EEREGCAP_EN_KK_U1_EVEN 0x0080
+#define REGDMN_EEPROM_EEREGCAP_EN_KK_U2 0x0100
+#define REGDMN_EEPROM_EEREGCAP_EN_KK_MIDBAND 0x0200
+#define REGDMN_EEPROM_EEREGCAP_EN_KK_U1_ODD 0x0400
+#define REGDMN_EEPROM_EEREGCAP_EN_KK_NEW_11A 0x0800
+
+struct hal_reg_capabilities {
+ /* regdomain value specified in EEPROM */
+ __le32 eeprom_rd;
+ /* regdomain */
+ __le32 eeprom_rd_ext;
+ /* CAP1 capabilities bit map. */
+ __le32 regcap1;
+ /* REGDMN EEPROM CAP. */
+ __le32 regcap2;
+ /* REGDMN MODE */
+ __le32 wireless_modes;
+ __le32 low_2ghz_chan;
+ __le32 high_2ghz_chan;
+ __le32 low_5ghz_chan;
+ __le32 high_5ghz_chan;
+} __packed;
+
+enum wlan_mode_capability {
+ WHAL_WLAN_11A_CAPABILITY = 0x1,
+ WHAL_WLAN_11G_CAPABILITY = 0x2,
+ WHAL_WLAN_11AG_CAPABILITY = 0x3,
+};
+
+/* structure used by FW for requesting host memory */
+struct wlan_host_mem_req {
+ /* ID of the request */
+ __le32 req_id;
+ /* size of each unit */
+ __le32 unit_size;
+ /* flags to indicate that
+ * the number of units is dependent
+ * on the number of resources (num vdevs, num peers, etc.)
+ */
+ __le32 num_unit_info;
+ /*
+ * actual number of units to allocate. If the flags in num_unit_info
+ * indicate that the number of units is tied to the number of a
+ * particular resource to allocate, then the num_units field is set
+ * to 0 and the host will derive the number of units from the number
+ * of resources it is requesting.
+ */
+ __le32 num_units;
+} __packed;
+
+/*
+ * The following struct holds the optional payload for
+ * wmi_service_ready_event, e.g., 11ac passes some of the
+ * device capabilities to the host.
+ */
+struct wmi_service_ready_event {
+ __le32 sw_version;
+ __le32 sw_version_1;
+ __le32 abi_version;
+ /* WMI_PHY_CAPABILITY */
+ __le32 phy_capability;
+ /* Maximum number of frag table entries that SW will populate less 1 */
+ __le32 max_frag_entry;
+ __le32 wmi_service_bitmap[16];
+ __le32 num_rf_chains;
+ /*
+ * The following field is only valid for service type
+ * WMI_SERVICE_11AC
+ */
+ __le32 ht_cap_info; /* WMI HT Capability */
+ __le32 vht_cap_info; /* VHT capability info field of 802.11ac */
+ __le32 vht_supp_mcs; /* VHT Supported MCS Set field Rx/Tx same */
+ __le32 hw_min_tx_power;
+ __le32 hw_max_tx_power;
+ struct hal_reg_capabilities hal_reg_capabilities;
+ __le32 sys_cap_info;
+ __le32 min_pkt_size_enable; /* Enterprise mode short pkt enable */
+ /*
+ * Max beacon and Probe Response IE offload size
+ * (includes optional P2P IEs)
+ */
+ __le32 max_bcn_ie_size;
+ /*
+ * request to host to allocate a chunk of memory and pass it down to
+ * FW via WMI_INIT. FW uses this as FW extension memory for saving its
+ * data structures. Only valid for low latency interfaces like PCIE
+ * where FW can access this memory directly or by DMA.
+ */
+ __le32 num_mem_reqs;
+ struct wlan_host_mem_req mem_reqs[0];
+} __packed;
+
+/* This is the definition from 10.X firmware branch */
+struct wmi_10x_service_ready_event {
+ __le32 sw_version;
+ __le32 abi_version;
+
+ /* WMI_PHY_CAPABILITY */
+ __le32 phy_capability;
+
+ /* Maximum number of frag table entries that SW will populate less 1 */
+ __le32 max_frag_entry;
+ __le32 wmi_service_bitmap[16];
+ __le32 num_rf_chains;
+
+ /*
+ * The following field is only valid for service type
+ * WMI_SERVICE_11AC
+ */
+ __le32 ht_cap_info; /* WMI HT Capability */
+ __le32 vht_cap_info; /* VHT capability info field of 802.11ac */
+ __le32 vht_supp_mcs; /* VHT Supported MCS Set field Rx/Tx same */
+ __le32 hw_min_tx_power;
+ __le32 hw_max_tx_power;
+
+ struct hal_reg_capabilities hal_reg_capabilities;
+
+ __le32 sys_cap_info;
+ __le32 min_pkt_size_enable; /* Enterprise mode short pkt enable */
+
+ /*
+ * request to host to allocate a chunk of memory and pass it down to
+ * FW via WMI_INIT. FW uses this as FW extension memory for saving its
+ * data structures. Only valid for low latency interfaces like PCIE
+ * where FW can access this memory directly or by DMA.
+ */
+ __le32 num_mem_reqs;
+
+ struct wlan_host_mem_req mem_reqs[0];
+} __packed;
+
+#define WMI_SERVICE_READY_TIMEOUT_HZ (5 * HZ)
+#define WMI_UNIFIED_READY_TIMEOUT_HZ (5 * HZ)
+
+struct wmi_ready_event {
+ __le32 sw_version;
+ __le32 abi_version;
+ struct wmi_mac_addr mac_addr;
+ __le32 status;
+} __packed;
+
+struct wmi_resource_config {
+ /* number of virtual devices (VAPs) to support */
+ __le32 num_vdevs;
+
+ /* number of peer nodes to support */
+ __le32 num_peers;
+
+ /*
+ * In offload mode target supports features like WOW, chatter and
+ * other protocol offloads. In order to support them some
+ * functionalities like reorder buffering, PN checking need to be
+ * done in target. This determines maximum number of peers supported
+ * by target in offload mode
+ */
+ __le32 num_offload_peers;
+
+ /* For target-based RX reordering */
+ __le32 num_offload_reorder_bufs;
+
+ /* number of keys per peer */
+ __le32 num_peer_keys;
+
+ /* total number of TX/RX data TIDs */
+ __le32 num_tids;
+
+ /*
+ * max skid for resolving hash collisions
+ *
+ * The address search table is sparse, so that if two MAC addresses
+ * result in the same hash value, the second of these conflicting
+ * entries can slide to the next index in the address search table,
+ * and use it, if it is unoccupied. This ast_skid_limit parameter
+ * specifies the upper bound on how many subsequent indices to search
+ * over to find an unoccupied space.
+ */
+ __le32 ast_skid_limit;
+
+ /*
+ * the nominal chain mask for transmit
+ *
+ * The chain mask may be modified dynamically, e.g. to operate AP
+ * tx with a reduced number of chains if no clients are associated.
+ * This configuration parameter specifies the nominal chain-mask that
+ * should be used when not operating with a reduced set of tx chains.
+ */
+ __le32 tx_chain_mask;
+
+ /*
+ * the nominal chain mask for receive
+ *
+ * The chain mask may be modified dynamically, e.g. for a client
+ * to use a reduced number of chains for receive if the traffic to
+ * the client is low enough that it doesn't require downlink MIMO
+ * or antenna diversity.
+ * This configuration parameter specifies the nominal chain-mask that
+ * should be used when not operating with a reduced set of rx chains.
+ */
+ __le32 rx_chain_mask;
+
+ /*
+ * what rx reorder timeout (ms) to use for the AC
+ *
+ * Each WMM access class (voice, video, best-effort, background) will
+ * have its own timeout value to dictate how long to wait for missing
+ * rx MPDUs to arrive before flushing subsequent MPDUs that have
+ * already been received.
+ * This parameter specifies the timeout in milliseconds for each
+ * class.
+ */
+ __le32 rx_timeout_pri_vi;
+ __le32 rx_timeout_pri_vo;
+ __le32 rx_timeout_pri_be;
+ __le32 rx_timeout_pri_bk;
+
+ /*
+ * what mode the rx should decap packets to
+ *
+ * MAC can decap to RAW (no decap), native wifi or Ethernet types.
+ * This setting also determines the default TX behavior, however TX
+ * behavior can be modified on a per VAP basis during VAP init.
+ */
+ __le32 rx_decap_mode;
+
+ /* what is the maximum number of scan requests that can be queued */
+ __le32 scan_max_pending_reqs;
+
+ /* maximum VDEV that could use BMISS offload */
+ __le32 bmiss_offload_max_vdev;
+
+ /* maximum VDEV that could use offload roaming */
+ __le32 roam_offload_max_vdev;
+
+ /* maximum AP profiles that would push to offload roaming */
+ __le32 roam_offload_max_ap_profiles;
+
+ /*
+ * how many groups to use for mcast->ucast conversion
+ *
+ * The target's WAL maintains a table to hold information regarding
+ * which peers belong to a given multicast group, so that if
+ * multicast->unicast conversion is enabled, the target can convert
+ * multicast tx frames to a series of unicast tx frames, to each
+ * peer within the multicast group.
+ * This num_mcast_groups configuration parameter tells the target how
+ * many multicast groups to provide storage for within its multicast
+ * group membership table.
+ */
+ __le32 num_mcast_groups;
+
+ /*
+ * size to alloc for the mcast membership table
+ *
+ * This num_mcast_table_elems configuration parameter tells the
+ * target how many peer elements it needs to provide storage for in
+ * its multicast group membership table.
+ * These multicast group membership table elements are shared by the
+ * multicast groups stored within the table.
+ */
+ __le32 num_mcast_table_elems;
+
+ /*
+ * whether/how to do multicast->unicast conversion
+ *
+ * This configuration parameter specifies whether the target should
+ * perform multicast --> unicast conversion on transmit, and if so,
+ * what to do if it finds no entries in its multicast group
+ * membership table for the multicast IP address in the tx frame.
+ * Configuration value:
+ * 0 -> Do not perform multicast to unicast conversion.
+ * 1 -> Convert multicast frames to unicast, if the IP multicast
+ * address from the tx frame is found in the multicast group
+ * membership table. If the IP multicast address is not found,
+ * drop the frame.
+ * 2 -> Convert multicast frames to unicast, if the IP multicast
+ * address from the tx frame is found in the multicast group
+ * membership table. If the IP multicast address is not found,
+ * transmit the frame as multicast.
+ */
+ __le32 mcast2ucast_mode;
+
+ /*
+ * how much memory to allocate for a tx PPDU dbg log
+ *
+ * This parameter controls how much memory the target will allocate
+ * to store a log of tx PPDU meta-information (how large the PPDU
+ * was, when it was sent, whether it was successful, etc.)
+ */
+ __le32 tx_dbg_log_size;
+
+ /* how many AST entries to be allocated for WDS */
+ __le32 num_wds_entries;
+
+ /*
+ * MAC DMA burst size, e.g., for a PCI target the limit can be:
+ * 0 - default, 1 - 256B
+ */
+ __le32 dma_burst_size;
+
+ /*
+ * Fixed delimiters to be inserted after every MPDU to
+ * account for interface latency to avoid underrun.
+ */
+ __le32 mac_aggr_delim;
+
+ /*
+ * determine whether target is responsible for detecting duplicate
+ * non-aggregate MPDU and timing out stale fragments.
+ *
+ * A-MPDU reordering is always performed on the target.
+ *
+ * 0: target responsible for frag timeout and dup checking
+ * 1: host responsible for frag timeout and dup checking
+ */
+ __le32 rx_skip_defrag_timeout_dup_detection_check;
+
+ /*
+ * Configuration for VoW:
+ * number of video nodes to be supported
+ * and max number of descriptors for each video link (node).
+ */
+ __le32 vow_config;
+
+ /* maximum VDEV that could use GTK offload */
+ __le32 gtk_offload_max_vdev;
+
+ /* Number of msdu descriptors target should use */
+ __le32 num_msdu_desc;
+
+ /*
+ * Max. number of Tx fragments per MSDU
+ * This parameter controls the max number of Tx fragments per MSDU.
+ * This is sent by the target as part of the WMI_SERVICE_READY event
+ * and is overridden by the OS shim as required.
+ */
+ __le32 max_frag_entries;
+} __packed;
+
+struct wmi_resource_config_10x {
+ /* number of virtual devices (VAPs) to support */
+ __le32 num_vdevs;
+
+ /* number of peer nodes to support */
+ __le32 num_peers;
+
+ /* number of keys per peer */
+ __le32 num_peer_keys;
+
+ /* total number of TX/RX data TIDs */
+ __le32 num_tids;
+
+ /*
+ * max skid for resolving hash collisions
+ *
+ * The address search table is sparse, so that if two MAC addresses
+ * result in the same hash value, the second of these conflicting
+ * entries can slide to the next index in the address search table,
+ * and use it, if it is unoccupied. This ast_skid_limit parameter
+ * specifies the upper bound on how many subsequent indices to search
+ * over to find an unoccupied space.
+ */
+ __le32 ast_skid_limit;
+
+ /*
+ * the nominal chain mask for transmit
+ *
+ * The chain mask may be modified dynamically, e.g. to operate AP
+ * tx with a reduced number of chains if no clients are associated.
+ * This configuration parameter specifies the nominal chain-mask that
+ * should be used when not operating with a reduced set of tx chains.
+ */
+ __le32 tx_chain_mask;
+
+ /*
+ * the nominal chain mask for receive
+ *
+ * The chain mask may be modified dynamically, e.g. for a client
+ * to use a reduced number of chains for receive if the traffic to
+ * the client is low enough that it doesn't require downlink MIMO
+ * or antenna diversity.
+ * This configuration parameter specifies the nominal chain-mask that
+ * should be used when not operating with a reduced set of rx chains.
+ */
+ __le32 rx_chain_mask;
+
+ /*
+ * what rx reorder timeout (ms) to use for the AC
+ *
+ * Each WMM access class (voice, video, best-effort, background) will
+ * have its own timeout value to dictate how long to wait for missing
+ * rx MPDUs to arrive before flushing subsequent MPDUs that have
+ * already been received.
+ * This parameter specifies the timeout in milliseconds for each
+ * class.
+ */
+ __le32 rx_timeout_pri_vi;
+ __le32 rx_timeout_pri_vo;
+ __le32 rx_timeout_pri_be;
+ __le32 rx_timeout_pri_bk;
+
+ /*
+ * what mode the rx should decap packets to
+ *
+ * MAC can decap to RAW (no decap), native wifi or Ethernet types.
+ * This setting also determines the default TX behavior, however TX
+ * behavior can be modified on a per VAP basis during VAP init.
+ */
+ __le32 rx_decap_mode;
+
+ /* what is the maximum number of scan requests that can be queued */
+ __le32 scan_max_pending_reqs;
+
+ /* maximum VDEV that could use BMISS offload */
+ __le32 bmiss_offload_max_vdev;
+
+ /* maximum VDEV that could use offload roaming */
+ __le32 roam_offload_max_vdev;
+
+ /* maximum AP profiles that would push to offload roaming */
+ __le32 roam_offload_max_ap_profiles;
+
+ /*
+ * how many groups to use for mcast->ucast conversion
+ *
+ * The target's WAL maintains a table to hold information regarding
+ * which peers belong to a given multicast group, so that if
+ * multicast->unicast conversion is enabled, the target can convert
+ * multicast tx frames to a series of unicast tx frames, to each
+ * peer within the multicast group.
+ * This num_mcast_groups configuration parameter tells the target how
+ * many multicast groups to provide storage for within its multicast
+ * group membership table.
+ */
+ __le32 num_mcast_groups;
+
+ /*
+ * size to alloc for the mcast membership table
+ *
+ * This num_mcast_table_elems configuration parameter tells the
+ * target how many peer elements it needs to provide storage for in
+ * its multicast group membership table.
+ * These multicast group membership table elements are shared by the
+ * multicast groups stored within the table.
+ */
+ __le32 num_mcast_table_elems;
+
+ /*
+ * whether/how to do multicast->unicast conversion
+ *
+ * This configuration parameter specifies whether the target should
+ * perform multicast --> unicast conversion on transmit, and if so,
+ * what to do if it finds no entries in its multicast group
+ * membership table for the multicast IP address in the tx frame.
+ * Configuration value:
+ * 0 -> Do not perform multicast to unicast conversion.
+ * 1 -> Convert multicast frames to unicast, if the IP multicast
+ * address from the tx frame is found in the multicast group
+ * membership table. If the IP multicast address is not found,
+ * drop the frame.
+ * 2 -> Convert multicast frames to unicast, if the IP multicast
+ * address from the tx frame is found in the multicast group
+ * membership table. If the IP multicast address is not found,
+ * transmit the frame as multicast.
+ */
+ __le32 mcast2ucast_mode;
+
+ /*
+ * how much memory to allocate for a tx PPDU dbg log
+ *
+ * This parameter controls how much memory the target will allocate
+ * to store a log of tx PPDU meta-information (how large the PPDU
+ * was, when it was sent, whether it was successful, etc.)
+ */
+ __le32 tx_dbg_log_size;
+
+ /* how many AST entries to be allocated for WDS */
+ __le32 num_wds_entries;
+
+ /*
+ * MAC DMA burst size, e.g., for a PCI target the limit can be:
+ * 0 - default, 1 - 256B
+ */
+ __le32 dma_burst_size;
+
+ /*
+ * Fixed delimiters to be inserted after every MPDU to
+ * account for interface latency to avoid underrun.
+ */
+ __le32 mac_aggr_delim;
+
+ /*
+ * determine whether target is responsible for detecting duplicate
+ * non-aggregate MPDU and timing out stale fragments.
+ *
+ * A-MPDU reordering is always performed on the target.
+ *
+ * 0: target responsible for frag timeout and dup checking
+ * 1: host responsible for frag timeout and dup checking
+ */
+ __le32 rx_skip_defrag_timeout_dup_detection_check;
+
+ /*
+ * Configuration for VoW:
+ * number of video nodes to be supported
+ * and max number of descriptors for each video link (node).
+ */
+ __le32 vow_config;
+
+ /* Number of msdu descriptors target should use */
+ __le32 num_msdu_desc;
+
+ /*
+ * Max. number of Tx fragments per MSDU
+ * This parameter controls the max number of Tx fragments per MSDU.
+ * This is sent by the target as part of the WMI_SERVICE_READY event
+ * and is overridden by the OS shim as required.
+ */
+ __le32 max_frag_entries;
+} __packed;
+
+enum wmi_10_2_feature_mask {
+ WMI_10_2_RX_BATCH_MODE = BIT(0),
+ WMI_10_2_ATF_CONFIG = BIT(1),
+ WMI_10_2_COEX_GPIO = BIT(3),
+ WMI_10_2_BSS_CHAN_INFO = BIT(6),
+ WMI_10_2_PEER_STATS = BIT(7),
+};
+
+struct wmi_resource_config_10_2 {
+ struct wmi_resource_config_10x common;
+ __le32 max_peer_ext_stats;
+ __le32 smart_ant_cap; /* 0-disable, 1-enable */
+ __le32 bk_min_free;
+ __le32 be_min_free;
+ __le32 vi_min_free;
+ __le32 vo_min_free;
+ __le32 feature_mask;
+} __packed;
+
+#define NUM_UNITS_IS_NUM_VDEVS BIT(0)
+#define NUM_UNITS_IS_NUM_PEERS BIT(1)
+#define NUM_UNITS_IS_NUM_ACTIVE_PEERS BIT(2)
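+
+/*
+ * Illustrative helper (editor's sketch; the function name is
+ * hypothetical and not part of this header): shows how a host could
+ * derive the unit count for a wlan_host_mem_req whose num_unit_info
+ * flags tie the allocation to a resource count.
+ */
+static inline u32 example_mem_req_num_units(const struct wlan_host_mem_req *req,
+					    u32 num_vdevs, u32 num_peers,
+					    u32 num_active_peers)
+{
+	u32 info = __le32_to_cpu(req->num_unit_info);
+
+	if (info & NUM_UNITS_IS_NUM_VDEVS)
+		return num_vdevs;
+	if (info & NUM_UNITS_IS_NUM_PEERS)
+		return num_peers;
+	if (info & NUM_UNITS_IS_NUM_ACTIVE_PEERS)
+		return num_active_peers;
+
+	/* otherwise the firmware specified the count directly */
+	return __le32_to_cpu(req->num_units);
+}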
+
+struct wmi_resource_config_10_4 {
+ /* Number of virtual devices (VAPs) to support */
+ __le32 num_vdevs;
+
+ /* Number of peer nodes to support */
+ __le32 num_peers;
+
+ /* Number of active peer nodes to support */
+ __le32 num_active_peers;
+
+ /* In offload mode, target supports features like WOW, chatter and other
+ * protocol offloads. In order to support them some functionalities like
+ * reorder buffering, PN checking need to be done in target.
+ * This determines maximum number of peers supported by target in
+ * offload mode.
+ */
+ __le32 num_offload_peers;
+
+ /* Number of reorder buffers available for doing target based reorder
+ * Rx reorder buffering
+ */
+ __le32 num_offload_reorder_buffs;
+
+ /* Number of keys per peer */
+ __le32 num_peer_keys;
+
+ /* Total number of TX/RX data TIDs */
+ __le32 num_tids;
+
+ /* Max skid for resolving hash collisions.
+ * The address search table is sparse, so that if two MAC addresses
+ * result in the same hash value, the second of these conflicting
+ * entries can slide to the next index in the address search table,
+ * and use it, if it is unoccupied. This ast_skid_limit parameter
+ * specifies the upper bound on how many subsequent indices to search
+ * over to find an unoccupied space.
+ */
+ __le32 ast_skid_limit;
+
+ /* The nominal chain mask for transmit.
+ * The chain mask may be modified dynamically, e.g. to operate AP tx
+ * with a reduced number of chains if no clients are associated.
+ * This configuration parameter specifies the nominal chain-mask that
+ * should be used when not operating with a reduced set of tx chains.
+ */
+ __le32 tx_chain_mask;
+
+ /* The nominal chain mask for receive.
+ * The chain mask may be modified dynamically, e.g. for a client to use
+ * a reduced number of chains for receive if the traffic to the client
+ * is low enough that it doesn't require downlink MIMO or antenna
+ * diversity. This configuration parameter specifies the nominal
+ * chain-mask that should be used when not operating with a reduced
+ * set of rx chains.
+ */
+ __le32 rx_chain_mask;
+
+ /* What rx reorder timeout (ms) to use for the AC.
+ * Each WMM access class (voice, video, best-effort, background) will
+ * have its own timeout value to dictate how long to wait for missing
+ * rx MPDUs to arrive before flushing subsequent MPDUs that have already
+ * been received. This parameter specifies the timeout in milliseconds
+ * for each class.
+ */
+ __le32 rx_timeout_pri[4];
+
+ /* What mode the rx should decap packets to.
+ * MAC can decap to RAW (no decap), native wifi or Ethernet types.
+ * This setting also determines the default TX behavior, however TX
+ * behavior can be modified on a per VAP basis during VAP init
+ */
+ __le32 rx_decap_mode;
+
+ __le32 scan_max_pending_req;
+
+ __le32 bmiss_offload_max_vdev;
+
+ __le32 roam_offload_max_vdev;
+
+ __le32 roam_offload_max_ap_profiles;
+
+ /* How many groups to use for mcast->ucast conversion.
+ * The target's WAL maintains a table to hold information regarding
+ * which peers belong to a given multicast group, so that if
+ * multicast->unicast conversion is enabled, the target can convert
+ * multicast tx frames to a series of unicast tx frames, to each peer
+ * within the multicast group. This num_mcast_groups configuration
+ * parameter tells the target how many multicast groups to provide
+ * storage for within its multicast group membership table.
+ */
+ __le32 num_mcast_groups;
+
+ /* Size to alloc for the mcast membership table.
+ * This num_mcast_table_elems configuration parameter tells the target
+ * how many peer elements it needs to provide storage for in its
+ * multicast group membership table. These multicast group membership
+ * table elements are shared by the multicast groups stored within
+ * the table.
+ */
+ __le32 num_mcast_table_elems;
+
+ /* Whether/how to do multicast->unicast conversion.
+ * This configuration parameter specifies whether the target should
+ * perform multicast --> unicast conversion on transmit, and if so,
+ * what to do if it finds no entries in its multicast group membership
+ * table for the multicast IP address in the tx frame.
+ * Configuration value:
+ * 0 -> Do not perform multicast to unicast conversion.
+ * 1 -> Convert multicast frames to unicast, if the IP multicast address
+ * from the tx frame is found in the multicast group membership
+ * table. If the IP multicast address is not found, drop the frame
+ * 2 -> Convert multicast frames to unicast, if the IP multicast address
+ * from the tx frame is found in the multicast group membership
+ * table. If the IP multicast address is not found, transmit the
+ * frame as multicast.
+ */
+ __le32 mcast2ucast_mode;
+
+ /* How much memory to allocate for a tx PPDU dbg log.
+ * This parameter controls how much memory the target will allocate to
+ * store a log of tx PPDU meta-information (how large the PPDU was,
+ * when it was sent, whether it was successful, etc.)
+ */
+ __le32 tx_dbg_log_size;
+
+ /* How many AST entries to be allocated for WDS */
+ __le32 num_wds_entries;
+
+ /* MAC DMA burst size. 0 -default, 1 -256B */
+ __le32 dma_burst_size;
+
+ /* Fixed delimiters to be inserted after every MPDU to account for
+ * interface latency to avoid underrun.
+ */
+ __le32 mac_aggr_delim;
+
+ /* Determine whether target is responsible for detecting duplicate
+ * non-aggregate MPDU and timing out stale fragments. A-MPDU reordering
+ * is always performed on the target.
+ *
+ * 0: target responsible for frag timeout and dup checking
+ * 1: host responsible for frag timeout and dup checking
+ */
+ __le32 rx_skip_defrag_timeout_dup_detection_check;
+
+ /* Configuration for VoW : No of Video nodes to be supported and max
+ * no of descriptors for each video link (node).
+ */
+ __le32 vow_config;
+
+ /* Maximum vdev that could use gtk offload */
+ __le32 gtk_offload_max_vdev;
+
+ /* Number of msdu descriptors target should use */
+ __le32 num_msdu_desc;
+
+ /* Max number of tx fragments per MSDU.
+ * This parameter controls the max number of tx fragments per MSDU.
+ * This will be passed by the target as part of the WMI_SERVICE_READY
+ * and is overridden by the OS shim as required.
+ */
+ __le32 max_frag_entries;
+
+ /* Max number of extended peer stats.
+ * This parameter controls the max number of peers for which extended
+ * statistics are supported by target
+ */
+ __le32 max_peer_ext_stats;
+
+ /* Smart antenna capabilities information.
+ * 1 - Smart antenna is enabled
+ * 0 - Smart antenna is disabled
+ * In future this can contain smart antenna specific capabilities.
+ */
+ __le32 smart_ant_cap;
+
+ /* User can configure the buffers allocated for each AC (BE, BK, VI, VO)
+ * during init.
+ */
+ __le32 bk_minfree;
+ __le32 be_minfree;
+ __le32 vi_minfree;
+ __le32 vo_minfree;
+
+ /* Rx batch mode capability.
+ * 1 - Rx batch mode enabled
+ * 0 - Rx batch mode disabled
+ */
+ __le32 rx_batchmode;
+
+ /* Thermal throttling capability.
+ * 1 - Capable of thermal throttling
+ * 0 - Not capable of thermal throttling
+ */
+ __le32 tt_support;
+
+ /* ATF configuration.
+ * 1 - Enable ATF
+ * 0 - Disable ATF
+ */
+ __le32 atf_config;
+
+ /* Configure padding to manage IP header un-alignment
+ * 1 - Enable padding
+ * 0 - Disable padding
+ */
+ __le32 iphdr_pad_config;
+
+ /* qwrap configuration (bits 15-0)
+ * 1 - This is qwrap configuration
+ * 0 - This is not qwrap
+ *
+ * Bits 31-16 are alloc_frag_desc_for_data_pkt (1 enables, 0 disables)
+ * In order to get ack-RSSI reporting and to specify the tx-rate for
+ * individual frames, this option must be enabled. This uses an extra
+ * 4 bytes per tx-msdu descriptor, so don't enable it unless you need it.
+ */
+ __le32 qwrap_config;
+} __packed;
+
+enum wmi_coex_version {
+ WMI_NO_COEX_VERSION_SUPPORT = 0,
+ /* 3 wire coex support */
+ WMI_COEX_VERSION_1 = 1,
+ /* 2.5 wire coex support */
+ WMI_COEX_VERSION_2 = 2,
+ /* 2.5 wire coex with duty cycle support */
+ WMI_COEX_VERSION_3 = 3,
+ /* 4 wire coex support */
+ WMI_COEX_VERSION_4 = 4,
+};
+
+/**
+ * enum wmi_10_4_feature_mask - WMI 10.4 feature enable/disable flags
+ * @WMI_10_4_LTEU_SUPPORT: LTEU config
+ * @WMI_10_4_COEX_GPIO_SUPPORT: COEX GPIO config
+ * @WMI_10_4_AUX_RADIO_SPECTRAL_INTF: AUX Radio Enhancement for spectral scan
+ * @WMI_10_4_AUX_RADIO_CHAN_LOAD_INTF: AUX Radio Enhancement for chan load scan
+ * @WMI_10_4_BSS_CHANNEL_INFO_64: BSS channel info stats
+ * @WMI_10_4_PEER_STATS: Per station stats
+ * @WMI_10_4_VDEV_STATS: Per vdev stats
+ * @WMI_10_4_TDLS: Implicit TDLS support in firmware enable/disable
+ * @WMI_10_4_TDLS_OFFCHAN: TDLS offchannel support enable/disable
+ * @WMI_10_4_TDLS_UAPSD_BUFFER_STA: TDLS buffer sta support enable/disable
+ * @WMI_10_4_TDLS_UAPSD_SLEEP_STA: TDLS sleep sta support enable/disable
+ * @WMI_10_4_TDLS_CONN_TRACKER_IN_HOST_MODE: TDLS connection tracker in host
+ * enable/disable
+ * @WMI_10_4_TDLS_EXPLICIT_MODE_ONLY: Explicit TDLS mode enable/disable
+ */
+enum wmi_10_4_feature_mask {
+ WMI_10_4_LTEU_SUPPORT = BIT(0),
+ WMI_10_4_COEX_GPIO_SUPPORT = BIT(1),
+ WMI_10_4_AUX_RADIO_SPECTRAL_INTF = BIT(2),
+ WMI_10_4_AUX_RADIO_CHAN_LOAD_INTF = BIT(3),
+ WMI_10_4_BSS_CHANNEL_INFO_64 = BIT(4),
+ WMI_10_4_PEER_STATS = BIT(5),
+ WMI_10_4_VDEV_STATS = BIT(6),
+ WMI_10_4_TDLS = BIT(7),
+ WMI_10_4_TDLS_OFFCHAN = BIT(8),
+ WMI_10_4_TDLS_UAPSD_BUFFER_STA = BIT(9),
+ WMI_10_4_TDLS_UAPSD_SLEEP_STA = BIT(10),
+ WMI_10_4_TDLS_CONN_TRACKER_IN_HOST_MODE = BIT(11),
+ WMI_10_4_TDLS_EXPLICIT_MODE_ONLY = BIT(12),
+};
+
+struct wmi_ext_resource_config_10_4_cmd {
+ /* contains enum wmi_host_platform_type */
+ __le32 host_platform_config;
+ /* see enum wmi_10_4_feature_mask */
+ __le32 fw_feature_bitmap;
+ /* WLAN priority GPIO number */
+ __le32 wlan_gpio_priority;
+ /* see enum wmi_coex_version */
+ __le32 coex_version;
+ /* COEX GPIO config */
+ __le32 coex_gpio_pin1;
+ __le32 coex_gpio_pin2;
+ __le32 coex_gpio_pin3;
+ /* number of vdevs allowed to perform tdls */
+ __le32 num_tdls_vdevs;
+ /* number of peers to track per TDLS vdev */
+ __le32 num_tdls_conn_table_entries;
+ /* number of tdls sleep sta supported */
+ __le32 max_tdls_concurrent_sleep_sta;
+ /* number of tdls buffer sta supported */
+ __le32 max_tdls_concurrent_buffer_sta;
+};
+
+/* structure describing host memory chunk. */
+struct host_memory_chunk {
+ /* id of the request that is passed up in service ready */
+ __le32 req_id;
+ /* the physical address of the memory chunk */
+ __le32 ptr;
+ /* size of the chunk */
+ __le32 size;
+} __packed;
+
+struct wmi_host_mem_chunks {
+ __le32 count;
+ /* some fw revisions require at least 1 chunk regardless of count */
+ struct host_memory_chunk items[1];
+} __packed;
+
+struct wmi_init_cmd {
+ struct wmi_resource_config resource_config;
+ struct wmi_host_mem_chunks mem_chunks;
+} __packed;
+
+/* _10x structure is from 10.X FW API */
+struct wmi_init_cmd_10x {
+ struct wmi_resource_config_10x resource_config;
+ struct wmi_host_mem_chunks mem_chunks;
+} __packed;
+
+struct wmi_init_cmd_10_2 {
+ struct wmi_resource_config_10_2 resource_config;
+ struct wmi_host_mem_chunks mem_chunks;
+} __packed;
+
+struct wmi_init_cmd_10_4 {
+ struct wmi_resource_config_10_4 resource_config;
+ struct wmi_host_mem_chunks mem_chunks;
+} __packed;
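+
+/*
+ * Illustrative sketch (editor's example): since wmi_host_mem_chunks
+ * declares items[1] for the firmware revisions that expect at least
+ * one chunk, a host-side length computation for n_chunks chunks could
+ * look like:
+ *
+ *	len = sizeof(struct wmi_init_cmd) +
+ *	      (max(n_chunks, 1) - 1) * sizeof(struct host_memory_chunk);
+ */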
+
+struct wmi_chan_list_entry {
+ __le16 freq;
+ u8 phy_mode; /* valid for 10.2 only */
+ u8 reserved;
+} __packed;
+
+/* TLV for channel list */
+struct wmi_chan_list {
+ __le32 tag; /* WMI_CHAN_LIST_TAG */
+ __le32 num_chan;
+ struct wmi_chan_list_entry channel_list[0];
+} __packed;
+
+struct wmi_bssid_list {
+ __le32 tag; /* WMI_BSSID_LIST_TAG */
+ __le32 num_bssid;
+ struct wmi_mac_addr bssid_list[0];
+} __packed;
+
+struct wmi_ie_data {
+ __le32 tag; /* WMI_IE_TAG */
+ __le32 ie_len;
+ u8 ie_data[0];
+} __packed;
+
+struct wmi_ssid {
+ __le32 ssid_len;
+ u8 ssid[32];
+} __packed;
+
+struct wmi_ssid_list {
+ __le32 tag; /* WMI_SSID_LIST_TAG */
+ __le32 num_ssids;
+ struct wmi_ssid ssids[0];
+} __packed;
+
+/* prefix used by scan requestor ids on the host */
+#define WMI_HOST_SCAN_REQUESTOR_ID_PREFIX 0xA000
+
+/* prefix used by scan request ids generated on the host */
+/* host cycles through the lower 12 bits to generate ids */
+#define WMI_HOST_SCAN_REQ_ID_PREFIX 0xA000
+
+#define WLAN_SCAN_PARAMS_MAX_SSID 16
+#define WLAN_SCAN_PARAMS_MAX_BSSID 4
+#define WLAN_SCAN_PARAMS_MAX_IE_LEN 256
+
+/* Values lower than this may be refused by some firmware revisions,
+ * with a scan completion carrying a WMI_SCAN_REASON_TIMEDOUT reason.
+ */
+#define WMI_SCAN_CHAN_MIN_TIME_MSEC 40
+
+/* Scan priority numbers must be sequential, starting with 0 */
+enum wmi_scan_priority {
+ WMI_SCAN_PRIORITY_VERY_LOW = 0,
+ WMI_SCAN_PRIORITY_LOW,
+ WMI_SCAN_PRIORITY_MEDIUM,
+ WMI_SCAN_PRIORITY_HIGH,
+ WMI_SCAN_PRIORITY_VERY_HIGH,
+ WMI_SCAN_PRIORITY_COUNT /* number of priorities supported */
+};
+
+struct wmi_start_scan_common {
+ /* Scan ID */
+ __le32 scan_id;
+ /* Scan requestor ID */
+ __le32 scan_req_id;
+ /* VDEV id(interface) that is requesting scan */
+ __le32 vdev_id;
+ /* Scan Priority, input to scan scheduler */
+ __le32 scan_priority;
+ /* Scan events subscription */
+ __le32 notify_scan_events;
+ /* dwell time in msec on active channels */
+ __le32 dwell_time_active;
+ /* dwell time in msec on passive channels */
+ __le32 dwell_time_passive;
+ /*
+ * min time in msec on the BSS channel, only valid if at least one
+ * VDEV is active
+ */
+ __le32 min_rest_time;
+ /*
+ * max rest time in msec on the BSS channel, only valid if at least
+ * one VDEV is active
+ */
+ /*
+ * The scanner will rest on the BSS channel for at least
+ * min_rest_time. After min_rest_time the scanner will start checking
+ * for tx/rx activity on all VDEVs. If there is no activity the
+ * scanner will switch to off channel. If there is activity the
+ * scanner will keep the radio on the BSS channel until max_rest_time
+ * expires. At max_rest_time the scanner will switch to off channel
+ * irrespective of activity. Activity is determined by the idle_time
+ * parameter.
+ */
+ __le32 max_rest_time;
+ /*
+ * time before sending next set of probe requests.
+ * The scanner keeps repeating probe requests transmission with
+ * period specified by repeat_probe_time.
+ * The number of probe requests specified depends on the ssid_list
+ * and bssid_list
+ */
+ __le32 repeat_probe_time;
+ /* time in msec between 2 consecutive probe requests within a set */
+ __le32 probe_spacing_time;
+ /*
+ * data inactivity time in msec on bss channel that will be used by
+ * scanner for measuring the inactivity.
+ */
+ __le32 idle_time;
+ /* maximum time in msec allowed for scan */
+ __le32 max_scan_time;
+ /*
+ * delay in msec before sending first probe request after switching
+ * to a channel
+ */
+ __le32 probe_delay;
+ /* Scan control flags */
+ __le32 scan_ctrl_flags;
+} __packed;
+
+struct wmi_start_scan_tlvs {
+ /* TLV parameters. These include the channel list, ssid list,
+ * bssid list and extra IEs.
+ */
+ u8 tlvs[0];
+} __packed;
+
+struct wmi_start_scan_cmd {
+ struct wmi_start_scan_common common;
+ __le32 burst_duration_ms;
+ struct wmi_start_scan_tlvs tlvs;
+} __packed;
+
+/* This is the definition from 10.X firmware branch */
+struct wmi_10x_start_scan_cmd {
+ struct wmi_start_scan_common common;
+ struct wmi_start_scan_tlvs tlvs;
+} __packed;
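+
+/*
+ * Illustrative sketch (editor's example): the tlvs[] blob appended to
+ * a start scan command is a concatenation of the tagged lists defined
+ * above, each led by its WMI_*_LIST_TAG and element count. E.g. a
+ * channel list followed by an ssid list occupies:
+ *
+ *	len = sizeof(struct wmi_chan_list) +
+ *	      n_chans * sizeof(struct wmi_chan_list_entry) +
+ *	      sizeof(struct wmi_ssid_list) +
+ *	      n_ssids * sizeof(struct wmi_ssid);
+ */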
+
+struct wmi_ssid_arg {
+ int len;
+ const u8 *ssid;
+};
+
+struct wmi_bssid_arg {
+ const u8 *bssid;
+};
+
+struct wmi_start_scan_arg {
+ u32 scan_id;
+ u32 scan_req_id;
+ u32 vdev_id;
+ u32 scan_priority;
+ u32 notify_scan_events;
+ u32 dwell_time_active;
+ u32 dwell_time_passive;
+ u32 min_rest_time;
+ u32 max_rest_time;
+ u32 repeat_probe_time;
+ u32 probe_spacing_time;
+ u32 idle_time;
+ u32 max_scan_time;
+ u32 probe_delay;
+ u32 scan_ctrl_flags;
+ u32 burst_duration_ms;
+
+ u32 ie_len;
+ u32 n_channels;
+ u32 n_ssids;
+ u32 n_bssids;
+
+ u8 ie[WLAN_SCAN_PARAMS_MAX_IE_LEN];
+ u16 channels[64];
+ struct wmi_ssid_arg ssids[WLAN_SCAN_PARAMS_MAX_SSID];
+ struct wmi_bssid_arg bssids[WLAN_SCAN_PARAMS_MAX_BSSID];
+ struct wmi_mac_addr mac_addr;
+ struct wmi_mac_addr mac_mask;
+};
+
+/* scan control flags */
+
+/* passively scan all channels including active channels */
+#define WMI_SCAN_FLAG_PASSIVE 0x1
+/* add wildcard ssid probe request even though ssid_list is specified */
+#define WMI_SCAN_ADD_BCAST_PROBE_REQ 0x2
+/* add cck rates to rates/xrate ie for the generated probe request */
+#define WMI_SCAN_ADD_CCK_RATES 0x4
+/* add ofdm rates to rates/xrate ie for the generated probe request */
+#define WMI_SCAN_ADD_OFDM_RATES 0x8
+/* To enable indication of Chan load and Noise floor to host */
+#define WMI_SCAN_CHAN_STAT_EVENT 0x10
+/* Filter Probe request frames */
+#define WMI_SCAN_FILTER_PROBE_REQ 0x20
+/* When set, DFS channels will not be scanned */
+#define WMI_SCAN_BYPASS_DFS_CHN 0x40
+/* Different FW scan engines may choose to bail out on errors.
+ * Allow the driver to have influence over that.
+ */
+#define WMI_SCAN_CONTINUE_ON_ERROR 0x80
+
+/* Use random MAC address for TA for Probe Request frame and add
+ * OUI specified by WMI_SCAN_PROB_REQ_OUI_CMDID to the Probe Request frame.
+ * If the OUI is not set by WMI_SCAN_PROB_REQ_OUI_CMDID the flag is ignored.
+ */
+#define WMI_SCAN_ADD_SPOOFED_MAC_IN_PROBE_REQ 0x1000
+
+/* WMI_SCAN_CLASS_MASK must be the same value as IEEE80211_SCAN_CLASS_MASK */
+#define WMI_SCAN_CLASS_MASK 0xFF000000
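+
+/*
+ * Illustrative sketch (editor's example): scan_ctrl_flags is a plain
+ * bitmask of the flags above, so a typical active scan that wants CCK
+ * and OFDM rates in its probe requests plus channel stat events could
+ * be requested with:
+ *
+ *	u32 scan_ctrl_flags = WMI_SCAN_ADD_BCAST_PROBE_REQ |
+ *			      WMI_SCAN_ADD_CCK_RATES |
+ *			      WMI_SCAN_ADD_OFDM_RATES |
+ *			      WMI_SCAN_CHAN_STAT_EVENT;
+ */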
+
+enum wmi_stop_scan_type {
+ WMI_SCAN_STOP_ONE = 0x00000000, /* stop by scan_id */
+ WMI_SCAN_STOP_VDEV_ALL = 0x01000000, /* stop by vdev_id */
+ WMI_SCAN_STOP_ALL = 0x04000000, /* stop all scans */
+};
+
+struct wmi_stop_scan_cmd {
+ __le32 scan_req_id;
+ __le32 scan_id;
+ __le32 req_type;
+ __le32 vdev_id;
+} __packed;
+
+struct wmi_stop_scan_arg {
+ u32 req_id;
+ enum wmi_stop_scan_type req_type;
+ union {
+ u32 scan_id;
+ u32 vdev_id;
+ } u;
+};
+
+struct wmi_scan_chan_list_cmd {
+ __le32 num_scan_chans;
+ struct wmi_channel chan_info[0];
+} __packed;
+
+struct wmi_scan_chan_list_arg {
+ u32 n_channels;
+ struct wmi_channel_arg *channels;
+};
+
+enum wmi_bss_filter {
+ WMI_BSS_FILTER_NONE = 0, /* no beacons forwarded */
+ WMI_BSS_FILTER_ALL, /* all beacons forwarded */
+ WMI_BSS_FILTER_PROFILE, /* only beacons matching profile */
+ WMI_BSS_FILTER_ALL_BUT_PROFILE, /* all but beacons matching profile */
+ WMI_BSS_FILTER_CURRENT_BSS, /* only beacons matching current BSS */
+ WMI_BSS_FILTER_ALL_BUT_BSS, /* all but beacons matching BSS */
+ WMI_BSS_FILTER_PROBED_SSID, /* beacons matching probed ssid */
+ WMI_BSS_FILTER_LAST_BSS, /* marker only */
+};
+
+enum wmi_scan_event_type {
+ WMI_SCAN_EVENT_STARTED = BIT(0),
+ WMI_SCAN_EVENT_COMPLETED = BIT(1),
+ WMI_SCAN_EVENT_BSS_CHANNEL = BIT(2),
+ WMI_SCAN_EVENT_FOREIGN_CHANNEL = BIT(3),
+ WMI_SCAN_EVENT_DEQUEUED = BIT(4),
+ /* possibly by high-prio scan */
+ WMI_SCAN_EVENT_PREEMPTED = BIT(5),
+ WMI_SCAN_EVENT_START_FAILED = BIT(6),
+ WMI_SCAN_EVENT_RESTARTED = BIT(7),
+ WMI_SCAN_EVENT_FOREIGN_CHANNEL_EXIT = BIT(8),
+ WMI_SCAN_EVENT_MAX = BIT(15),
+};
+
+enum wmi_scan_completion_reason {
+ WMI_SCAN_REASON_COMPLETED,
+ WMI_SCAN_REASON_CANCELLED,
+ WMI_SCAN_REASON_PREEMPTED,
+ WMI_SCAN_REASON_TIMEDOUT,
+ WMI_SCAN_REASON_INTERNAL_FAILURE,
+ WMI_SCAN_REASON_MAX,
+};
+
+struct wmi_scan_event {
+ __le32 event_type; /* %WMI_SCAN_EVENT_ */
+ __le32 reason; /* %WMI_SCAN_REASON_ */
+ __le32 channel_freq; /* only valid for WMI_SCAN_EVENT_FOREIGN_CHANNEL */
+ __le32 scan_req_id;
+ __le32 scan_id;
+ __le32 vdev_id;
+} __packed;
+
+/*
+ * This defines how much headroom is kept in the
+ * receive frame between the descriptor and the
+ * payload, in order for the WMI PHY error and
+ * management handler to insert header contents.
+ *
+ * This is in bytes.
+ */
+#define WMI_MGMT_RX_HDR_HEADROOM 52
+
+/*
+ * This event will be used for sending scan results
+ * as well as rx mgmt frames to the host. The rx buffer
+ * will be sent as part of this WMI event. It would be a
+ * good idea to pass all the fields in the RX status
+ * descriptor up to the host.
+ */
+struct wmi_mgmt_rx_hdr_v1 {
+ __le32 channel;
+ __le32 snr;
+ __le32 rate;
+ __le32 phy_mode;
+ __le32 buf_len;
+ __le32 status; /* %WMI_RX_STATUS_ */
+} __packed;
+
+struct wmi_mgmt_rx_hdr_v2 {
+ struct wmi_mgmt_rx_hdr_v1 v1;
+ __le32 rssi_ctl[4];
+} __packed;
+
+struct wmi_mgmt_rx_event_v1 {
+ struct wmi_mgmt_rx_hdr_v1 hdr;
+ u8 buf[0];
+} __packed;
+
+struct wmi_mgmt_rx_event_v2 {
+ struct wmi_mgmt_rx_hdr_v2 hdr;
+ u8 buf[0];
+} __packed;
+
+struct wmi_10_4_mgmt_rx_hdr {
+ __le32 channel;
+ __le32 snr;
+ u8 rssi_ctl[4];
+ __le32 rate;
+ __le32 phy_mode;
+ __le32 buf_len;
+ __le32 status;
+} __packed;
+
+struct wmi_10_4_mgmt_rx_event {
+ struct wmi_10_4_mgmt_rx_hdr hdr;
+ u8 buf[0];
+} __packed;
+
+struct wmi_mgmt_rx_ext_info {
+ __le64 rx_mac_timestamp;
+} __packed __aligned(4);
+
+#define WMI_RX_STATUS_OK 0x00
+#define WMI_RX_STATUS_ERR_CRC 0x01
+#define WMI_RX_STATUS_ERR_DECRYPT 0x08
+#define WMI_RX_STATUS_ERR_MIC 0x10
+#define WMI_RX_STATUS_ERR_KEY_CACHE_MISS 0x20
+/* Extension data at the end of mgmt frame */
+#define WMI_RX_STATUS_EXT_INFO 0x40
+
+#define PHY_ERROR_GEN_SPECTRAL_SCAN 0x26
+#define PHY_ERROR_GEN_FALSE_RADAR_EXT 0x24
+#define PHY_ERROR_GEN_RADAR 0x05
+
+#define PHY_ERROR_10_4_RADAR_MASK 0x4
+#define PHY_ERROR_10_4_SPECTRAL_SCAN_MASK 0x4000000
+
+enum phy_err_type {
+ PHY_ERROR_UNKNOWN,
+ PHY_ERROR_SPECTRAL_SCAN,
+ PHY_ERROR_FALSE_RADAR_EXT,
+ PHY_ERROR_RADAR
+};
+
+struct wmi_phyerr {
+ __le32 tsf_timestamp;
+ __le16 freq1;
+ __le16 freq2;
+ u8 rssi_combined;
+ u8 chan_width_mhz;
+ u8 phy_err_code;
+ u8 rsvd0;
+ __le32 rssi_chains[4];
+ __le16 nf_chains[4];
+ __le32 buf_len;
+ u8 buf[0];
+} __packed;
+
+struct wmi_phyerr_event {
+ __le32 num_phyerrs;
+ __le32 tsf_l32;
+ __le32 tsf_u32;
+ struct wmi_phyerr phyerrs[0];
+} __packed;
+
+struct wmi_10_4_phyerr_event {
+ __le32 tsf_l32;
+ __le32 tsf_u32;
+ __le16 freq1;
+ __le16 freq2;
+ u8 rssi_combined;
+ u8 chan_width_mhz;
+ u8 phy_err_code;
+ u8 rsvd0;
+ __le32 rssi_chains[4];
+ __le16 nf_chains[4];
+ __le32 phy_err_mask[2];
+ __le32 tsf_timestamp;
+ __le32 buf_len;
+ u8 buf[0];
+} __packed;
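+
+/*
+ * Illustrative sketch (editor's example): on 10.4 the PHY error type
+ * arrives as a bitmask (phy_err_mask) rather than a code, so a
+ * classifier could check:
+ *
+ *	u32 mask = __le32_to_cpu(ev->phy_err_mask[0]);
+ *
+ *	if (mask & PHY_ERROR_10_4_RADAR_MASK)
+ *		type = PHY_ERROR_RADAR;
+ *	else if (mask & PHY_ERROR_10_4_SPECTRAL_SCAN_MASK)
+ *		type = PHY_ERROR_SPECTRAL_SCAN;
+ */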
+
+struct wmi_radar_found_info {
+ __le32 pri_min;
+ __le32 pri_max;
+ __le32 width_min;
+ __le32 width_max;
+ __le32 sidx_min;
+ __le32 sidx_max;
+} __packed;
+
+enum wmi_radar_confirmation_status {
+ /* Detected radar was due to SW pulses */
+ WMI_SW_RADAR_DETECTED = 0,
+
+ WMI_RADAR_DETECTION_FAIL = 1,
+
+ /* Real radar detected */
+ WMI_HW_RADAR_DETECTED = 2,
+};
+
+#define PHYERR_TLV_SIG 0xBB
+#define PHYERR_TLV_TAG_SEARCH_FFT_REPORT 0xFB
+#define PHYERR_TLV_TAG_RADAR_PULSE_SUMMARY 0xF8
+#define PHYERR_TLV_TAG_SPECTRAL_SUMMARY_REPORT 0xF9
+
+struct phyerr_radar_report {
+ __le32 reg0; /* RADAR_REPORT_REG0_* */
+ __le32 reg1; /* RADAR_REPORT_REG1_* */
+} __packed;
+
+#define RADAR_REPORT_REG0_PULSE_IS_CHIRP_MASK 0x80000000
+#define RADAR_REPORT_REG0_PULSE_IS_CHIRP_LSB 31
+
+#define RADAR_REPORT_REG0_PULSE_IS_MAX_WIDTH_MASK 0x40000000
+#define RADAR_REPORT_REG0_PULSE_IS_MAX_WIDTH_LSB 30
+
+#define RADAR_REPORT_REG0_AGC_TOTAL_GAIN_MASK 0x3FF00000
+#define RADAR_REPORT_REG0_AGC_TOTAL_GAIN_LSB 20
+
+#define RADAR_REPORT_REG0_PULSE_DELTA_DIFF_MASK 0x000F0000
+#define RADAR_REPORT_REG0_PULSE_DELTA_DIFF_LSB 16
+
+#define RADAR_REPORT_REG0_PULSE_DELTA_PEAK_MASK 0x0000FC00
+#define RADAR_REPORT_REG0_PULSE_DELTA_PEAK_LSB 10
+
+#define RADAR_REPORT_REG0_PULSE_SIDX_MASK 0x000003FF
+#define RADAR_REPORT_REG0_PULSE_SIDX_LSB 0
+
+#define RADAR_REPORT_REG1_PULSE_SRCH_FFT_VALID_MASK 0x80000000
+#define RADAR_REPORT_REG1_PULSE_SRCH_FFT_VALID_LSB 31
+
+#define RADAR_REPORT_REG1_PULSE_AGC_MB_GAIN_MASK 0x7F000000
+#define RADAR_REPORT_REG1_PULSE_AGC_MB_GAIN_LSB 24
+
+#define RADAR_REPORT_REG1_PULSE_SUBCHAN_MASK_MASK 0x00FF0000
+#define RADAR_REPORT_REG1_PULSE_SUBCHAN_MASK_LSB 16
+
+#define RADAR_REPORT_REG1_PULSE_TSF_OFFSET_MASK 0x0000FF00
+#define RADAR_REPORT_REG1_PULSE_TSF_OFFSET_LSB 8
+
+#define RADAR_REPORT_REG1_PULSE_DUR_MASK 0x000000FF
+#define RADAR_REPORT_REG1_PULSE_DUR_LSB 0
+
+struct phyerr_fft_report {
+ __le32 reg0; /* SEARCH_FFT_REPORT_REG0_ * */
+ __le32 reg1; /* SEARCH_FFT_REPORT_REG1_ * */
+} __packed;
+
+#define SEARCH_FFT_REPORT_REG0_TOTAL_GAIN_DB_MASK 0xFF800000
+#define SEARCH_FFT_REPORT_REG0_TOTAL_GAIN_DB_LSB 23
+
+#define SEARCH_FFT_REPORT_REG0_BASE_PWR_DB_MASK 0x007FC000
+#define SEARCH_FFT_REPORT_REG0_BASE_PWR_DB_LSB 14
+
+#define SEARCH_FFT_REPORT_REG0_FFT_CHN_IDX_MASK 0x00003000
+#define SEARCH_FFT_REPORT_REG0_FFT_CHN_IDX_LSB 12
+
+#define SEARCH_FFT_REPORT_REG0_PEAK_SIDX_MASK 0x00000FFF
+#define SEARCH_FFT_REPORT_REG0_PEAK_SIDX_LSB 0
+
+#define SEARCH_FFT_REPORT_REG1_RELPWR_DB_MASK 0xFC000000
+#define SEARCH_FFT_REPORT_REG1_RELPWR_DB_LSB 26
+
+#define SEARCH_FFT_REPORT_REG1_AVGPWR_DB_MASK 0x03FC0000
+#define SEARCH_FFT_REPORT_REG1_AVGPWR_DB_LSB 18
+
+#define SEARCH_FFT_REPORT_REG1_PEAK_MAG_MASK 0x0003FF00
+#define SEARCH_FFT_REPORT_REG1_PEAK_MAG_LSB 8
+
+#define SEARCH_FFT_REPORT_REG1_NUM_STR_BINS_IB_MASK 0x000000FF
+#define SEARCH_FFT_REPORT_REG1_NUM_STR_BINS_IB_LSB 0
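+
+/*
+ * Illustrative helper (editor's sketch; the function name is
+ * hypothetical): the *_MASK/*_LSB pairs above are meant to be used
+ * together to extract a field from the packed report words, e.g. the
+ * peak search index of an FFT report.
+ */
+static inline u32 example_fft_peak_sidx(const struct phyerr_fft_report *rpt)
+{
+	return (__le32_to_cpu(rpt->reg0) &
+		SEARCH_FFT_REPORT_REG0_PEAK_SIDX_MASK) >>
+	       SEARCH_FFT_REPORT_REG0_PEAK_SIDX_LSB;
+}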
+
+struct phyerr_tlv {
+ __le16 len;
+ u8 tag;
+ u8 sig;
+} __packed;
+
+#define DFS_RSSI_POSSIBLY_FALSE 50
+#define DFS_PEAK_MAG_THOLD_POSSIBLY_FALSE 40
+
+struct wmi_mgmt_tx_hdr {
+ __le32 vdev_id;
+ struct wmi_mac_addr peer_macaddr;
+ __le32 tx_rate;
+ __le32 tx_power;
+ __le32 buf_len;
+} __packed;
+
+struct wmi_mgmt_tx_cmd {
+ struct wmi_mgmt_tx_hdr hdr;
+ u8 buf[0];
+} __packed;
+
+struct wmi_echo_event {
+ __le32 value;
+} __packed;
+
+struct wmi_echo_cmd {
+ __le32 value;
+} __packed;
+
+struct wmi_pdev_set_regdomain_cmd {
+ __le32 reg_domain;
+ __le32 reg_domain_2G;
+ __le32 reg_domain_5G;
+ __le32 conformance_test_limit_2G;
+ __le32 conformance_test_limit_5G;
+} __packed;
+
+enum wmi_dfs_region {
+ /* Uninitialized dfs domain */
+ WMI_UNINIT_DFS_DOMAIN = 0,
+
+ /* FCC3 dfs domain */
+ WMI_FCC_DFS_DOMAIN = 1,
+
+ /* ETSI dfs domain */
+ WMI_ETSI_DFS_DOMAIN = 2,
+
+ /* Japan dfs domain */
+ WMI_MKK4_DFS_DOMAIN = 3,
+};
+
+struct wmi_pdev_set_regdomain_cmd_10x {
+ __le32 reg_domain;
+ __le32 reg_domain_2G;
+ __le32 reg_domain_5G;
+ __le32 conformance_test_limit_2G;
+ __le32 conformance_test_limit_5G;
+
+ /* dfs domain from wmi_dfs_region */
+ __le32 dfs_domain;
+} __packed;
+
+/* Command to set/unset chip in quiet mode */
+struct wmi_pdev_set_quiet_cmd {
+ /* period in TUs */
+ __le32 period;
+
+ /* duration in TUs */
+ __le32 duration;
+
+ /* offset in TUs */
+ __le32 next_start;
+
+ /* enable/disable */
+ __le32 enabled;
+} __packed;
+
+/*
+ * 802.11g protection mode.
+ */
+enum ath10k_protmode {
+ ATH10K_PROT_NONE = 0, /* no protection */
+ ATH10K_PROT_CTSONLY = 1, /* CTS to self */
+ ATH10K_PROT_RTSCTS = 2, /* RTS-CTS */
+};
+
+enum wmi_rtscts_profile {
+ WMI_RTSCTS_FOR_NO_RATESERIES = 0,
+ WMI_RTSCTS_FOR_SECOND_RATESERIES,
+ WMI_RTSCTS_ACROSS_SW_RETRIES
+};
+
+#define WMI_RTSCTS_ENABLED 1
+#define WMI_RTSCTS_SET_MASK 0x0f
+#define WMI_RTSCTS_SET_LSB 0
+
+#define WMI_RTSCTS_PROFILE_MASK 0xf0
+#define WMI_RTSCTS_PROFILE_LSB 4
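+
+/*
+ * Illustrative sketch (editor's example): the enable bits and the
+ * profile are packed into one parameter value using the masks above:
+ *
+ *	u32 rtscts = (WMI_RTSCTS_ENABLED << WMI_RTSCTS_SET_LSB) |
+ *		     (WMI_RTSCTS_ACROSS_SW_RETRIES << WMI_RTSCTS_PROFILE_LSB);
+ */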
+
+enum wmi_beacon_gen_mode {
+ WMI_BEACON_STAGGERED_MODE = 0,
+ WMI_BEACON_BURST_MODE = 1
+};
+
+enum wmi_csa_event_ies_present_flag {
+ WMI_CSA_IE_PRESENT = 0x00000001,
+ WMI_XCSA_IE_PRESENT = 0x00000002,
+ WMI_WBW_IE_PRESENT = 0x00000004,
+ WMI_CSWARP_IE_PRESENT = 0x00000008,
+};
+
+/* wmi CSA receive event from beacon frame */
+struct wmi_csa_event {
+ __le32 i_fc_dur;
+ /* Bit 0-15: FC */
+ /* Bit 16-31: DUR */
+ struct wmi_mac_addr i_addr1;
+ struct wmi_mac_addr i_addr2;
+ __le32 csa_ie[2];
+ __le32 xcsa_ie[2];
+ __le32 wb_ie[2];
+ __le32 cswarp_ie;
+ __le32 ies_present_flag; /* wmi_csa_event_ies_present_flag */
+} __packed;
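+
+/*
+ * Illustrative sketch (editor's example; parse_csa_ie() is
+ * hypothetical): ies_present_flag gates which of the raw IE words in
+ * wmi_csa_event are valid, e.g.:
+ *
+ *	if (__le32_to_cpu(ev->ies_present_flag) & WMI_CSA_IE_PRESENT)
+ *		parse_csa_ie(ev->csa_ie);
+ */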
+
+/* the definition of different PDEV parameters */
+#define PDEV_DEFAULT_STATS_UPDATE_PERIOD 500
+#define VDEV_DEFAULT_STATS_UPDATE_PERIOD 500
+#define PEER_DEFAULT_STATS_UPDATE_PERIOD 500
+
+struct wmi_pdev_param_map {
+ u32 tx_chain_mask;
+ u32 rx_chain_mask;
+ u32 txpower_limit2g;
+ u32 txpower_limit5g;
+ u32 txpower_scale;
+ u32 beacon_gen_mode;
+ u32 beacon_tx_mode;
+ u32 resmgr_offchan_mode;
+ u32 protection_mode;
+ u32 dynamic_bw;
+ u32 non_agg_sw_retry_th;
+ u32 agg_sw_retry_th;
+ u32 sta_kickout_th;
+ u32 ac_aggrsize_scaling;
+ u32 ltr_enable;
+ u32 ltr_ac_latency_be;
+ u32 ltr_ac_latency_bk;
+ u32 ltr_ac_latency_vi;
+ u32 ltr_ac_latency_vo;
+ u32 ltr_ac_latency_timeout;
+ u32 ltr_sleep_override;
+ u32 ltr_rx_override;
+ u32 ltr_tx_activity_timeout;
+ u32 l1ss_enable;
+ u32 dsleep_enable;
+ u32 pcielp_txbuf_flush;
+ u32 pcielp_txbuf_watermark;
+ u32 pcielp_txbuf_tmo_en;
+ u32 pcielp_txbuf_tmo_value;
+ u32 pdev_stats_update_period;
+ u32 vdev_stats_update_period;
+ u32 peer_stats_update_period;
+ u32 bcnflt_stats_update_period;
+ u32 pmf_qos;
+ u32 arp_ac_override;
+ u32 dcs;
+ u32 ani_enable;
+ u32 ani_poll_period;
+ u32 ani_listen_period;
+ u32 ani_ofdm_level;
+ u32 ani_cck_level;
+ u32 dyntxchain;
+ u32 proxy_sta;
+ u32 idle_ps_config;
+ u32 power_gating_sleep;
+ u32 fast_channel_reset;
+ u32 burst_dur;
+ u32 burst_enable;
+ u32 cal_period;
+ u32 aggr_burst;
+ u32 rx_decap_mode;
+ u32 smart_antenna_default_antenna;
+ u32 igmpmld_override;
+ u32 igmpmld_tid;
+ u32 antenna_gain;
+ u32 rx_filter;
+ u32 set_mcast_to_ucast_tid;
+ u32 proxy_sta_mode;
+ u32 set_mcast2ucast_mode;
+ u32 set_mcast2ucast_buffer;
+ u32 remove_mcast2ucast_buffer;
+ u32 peer_sta_ps_statechg_enable;
+ u32 igmpmld_ac_override;
+ u32 block_interbss;
+ u32 set_disable_reset_cmdid;
+ u32 set_msdu_ttl_cmdid;
+ u32 set_ppdu_duration_cmdid;
+ u32 txbf_sound_period_cmdid;
+ u32 set_promisc_mode_cmdid;
+ u32 set_burst_mode_cmdid;
+ u32 en_stats;
+ u32 mu_group_policy;
+ u32 noise_detection;
+ u32 noise_threshold;
+ u32 dpd_enable;
+ u32 set_mcast_bcast_echo;
+ u32 atf_strict_sch;
+ u32 atf_sched_duration;
+ u32 ant_plzn;
+ u32 mgmt_retry_limit;
+ u32 sensitivity_level;
+ u32 signed_txpower_2g;
+ u32 signed_txpower_5g;
+ u32 enable_per_tid_amsdu;
+ u32 enable_per_tid_ampdu;
+ u32 cca_threshold;
+ u32 rts_fixed_rate;
+ u32 pdev_reset;
+ u32 wapi_mbssid_offset;
+ u32 arp_srcaddr;
+ u32 arp_dstaddr;
+ u32 enable_btcoex;
+};
+
+#define WMI_PDEV_PARAM_UNSUPPORTED 0
+
+enum wmi_pdev_param {
+ /* TX chain mask */
+ WMI_PDEV_PARAM_TX_CHAIN_MASK = 0x1,
+ /* RX chain mask */
+ WMI_PDEV_PARAM_RX_CHAIN_MASK,
+ /* TX power limit for 2G Radio */
+ WMI_PDEV_PARAM_TXPOWER_LIMIT2G,
+ /* TX power limit for 5G Radio */
+ WMI_PDEV_PARAM_TXPOWER_LIMIT5G,
+ /* TX power scale */
+ WMI_PDEV_PARAM_TXPOWER_SCALE,
+ /* Beacon generation mode. 0: host, 1: target */
+ WMI_PDEV_PARAM_BEACON_GEN_MODE,
+ /* Beacon transmission mode. 0: staggered, 1: bursted */
+ WMI_PDEV_PARAM_BEACON_TX_MODE,
+ /*
+ * Resource manager off-channel mode.
+ * 0: turn off offchan mode. 1: turn on offchan mode
+ */
+ WMI_PDEV_PARAM_RESMGR_OFFCHAN_MODE,
+ /*
+ * Protection mode:
+ * 0: no protection 1:use CTS-to-self 2: use RTS/CTS
+ */
+ WMI_PDEV_PARAM_PROTECTION_MODE,
+ /*
+ * Dynamic bandwidth - 0: disable, 1: enable
+ *
+ * When enabled HW rate control tries different bandwidths when
+ * retransmitting frames.
+ */
+ WMI_PDEV_PARAM_DYNAMIC_BW,
+ /* Non-aggregate / 11g sw retry threshold. 0 - disable */
+ WMI_PDEV_PARAM_NON_AGG_SW_RETRY_TH,
+ /* aggregate sw retry threshold. 0 - disable */
+ WMI_PDEV_PARAM_AGG_SW_RETRY_TH,
+ /* Station kickout threshold (number of consecutive failures). 0 - disable */
+ WMI_PDEV_PARAM_STA_KICKOUT_TH,
+ /* Aggregate size scaling configuration per AC */
+ WMI_PDEV_PARAM_AC_AGGRSIZE_SCALING,
+ /* LTR enable */
+ WMI_PDEV_PARAM_LTR_ENABLE,
+ /* LTR latency for BE, in us */
+ WMI_PDEV_PARAM_LTR_AC_LATENCY_BE,
+ /* LTR latency for BK, in us */
+ WMI_PDEV_PARAM_LTR_AC_LATENCY_BK,
+ /* LTR latency for VI, in us */
+ WMI_PDEV_PARAM_LTR_AC_LATENCY_VI,
+ /* LTR latency for VO, in us */
+ WMI_PDEV_PARAM_LTR_AC_LATENCY_VO,
+ /* LTR AC latency timeout, in ms */
+ WMI_PDEV_PARAM_LTR_AC_LATENCY_TIMEOUT,
+ /* LTR platform latency override, in us */
+ WMI_PDEV_PARAM_LTR_SLEEP_OVERRIDE,
+ /* LTR-RX override, in us */
+ WMI_PDEV_PARAM_LTR_RX_OVERRIDE,
+ /* Tx activity timeout for LTR, in us */
+ WMI_PDEV_PARAM_LTR_TX_ACTIVITY_TIMEOUT,
+ /* L1SS state machine enable */
+ WMI_PDEV_PARAM_L1SS_ENABLE,
+ /* Deep sleep state machine enable */
+ WMI_PDEV_PARAM_DSLEEP_ENABLE,
+ /* RX buffering flush enable */
+ WMI_PDEV_PARAM_PCIELP_TXBUF_FLUSH,
+ /* RX buffering watermark */
+ WMI_PDEV_PARAM_PCIELP_TXBUF_WATERMARK,
+ /* RX buffering timeout enable */
+ WMI_PDEV_PARAM_PCIELP_TXBUF_TMO_EN,
+ /* RX buffering timeout value */
+ WMI_PDEV_PARAM_PCIELP_TXBUF_TMO_VALUE,
+ /* pdev level stats update period in ms */
+ WMI_PDEV_PARAM_PDEV_STATS_UPDATE_PERIOD,
+ /* vdev level stats update period in ms */
+ WMI_PDEV_PARAM_VDEV_STATS_UPDATE_PERIOD,
+ /* peer level stats update period in ms */
+ WMI_PDEV_PARAM_PEER_STATS_UPDATE_PERIOD,
+ /* beacon filter status update period */
+ WMI_PDEV_PARAM_BCNFLT_STATS_UPDATE_PERIOD,
+ /* QOS Mgmt frame protection MFP/PMF 0: disable, 1: enable */
+ WMI_PDEV_PARAM_PMF_QOS,
+ /* Access category on which ARP frames are sent */
+ WMI_PDEV_PARAM_ARP_AC_OVERRIDE,
+ /* DCS configuration */
+ WMI_PDEV_PARAM_DCS,
+ /* Enable/Disable ANI on target */
+ WMI_PDEV_PARAM_ANI_ENABLE,
+ /* configure the ANI polling period */
+ WMI_PDEV_PARAM_ANI_POLL_PERIOD,
+ /* configure the ANI listening period */
+ WMI_PDEV_PARAM_ANI_LISTEN_PERIOD,
+ /* configure OFDM immunity level */
+ WMI_PDEV_PARAM_ANI_OFDM_LEVEL,
+ /* configure CCK immunity level */
+ WMI_PDEV_PARAM_ANI_CCK_LEVEL,
+ /* Enable/Disable CDD for 1x1 STAs in rate control module */
+ WMI_PDEV_PARAM_DYNTXCHAIN,
+ /* Enable/Disable proxy STA */
+ WMI_PDEV_PARAM_PROXY_STA,
+ /* Enable/Disable low power state when all VDEVs are inactive/idle. */
+ WMI_PDEV_PARAM_IDLE_PS_CONFIG,
+ /* Enable/Disable power gating sleep */
+ WMI_PDEV_PARAM_POWER_GATING_SLEEP,
+};
+
+enum wmi_10x_pdev_param {
+ /* TX chain mask */
+ WMI_10X_PDEV_PARAM_TX_CHAIN_MASK = 0x1,
+ /* RX chain mask */
+ WMI_10X_PDEV_PARAM_RX_CHAIN_MASK,
+ /* TX power limit for 2G Radio */
+ WMI_10X_PDEV_PARAM_TXPOWER_LIMIT2G,
+ /* TX power limit for 5G Radio */
+ WMI_10X_PDEV_PARAM_TXPOWER_LIMIT5G,
+ /* TX power scale */
+ WMI_10X_PDEV_PARAM_TXPOWER_SCALE,
+ /* Beacon generation mode. 0: host, 1: target */
+ WMI_10X_PDEV_PARAM_BEACON_GEN_MODE,
+ /* Beacon transmission mode. 0: staggered, 1: bursted */
+ WMI_10X_PDEV_PARAM_BEACON_TX_MODE,
+ /*
+ * Resource manager off-channel mode.
+ * 0: turn off offchan mode. 1: turn on offchan mode
+ */
+ WMI_10X_PDEV_PARAM_RESMGR_OFFCHAN_MODE,
+ /*
+ * Protection mode:
+ * 0: no protection 1:use CTS-to-self 2: use RTS/CTS
+ */
+ WMI_10X_PDEV_PARAM_PROTECTION_MODE,
+ /* Dynamic bandwidth 0: disable 1: enable */
+ WMI_10X_PDEV_PARAM_DYNAMIC_BW,
+ /* Non-aggregate / 11g sw retry threshold. 0 - disable */
+ WMI_10X_PDEV_PARAM_NON_AGG_SW_RETRY_TH,
+ /* Aggregate sw retry threshold. 0 - disable */
+ WMI_10X_PDEV_PARAM_AGG_SW_RETRY_TH,
+ /* Station kickout threshold (number of consecutive failures). 0 - disable */
+ WMI_10X_PDEV_PARAM_STA_KICKOUT_TH,
+ /* Aggregate size scaling configuration per AC */
+ WMI_10X_PDEV_PARAM_AC_AGGRSIZE_SCALING,
+ /* LTR enable */
+ WMI_10X_PDEV_PARAM_LTR_ENABLE,
+ /* LTR latency for BE, in us */
+ WMI_10X_PDEV_PARAM_LTR_AC_LATENCY_BE,
+ /* LTR latency for BK, in us */
+ WMI_10X_PDEV_PARAM_LTR_AC_LATENCY_BK,
+ /* LTR latency for VI, in us */
+ WMI_10X_PDEV_PARAM_LTR_AC_LATENCY_VI,
+ /* LTR latency for VO, in us */
+ WMI_10X_PDEV_PARAM_LTR_AC_LATENCY_VO,
+ /* LTR AC latency timeout, in ms */
+ WMI_10X_PDEV_PARAM_LTR_AC_LATENCY_TIMEOUT,
+ /* LTR platform latency override, in us */
+ WMI_10X_PDEV_PARAM_LTR_SLEEP_OVERRIDE,
+ /* LTR-RX override, in us */
+ WMI_10X_PDEV_PARAM_LTR_RX_OVERRIDE,
+ /* Tx activity timeout for LTR, in us */
+ WMI_10X_PDEV_PARAM_LTR_TX_ACTIVITY_TIMEOUT,
+ /* L1SS state machine enable */
+ WMI_10X_PDEV_PARAM_L1SS_ENABLE,
+ /* Deep sleep state machine enable */
+ WMI_10X_PDEV_PARAM_DSLEEP_ENABLE,
+ /* pdev level stats update period in ms */
+ WMI_10X_PDEV_PARAM_PDEV_STATS_UPDATE_PERIOD,
+ /* vdev level stats update period in ms */
+ WMI_10X_PDEV_PARAM_VDEV_STATS_UPDATE_PERIOD,
+ /* peer level stats update period in ms */
+ WMI_10X_PDEV_PARAM_PEER_STATS_UPDATE_PERIOD,
+ /* beacon filter status update period */
+ WMI_10X_PDEV_PARAM_BCNFLT_STATS_UPDATE_PERIOD,
+ /* QOS Mgmt frame protection MFP/PMF 0: disable, 1: enable */
+ WMI_10X_PDEV_PARAM_PMF_QOS,
+ /* Access category on which ARP and DHCP frames are sent */
+ WMI_10X_PDEV_PARAM_ARPDHCP_AC_OVERRIDE,
+ /* DCS configuration */
+ WMI_10X_PDEV_PARAM_DCS,
+ /* Enable/Disable ANI on target */
+ WMI_10X_PDEV_PARAM_ANI_ENABLE,
+ /* configure the ANI polling period */
+ WMI_10X_PDEV_PARAM_ANI_POLL_PERIOD,
+ /* configure the ANI listening period */
+ WMI_10X_PDEV_PARAM_ANI_LISTEN_PERIOD,
+ /* configure OFDM immunity level */
+ WMI_10X_PDEV_PARAM_ANI_OFDM_LEVEL,
+ /* configure CCK immunity level */
+ WMI_10X_PDEV_PARAM_ANI_CCK_LEVEL,
+ /* Enable/Disable CDD for 1x1 STAs in rate control module */
+ WMI_10X_PDEV_PARAM_DYNTXCHAIN,
+ /* Enable/Disable fast channel reset */
+ WMI_10X_PDEV_PARAM_FAST_CHANNEL_RESET,
+ /* Set bursting duration */
+ WMI_10X_PDEV_PARAM_BURST_DUR,
+ /* Set bursting enable */
+ WMI_10X_PDEV_PARAM_BURST_ENABLE,
+
+ /* following are available as of firmware 10.2 */
+ WMI_10X_PDEV_PARAM_SMART_ANTENNA_DEFAULT_ANTENNA,
+ WMI_10X_PDEV_PARAM_IGMPMLD_OVERRIDE,
+ WMI_10X_PDEV_PARAM_IGMPMLD_TID,
+ WMI_10X_PDEV_PARAM_ANTENNA_GAIN,
+ WMI_10X_PDEV_PARAM_RX_DECAP_MODE,
+ WMI_10X_PDEV_PARAM_RX_FILTER,
+ WMI_10X_PDEV_PARAM_SET_MCAST_TO_UCAST_TID,
+ WMI_10X_PDEV_PARAM_PROXY_STA_MODE,
+ WMI_10X_PDEV_PARAM_SET_MCAST2UCAST_MODE,
+ WMI_10X_PDEV_PARAM_SET_MCAST2UCAST_BUFFER,
+ WMI_10X_PDEV_PARAM_REMOVE_MCAST2UCAST_BUFFER,
+ WMI_10X_PDEV_PARAM_PEER_STA_PS_STATECHG_ENABLE,
+ WMI_10X_PDEV_PARAM_RTS_FIXED_RATE,
+ WMI_10X_PDEV_PARAM_CAL_PERIOD,
+ WMI_10X_PDEV_PARAM_ATF_STRICT_SCH,
+ WMI_10X_PDEV_PARAM_ATF_SCHED_DURATION,
+ WMI_10X_PDEV_PARAM_SET_PROMISC_MODE_CMDID,
+ WMI_10X_PDEV_PARAM_PDEV_RESET
+};
+
+enum wmi_10_4_pdev_param {
+ WMI_10_4_PDEV_PARAM_TX_CHAIN_MASK = 0x1,
+ WMI_10_4_PDEV_PARAM_RX_CHAIN_MASK,
+ WMI_10_4_PDEV_PARAM_TXPOWER_LIMIT2G,
+ WMI_10_4_PDEV_PARAM_TXPOWER_LIMIT5G,
+ WMI_10_4_PDEV_PARAM_TXPOWER_SCALE,
+ WMI_10_4_PDEV_PARAM_BEACON_GEN_MODE,
+ WMI_10_4_PDEV_PARAM_BEACON_TX_MODE,
+ WMI_10_4_PDEV_PARAM_RESMGR_OFFCHAN_MODE,
+ WMI_10_4_PDEV_PARAM_PROTECTION_MODE,
+ WMI_10_4_PDEV_PARAM_DYNAMIC_BW,
+ WMI_10_4_PDEV_PARAM_NON_AGG_SW_RETRY_TH,
+ WMI_10_4_PDEV_PARAM_AGG_SW_RETRY_TH,
+ WMI_10_4_PDEV_PARAM_STA_KICKOUT_TH,
+ WMI_10_4_PDEV_PARAM_AC_AGGRSIZE_SCALING,
+ WMI_10_4_PDEV_PARAM_LTR_ENABLE,
+ WMI_10_4_PDEV_PARAM_LTR_AC_LATENCY_BE,
+ WMI_10_4_PDEV_PARAM_LTR_AC_LATENCY_BK,
+ WMI_10_4_PDEV_PARAM_LTR_AC_LATENCY_VI,
+ WMI_10_4_PDEV_PARAM_LTR_AC_LATENCY_VO,
+ WMI_10_4_PDEV_PARAM_LTR_AC_LATENCY_TIMEOUT,
+ WMI_10_4_PDEV_PARAM_LTR_SLEEP_OVERRIDE,
+ WMI_10_4_PDEV_PARAM_LTR_RX_OVERRIDE,
+ WMI_10_4_PDEV_PARAM_LTR_TX_ACTIVITY_TIMEOUT,
+ WMI_10_4_PDEV_PARAM_L1SS_ENABLE,
+ WMI_10_4_PDEV_PARAM_DSLEEP_ENABLE,
+ WMI_10_4_PDEV_PARAM_PCIELP_TXBUF_FLUSH,
+ WMI_10_4_PDEV_PARAM_PCIELP_TXBUF_WATERMARK,
+ WMI_10_4_PDEV_PARAM_PCIELP_TXBUF_TMO_EN,
+ WMI_10_4_PDEV_PARAM_PCIELP_TXBUF_TMO_VALUE,
+ WMI_10_4_PDEV_PARAM_PDEV_STATS_UPDATE_PERIOD,
+ WMI_10_4_PDEV_PARAM_VDEV_STATS_UPDATE_PERIOD,
+ WMI_10_4_PDEV_PARAM_PEER_STATS_UPDATE_PERIOD,
+ WMI_10_4_PDEV_PARAM_BCNFLT_STATS_UPDATE_PERIOD,
+ WMI_10_4_PDEV_PARAM_PMF_QOS,
+ WMI_10_4_PDEV_PARAM_ARP_AC_OVERRIDE,
+ WMI_10_4_PDEV_PARAM_DCS,
+ WMI_10_4_PDEV_PARAM_ANI_ENABLE,
+ WMI_10_4_PDEV_PARAM_ANI_POLL_PERIOD,
+ WMI_10_4_PDEV_PARAM_ANI_LISTEN_PERIOD,
+ WMI_10_4_PDEV_PARAM_ANI_OFDM_LEVEL,
+ WMI_10_4_PDEV_PARAM_ANI_CCK_LEVEL,
+ WMI_10_4_PDEV_PARAM_DYNTXCHAIN,
+ WMI_10_4_PDEV_PARAM_PROXY_STA,
+ WMI_10_4_PDEV_PARAM_IDLE_PS_CONFIG,
+ WMI_10_4_PDEV_PARAM_POWER_GATING_SLEEP,
+ WMI_10_4_PDEV_PARAM_AGGR_BURST,
+ WMI_10_4_PDEV_PARAM_RX_DECAP_MODE,
+ WMI_10_4_PDEV_PARAM_FAST_CHANNEL_RESET,
+ WMI_10_4_PDEV_PARAM_BURST_DUR,
+ WMI_10_4_PDEV_PARAM_BURST_ENABLE,
+ WMI_10_4_PDEV_PARAM_SMART_ANTENNA_DEFAULT_ANTENNA,
+ WMI_10_4_PDEV_PARAM_IGMPMLD_OVERRIDE,
+ WMI_10_4_PDEV_PARAM_IGMPMLD_TID,
+ WMI_10_4_PDEV_PARAM_ANTENNA_GAIN,
+ WMI_10_4_PDEV_PARAM_RX_FILTER,
+ WMI_10_4_PDEV_SET_MCAST_TO_UCAST_TID,
+ WMI_10_4_PDEV_PARAM_PROXY_STA_MODE,
+ WMI_10_4_PDEV_PARAM_SET_MCAST2UCAST_MODE,
+ WMI_10_4_PDEV_PARAM_SET_MCAST2UCAST_BUFFER,
+ WMI_10_4_PDEV_PARAM_REMOVE_MCAST2UCAST_BUFFER,
+ WMI_10_4_PDEV_PEER_STA_PS_STATECHG_ENABLE,
+ WMI_10_4_PDEV_PARAM_IGMPMLD_AC_OVERRIDE,
+ WMI_10_4_PDEV_PARAM_BLOCK_INTERBSS,
+ WMI_10_4_PDEV_PARAM_SET_DISABLE_RESET_CMDID,
+ WMI_10_4_PDEV_PARAM_SET_MSDU_TTL_CMDID,
+ WMI_10_4_PDEV_PARAM_SET_PPDU_DURATION_CMDID,
+ WMI_10_4_PDEV_PARAM_TXBF_SOUND_PERIOD_CMDID,
+ WMI_10_4_PDEV_PARAM_SET_PROMISC_MODE_CMDID,
+ WMI_10_4_PDEV_PARAM_SET_BURST_MODE_CMDID,
+ WMI_10_4_PDEV_PARAM_EN_STATS,
+ WMI_10_4_PDEV_PARAM_MU_GROUP_POLICY,
+ WMI_10_4_PDEV_PARAM_NOISE_DETECTION,
+ WMI_10_4_PDEV_PARAM_NOISE_THRESHOLD,
+ WMI_10_4_PDEV_PARAM_DPD_ENABLE,
+ WMI_10_4_PDEV_PARAM_SET_MCAST_BCAST_ECHO,
+ WMI_10_4_PDEV_PARAM_ATF_STRICT_SCH,
+ WMI_10_4_PDEV_PARAM_ATF_SCHED_DURATION,
+ WMI_10_4_PDEV_PARAM_ANT_PLZN,
+ WMI_10_4_PDEV_PARAM_MGMT_RETRY_LIMIT,
+ WMI_10_4_PDEV_PARAM_SENSITIVITY_LEVEL,
+ WMI_10_4_PDEV_PARAM_SIGNED_TXPOWER_2G,
+ WMI_10_4_PDEV_PARAM_SIGNED_TXPOWER_5G,
+ WMI_10_4_PDEV_PARAM_ENABLE_PER_TID_AMSDU,
+ WMI_10_4_PDEV_PARAM_ENABLE_PER_TID_AMPDU,
+ WMI_10_4_PDEV_PARAM_CCA_THRESHOLD,
+ WMI_10_4_PDEV_PARAM_RTS_FIXED_RATE,
+ WMI_10_4_PDEV_PARAM_CAL_PERIOD,
+ WMI_10_4_PDEV_PARAM_PDEV_RESET,
+ WMI_10_4_PDEV_PARAM_WAPI_MBSSID_OFFSET,
+ WMI_10_4_PDEV_PARAM_ARP_SRCADDR,
+ WMI_10_4_PDEV_PARAM_ARP_DSTADDR,
+ WMI_10_4_PDEV_PARAM_TXPOWER_DECR_DB,
+ WMI_10_4_PDEV_PARAM_RX_BATCHMODE,
+ WMI_10_4_PDEV_PARAM_PACKET_AGGR_DELAY,
+ WMI_10_4_PDEV_PARAM_ATF_OBSS_NOISE_SCH,
+ WMI_10_4_PDEV_PARAM_ATF_OBSS_NOISE_SCALING_FACTOR,
+ WMI_10_4_PDEV_PARAM_CUST_TXPOWER_SCALE,
+ WMI_10_4_PDEV_PARAM_ATF_DYNAMIC_ENABLE,
+ WMI_10_4_PDEV_PARAM_ATF_SSID_GROUP_POLICY,
+ WMI_10_4_PDEV_PARAM_ENABLE_BTCOEX,
+};
+
+struct wmi_pdev_set_param_cmd {
+ __le32 param_id;
+ __le32 param_value;
+} __packed;
+
+/* valid period is 1 ~ 60000ms, unit in millisecond */
+#define WMI_PDEV_PARAM_CAL_PERIOD_MAX 60000
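+
+/*
+ * Illustrative sketch, not part of the driver: filling the fixed-size
+ * set-param command above. Words bound for the target are little endian,
+ * hence __cpu_to_le32(); allocating and sending the WMI buffer is assumed
+ * to be handled by the caller.
+ */
+static inline void
+wmi_pdev_set_param_fill(struct wmi_pdev_set_param_cmd *cmd,
+ u32 param_id, u32 param_value)
+{
+ cmd->param_id = __cpu_to_le32(param_id);
+ cmd->param_value = __cpu_to_le32(param_value);
+}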
+
+struct wmi_pdev_get_tpc_config_cmd {
+ /* parameter */
+ __le32 param;
+} __packed;
+
+#define WMI_TPC_CONFIG_PARAM 1
+#define WMI_TPC_FINAL_RATE_MAX 240
+#define WMI_TPC_TX_N_CHAIN 4
+#define WMI_TPC_RATE_MAX (WMI_TPC_TX_N_CHAIN * 65)
+#define WMI_TPC_PREAM_TABLE_MAX 10
+#define WMI_TPC_FLAG 3
+#define WMI_TPC_BUF_SIZE 10
+#define WMI_TPC_BEAMFORMING 2
+
+enum wmi_tpc_table_type {
+ WMI_TPC_TABLE_TYPE_CDD = 0,
+ WMI_TPC_TABLE_TYPE_STBC = 1,
+ WMI_TPC_TABLE_TYPE_TXBF = 2,
+};
+
+enum wmi_tpc_config_event_flag {
+ WMI_TPC_CONFIG_EVENT_FLAG_TABLE_CDD = 0x1,
+ WMI_TPC_CONFIG_EVENT_FLAG_TABLE_STBC = 0x2,
+ WMI_TPC_CONFIG_EVENT_FLAG_TABLE_TXBF = 0x4,
+};
+
+struct wmi_pdev_tpc_config_event {
+ __le32 reg_domain;
+ __le32 chan_freq;
+ __le32 phy_mode;
+ __le32 twice_antenna_reduction;
+ __le32 twice_max_rd_power;
+ a_sle32 twice_antenna_gain;
+ __le32 power_limit;
+ __le32 rate_max;
+ __le32 num_tx_chain;
+ __le32 ctl;
+ __le32 flags;
+ s8 max_reg_allow_pow[WMI_TPC_TX_N_CHAIN];
+ s8 max_reg_allow_pow_agcdd[WMI_TPC_TX_N_CHAIN][WMI_TPC_TX_N_CHAIN];
+ s8 max_reg_allow_pow_agstbc[WMI_TPC_TX_N_CHAIN][WMI_TPC_TX_N_CHAIN];
+ s8 max_reg_allow_pow_agtxbf[WMI_TPC_TX_N_CHAIN][WMI_TPC_TX_N_CHAIN];
+ u8 rates_array[WMI_TPC_RATE_MAX];
+} __packed;
+
+/* Transmit power scale factor. */
+enum wmi_tp_scale {
+ WMI_TP_SCALE_MAX = 0, /* no scaling (default) */
+ WMI_TP_SCALE_50 = 1, /* 50% of max (-3 dBm) */
+ WMI_TP_SCALE_25 = 2, /* 25% of max (-6 dBm) */
+ WMI_TP_SCALE_12 = 3, /* 12% of max (-9 dBm) */
+ WMI_TP_SCALE_MIN = 4, /* min, but still on */
+ WMI_TP_SCALE_SIZE = 5, /* max num of enum */
+};
+
+struct wmi_pdev_tpc_final_table_event {
+ __le32 reg_domain;
+ __le32 chan_freq;
+ __le32 phy_mode;
+ __le32 twice_antenna_reduction;
+ __le32 twice_max_rd_power;
+ a_sle32 twice_antenna_gain;
+ __le32 power_limit;
+ __le32 rate_max;
+ __le32 num_tx_chain;
+ __le32 ctl;
+ __le32 flags;
+ s8 max_reg_allow_pow[WMI_TPC_TX_N_CHAIN];
+ s8 max_reg_allow_pow_agcdd[WMI_TPC_TX_N_CHAIN][WMI_TPC_TX_N_CHAIN];
+ s8 max_reg_allow_pow_agstbc[WMI_TPC_TX_N_CHAIN][WMI_TPC_TX_N_CHAIN];
+ s8 max_reg_allow_pow_agtxbf[WMI_TPC_TX_N_CHAIN][WMI_TPC_TX_N_CHAIN];
+ u8 rates_array[WMI_TPC_FINAL_RATE_MAX];
+ u8 ctl_power_table[WMI_TPC_BEAMFORMING][WMI_TPC_TX_N_CHAIN]
+ [WMI_TPC_TX_N_CHAIN];
+} __packed;
+
+struct wmi_pdev_get_tpc_table_cmd {
+ __le32 param;
+} __packed;
+
+enum wmi_tpc_pream_2ghz {
+ WMI_TPC_PREAM_2GHZ_CCK = 0,
+ WMI_TPC_PREAM_2GHZ_OFDM,
+ WMI_TPC_PREAM_2GHZ_HT20,
+ WMI_TPC_PREAM_2GHZ_HT40,
+ WMI_TPC_PREAM_2GHZ_VHT20,
+ WMI_TPC_PREAM_2GHZ_VHT40,
+ WMI_TPC_PREAM_2GHZ_VHT80,
+};
+
+enum wmi_tpc_pream_5ghz {
+ WMI_TPC_PREAM_5GHZ_OFDM = 1,
+ WMI_TPC_PREAM_5GHZ_HT20,
+ WMI_TPC_PREAM_5GHZ_HT40,
+ WMI_TPC_PREAM_5GHZ_VHT20,
+ WMI_TPC_PREAM_5GHZ_VHT40,
+ WMI_TPC_PREAM_5GHZ_VHT80,
+ WMI_TPC_PREAM_5GHZ_HTCUP,
+};
+
+struct wmi_pdev_chanlist_update_event {
+ /* number of channels */
+ __le32 num_chan;
+ /* array of channels */
+ struct wmi_channel channel_list[1];
+} __packed;
+
+#define WMI_MAX_DEBUG_MESG (sizeof(u32) * 32)
+
+struct wmi_debug_mesg_event {
+ /* message buffer, NULL terminated */
+ char bufp[WMI_MAX_DEBUG_MESG];
+} __packed;
+
+enum {
+ /* P2P device */
+ VDEV_SUBTYPE_P2PDEV = 0,
+ /* P2P client */
+ VDEV_SUBTYPE_P2PCLI,
+ /* P2P GO */
+ VDEV_SUBTYPE_P2PGO,
+ /* BT3.0 HS */
+ VDEV_SUBTYPE_BT,
+};
+
+struct wmi_pdev_set_channel_cmd {
+ /* ignore power; only use flags, mode and freq */
+ struct wmi_channel chan;
+} __packed;
+
+struct wmi_pdev_pktlog_enable_cmd {
+ __le32 ev_bitmap;
+} __packed;
+
+/* Customize the DSCP (bit) to TID (0-7) mapping for QOS */
+#define WMI_DSCP_MAP_MAX (64)
+struct wmi_pdev_set_dscp_tid_map_cmd {
+ /* map indicating DSCP to TID conversion */
+ __le32 dscp_to_tid_map[WMI_DSCP_MAP_MAX];
+} __packed;
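+
+/*
+ * Illustrative sketch, not part of the driver: one plausible default is to
+ * derive the TID from the upper three DSCP bits (dscp >> 3 yields 0-7);
+ * the actual mapping policy is up to the host.
+ */
+static inline void
+wmi_dscp_tid_map_fill_default(struct wmi_pdev_set_dscp_tid_map_cmd *cmd)
+{
+ int dscp;
+
+ for (dscp = 0; dscp < WMI_DSCP_MAP_MAX; dscp++)
+ cmd->dscp_to_tid_map[dscp] = __cpu_to_le32(dscp >> 3);
+}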
+
+enum mcast_bcast_rate_id {
+ WMI_SET_MCAST_RATE,
+ WMI_SET_BCAST_RATE
+};
+
+struct mcast_bcast_rate {
+ enum mcast_bcast_rate_id rate_id;
+ __le32 rate;
+} __packed;
+
+struct wmi_wmm_params {
+ __le32 cwmin;
+ __le32 cwmax;
+ __le32 aifs;
+ __le32 txop;
+ __le32 acm;
+ __le32 no_ack;
+} __packed;
+
+struct wmi_pdev_set_wmm_params {
+ struct wmi_wmm_params ac_be;
+ struct wmi_wmm_params ac_bk;
+ struct wmi_wmm_params ac_vi;
+ struct wmi_wmm_params ac_vo;
+} __packed;
+
+struct wmi_wmm_params_arg {
+ u32 cwmin;
+ u32 cwmax;
+ u32 aifs;
+ u32 txop;
+ u32 acm;
+ u32 no_ack;
+};
+
+struct wmi_wmm_params_all_arg {
+ struct wmi_wmm_params_arg ac_be;
+ struct wmi_wmm_params_arg ac_bk;
+ struct wmi_wmm_params_arg ac_vi;
+ struct wmi_wmm_params_arg ac_vo;
+};
+
+struct wmi_pdev_stats_tx {
+ /* Num HTT cookies queued to dispatch list */
+ __le32 comp_queued;
+
+ /* Num HTT cookies dispatched */
+ __le32 comp_delivered;
+
+ /* Num MSDU queued to WAL */
+ __le32 msdu_enqued;
+
+ /* Num MPDU queue to WAL */
+ __le32 mpdu_enqued;
+
+ /* Num MSDUs dropped by WMM limit */
+ __le32 wmm_drop;
+
+ /* Num Local frames queued */
+ __le32 local_enqued;
+
+ /* Num Local frames done */
+ __le32 local_freed;
+
+ /* Num queued to HW */
+ __le32 hw_queued;
+
+ /* Num PPDU reaped from HW */
+ __le32 hw_reaped;
+
+ /* Num underruns */
+ __le32 underrun;
+
+ /* Num PPDUs cleaned up in TX abort */
+ __le32 tx_abort;
+
+ /* Num MPDUs requeued by SW */
+ __le32 mpdus_requed;
+
+ /* excessive retries */
+ __le32 tx_ko;
+
+ /* data hw rate code */
+ __le32 data_rc;
+
+ /* Scheduler self triggers */
+ __le32 self_triggers;
+
+ /* frames dropped due to excessive sw retries */
+ __le32 sw_retry_failure;
+
+ /* illegal rate phy errors */
+ __le32 illgl_rate_phy_err;
+
+ /* wal pdev continuous xretry */
+ __le32 pdev_cont_xretry;
+
+ /* wal pdev tx timeouts */
+ __le32 pdev_tx_timeout;
+
+ /* wal pdev resets */
+ __le32 pdev_resets;
+
+ /* frames dropped due to non-availability of stateless TIDs */
+ __le32 stateless_tid_alloc_failure;
+
+ __le32 phy_underrun;
+
+ /* MPDU is more than txop limit */
+ __le32 txop_ovf;
+} __packed;
+
+struct wmi_10_4_pdev_stats_tx {
+ /* Num HTT cookies queued to dispatch list */
+ __le32 comp_queued;
+
+ /* Num HTT cookies dispatched */
+ __le32 comp_delivered;
+
+ /* Num MSDU queued to WAL */
+ __le32 msdu_enqued;
+
+ /* Num MPDU queue to WAL */
+ __le32 mpdu_enqued;
+
+ /* Num MSDUs dropped by WMM limit */
+ __le32 wmm_drop;
+
+ /* Num Local frames queued */
+ __le32 local_enqued;
+
+ /* Num Local frames done */
+ __le32 local_freed;
+
+ /* Num queued to HW */
+ __le32 hw_queued;
+
+ /* Num PPDU reaped from HW */
+ __le32 hw_reaped;
+
+ /* Num underruns */
+ __le32 underrun;
+
+ /* HW Paused. */
+ __le32 hw_paused;
+
+ /* Num PPDUs cleaned up in TX abort */
+ __le32 tx_abort;
+
+ /* Num MPDUs requeued by SW */
+ __le32 mpdus_requed;
+
+ /* excessive retries */
+ __le32 tx_ko;
+
+ /* data hw rate code */
+ __le32 data_rc;
+
+ /* Scheduler self triggers */
+ __le32 self_triggers;
+
+ /* frames dropped due to excessive sw retries */
+ __le32 sw_retry_failure;
+
+ /* illegal rate phy errors */
+ __le32 illgl_rate_phy_err;
+
+ /* wal pdev continuous xretry */
+ __le32 pdev_cont_xretry;
+
+ /* wal pdev tx timeouts */
+ __le32 pdev_tx_timeout;
+
+ /* wal pdev resets */
+ __le32 pdev_resets;
+
+ /* frames dropped due to non-availability of stateless TIDs */
+ __le32 stateless_tid_alloc_failure;
+
+ __le32 phy_underrun;
+
+ /* MPDU is more than txop limit */
+ __le32 txop_ovf;
+
+ /* Number of Sequences posted */
+ __le32 seq_posted;
+
+ /* Number of Sequences failed queueing */
+ __le32 seq_failed_queueing;
+
+ /* Number of Sequences completed */
+ __le32 seq_completed;
+
+ /* Number of Sequences restarted */
+ __le32 seq_restarted;
+
+ /* Number of MU Sequences posted */
+ __le32 mu_seq_posted;
+
+ /* Num MPDUs flushed by SW: HW PAUSED, SW TXABORT (reset, channel change) */
+ __le32 mpdus_sw_flush;
+
+ /* Num MPDUs filtered by HW, all filter condition (TTL expired) */
+ __le32 mpdus_hw_filter;
+
+ /* Num MPDUs truncated by PDG
+ * (TXOP, TBTT, PPDU_duration based on rate, dyn_bw)
+ */
+ __le32 mpdus_truncated;
+
+ /* Num MPDUs that were tried but didn't receive ACK or BA */
+ __le32 mpdus_ack_failed;
+
+ /* Num MPDUs that were dropped due to expiry. */
+ __le32 mpdus_expired;
+} __packed;
+
+struct wmi_pdev_stats_rx {
+ /* Counts any change in ring routing mid-PPDU */
+ __le32 mid_ppdu_route_change;
+
+ /* Total number of statuses processed */
+ __le32 status_rcvd;
+
+ /* Extra frags on rings 0-3 */
+ __le32 r0_frags;
+ __le32 r1_frags;
+ __le32 r2_frags;
+ __le32 r3_frags;
+
+ /* MSDUs / MPDUs delivered to HTT */
+ __le32 htt_msdus;
+ __le32 htt_mpdus;
+
+ /* MSDUs / MPDUs delivered to local stack */
+ __le32 loc_msdus;
+ __le32 loc_mpdus;
+
+ /* AMSDUs that have more MSDUs than the status ring size */
+ __le32 oversize_amsdu;
+
+ /* Number of PHY errors */
+ __le32 phy_errs;
+
+ /* Number of PHY errors drops */
+ __le32 phy_err_drop;
+
+ /* Number of mpdu errors - FCS, MIC, ENC etc. */
+ __le32 mpdu_errs;
+} __packed;
+
+struct wmi_pdev_stats_peer {
+ /* REMOVE THIS ONCE REAL PEER STAT COUNTERS ARE ADDED */
+ __le32 dummy;
+} __packed;
+
+enum wmi_stats_id {
+ WMI_STAT_PEER = BIT(0),
+ WMI_STAT_AP = BIT(1),
+ WMI_STAT_PDEV = BIT(2),
+ WMI_STAT_VDEV = BIT(3),
+ WMI_STAT_BCNFLT = BIT(4),
+ WMI_STAT_VDEV_RATE = BIT(5),
+};
+
+enum wmi_10_4_stats_id {
+ WMI_10_4_STAT_PEER = BIT(0),
+ WMI_10_4_STAT_AP = BIT(1),
+ WMI_10_4_STAT_INST = BIT(2),
+ WMI_10_4_STAT_PEER_EXTD = BIT(3),
+ WMI_10_4_STAT_VDEV_EXTD = BIT(4),
+};
+
+struct wlan_inst_rssi_args {
+ __le16 cfg_retry_count;
+ __le16 retry_count;
+};
+
+struct wmi_request_stats_cmd {
+ __le32 stats_id;
+
+ __le32 vdev_id;
+
+ /* peer MAC address */
+ struct wmi_mac_addr peer_macaddr;
+
+ /* Instantaneous RSSI arguments */
+ struct wlan_inst_rssi_args inst_rssi_args;
+} __packed;
+
+/* Suspend option */
+enum {
+ /* suspend */
+ WMI_PDEV_SUSPEND,
+
+ /* suspend and disable all interrupts */
+ WMI_PDEV_SUSPEND_AND_DISABLE_INTR,
+};
+
+struct wmi_pdev_suspend_cmd {
+ /* suspend option sent to target */
+ __le32 suspend_opt;
+} __packed;
+
+struct wmi_stats_event {
+ __le32 stats_id; /* WMI_STAT_ */
+ /*
+ * number of pdev stats event structures
+ * (wmi_pdev_stats) 0 or 1
+ */
+ __le32 num_pdev_stats;
+ /*
+ * number of vdev stats event structures
+ * (wmi_vdev_stats) 0 or max vdevs
+ */
+ __le32 num_vdev_stats;
+ /*
+ * number of peer stats event structures
+ * (wmi_peer_stats) 0 or max peers
+ */
+ __le32 num_peer_stats;
+ __le32 num_bcnflt_stats;
+ /*
+ * followed by
+ * num_pdev_stats * size of(struct wmi_pdev_stats)
+ * num_vdev_stats * size of(struct wmi_vdev_stats)
+ * num_peer_stats * size of(struct wmi_peer_stats)
+ *
+ * By having a zero sized array, the pointer to data area
+ * becomes available without increasing the struct size
+ */
+ u8 data[0];
+} __packed;
+
+struct wmi_10_2_stats_event {
+ __le32 stats_id; /* %WMI_REQUEST_ */
+ __le32 num_pdev_stats;
+ __le32 num_pdev_ext_stats;
+ __le32 num_vdev_stats;
+ __le32 num_peer_stats;
+ __le32 num_bcnflt_stats;
+ u8 data[0];
+} __packed;
+
+/*
+ * PDEV statistics
+ * TODO: add all PDEV stats here
+ */
+struct wmi_pdev_stats_base {
+ __le32 chan_nf;
+ __le32 tx_frame_count; /* Cycles spent transmitting frames */
+ __le32 rx_frame_count; /* Cycles spent receiving frames */
+ __le32 rx_clear_count; /* Total channel busy time, evidently */
+ __le32 cycle_count; /* Total on-channel time */
+ __le32 phy_err_count;
+ __le32 chan_tx_pwr;
+} __packed;
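+
+/*
+ * Illustrative sketch, not part of the driver: the base counters are HW
+ * clock ticks, so a channel busy percentage over a sampling interval can
+ * be derived from the deltas of rx_clear_count and cycle_count between
+ * two snapshots. div_u64() avoids 32-bit overflow of the scaled delta.
+ */
+static inline u32
+wmi_pdev_stats_busy_percent(const struct wmi_pdev_stats_base *prev,
+ const struct wmi_pdev_stats_base *cur)
+{
+ u32 cycles = __le32_to_cpu(cur->cycle_count) -
+ __le32_to_cpu(prev->cycle_count);
+ u32 busy = __le32_to_cpu(cur->rx_clear_count) -
+ __le32_to_cpu(prev->rx_clear_count);
+
+ return cycles ? (u32)div_u64((u64)busy * 100, cycles) : 0;
+}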
+
+struct wmi_pdev_stats {
+ struct wmi_pdev_stats_base base;
+ struct wmi_pdev_stats_tx tx;
+ struct wmi_pdev_stats_rx rx;
+ struct wmi_pdev_stats_peer peer;
+} __packed;
+
+struct wmi_pdev_stats_extra {
+ __le32 ack_rx_bad;
+ __le32 rts_bad;
+ __le32 rts_good;
+ __le32 fcs_bad;
+ __le32 no_beacons;
+ __le32 mib_int_count;
+} __packed;
+
+struct wmi_10x_pdev_stats {
+ struct wmi_pdev_stats_base base;
+ struct wmi_pdev_stats_tx tx;
+ struct wmi_pdev_stats_rx rx;
+ struct wmi_pdev_stats_peer peer;
+ struct wmi_pdev_stats_extra extra;
+} __packed;
+
+struct wmi_pdev_stats_mem {
+ __le32 dram_free;
+ __le32 iram_free;
+} __packed;
+
+struct wmi_10_2_pdev_stats {
+ struct wmi_pdev_stats_base base;
+ struct wmi_pdev_stats_tx tx;
+ __le32 mc_drop;
+ struct wmi_pdev_stats_rx rx;
+ __le32 pdev_rx_timeout;
+ struct wmi_pdev_stats_mem mem;
+ struct wmi_pdev_stats_peer peer;
+ struct wmi_pdev_stats_extra extra;
+} __packed;
+
+struct wmi_10_4_pdev_stats {
+ struct wmi_pdev_stats_base base;
+ struct wmi_10_4_pdev_stats_tx tx;
+ struct wmi_pdev_stats_rx rx;
+ __le32 rx_ovfl_errs;
+ struct wmi_pdev_stats_mem mem;
+ __le32 sram_free_size;
+ struct wmi_pdev_stats_extra extra;
+} __packed;
+
+/*
+ * VDEV statistics
+ */
+
+#define WMI_VDEV_STATS_FTM_COUNT_VALID BIT(31)
+#define WMI_VDEV_STATS_FTM_COUNT_LSB 0
+#define WMI_VDEV_STATS_FTM_COUNT_MASK 0x7fffffff
+
+struct wmi_vdev_stats {
+ __le32 vdev_id;
+} __packed;
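+
+/*
+ * Illustrative sketch, not part of the driver: the data[] area of struct
+ * wmi_stats_event above is a plain concatenation of the pdev, vdev and
+ * peer structures in that order, so locating the peer stats is pointer
+ * arithmetic over the counts. Validating the offsets against the received
+ * buffer length is assumed to happen in the caller.
+ */
+static inline const void *
+wmi_stats_event_peer_stats(const struct wmi_stats_event *ev)
+{
+ return ev->data +
+ __le32_to_cpu(ev->num_pdev_stats) * sizeof(struct wmi_pdev_stats) +
+ __le32_to_cpu(ev->num_vdev_stats) * sizeof(struct wmi_vdev_stats);
+}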
+
+struct wmi_vdev_stats_extd {
+ __le32 vdev_id;
+ __le32 ppdu_aggr_cnt;
+ __le32 ppdu_noack;
+ __le32 mpdu_queued;
+ __le32 ppdu_nonaggr_cnt;
+ __le32 mpdu_sw_requeued;
+ __le32 mpdu_suc_retry;
+ __le32 mpdu_suc_multitry;
+ __le32 mpdu_fail_retry;
+ __le32 tx_ftm_suc;
+ __le32 tx_ftm_suc_retry;
+ __le32 tx_ftm_fail;
+ __le32 rx_ftmr_cnt;
+ __le32 rx_ftmr_dup_cnt;
+ __le32 rx_iftmr_cnt;
+ __le32 rx_iftmr_dup_cnt;
+ __le32 reserved[6];
+} __packed;
+
+/*
+ * peer statistics.
+ * TODO: add more stats
+ */
+struct wmi_peer_stats {
+ struct wmi_mac_addr peer_macaddr;
+ __le32 peer_rssi;
+ __le32 peer_tx_rate;
+} __packed;
+
+struct wmi_10x_peer_stats {
+ struct wmi_peer_stats old;
+ __le32 peer_rx_rate;
+} __packed;
+
+struct wmi_10_2_peer_stats {
+ struct wmi_peer_stats old;
+ __le32 peer_rx_rate;
+ __le32 current_per;
+ __le32 retries;
+ __le32 tx_rate_count;
+ __le32 max_4ms_frame_len;
+ __le32 total_sub_frames;
+ __le32 tx_bytes;
+ __le32 num_pkt_loss_overflow[4];
+ __le32 num_pkt_loss_excess_retry[4];
+} __packed;
+
+struct wmi_10_2_4_peer_stats {
+ struct wmi_10_2_peer_stats common;
+ __le32 peer_rssi_changed;
+} __packed;
+
+struct wmi_10_2_4_ext_peer_stats {
+ struct wmi_10_2_peer_stats common;
+ __le32 peer_rssi_changed;
+ __le32 rx_duration;
+} __packed;
+
+struct wmi_10_4_peer_stats {
+ struct wmi_mac_addr peer_macaddr;
+ __le32 peer_rssi;
+ __le32 peer_rssi_seq_num;
+ __le32 peer_tx_rate;
+ __le32 peer_rx_rate;
+ __le32 current_per;
+ __le32 retries;
+ __le32 tx_rate_count;
+ __le32 max_4ms_frame_len;
+ __le32 total_sub_frames;
+ __le32 tx_bytes;
+ __le32 num_pkt_loss_overflow[4];
+ __le32 num_pkt_loss_excess_retry[4];
+ __le32 peer_rssi_changed;
+} __packed;
+
+struct wmi_10_4_peer_extd_stats {
+ struct wmi_mac_addr peer_macaddr;
+ __le32 inactive_time;
+ __le32 peer_chain_rssi;
+ __le32 rx_duration;
+ __le32 reserved[10];
+} __packed;
+
+struct wmi_10_4_bss_bcn_stats {
+ __le32 vdev_id;
+ __le32 bss_bcns_dropped;
+ __le32 bss_bcn_delivered;
+} __packed;
+
+struct wmi_10_4_bss_bcn_filter_stats {
+ __le32 bcns_dropped;
+ __le32 bcns_delivered;
+ __le32 active_filters;
+ struct wmi_10_4_bss_bcn_stats bss_stats;
+} __packed;
+
+struct wmi_10_2_pdev_ext_stats {
+ __le32 rx_rssi_comb;
+ __le32 rx_rssi[4];
+ __le32 rx_mcs[10];
+ __le32 tx_mcs[10];
+ __le32 ack_rssi;
+} __packed;
+
+struct wmi_vdev_create_cmd {
+ __le32 vdev_id;
+ __le32 vdev_type;
+ __le32 vdev_subtype;
+ struct wmi_mac_addr vdev_macaddr;
+} __packed;
+
+enum wmi_vdev_type {
+ WMI_VDEV_TYPE_AP = 1,
+ WMI_VDEV_TYPE_STA = 2,
+ WMI_VDEV_TYPE_IBSS = 3,
+ WMI_VDEV_TYPE_MONITOR = 4,
+};
+
+enum wmi_vdev_subtype {
+ WMI_VDEV_SUBTYPE_NONE,
+ WMI_VDEV_SUBTYPE_P2P_DEVICE,
+ WMI_VDEV_SUBTYPE_P2P_CLIENT,
+ WMI_VDEV_SUBTYPE_P2P_GO,
+ WMI_VDEV_SUBTYPE_PROXY_STA,
+ WMI_VDEV_SUBTYPE_MESH_11S,
+ WMI_VDEV_SUBTYPE_MESH_NON_11S,
+};
+
+enum wmi_vdev_subtype_legacy {
+ WMI_VDEV_SUBTYPE_LEGACY_NONE = 0,
+ WMI_VDEV_SUBTYPE_LEGACY_P2P_DEV = 1,
+ WMI_VDEV_SUBTYPE_LEGACY_P2P_CLI = 2,
+ WMI_VDEV_SUBTYPE_LEGACY_P2P_GO = 3,
+ WMI_VDEV_SUBTYPE_LEGACY_PROXY_STA = 4,
+};
+
+enum wmi_vdev_subtype_10_2_4 {
+ WMI_VDEV_SUBTYPE_10_2_4_NONE = 0,
+ WMI_VDEV_SUBTYPE_10_2_4_P2P_DEV = 1,
+ WMI_VDEV_SUBTYPE_10_2_4_P2P_CLI = 2,
+ WMI_VDEV_SUBTYPE_10_2_4_P2P_GO = 3,
+ WMI_VDEV_SUBTYPE_10_2_4_PROXY_STA = 4,
+ WMI_VDEV_SUBTYPE_10_2_4_MESH_11S = 5,
+};
+
+enum wmi_vdev_subtype_10_4 {
+ WMI_VDEV_SUBTYPE_10_4_NONE = 0,
+ WMI_VDEV_SUBTYPE_10_4_P2P_DEV = 1,
+ WMI_VDEV_SUBTYPE_10_4_P2P_CLI = 2,
+ WMI_VDEV_SUBTYPE_10_4_P2P_GO = 3,
+ WMI_VDEV_SUBTYPE_10_4_PROXY_STA = 4,
+ WMI_VDEV_SUBTYPE_10_4_MESH_NON_11S = 5,
+ WMI_VDEV_SUBTYPE_10_4_MESH_11S = 6,
+};
+
+/* values for vdev_subtype */
+
+/* values for vdev_start_request flags */
+/*
+ * Indicates that the AP VDEV uses a hidden SSID. Only valid for
+ * AP/GO.
+ */
+#define WMI_VDEV_START_HIDDEN_SSID (1 << 0)
+/*
+ * Indicates whether robust management frame protection (RMF/PMF) is
+ * enabled. For GO/AP vdevs, it indicates that the vdev may support
+ * station/client associations with RMF enabled. For STA/client
+ * vdevs, it indicates that the STA will associate with an AP with
+ * RMF enabled.
+ */
+#define WMI_VDEV_START_PMF_ENABLED (1 << 1)
+
+struct wmi_p2p_noa_descriptor {
+ __le32 type_count; /* 255: continuous schedule, 0: reserved */
+ __le32 duration; /* Absent period duration in micro seconds */
+ __le32 interval; /* Absent period interval in micro seconds */
+ __le32 start_time; /* 32 bit TSF time when it starts */
+} __packed;
+
+struct wmi_vdev_start_request_cmd {
+ /* WMI channel */
+ struct wmi_channel chan;
+ /* unique id identifying the VDEV, generated by the caller */
+ __le32 vdev_id;
+ /* requestor id identifying the caller module */
+ __le32 requestor_id;
+ /* beacon interval from received beacon */
+ __le32 beacon_interval;
+ /* DTIM Period from the received beacon */
+ __le32 dtim_period;
+ /* Flags */
+ __le32 flags;
+ /* ssid field. Only valid for AP/GO/IBSS/BTAmp VDEV type. */
+ struct wmi_ssid ssid;
+ /* beacon/probe response xmit rate. Applicable for SoftAP. */
+ __le32 bcn_tx_rate;
+ /* beacon/probe response xmit power. Applicable for SoftAP. */
+ __le32 bcn_tx_power;
+ /* number of p2p NOA descriptor(s) from scan entry */
+ __le32 num_noa_descriptors;
+ /*
+ * Disable H/W ack. This is used by WMI_VDEV_RESTART_REQUEST_CMDID.
+ * During CAC, our HW shouldn't ack detected frames.
+ */
+ __le32 disable_hw_ack;
+ /* actual p2p NOA descriptor from scan entry */
+ struct wmi_p2p_noa_descriptor noa_descriptors[2];
+} __packed;
+
+struct wmi_vdev_restart_request_cmd {
+ struct wmi_vdev_start_request_cmd vdev_start_request_cmd;
+} __packed;
+
+struct wmi_vdev_start_request_arg {
+ u32 vdev_id;
+ struct wmi_channel_arg channel;
+ u32 bcn_intval;
+ u32 dtim_period;
+ u8 *ssid;
+ u32 ssid_len;
+ u32 bcn_tx_rate;
+ u32 bcn_tx_power;
+ bool disable_hw_ack;
+ bool hidden_ssid;
+ bool pmf_enabled;
+};
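+
+/*
+ * Illustrative sketch, not part of the driver: deriving the start request
+ * flags word from the bool fields of struct wmi_vdev_start_request_arg
+ * using the WMI_VDEV_START_* bits defined above.
+ */
+static inline u32
+wmi_vdev_start_flags(const struct wmi_vdev_start_request_arg *arg)
+{
+ u32 flags = 0;
+
+ if (arg->hidden_ssid)
+ flags |= WMI_VDEV_START_HIDDEN_SSID;
+ if (arg->pmf_enabled)
+ flags |= WMI_VDEV_START_PMF_ENABLED;
+
+ return flags;
+}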
+
+struct wmi_vdev_delete_cmd {
+ /* unique id identifying the VDEV, generated by the caller */
+ __le32 vdev_id;
+} __packed;
+
+struct wmi_vdev_up_cmd {
+ __le32 vdev_id;
+ __le32 vdev_assoc_id;
+ struct wmi_mac_addr vdev_bssid;
+} __packed;
+
+struct wmi_vdev_stop_cmd {
+ __le32 vdev_id;
+} __packed;
+
+struct wmi_vdev_down_cmd {
+ __le32 vdev_id;
+} __packed;
+
+struct wmi_vdev_standby_response_cmd {
+ /* unique id identifying the VDEV, generated by the caller */
+ __le32 vdev_id;
+} __packed;
+
+struct wmi_vdev_resume_response_cmd {
+ /* unique id identifying the VDEV, generated by the caller */
+ __le32 vdev_id;
+} __packed;
+
+struct wmi_vdev_set_param_cmd {
+ __le32 vdev_id;
+ __le32 param_id;
+ __le32 param_value;
+} __packed;
+
+#define WMI_MAX_KEY_INDEX 3
+#define WMI_MAX_KEY_LEN 32
+
+#define WMI_KEY_PAIRWISE 0x00
+#define WMI_KEY_GROUP 0x01
+#define WMI_KEY_TX_USAGE 0x02 /* default tx key - static wep */
+
+struct wmi_key_seq_counter {
+ __le32 key_seq_counter_l;
+ __le32 key_seq_counter_h;
+} __packed;
+
+#define WMI_CIPHER_NONE 0x0 /* clear key */
+#define WMI_CIPHER_WEP 0x1
+#define WMI_CIPHER_TKIP 0x2
+#define WMI_CIPHER_AES_OCB 0x3
+#define WMI_CIPHER_AES_CCM 0x4
+#define WMI_CIPHER_WAPI 0x5
+#define WMI_CIPHER_CKIP 0x6
+#define WMI_CIPHER_AES_CMAC 0x7
+#define WMI_CIPHER_AES_GCM 0x8
+
+struct wmi_vdev_install_key_cmd {
+ __le32 vdev_id;
+ struct wmi_mac_addr peer_macaddr;
+ __le32 key_idx;
+ __le32 key_flags;
+ __le32 key_cipher; /* %WMI_CIPHER_ */
+ struct wmi_key_seq_counter key_rsc_counter;
+ struct wmi_key_seq_counter key_global_rsc_counter;
+ struct wmi_key_seq_counter key_tsc_counter;
+ u8 wpi_key_rsc_counter[16];
+ u8 wpi_key_tsc_counter[16];
+ __le32 key_len;
+ __le32 key_txmic_len;
+ __le32 key_rxmic_len;
+
+ /* contains key followed by tx mic followed by rx mic */
+ u8 key_data[0];
+} __packed;
+
+struct wmi_vdev_install_key_arg {
+ u32 vdev_id;
+ const u8 *macaddr;
+ u32 key_idx;
+ u32 key_flags;
+ u32 key_cipher;
+ u32 key_len;
+ u32 key_txmic_len;
+ u32 key_rxmic_len;
+ const void *key_data;
+};
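+
+/*
+ * Illustrative sketch, not part of the driver: because key_data[] is a
+ * zero-length array, the full command length is the fixed header plus the
+ * key material; this assumes key_len already covers the concatenated
+ * key + tx mic + rx mic bytes described in the key_data comment above.
+ */
+static inline size_t
+wmi_vdev_install_key_cmd_len(const struct wmi_vdev_install_key_arg *arg)
+{
+ return sizeof(struct wmi_vdev_install_key_cmd) + arg->key_len;
+}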
+
+/*
+ * vdev fixed rate format:
+ * - preamble - b7:b6 - see WMI_RATE_PREAMBLE_
+ * - nss - b5:b4 - ss number (0 means 1ss)
+ * - rate_mcs - b3:b0 - as below
+ * CCK: 0 - 11Mbps, 1 - 5.5Mbps, 2 - 2Mbps, 3 - 1Mbps,
+ * 4 - 11Mbps (s), 5 - 5.5Mbps (s), 6 - 2Mbps (s)
+ * OFDM: 0 - 48Mbps, 1 - 24Mbps, 2 - 12Mbps, 3 - 6Mbps,
+ * 4 - 54Mbps, 5 - 36Mbps, 6 - 18Mbps, 7 - 9Mbps
+ * HT/VHT: MCS index
+ */
+
+/* Preamble types to be used with VDEV fixed rate configuration */
+enum wmi_rate_preamble {
+ WMI_RATE_PREAMBLE_OFDM,
+ WMI_RATE_PREAMBLE_CCK,
+ WMI_RATE_PREAMBLE_HT,
+ WMI_RATE_PREAMBLE_VHT,
+};
+
+#define ATH10K_HW_NSS(rate) (1 + (((rate) >> 4) & 0x3))
+#define ATH10K_HW_PREAMBLE(rate) (((rate) >> 6) & 0x3)
+#define ATH10K_HW_MCS_RATE(rate) ((rate) & 0xf)
+#define ATH10K_HW_LEGACY_RATE(rate) ((rate) & 0x3f)
+#define ATH10K_HW_BW(flags) (((flags) >> 3) & 0x3)
+#define ATH10K_HW_GI(flags) (((flags) >> 5) & 0x1)
+#define ATH10K_HW_RATECODE(rate, nss, preamble) \
+ (((preamble) << 6) | ((nss) << 4) | (rate))
+
+#define VHT_MCS_NUM 10
+#define VHT_BW_NUM 4
+#define VHT_NSS_NUM 4
+
+/* Value to disable fixed rate setting */
+#define WMI_FIXED_RATE_NONE (0xff)
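+
+/*
+ * Illustrative sketch, not part of the driver: composing a fixed rate code
+ * with the helpers above. Note the nss field stores (nss - 1), matching
+ * ATH10K_HW_NSS(). E.g. VHT MCS7 at 2ss packs as
+ * (WMI_RATE_PREAMBLE_VHT << 6) | ((2 - 1) << 4) | 7 = 0xd7.
+ */
+static inline u8 wmi_vht_rate_code(u8 mcs, u8 nss)
+{
+ return ATH10K_HW_RATECODE(mcs, nss - 1, WMI_RATE_PREAMBLE_VHT);
+}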
+
+struct wmi_vdev_param_map {
+ u32 rts_threshold;
+ u32 fragmentation_threshold;
+ u32 beacon_interval;
+ u32 listen_interval;
+ u32 multicast_rate;
+ u32 mgmt_tx_rate;
+ u32 slot_time;
+ u32 preamble;
+ u32 swba_time;
+ u32 wmi_vdev_stats_update_period;
+ u32 wmi_vdev_pwrsave_ageout_time;
+ u32 wmi_vdev_host_swba_interval;
+ u32 dtim_period;
+ u32 wmi_vdev_oc_scheduler_air_time_limit;
+ u32 wds;
+ u32 atim_window;
+ u32 bmiss_count_max;
+ u32 bmiss_first_bcnt;
+ u32 bmiss_final_bcnt;
+ u32 feature_wmm;
+ u32 chwidth;
+ u32 chextoffset;
+ u32 disable_htprotection;
+ u32 sta_quickkickout;
+ u32 mgmt_rate;
+ u32 protection_mode;
+ u32 fixed_rate;
+ u32 sgi;
+ u32 ldpc;
+ u32 tx_stbc;
+ u32 rx_stbc;
+ u32 intra_bss_fwd;
+ u32 def_keyid;
+ u32 nss;
+ u32 bcast_data_rate;
+ u32 mcast_data_rate;
+ u32 mcast_indicate;
+ u32 dhcp_indicate;
+ u32 unknown_dest_indicate;
+ u32 ap_keepalive_min_idle_inactive_time_secs;
+ u32 ap_keepalive_max_idle_inactive_time_secs;
+ u32 ap_keepalive_max_unresponsive_time_secs;
+ u32 ap_enable_nawds;
+ u32 mcast2ucast_set;
+ u32 enable_rtscts;
+ u32 txbf;
+ u32 packet_powersave;
+ u32 drop_unencry;
+ u32 tx_encap_type;
+ u32 ap_detect_out_of_sync_sleeping_sta_time_secs;
+ u32 rc_num_retries;
+ u32 cabq_maxdur;
+ u32 mfptest_set;
+ u32 rts_fixed_rate;
+ u32 vht_sgimask;
+ u32 vht80_ratemask;
+ u32 early_rx_adjust_enable;
+ u32 early_rx_tgt_bmiss_num;
+ u32 early_rx_bmiss_sample_cycle;
+ u32 early_rx_slop_step;
+ u32 early_rx_init_slop;
+ u32 early_rx_adjust_pause;
+ u32 proxy_sta;
+ u32 meru_vc;
+ u32 rx_decap_type;
+ u32 bw_nss_ratemask;
+ u32 inc_tsf;
+ u32 dec_tsf;
+};
+
+#define WMI_VDEV_PARAM_UNSUPPORTED 0
+
+/* the definition of different VDEV parameters */
+enum wmi_vdev_param {
+ /* RTS Threshold */
+ WMI_VDEV_PARAM_RTS_THRESHOLD = 0x1,
+ /* Fragmentation threshold */
+ WMI_VDEV_PARAM_FRAGMENTATION_THRESHOLD,
+ /* beacon interval in TUs */
+ WMI_VDEV_PARAM_BEACON_INTERVAL,
+ /* Listen interval in TUs */
+ WMI_VDEV_PARAM_LISTEN_INTERVAL,
+ /* multicast rate in Mbps */
+ WMI_VDEV_PARAM_MULTICAST_RATE,
+ /* management frame rate in Mbps */
+ WMI_VDEV_PARAM_MGMT_TX_RATE,
+ /* slot time (long vs short) */
+ WMI_VDEV_PARAM_SLOT_TIME,
+ /* preamble (long vs short) */
+ WMI_VDEV_PARAM_PREAMBLE,
+ /* SWBA time (time before tbtt in msec) */
+ WMI_VDEV_PARAM_SWBA_TIME,
+ /* time period for updating VDEV stats */
+ WMI_VDEV_STATS_UPDATE_PERIOD,
+ /* age out time in msec for frames queued for station in power save */
+ WMI_VDEV_PWRSAVE_AGEOUT_TIME,
+ /*
+ * Host SWBA interval (time in msec before tbtt for SWBA event
+ * generation).
+ */
+ WMI_VDEV_HOST_SWBA_INTERVAL,
+ /* DTIM period (specified in units of num beacon intervals) */
+ WMI_VDEV_PARAM_DTIM_PERIOD,
+ /*
+ * scheduler air time limit for this VDEV. used by off chan
+ * scheduler.
+ */
+ WMI_VDEV_OC_SCHEDULER_AIR_TIME_LIMIT,
+ /* enable/disable WDS for this VDEV */
+ WMI_VDEV_PARAM_WDS,
+ /* ATIM Window */
+ WMI_VDEV_PARAM_ATIM_WINDOW,
+ /* BMISS max */
+ WMI_VDEV_PARAM_BMISS_COUNT_MAX,
+ /* BMISS first time */
+ WMI_VDEV_PARAM_BMISS_FIRST_BCNT,
+ /* BMISS final time */
+ WMI_VDEV_PARAM_BMISS_FINAL_BCNT,
+ /* WMM enables/disabled */
+ WMI_VDEV_PARAM_FEATURE_WMM,
+ /* Channel width */
+ WMI_VDEV_PARAM_CHWIDTH,
+ /* Channel Offset */
+ WMI_VDEV_PARAM_CHEXTOFFSET,
+ /* Disable HT Protection */
+ WMI_VDEV_PARAM_DISABLE_HTPROTECTION,
+ /* Quick STA Kickout */
+ WMI_VDEV_PARAM_STA_QUICKKICKOUT,
+ /* Rate to be used with Management frames */
+ WMI_VDEV_PARAM_MGMT_RATE,
+ /* Protection Mode */
+ WMI_VDEV_PARAM_PROTECTION_MODE,
+ /* Fixed rate setting */
+ WMI_VDEV_PARAM_FIXED_RATE,
+ /* Short GI Enable/Disable */
+ WMI_VDEV_PARAM_SGI,
+ /* Enable LDPC */
+ WMI_VDEV_PARAM_LDPC,
+ /* Enable Tx STBC */
+ WMI_VDEV_PARAM_TX_STBC,
+ /* Enable Rx STBC */
+ WMI_VDEV_PARAM_RX_STBC,
+ /* Intra BSS forwarding */
+ WMI_VDEV_PARAM_INTRA_BSS_FWD,
+ /* Setting Default xmit key for Vdev */
+ WMI_VDEV_PARAM_DEF_KEYID,
+ /* NSS width */
+ WMI_VDEV_PARAM_NSS,
+ /* Set the custom rate for the broadcast data frames */
+ WMI_VDEV_PARAM_BCAST_DATA_RATE,
+ /* Set the custom rate (rate-code) for multicast data frames */
+ WMI_VDEV_PARAM_MCAST_DATA_RATE,
+ /* Tx multicast packet indicate Enable/Disable */
+ WMI_VDEV_PARAM_MCAST_INDICATE,
+ /* Tx DHCP packet indicate Enable/Disable */
+ WMI_VDEV_PARAM_DHCP_INDICATE,
+ /* Enable host inspection of Tx unicast packet to unknown destination */
+ WMI_VDEV_PARAM_UNKNOWN_DEST_INDICATE,
+
+ /* The minimum idle time after which the AP considers a STA inactive */
+ WMI_VDEV_PARAM_AP_KEEPALIVE_MIN_IDLE_INACTIVE_TIME_SECS,
+
+ /*
+ * An associated STA is considered inactive when there is no recent
+ * TX/RX activity and no downlink frames are buffered for it. Once a
+ * STA exceeds the maximum idle inactive time, the AP will send an
+ * 802.11 data-null as a keep alive to verify the STA is still
+ * associated. If the STA does not ACK the data-null, or if the data-null
+ * is buffered and the STA does not retrieve it, the STA will be
+ * considered unresponsive
+ * (see WMI_VDEV_AP_KEEPALIVE_MAX_UNRESPONSIVE_TIME_SECS).
+ */
+ WMI_VDEV_PARAM_AP_KEEPALIVE_MAX_IDLE_INACTIVE_TIME_SECS,
+
+ /*
+ * An associated STA is considered unresponsive if there is no recent
+ * TX/RX activity and downlink frames are buffered for it. Once a STA
+ * exceeds the maximum unresponsive time, the AP will send a
+ * WMI_STA_KICKOUT event to the host so the STA can be deleted.
+ */
+ WMI_VDEV_PARAM_AP_KEEPALIVE_MAX_UNRESPONSIVE_TIME_SECS,
+
+ /* Enable NAWDS : MCAST INSPECT Enable, NAWDS Flag set */
+ WMI_VDEV_PARAM_AP_ENABLE_NAWDS,
+ /* Enable/Disable RTS-CTS */
+ WMI_VDEV_PARAM_ENABLE_RTSCTS,
+ /* Enable TX beamformee/beamformer */
+ WMI_VDEV_PARAM_TXBF,
+
+ /* Set packet power save */
+ WMI_VDEV_PARAM_PACKET_POWERSAVE,
+
+ /*
+ * Drop un-encrypted packets if received on an encrypted connection,
+ * otherwise forward them to the host.
+ */
+ WMI_VDEV_PARAM_DROP_UNENCRY,
+
+ /*
+ * Set the encapsulation type for frames.
+ */
+ WMI_VDEV_PARAM_TX_ENCAP_TYPE,
+};
+
+/* the definition of different VDEV parameters */
+enum wmi_10x_vdev_param {
+ /* RTS Threshold */
+ WMI_10X_VDEV_PARAM_RTS_THRESHOLD = 0x1,
+ /* Fragmentation threshold */
+ WMI_10X_VDEV_PARAM_FRAGMENTATION_THRESHOLD,
+ /* beacon interval in TUs */
+ WMI_10X_VDEV_PARAM_BEACON_INTERVAL,
+ /* Listen interval in TUs */
+ WMI_10X_VDEV_PARAM_LISTEN_INTERVAL,
+ /* multicast rate in Mbps */
+ WMI_10X_VDEV_PARAM_MULTICAST_RATE,
+ /* management frame rate in Mbps */
+ WMI_10X_VDEV_PARAM_MGMT_TX_RATE,
+ /* slot time (long vs short) */
+ WMI_10X_VDEV_PARAM_SLOT_TIME,
+ /* preamble (long vs short) */
+ WMI_10X_VDEV_PARAM_PREAMBLE,
+ /* SWBA time (time before tbtt in msec) */
+ WMI_10X_VDEV_PARAM_SWBA_TIME,
+ /* time period for updating VDEV stats */
+ WMI_10X_VDEV_STATS_UPDATE_PERIOD,
+ /* age out time in msec for frames queued for station in power save */
+ WMI_10X_VDEV_PWRSAVE_AGEOUT_TIME,
+ /*
+ * Host SWBA interval (time in msec before tbtt for SWBA event
+ * generation).
+ */
+ WMI_10X_VDEV_HOST_SWBA_INTERVAL,
+ /* DTIM period (specified in units of num beacon intervals) */
+ WMI_10X_VDEV_PARAM_DTIM_PERIOD,
+ /*
+ * scheduler air time limit for this VDEV. used by off chan
+ * scheduler.
+ */
+ WMI_10X_VDEV_OC_SCHEDULER_AIR_TIME_LIMIT,
+ /* enable/disable WDS for this VDEV */
+ WMI_10X_VDEV_PARAM_WDS,
+ /* ATIM Window */
+ WMI_10X_VDEV_PARAM_ATIM_WINDOW,
+ /* BMISS max */
+ WMI_10X_VDEV_PARAM_BMISS_COUNT_MAX,
+ /* WMM enables/disabled */
+ WMI_10X_VDEV_PARAM_FEATURE_WMM,
+ /* Channel width */
+ WMI_10X_VDEV_PARAM_CHWIDTH,
+ /* Channel Offset */
+ WMI_10X_VDEV_PARAM_CHEXTOFFSET,
+ /* Disable HT Protection */
+ WMI_10X_VDEV_PARAM_DISABLE_HTPROTECTION,
+ /* Quick STA Kickout */
+ WMI_10X_VDEV_PARAM_STA_QUICKKICKOUT,
+ /* Rate to be used with Management frames */
+ WMI_10X_VDEV_PARAM_MGMT_RATE,
+ /* Protection Mode */
+ WMI_10X_VDEV_PARAM_PROTECTION_MODE,
+ /* Fixed rate setting */
+ WMI_10X_VDEV_PARAM_FIXED_RATE,
+ /* Short GI Enable/Disable */
+ WMI_10X_VDEV_PARAM_SGI,
+ /* Enable LDPC */
+ WMI_10X_VDEV_PARAM_LDPC,
+ /* Enable Tx STBC */
+ WMI_10X_VDEV_PARAM_TX_STBC,
+ /* Enable Rx STBC */
+ WMI_10X_VDEV_PARAM_RX_STBC,
+ /* Intra BSS forwarding */
+ WMI_10X_VDEV_PARAM_INTRA_BSS_FWD,
+ /* Setting Default xmit key for Vdev */
+ WMI_10X_VDEV_PARAM_DEF_KEYID,
+ /* NSS width */
+ WMI_10X_VDEV_PARAM_NSS,
+ /* Set the custom rate for the broadcast data frames */
+ WMI_10X_VDEV_PARAM_BCAST_DATA_RATE,
+ /* Set the custom rate (rate-code) for multicast data frames */
+ WMI_10X_VDEV_PARAM_MCAST_DATA_RATE,
+ /* Tx multicast packet indicate Enable/Disable */
+ WMI_10X_VDEV_PARAM_MCAST_INDICATE,
+ /* Tx DHCP packet indicate Enable/Disable */
+ WMI_10X_VDEV_PARAM_DHCP_INDICATE,
+ /* Enable host inspection of Tx unicast packet to unknown destination */
+ WMI_10X_VDEV_PARAM_UNKNOWN_DEST_INDICATE,
+
+ /* The minimum idle time after which the AP considers a STA inactive */
+ WMI_10X_VDEV_PARAM_AP_KEEPALIVE_MIN_IDLE_INACTIVE_TIME_SECS,
+
+ /*
+ * An associated STA is considered inactive when there is no recent
+ * TX/RX activity and no downlink frames are buffered for it. Once a
+ * STA exceeds the maximum idle inactive time, the AP will send an
+ * 802.11 data-null as a keep alive to verify the STA is still
+ * associated. If the STA does not ACK the data-null, or if the data-null
+ * is buffered and the STA does not retrieve it, the STA will be
+ * considered unresponsive
+ * (see WMI_10X_VDEV_AP_KEEPALIVE_MAX_UNRESPONSIVE_TIME_SECS).
+ */
+ WMI_10X_VDEV_PARAM_AP_KEEPALIVE_MAX_IDLE_INACTIVE_TIME_SECS,
+
+ /*
+ * An associated STA is considered unresponsive if there is no recent
+ * TX/RX activity and downlink frames are buffered for it. Once a STA
+ * exceeds the maximum unresponsive time, the AP will send a
+ * WMI_10X_STA_KICKOUT event to the host so the STA can be deleted.
+ */
+ WMI_10X_VDEV_PARAM_AP_KEEPALIVE_MAX_UNRESPONSIVE_TIME_SECS,
+
+ /* Enable NAWDS : MCAST INSPECT Enable, NAWDS Flag set */
+ WMI_10X_VDEV_PARAM_AP_ENABLE_NAWDS,
+
+ WMI_10X_VDEV_PARAM_MCAST2UCAST_SET,
+ /* Enable/Disable RTS-CTS */
+ WMI_10X_VDEV_PARAM_ENABLE_RTSCTS,
+
+ WMI_10X_VDEV_PARAM_AP_DETECT_OUT_OF_SYNC_SLEEPING_STA_TIME_SECS,
+
+ /* following are available as of firmware 10.2 */
+ WMI_10X_VDEV_PARAM_TX_ENCAP_TYPE,
+ WMI_10X_VDEV_PARAM_CABQ_MAXDUR,
+ WMI_10X_VDEV_PARAM_MFPTEST_SET,
+ WMI_10X_VDEV_PARAM_RTS_FIXED_RATE,
+ WMI_10X_VDEV_PARAM_VHT_SGIMASK,
+ WMI_10X_VDEV_PARAM_VHT80_RATEMASK,
+ WMI_10X_VDEV_PARAM_TSF_INCREMENT,
+};
+
+enum wmi_10_4_vdev_param {
+ WMI_10_4_VDEV_PARAM_RTS_THRESHOLD = 0x1,
+ WMI_10_4_VDEV_PARAM_FRAGMENTATION_THRESHOLD,
+ WMI_10_4_VDEV_PARAM_BEACON_INTERVAL,
+ WMI_10_4_VDEV_PARAM_LISTEN_INTERVAL,
+ WMI_10_4_VDEV_PARAM_MULTICAST_RATE,
+ WMI_10_4_VDEV_PARAM_MGMT_TX_RATE,
+ WMI_10_4_VDEV_PARAM_SLOT_TIME,
+ WMI_10_4_VDEV_PARAM_PREAMBLE,
+ WMI_10_4_VDEV_PARAM_SWBA_TIME,
+ WMI_10_4_VDEV_STATS_UPDATE_PERIOD,
+ WMI_10_4_VDEV_PWRSAVE_AGEOUT_TIME,
+ WMI_10_4_VDEV_HOST_SWBA_INTERVAL,
+ WMI_10_4_VDEV_PARAM_DTIM_PERIOD,
+ WMI_10_4_VDEV_OC_SCHEDULER_AIR_TIME_LIMIT,
+ WMI_10_4_VDEV_PARAM_WDS,
+ WMI_10_4_VDEV_PARAM_ATIM_WINDOW,
+ WMI_10_4_VDEV_PARAM_BMISS_COUNT_MAX,
+ WMI_10_4_VDEV_PARAM_BMISS_FIRST_BCNT,
+ WMI_10_4_VDEV_PARAM_BMISS_FINAL_BCNT,
+ WMI_10_4_VDEV_PARAM_FEATURE_WMM,
+ WMI_10_4_VDEV_PARAM_CHWIDTH,
+ WMI_10_4_VDEV_PARAM_CHEXTOFFSET,
+ WMI_10_4_VDEV_PARAM_DISABLE_HTPROTECTION,
+ WMI_10_4_VDEV_PARAM_STA_QUICKKICKOUT,
+ WMI_10_4_VDEV_PARAM_MGMT_RATE,
+ WMI_10_4_VDEV_PARAM_PROTECTION_MODE,
+ WMI_10_4_VDEV_PARAM_FIXED_RATE,
+ WMI_10_4_VDEV_PARAM_SGI,
+ WMI_10_4_VDEV_PARAM_LDPC,
+ WMI_10_4_VDEV_PARAM_TX_STBC,
+ WMI_10_4_VDEV_PARAM_RX_STBC,
+ WMI_10_4_VDEV_PARAM_INTRA_BSS_FWD,
+ WMI_10_4_VDEV_PARAM_DEF_KEYID,
+ WMI_10_4_VDEV_PARAM_NSS,
+ WMI_10_4_VDEV_PARAM_BCAST_DATA_RATE,
+ WMI_10_4_VDEV_PARAM_MCAST_DATA_RATE,
+ WMI_10_4_VDEV_PARAM_MCAST_INDICATE,
+ WMI_10_4_VDEV_PARAM_DHCP_INDICATE,
+ WMI_10_4_VDEV_PARAM_UNKNOWN_DEST_INDICATE,
+ WMI_10_4_VDEV_PARAM_AP_KEEPALIVE_MIN_IDLE_INACTIVE_TIME_SECS,
+ WMI_10_4_VDEV_PARAM_AP_KEEPALIVE_MAX_IDLE_INACTIVE_TIME_SECS,
+ WMI_10_4_VDEV_PARAM_AP_KEEPALIVE_MAX_UNRESPONSIVE_TIME_SECS,
+ WMI_10_4_VDEV_PARAM_AP_ENABLE_NAWDS,
+ WMI_10_4_VDEV_PARAM_MCAST2UCAST_SET,
+ WMI_10_4_VDEV_PARAM_ENABLE_RTSCTS,
+ WMI_10_4_VDEV_PARAM_RC_NUM_RETRIES,
+ WMI_10_4_VDEV_PARAM_TXBF,
+ WMI_10_4_VDEV_PARAM_PACKET_POWERSAVE,
+ WMI_10_4_VDEV_PARAM_DROP_UNENCRY,
+ WMI_10_4_VDEV_PARAM_TX_ENCAP_TYPE,
+ WMI_10_4_VDEV_PARAM_AP_DETECT_OUT_OF_SYNC_SLEEPING_STA_TIME_SECS,
+ WMI_10_4_VDEV_PARAM_CABQ_MAXDUR,
+ WMI_10_4_VDEV_PARAM_MFPTEST_SET,
+ WMI_10_4_VDEV_PARAM_RTS_FIXED_RATE,
+ WMI_10_4_VDEV_PARAM_VHT_SGIMASK,
+ WMI_10_4_VDEV_PARAM_VHT80_RATEMASK,
+ WMI_10_4_VDEV_PARAM_EARLY_RX_ADJUST_ENABLE,
+ WMI_10_4_VDEV_PARAM_EARLY_RX_TGT_BMISS_NUM,
+ WMI_10_4_VDEV_PARAM_EARLY_RX_BMISS_SAMPLE_CYCLE,
+ WMI_10_4_VDEV_PARAM_EARLY_RX_SLOP_STEP,
+ WMI_10_4_VDEV_PARAM_EARLY_RX_INIT_SLOP,
+ WMI_10_4_VDEV_PARAM_EARLY_RX_ADJUST_PAUSE,
+ WMI_10_4_VDEV_PARAM_PROXY_STA,
+ WMI_10_4_VDEV_PARAM_MERU_VC,
+ WMI_10_4_VDEV_PARAM_RX_DECAP_TYPE,
+ WMI_10_4_VDEV_PARAM_BW_NSS_RATEMASK,
+ WMI_10_4_VDEV_PARAM_SENSOR_AP,
+ WMI_10_4_VDEV_PARAM_BEACON_RATE,
+ WMI_10_4_VDEV_PARAM_DTIM_ENABLE_CTS,
+ WMI_10_4_VDEV_PARAM_STA_KICKOUT,
+ WMI_10_4_VDEV_PARAM_CAPABILITIES,
+ WMI_10_4_VDEV_PARAM_TSF_INCREMENT,
+ WMI_10_4_VDEV_PARAM_RX_FILTER,
+ WMI_10_4_VDEV_PARAM_MGMT_TX_POWER,
+ WMI_10_4_VDEV_PARAM_ATF_SSID_SCHED_POLICY,
+ WMI_10_4_VDEV_PARAM_DISABLE_DYN_BW_RTS,
+ WMI_10_4_VDEV_PARAM_TSF_DECREMENT,
+};
+
+#define WMI_VDEV_PARAM_TXBF_SU_TX_BFEE BIT(0)
+#define WMI_VDEV_PARAM_TXBF_MU_TX_BFEE BIT(1)
+#define WMI_VDEV_PARAM_TXBF_SU_TX_BFER BIT(2)
+#define WMI_VDEV_PARAM_TXBF_MU_TX_BFER BIT(3)
+
+#define WMI_TXBF_STS_CAP_OFFSET_LSB 4
+#define WMI_TXBF_STS_CAP_OFFSET_MASK 0x70
+#define WMI_TXBF_CONF_IMPLICIT_BF BIT(7)
+#define WMI_BF_SOUND_DIM_OFFSET_LSB 8
+#define WMI_BF_SOUND_DIM_OFFSET_MASK 0xf00
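+
+/*
+ * Illustrative sketch, not part of the driver: packing a TXBF parameter
+ * value from the bits and offsets above, where sts is the beamformee STS
+ * capability (bits 4-6) and sound_dim the number of sounding dimensions
+ * (bits 8-11).
+ */
+static inline u32 wmi_txbf_conf_value(u32 flags, u32 sts, u32 sound_dim)
+{
+ return flags |
+ ((sts << WMI_TXBF_STS_CAP_OFFSET_LSB) & WMI_TXBF_STS_CAP_OFFSET_MASK) |
+ ((sound_dim << WMI_BF_SOUND_DIM_OFFSET_LSB) &
+ WMI_BF_SOUND_DIM_OFFSET_MASK);
+}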
+
+/* slot time long */
+#define WMI_VDEV_SLOT_TIME_LONG 0x1
+/* slot time short */
+#define WMI_VDEV_SLOT_TIME_SHORT 0x2
+/* preamble long */
+#define WMI_VDEV_PREAMBLE_LONG 0x1
+/* preamble short */
+#define WMI_VDEV_PREAMBLE_SHORT 0x2
+
+enum wmi_start_event_param {
+ WMI_VDEV_RESP_START_EVENT = 0,
+ WMI_VDEV_RESP_RESTART_EVENT,
+};
+
+struct wmi_vdev_start_response_event {
+ __le32 vdev_id;
+ __le32 req_id;
+ __le32 resp_type; /* %WMI_VDEV_RESP_ */
+ __le32 status;
+} __packed;
+
+struct wmi_vdev_standby_req_event {
+ /* unique id identifying the VDEV, generated by the caller */
+ __le32 vdev_id;
+} __packed;
+
+struct wmi_vdev_resume_req_event {
+ /* unique id identifying the VDEV, generated by the caller */
+ __le32 vdev_id;
+} __packed;
+
+struct wmi_vdev_stopped_event {
+ /* unique id identifying the VDEV, generated by the caller */
+ __le32 vdev_id;
+} __packed;
+
+/*
+ * common structure used for simple events
+ * (stopped, resume_req, standby response)
+ */
+struct wmi_vdev_simple_event {
+ /* unique id identifying the VDEV, generated by the caller */
+ __le32 vdev_id;
+} __packed;
+
+/* VDEV start response status codes */
+/* VDEV successfully started */
+#define WMI_INIFIED_VDEV_START_RESPONSE_STATUS_SUCCESS 0x0
+
+/* requested VDEV not found */
+#define WMI_INIFIED_VDEV_START_RESPONSE_INVALID_VDEVID 0x1
+
+/* unsupported VDEV combination */
+#define WMI_INIFIED_VDEV_START_RESPONSE_NOT_SUPPORTED 0x2
+
+/* TODO: please add more comments if you have in-depth information */
+struct wmi_vdev_spectral_conf_cmd {
+ __le32 vdev_id;
+
+ /* number of fft samples to send (0 for infinite) */
+ __le32 scan_count;
+ __le32 scan_period;
+ __le32 scan_priority;
+
+ /* number of bins in the FFT: 2^(fft_size - bin_scale) */
+ __le32 scan_fft_size;
+ __le32 scan_gc_ena;
+ __le32 scan_restart_ena;
+ __le32 scan_noise_floor_ref;
+ __le32 scan_init_delay;
+ __le32 scan_nb_tone_thr;
+ __le32 scan_str_bin_thr;
+ __le32 scan_wb_rpt_mode;
+ __le32 scan_rssi_rpt_mode;
+ __le32 scan_rssi_thr;
+ __le32 scan_pwr_format;
+
+ /* rpt_mode: Format of FFT report to software for spectral scan
+ * triggered FFTs:
+ * 0: No FFT report (only spectral scan summary report)
+ * 1: 2-dword summary of metrics for each completed FFT + spectral
+ * scan summary report
+ * 2: 2-dword summary of metrics for each completed FFT +
+ * 1x- oversampled bins(in-band) per FFT + spectral scan summary
+ * report
+ * 3: 2-dword summary of metrics for each completed FFT +
+ * 2x- oversampled bins (all) per FFT + spectral scan summary
+ */
+ __le32 scan_rpt_mode;
+ __le32 scan_bin_scale;
+ __le32 scan_dbm_adj;
+ __le32 scan_chn_mask;
+} __packed;
+
+struct wmi_vdev_spectral_conf_arg {
+ u32 vdev_id;
+ u32 scan_count;
+ u32 scan_period;
+ u32 scan_priority;
+ u32 scan_fft_size;
+ u32 scan_gc_ena;
+ u32 scan_restart_ena;
+ u32 scan_noise_floor_ref;
+ u32 scan_init_delay;
+ u32 scan_nb_tone_thr;
+ u32 scan_str_bin_thr;
+ u32 scan_wb_rpt_mode;
+ u32 scan_rssi_rpt_mode;
+ u32 scan_rssi_thr;
+ u32 scan_pwr_format;
+ u32 scan_rpt_mode;
+ u32 scan_bin_scale;
+ u32 scan_dbm_adj;
+ u32 scan_chn_mask;
+};
+
+#define WMI_SPECTRAL_ENABLE_DEFAULT 0
+#define WMI_SPECTRAL_COUNT_DEFAULT 0
+#define WMI_SPECTRAL_PERIOD_DEFAULT 35
+#define WMI_SPECTRAL_PRIORITY_DEFAULT 1
+#define WMI_SPECTRAL_FFT_SIZE_DEFAULT 7
+#define WMI_SPECTRAL_GC_ENA_DEFAULT 1
+#define WMI_SPECTRAL_RESTART_ENA_DEFAULT 0
+#define WMI_SPECTRAL_NOISE_FLOOR_REF_DEFAULT -96
+#define WMI_SPECTRAL_INIT_DELAY_DEFAULT 80
+#define WMI_SPECTRAL_NB_TONE_THR_DEFAULT 12
+#define WMI_SPECTRAL_STR_BIN_THR_DEFAULT 8
+#define WMI_SPECTRAL_WB_RPT_MODE_DEFAULT 0
+#define WMI_SPECTRAL_RSSI_RPT_MODE_DEFAULT 0
+#define WMI_SPECTRAL_RSSI_THR_DEFAULT 0xf0
+#define WMI_SPECTRAL_PWR_FORMAT_DEFAULT 0
+#define WMI_SPECTRAL_RPT_MODE_DEFAULT 2
+#define WMI_SPECTRAL_BIN_SCALE_DEFAULT 1
+#define WMI_SPECTRAL_DBM_ADJ_DEFAULT 1
+#define WMI_SPECTRAL_CHN_MASK_DEFAULT 1
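+
+/*
+ * Illustrative sketch, not part of the driver: seeding a conf arg with the
+ * defaults above. Per the scan_fft_size comment, fft_size 7 with bin_scale
+ * 1 requests 2^(7 - 1) = 64 FFT bins. Only a few fields are shown; the
+ * rest would take the matching WMI_SPECTRAL_*_DEFAULT values.
+ */
+static inline void
+wmi_vdev_spectral_conf_arg_init(struct wmi_vdev_spectral_conf_arg *arg,
+ u32 vdev_id)
+{
+ arg->vdev_id = vdev_id;
+ arg->scan_count = WMI_SPECTRAL_COUNT_DEFAULT;
+ arg->scan_period = WMI_SPECTRAL_PERIOD_DEFAULT;
+ arg->scan_priority = WMI_SPECTRAL_PRIORITY_DEFAULT;
+ arg->scan_fft_size = WMI_SPECTRAL_FFT_SIZE_DEFAULT;
+ arg->scan_bin_scale = WMI_SPECTRAL_BIN_SCALE_DEFAULT;
+ arg->scan_rpt_mode = WMI_SPECTRAL_RPT_MODE_DEFAULT;
+}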
+
+struct wmi_vdev_spectral_enable_cmd {
+ __le32 vdev_id;
+ __le32 trigger_cmd;
+ __le32 enable_cmd;
+} __packed;
+
+#define WMI_SPECTRAL_TRIGGER_CMD_TRIGGER 1
+#define WMI_SPECTRAL_TRIGGER_CMD_CLEAR 2
+#define WMI_SPECTRAL_ENABLE_CMD_ENABLE 1
+#define WMI_SPECTRAL_ENABLE_CMD_DISABLE 2
+
+/* Beacon processing related command and event structures */
+struct wmi_bcn_tx_hdr {
+ __le32 vdev_id;
+ __le32 tx_rate;
+ __le32 tx_power;
+ __le32 bcn_len;
+} __packed;
+
+struct wmi_bcn_tx_cmd {
+ struct wmi_bcn_tx_hdr hdr;
+ u8 *bcn[0];
+} __packed;
+
+struct wmi_bcn_tx_arg {
+ u32 vdev_id;
+ u32 tx_rate;
+ u32 tx_power;
+ u32 bcn_len;
+ const void *bcn;
+};
+
+enum wmi_bcn_tx_ref_flags {
+ WMI_BCN_TX_REF_FLAG_DTIM_ZERO = 0x1,
+ WMI_BCN_TX_REF_FLAG_DELIVER_CAB = 0x2,
+};
+
+/* TODO: It is unclear why "no antenna" works while any other seemingly valid
+ * chainmask yields no beacons on the air at all.
+ */
+#define WMI_BCN_TX_REF_DEF_ANTENNA 0
+
+struct wmi_bcn_tx_ref_cmd {
+ __le32 vdev_id;
+ __le32 data_len;
+ /* physical address of the frame - dma pointer */
+ __le32 data_ptr;
+ /* id for host to track */
+ __le32 msdu_id;
+ /* frame ctrl to setup PPDU desc */
+ __le32 frame_control;
+ /* to control CABQ traffic: WMI_BCN_TX_REF_FLAG_ */
+ __le32 flags;
+ /* introduced in 10.2 */
+ __le32 antenna_mask;
+} __packed;
+
+/* Beacon filter */
+#define WMI_BCN_FILTER_ALL 0 /* Filter all beacons */
+#define WMI_BCN_FILTER_NONE 1 /* Pass all beacons */
+#define WMI_BCN_FILTER_RSSI 2 /* Pass Beacons RSSI >= RSSI threshold */
+#define WMI_BCN_FILTER_BSSID 3 /* Pass Beacons with matching BSSID */
+#define WMI_BCN_FILTER_SSID 4 /* Pass Beacons with matching SSID */
+
+struct wmi_bcn_filter_rx_cmd {
+ /* Filter ID */
+ __le32 bcn_filter_id;
+ /* Filter type - wmi_bcn_filter */
+ __le32 bcn_filter;
+ /* Buffer len */
+ __le32 bcn_filter_len;
+ /* Filter info (threshold, BSSID, RSSI) */
+ u8 *bcn_filter_buf;
+} __packed;
+
+/* Capabilities and IEs to be passed to firmware */
+struct wmi_bcn_prb_info {
+ /* Capabilities */
+ __le32 caps;
+ /* ERP info */
+ __le32 erp;
+ /* Advanced capabilities */
+ /* HT capabilities */
+ /* HT Info */
+ /* ibss_dfs */
+ /* wpa Info */
+ /* rsn Info */
+ /* rrm info */
+ /* ath_ext */
+ /* app IE */
+} __packed;
+
+struct wmi_bcn_tmpl_cmd {
+ /* unique id identifying the VDEV, generated by the caller */
+ __le32 vdev_id;
+ /* TIM IE offset from the beginning of the template. */
+ __le32 tim_ie_offset;
+ /* beacon probe capabilities and IEs */
+ struct wmi_bcn_prb_info bcn_prb_info;
+ /* beacon buffer length */
+ __le32 buf_len;
+ /* variable length data */
+ u8 data[1];
+} __packed;
+
+struct wmi_prb_tmpl_cmd {
+ /* unique id identifying the VDEV, generated by the caller */
+ __le32 vdev_id;
+ /* beacon probe capabilities and IEs */
+ struct wmi_bcn_prb_info bcn_prb_info;
+ /* beacon buffer length */
+ __le32 buf_len;
+ /* Variable length data */
+ u8 data[1];
+} __packed;
+
+enum wmi_sta_ps_mode {
+ /* disable power save for the given STA VDEV */
+ WMI_STA_PS_MODE_DISABLED = 0,
+ /* enable power save for the given STA VDEV */
+ WMI_STA_PS_MODE_ENABLED = 1,
+};
+
+struct wmi_sta_powersave_mode_cmd {
+ /* unique id identifying the VDEV, generated by the caller */
+ __le32 vdev_id;
+
+ /*
+ * Power save mode
+ * (see enum wmi_sta_ps_mode)
+ */
+ __le32 sta_ps_mode;
+} __packed;
+
+enum wmi_csa_offload_en {
+ WMI_CSA_OFFLOAD_DISABLE = 0,
+ WMI_CSA_OFFLOAD_ENABLE = 1,
+};
+
+struct wmi_csa_offload_enable_cmd {
+ __le32 vdev_id;
+ __le32 csa_offload_enable;
+} __packed;
+
+struct wmi_csa_offload_chanswitch_cmd {
+ __le32 vdev_id;
+ struct wmi_channel chan;
+} __packed;
+
+/*
+ * This parameter controls the policy for retrieving frames from AP while the
+ * STA is in sleep state.
+ *
+ * Only takes effect if sta_ps_mode is enabled.
+ */
+enum wmi_sta_ps_param_rx_wake_policy {
+ /*
+ * Wake up whenever there is RX activity on the VDEV. In this mode
+ * the power save SM (state machine) will come out of sleep by
+ * sending either a null frame or a data frame (with PS==0) in
+ * response to the TIM bit set in the received beacon frame from
+ * the AP.
+ */
+ WMI_STA_PS_RX_WAKE_POLICY_WAKE = 0,
+
+ /*
+ * Here the power save state machine will not wake up in response to
+ * the TIM bit; instead it will send a PS-Poll or a UAPSD trigger
+ * based on the UAPSD configuration set up by the WMISET_PS_SET_UAPSD
+ * WMI command. When all access categories are delivery-enabled, the
+ * station will send a UAPSD trigger frame, otherwise it will send a
+ * PS-Poll.
+ */
+ WMI_STA_PS_RX_WAKE_POLICY_POLL_UAPSD = 1,
+};
+
+/*
+ * Number of tx frames/beacon that cause the power save SM to wake up.
+ *
+ * Value 1 causes the SM to wake up for every TX. Value 0 has a special
+ * meaning: it causes the SM to never wake up. This is useful if you want
+ * to keep the system asleep all the time for some kind of test mode. The
+ * host can change this parameter at any time; it takes effect at the next
+ * tx frame.
+ */
+enum wmi_sta_ps_param_tx_wake_threshold {
+ WMI_STA_PS_TX_WAKE_THRESHOLD_NEVER = 0,
+ WMI_STA_PS_TX_WAKE_THRESHOLD_ALWAYS = 1,
+
+ /*
+ * Values greater than one indicate the number of TX attempts per
+ * beacon interval before the STA will wake up.
+ */
+};
+
+/*
+ * The maximum number of PS-Poll frames the FW will send in response to
+ * traffic advertised in TIM before waking up (by sending a null frame with PS
+ * = 0). Value 0 has a special meaning: there is no maximum count and the FW
+ * will send as many PS-Polls as necessary to retrieve buffered BUs. This
+ * parameter is used when the RX wake policy is
+ * WMI_STA_PS_RX_WAKE_POLICY_POLL_UAPSD and ignored when the RX wake
+ * policy is WMI_STA_PS_RX_WAKE_POLICY_WAKE.
+ */
+enum wmi_sta_ps_param_pspoll_count {
+ WMI_STA_PS_PSPOLL_COUNT_NO_MAX = 0,
+ /*
+ * Values greater than 0 indicate the maximum number of PS-Poll frames
+ * FW will send before waking up.
+ */
+
+ /* When u-APSD is enabled the firmware will be very reluctant to exit
+ * STA PS. This could result in very poor Rx performance with STA doing
+ * PS-Poll for each and every buffered frame. This value is a bit
+ * arbitrary.
+ */
+ WMI_STA_PS_PSPOLL_COUNT_UAPSD = 3,
+};
+
+/*
+ * This will include the delivery and trigger enabled state for every AC.
+ * This is the negotiated state with the AP. The host MLME needs to set
+ * this based on AP capability and the state set in the association
+ * request by the station MLME. The lower 8 bits of the value specify the
+ * UAPSD configuration.
+ */
+#define WMI_UAPSD_AC_TYPE_DELI 0
+#define WMI_UAPSD_AC_TYPE_TRIG 1
+
+#define WMI_UAPSD_AC_BIT_MASK(ac, type) \
+ (type == WMI_UAPSD_AC_TYPE_DELI ? 1 << (ac << 1) : 1 << ((ac << 1) + 1))
+
+enum wmi_sta_ps_param_uapsd {
+ WMI_STA_PS_UAPSD_AC0_DELIVERY_EN = (1 << 0),
+ WMI_STA_PS_UAPSD_AC0_TRIGGER_EN = (1 << 1),
+ WMI_STA_PS_UAPSD_AC1_DELIVERY_EN = (1 << 2),
+ WMI_STA_PS_UAPSD_AC1_TRIGGER_EN = (1 << 3),
+ WMI_STA_PS_UAPSD_AC2_DELIVERY_EN = (1 << 4),
+ WMI_STA_PS_UAPSD_AC2_TRIGGER_EN = (1 << 5),
+ WMI_STA_PS_UAPSD_AC3_DELIVERY_EN = (1 << 6),
+ WMI_STA_PS_UAPSD_AC3_TRIGGER_EN = (1 << 7),
+};
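+
+/*
+ * Illustrative sketch, not part of the driver: WMI_UAPSD_AC_BIT_MASK
+ * places delivery bits at even positions and trigger bits at odd ones, so
+ * for ac == 2 this yields BIT(4) | BIT(5), matching
+ * WMI_STA_PS_UAPSD_AC2_DELIVERY_EN | WMI_STA_PS_UAPSD_AC2_TRIGGER_EN.
+ */
+static inline u32 wmi_uapsd_ac_enable(u32 ac)
+{
+ return WMI_UAPSD_AC_BIT_MASK(ac, WMI_UAPSD_AC_TYPE_DELI) |
+ WMI_UAPSD_AC_BIT_MASK(ac, WMI_UAPSD_AC_TYPE_TRIG);
+}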
+
+#define WMI_STA_UAPSD_MAX_INTERVAL_MSEC UINT_MAX
+
+struct wmi_sta_uapsd_auto_trig_param {
+ __le32 wmm_ac;
+ __le32 user_priority;
+ __le32 service_interval;
+ __le32 suspend_interval;
+ __le32 delay_interval;
+};
+
+struct wmi_sta_uapsd_auto_trig_cmd_fixed_param {
+ __le32 vdev_id;
+ struct wmi_mac_addr peer_macaddr;
+ __le32 num_ac;
+};
+
+struct wmi_sta_uapsd_auto_trig_arg {
+ u32 wmm_ac;
+ u32 user_priority;
+ u32 service_interval;
+ u32 suspend_interval;
+ u32 delay_interval;
+};
+
+enum wmi_sta_powersave_param {
+ /*
+ * Controls how frames are retrieved from the AP while the STA is sleeping.
+ *
+ * (see enum wmi_sta_ps_param_rx_wake_policy)
+ */
+ WMI_STA_PS_PARAM_RX_WAKE_POLICY = 0,
+
+ /*
+ * The STA will go active after this many TX frames.
+ *
+ * (see enum wmi_sta_ps_param_tx_wake_threshold)
+ */
+ WMI_STA_PS_PARAM_TX_WAKE_THRESHOLD = 1,
+
+ /*
+ * Number of PS-Poll to send before STA wakes up
+ *
+ * (see enum wmi_sta_ps_param_pspoll_count)
+ *
+ */
+ WMI_STA_PS_PARAM_PSPOLL_COUNT = 2,
+
+ /*
+ * TX/RX inactivity time in msec before going to sleep.
+ *
+ * The power save SM will monitor tx/rx activity on the VDEV; if
+ * there is no activity for the specified number of msec, the power
+ * save SM will go to sleep.
+ */
+ WMI_STA_PS_PARAM_INACTIVITY_TIME = 3,
+
+ /*
+ * Set uapsd configuration.
+ *
+ * (see enum wmi_sta_ps_param_uapsd)
+ */
+ WMI_STA_PS_PARAM_UAPSD = 4,
+};
+
+struct wmi_sta_powersave_param_cmd {
+ __le32 vdev_id;
+ __le32 param_id; /* %WMI_STA_PS_PARAM_ */
+ __le32 param_value;
+} __packed;
+
+/* No MIMO power save */
+#define WMI_STA_MIMO_PS_MODE_DISABLE
+/* MIMO powersave mode static */
+#define WMI_STA_MIMO_PS_MODE_STATIC
+/* MIMO powersave mode dynamic */
+#define WMI_STA_MIMO_PS_MODE_DYNAMIC
+
+struct wmi_sta_mimo_ps_mode_cmd {
+ /* unique id identifying the VDEV, generated by the caller */
+ __le32 vdev_id;
+ /* mimo powersave mode as defined above */
+ __le32 mimo_pwrsave_mode;
+} __packed;
+
+/* U-APSD configuration of peer station from (re)assoc request and TSPECs */
+enum wmi_ap_ps_param_uapsd {
+ WMI_AP_PS_UAPSD_AC0_DELIVERY_EN = (1 << 0),
+ WMI_AP_PS_UAPSD_AC0_TRIGGER_EN = (1 << 1),
+ WMI_AP_PS_UAPSD_AC1_DELIVERY_EN = (1 << 2),
+ WMI_AP_PS_UAPSD_AC1_TRIGGER_EN = (1 << 3),
+ WMI_AP_PS_UAPSD_AC2_DELIVERY_EN = (1 << 4),
+ WMI_AP_PS_UAPSD_AC2_TRIGGER_EN = (1 << 5),
+ WMI_AP_PS_UAPSD_AC3_DELIVERY_EN = (1 << 6),
+ WMI_AP_PS_UAPSD_AC3_TRIGGER_EN = (1 << 7),
+};
+
+/* U-APSD maximum service period of peer station */
+enum wmi_ap_ps_peer_param_max_sp {
+ WMI_AP_PS_PEER_PARAM_MAX_SP_UNLIMITED = 0,
+ WMI_AP_PS_PEER_PARAM_MAX_SP_2 = 1,
+ WMI_AP_PS_PEER_PARAM_MAX_SP_4 = 2,
+ WMI_AP_PS_PEER_PARAM_MAX_SP_6 = 3,
+ MAX_WMI_AP_PS_PEER_PARAM_MAX_SP,
+};
+
+/*
+ * AP power save parameter
+ * Set a power save specific parameter for a peer station
+ */
+enum wmi_ap_ps_peer_param {
+ /* Set uapsd configuration for a given peer.
+ *
+ * Include the delivery and trigger enabled state for every AC.
+	 * The host MLME needs to set this based on AP capability and the
+	 * station's request as set in the association request received
+	 * from the station.
+ *
+ * Lower 8 bits of the value specify the UAPSD configuration.
+ *
+ * (see enum wmi_ap_ps_param_uapsd)
+ * The default value is 0.
+ */
+ WMI_AP_PS_PEER_PARAM_UAPSD = 0,
+
+ /*
+ * Set the service period for a UAPSD capable station
+ *
+	 * The service period comes from the WME IE in the (re)assoc
+	 * request frame.
+ *
+ * (see enum wmi_ap_ps_peer_param_max_sp)
+ */
+ WMI_AP_PS_PEER_PARAM_MAX_SP = 1,
+
+ /* Time in seconds for aging out buffered frames for STA in PS */
+ WMI_AP_PS_PEER_PARAM_AGEOUT_TIME = 2,
+};
+
+struct wmi_ap_ps_peer_cmd {
+ /* unique id identifying the VDEV, generated by the caller */
+ __le32 vdev_id;
+
+ /* peer MAC address */
+ struct wmi_mac_addr peer_macaddr;
+
+ /* AP powersave param (see enum wmi_ap_ps_peer_param) */
+ __le32 param_id;
+
+ /* AP powersave param value */
+ __le32 param_value;
+} __packed;
+
+/* 128 clients = 4 words */
+#define WMI_TIM_BITMAP_ARRAY_SIZE 4
+
+struct wmi_tim_info {
+ __le32 tim_len;
+ __le32 tim_mcast;
+ __le32 tim_bitmap[WMI_TIM_BITMAP_ARRAY_SIZE];
+ __le32 tim_changed;
+ __le32 tim_num_ps_pending;
+} __packed;
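+
+/* Editor's sketch (illustrative, not part of the original patch): a client
+ * with association ID 'aid' maps to bit (aid % 32) of word (aid / 32) in
+ * the packed TIM bitmap above.
+ */
+static inline bool wmi_tim_bit_is_set(const struct wmi_tim_info *tim, u16 aid)
+{
+	return __le32_to_cpu(tim->tim_bitmap[aid / 32]) & BIT(aid % 32);
+}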
+
+struct wmi_tim_info_arg {
+ __le32 tim_len;
+ __le32 tim_mcast;
+ const __le32 *tim_bitmap;
+ __le32 tim_changed;
+ __le32 tim_num_ps_pending;
+} __packed;
+
+/* Maximum number of NOA Descriptors supported */
+#define WMI_P2P_MAX_NOA_DESCRIPTORS 4
+#define WMI_P2P_OPPPS_ENABLE_BIT BIT(0)
+#define WMI_P2P_OPPPS_CTWINDOW_OFFSET 1
+#define WMI_P2P_NOA_CHANGED_BIT BIT(0)
+
+struct wmi_p2p_noa_info {
+ /* Bit 0 - Flag to indicate an update in NOA schedule
+ * Bits 7-1 - Reserved
+ */
+ u8 changed;
+ /* NOA index */
+ u8 index;
+ /* Bit 0 - Opp PS state of the AP
+ * Bits 1-7 - Ctwindow in TUs
+ */
+ u8 ctwindow_oppps;
+ /* Number of NOA descriptors */
+ u8 num_descriptors;
+
+ struct wmi_p2p_noa_descriptor descriptors[WMI_P2P_MAX_NOA_DESCRIPTORS];
+} __packed;
+
+struct wmi_bcn_info {
+ struct wmi_tim_info tim_info;
+ struct wmi_p2p_noa_info p2p_noa_info;
+} __packed;
+
+struct wmi_host_swba_event {
+ __le32 vdev_map;
+ struct wmi_bcn_info bcn_info[0];
+} __packed;
+
+struct wmi_10_2_4_bcn_info {
+ struct wmi_tim_info tim_info;
+ /* The 10.2.4 FW doesn't have p2p NOA info */
+} __packed;
+
+struct wmi_10_2_4_host_swba_event {
+ __le32 vdev_map;
+ struct wmi_10_2_4_bcn_info bcn_info[0];
+} __packed;
+
+/* 16 words for 512 clients + 1 word for guard */
+#define WMI_10_4_TIM_BITMAP_ARRAY_SIZE 17
+
+struct wmi_10_4_tim_info {
+ __le32 tim_len;
+ __le32 tim_mcast;
+ __le32 tim_bitmap[WMI_10_4_TIM_BITMAP_ARRAY_SIZE];
+ __le32 tim_changed;
+ __le32 tim_num_ps_pending;
+} __packed;
+
+#define WMI_10_4_P2P_MAX_NOA_DESCRIPTORS 1
+
+struct wmi_10_4_p2p_noa_info {
+ /* Bit 0 - Flag to indicate an update in NOA schedule
+ * Bits 7-1 - Reserved
+ */
+ u8 changed;
+ /* NOA index */
+ u8 index;
+ /* Bit 0 - Opp PS state of the AP
+ * Bits 1-7 - Ctwindow in TUs
+ */
+ u8 ctwindow_oppps;
+ /* Number of NOA descriptors */
+ u8 num_descriptors;
+
+ struct wmi_p2p_noa_descriptor
+ noa_descriptors[WMI_10_4_P2P_MAX_NOA_DESCRIPTORS];
+} __packed;
+
+struct wmi_10_4_bcn_info {
+ struct wmi_10_4_tim_info tim_info;
+ struct wmi_10_4_p2p_noa_info p2p_noa_info;
+} __packed;
+
+struct wmi_10_4_host_swba_event {
+ __le32 vdev_map;
+ struct wmi_10_4_bcn_info bcn_info[0];
+} __packed;
+
+#define WMI_MAX_AP_VDEV 16
+
+struct wmi_tbtt_offset_event {
+ __le32 vdev_map;
+ __le32 tbttoffset_list[WMI_MAX_AP_VDEV];
+} __packed;
+
+struct wmi_peer_create_cmd {
+ __le32 vdev_id;
+ struct wmi_mac_addr peer_macaddr;
+ __le32 peer_type;
+} __packed;
+
+enum wmi_peer_type {
+ WMI_PEER_TYPE_DEFAULT = 0,
+ WMI_PEER_TYPE_BSS = 1,
+ WMI_PEER_TYPE_TDLS = 2,
+};
+
+struct wmi_peer_delete_cmd {
+ __le32 vdev_id;
+ struct wmi_mac_addr peer_macaddr;
+} __packed;
+
+struct wmi_peer_flush_tids_cmd {
+ __le32 vdev_id;
+ struct wmi_mac_addr peer_macaddr;
+ __le32 peer_tid_bitmap;
+} __packed;
+
+struct wmi_fixed_rate {
+ /*
+	 * rate mode: 0: disable fixed rate (auto rate)
+ * 1: legacy (non 11n) rate specified as ieee rate 2*Mbps
+ * 2: ht20 11n rate specified as mcs index
+ * 3: ht40 11n rate specified as mcs index
+ */
+ __le32 rate_mode;
+ /*
+ * 4 rate values for 4 rate series. series 0 is stored in byte 0 (LSB)
+ * and series 3 is stored at byte 3 (MSB)
+ */
+ __le32 rate_series;
+ /*
+ * 4 retry counts for 4 rate series. retry count for rate 0 is stored
+ * in byte 0 (LSB) and retry count for rate 3 is stored at byte 3
+ * (MSB)
+ */
+ __le32 rate_retries;
+} __packed;
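+
+/* Editor's sketch (not part of the original patch): packing the four
+ * per-series rate values into the rate_series word as described above,
+ * series 0 in the LSB and series 3 in the MSB. rate_retries packs the
+ * same way.
+ */
+static inline __le32 wmi_fixed_rate_pack_series(u8 s0, u8 s1, u8 s2, u8 s3)
+{
+	return __cpu_to_le32(s0 | (s1 << 8) | (s2 << 16) | ((u32)s3 << 24));
+}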
+
+struct wmi_peer_fixed_rate_cmd {
+ /* unique id identifying the VDEV, generated by the caller */
+ __le32 vdev_id;
+ /* peer MAC address */
+ struct wmi_mac_addr peer_macaddr;
+ /* fixed rate */
+ struct wmi_fixed_rate peer_fixed_rate;
+} __packed;
+
+#define WMI_MGMT_TID 17
+
+struct wmi_addba_clear_resp_cmd {
+ /* unique id identifying the VDEV, generated by the caller */
+ __le32 vdev_id;
+ /* peer MAC address */
+ struct wmi_mac_addr peer_macaddr;
+} __packed;
+
+struct wmi_addba_send_cmd {
+ /* unique id identifying the VDEV, generated by the caller */
+ __le32 vdev_id;
+ /* peer MAC address */
+ struct wmi_mac_addr peer_macaddr;
+ /* Tid number */
+ __le32 tid;
+ /* Buffer/Window size*/
+ __le32 buffersize;
+} __packed;
+
+struct wmi_delba_send_cmd {
+ /* unique id identifying the VDEV, generated by the caller */
+ __le32 vdev_id;
+ /* peer MAC address */
+ struct wmi_mac_addr peer_macaddr;
+ /* Tid number */
+ __le32 tid;
+ /* Is Initiator */
+ __le32 initiator;
+ /* Reason code */
+ __le32 reasoncode;
+} __packed;
+
+struct wmi_addba_setresponse_cmd {
+ /* unique id identifying the vdev, generated by the caller */
+ __le32 vdev_id;
+ /* peer mac address */
+ struct wmi_mac_addr peer_macaddr;
+ /* Tid number */
+ __le32 tid;
+ /* status code */
+ __le32 statuscode;
+} __packed;
+
+struct wmi_send_singleamsdu_cmd {
+ /* unique id identifying the vdev, generated by the caller */
+ __le32 vdev_id;
+ /* peer mac address */
+ struct wmi_mac_addr peer_macaddr;
+ /* Tid number */
+ __le32 tid;
+} __packed;
+
+enum wmi_peer_smps_state {
+ WMI_PEER_SMPS_PS_NONE = 0x0,
+ WMI_PEER_SMPS_STATIC = 0x1,
+ WMI_PEER_SMPS_DYNAMIC = 0x2
+};
+
+enum wmi_peer_chwidth {
+ WMI_PEER_CHWIDTH_20MHZ = 0,
+ WMI_PEER_CHWIDTH_40MHZ = 1,
+ WMI_PEER_CHWIDTH_80MHZ = 2,
+ WMI_PEER_CHWIDTH_160MHZ = 3,
+};
+
+enum wmi_peer_param {
+ WMI_PEER_SMPS_STATE = 0x1, /* see %wmi_peer_smps_state */
+ WMI_PEER_AMPDU = 0x2,
+ WMI_PEER_AUTHORIZE = 0x3,
+ WMI_PEER_CHAN_WIDTH = 0x4,
+ WMI_PEER_NSS = 0x5,
+ WMI_PEER_USE_4ADDR = 0x6,
+ WMI_PEER_DEBUG = 0xa,
+ WMI_PEER_PHYMODE = 0xd,
+ WMI_PEER_DUMMY_VAR = 0xff, /* dummy parameter for STA PS workaround */
+};
+
+struct wmi_peer_set_param_cmd {
+ __le32 vdev_id;
+ struct wmi_mac_addr peer_macaddr;
+ __le32 param_id;
+ __le32 param_value;
+} __packed;
+
+#define MAX_SUPPORTED_RATES 128
+
+struct wmi_rate_set {
+ /* total number of rates */
+ __le32 num_rates;
+ /*
+	 * rates (each an 8-bit value) packed into 32-bit words. The rates
+	 * are filled from least significant byte to most
+ * significant byte.
+ */
+ __le32 rates[(MAX_SUPPORTED_RATES / 4) + 1];
+} __packed;
+
+struct wmi_rate_set_arg {
+ unsigned int num_rates;
+ u8 rates[MAX_SUPPORTED_RATES];
+};
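+
+/* Editor's sketch (illustrative only): how the unpacked rates from
+ * wmi_rate_set_arg land in the packed words of wmi_rate_set -- rate i
+ * goes into byte (i % 4) of word (i / 4). Assumes 'dst' starts zeroed.
+ */
+static inline void wmi_rate_set_pack(struct wmi_rate_set *dst,
+				     const struct wmi_rate_set_arg *src)
+{
+	unsigned int i;
+	u32 word;
+
+	dst->num_rates = __cpu_to_le32(src->num_rates);
+	for (i = 0; i < src->num_rates && i < MAX_SUPPORTED_RATES; i++) {
+		word = __le32_to_cpu(dst->rates[i / 4]);
+		word |= (u32)src->rates[i] << ((i % 4) * 8);
+		dst->rates[i / 4] = __cpu_to_le32(word);
+	}
+}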
+
+/*
+ * NOTE: It would be a good idea to represent the Tx MCS
+ * info in one word and Rx in another word. This is split
+ * into multiple words for convenience
+ */
+struct wmi_vht_rate_set {
+ __le32 rx_max_rate; /* Max Rx data rate */
+ __le32 rx_mcs_set; /* Negotiated RX VHT rates */
+ __le32 tx_max_rate; /* Max Tx data rate */
+ __le32 tx_mcs_set; /* Negotiated TX VHT rates */
+} __packed;
+
+struct wmi_vht_rate_set_arg {
+ u32 rx_max_rate;
+ u32 rx_mcs_set;
+ u32 tx_max_rate;
+ u32 tx_mcs_set;
+};
+
+struct wmi_peer_set_rates_cmd {
+ /* peer MAC address */
+ struct wmi_mac_addr peer_macaddr;
+ /* legacy rate set */
+ struct wmi_rate_set peer_legacy_rates;
+ /* ht rate set */
+ struct wmi_rate_set peer_ht_rates;
+} __packed;
+
+struct wmi_peer_set_q_empty_callback_cmd {
+ /* unique id identifying the VDEV, generated by the caller */
+ __le32 vdev_id;
+ /* peer MAC address */
+ struct wmi_mac_addr peer_macaddr;
+ __le32 callback_enable;
+} __packed;
+
+struct wmi_peer_flags_map {
+ u32 auth;
+ u32 qos;
+ u32 need_ptk_4_way;
+ u32 need_gtk_2_way;
+ u32 apsd;
+ u32 ht;
+ u32 bw40;
+ u32 stbc;
+ u32 ldbc;
+ u32 dyn_mimops;
+ u32 static_mimops;
+ u32 spatial_mux;
+ u32 vht;
+ u32 bw80;
+ u32 vht_2g;
+ u32 pmf;
+ u32 bw160;
+};
+
+enum wmi_peer_flags {
+ WMI_PEER_AUTH = 0x00000001,
+ WMI_PEER_QOS = 0x00000002,
+ WMI_PEER_NEED_PTK_4_WAY = 0x00000004,
+ WMI_PEER_NEED_GTK_2_WAY = 0x00000010,
+ WMI_PEER_APSD = 0x00000800,
+ WMI_PEER_HT = 0x00001000,
+ WMI_PEER_40MHZ = 0x00002000,
+ WMI_PEER_STBC = 0x00008000,
+ WMI_PEER_LDPC = 0x00010000,
+ WMI_PEER_DYN_MIMOPS = 0x00020000,
+ WMI_PEER_STATIC_MIMOPS = 0x00040000,
+ WMI_PEER_SPATIAL_MUX = 0x00200000,
+ WMI_PEER_VHT = 0x02000000,
+ WMI_PEER_80MHZ = 0x04000000,
+ WMI_PEER_VHT_2G = 0x08000000,
+ WMI_PEER_PMF = 0x10000000,
+ WMI_PEER_160MHZ = 0x20000000
+};
+
+enum wmi_10x_peer_flags {
+ WMI_10X_PEER_AUTH = 0x00000001,
+ WMI_10X_PEER_QOS = 0x00000002,
+ WMI_10X_PEER_NEED_PTK_4_WAY = 0x00000004,
+ WMI_10X_PEER_NEED_GTK_2_WAY = 0x00000010,
+ WMI_10X_PEER_APSD = 0x00000800,
+ WMI_10X_PEER_HT = 0x00001000,
+ WMI_10X_PEER_40MHZ = 0x00002000,
+ WMI_10X_PEER_STBC = 0x00008000,
+ WMI_10X_PEER_LDPC = 0x00010000,
+ WMI_10X_PEER_DYN_MIMOPS = 0x00020000,
+ WMI_10X_PEER_STATIC_MIMOPS = 0x00040000,
+ WMI_10X_PEER_SPATIAL_MUX = 0x00200000,
+ WMI_10X_PEER_VHT = 0x02000000,
+ WMI_10X_PEER_80MHZ = 0x04000000,
+ WMI_10X_PEER_160MHZ = 0x20000000
+};
+
+enum wmi_10_2_peer_flags {
+ WMI_10_2_PEER_AUTH = 0x00000001,
+ WMI_10_2_PEER_QOS = 0x00000002,
+ WMI_10_2_PEER_NEED_PTK_4_WAY = 0x00000004,
+ WMI_10_2_PEER_NEED_GTK_2_WAY = 0x00000010,
+ WMI_10_2_PEER_APSD = 0x00000800,
+ WMI_10_2_PEER_HT = 0x00001000,
+ WMI_10_2_PEER_40MHZ = 0x00002000,
+ WMI_10_2_PEER_STBC = 0x00008000,
+ WMI_10_2_PEER_LDPC = 0x00010000,
+ WMI_10_2_PEER_DYN_MIMOPS = 0x00020000,
+ WMI_10_2_PEER_STATIC_MIMOPS = 0x00040000,
+ WMI_10_2_PEER_SPATIAL_MUX = 0x00200000,
+ WMI_10_2_PEER_VHT = 0x02000000,
+ WMI_10_2_PEER_80MHZ = 0x04000000,
+ WMI_10_2_PEER_VHT_2G = 0x08000000,
+ WMI_10_2_PEER_PMF = 0x10000000,
+ WMI_10_2_PEER_160MHZ = 0x20000000
+};
+
+/*
+ * Peer rate capabilities.
+ *
+ * This is of interest to the ratecontrol
+ * module which resides in the firmware. The bit definitions are
+ * consistent with that defined in if_athrate.c.
+ */
+#define WMI_RC_DS_FLAG 0x01
+#define WMI_RC_CW40_FLAG 0x02
+#define WMI_RC_SGI_FLAG 0x04
+#define WMI_RC_HT_FLAG 0x08
+#define WMI_RC_RTSCTS_FLAG 0x10
+#define WMI_RC_TX_STBC_FLAG 0x20
+#define WMI_RC_RX_STBC_FLAG 0xC0
+#define WMI_RC_RX_STBC_FLAG_S 6
+#define WMI_RC_WEP_TKIP_FLAG 0x100
+#define WMI_RC_TS_FLAG 0x200
+#define WMI_RC_UAPSD_FLAG 0x400
+
+/* Maximum listen interval supported by hw in units of beacon interval */
+#define ATH10K_MAX_HW_LISTEN_INTERVAL 5
+
+struct wmi_common_peer_assoc_complete_cmd {
+ struct wmi_mac_addr peer_macaddr;
+ __le32 vdev_id;
+ __le32 peer_new_assoc; /* 1=assoc, 0=reassoc */
+ __le32 peer_associd; /* 16 LSBs */
+ __le32 peer_flags;
+ __le32 peer_caps; /* 16 LSBs */
+ __le32 peer_listen_intval;
+ __le32 peer_ht_caps;
+ __le32 peer_max_mpdu;
+ __le32 peer_mpdu_density; /* 0..16 */
+ __le32 peer_rate_caps;
+ struct wmi_rate_set peer_legacy_rates;
+ struct wmi_rate_set peer_ht_rates;
+ __le32 peer_nss; /* num of spatial streams */
+ __le32 peer_vht_caps;
+ __le32 peer_phymode;
+ struct wmi_vht_rate_set peer_vht_rates;
+};
+
+struct wmi_main_peer_assoc_complete_cmd {
+ struct wmi_common_peer_assoc_complete_cmd cmd;
+
+ /* HT Operation Element of the peer. Five bytes packed in 2
+ * INT32 array and filled from lsb to msb.
+ */
+ __le32 peer_ht_info[2];
+} __packed;
+
+struct wmi_10_1_peer_assoc_complete_cmd {
+ struct wmi_common_peer_assoc_complete_cmd cmd;
+} __packed;
+
+#define WMI_PEER_ASSOC_INFO0_MAX_MCS_IDX_LSB 0
+#define WMI_PEER_ASSOC_INFO0_MAX_MCS_IDX_MASK 0x0f
+#define WMI_PEER_ASSOC_INFO0_MAX_NSS_LSB 4
+#define WMI_PEER_ASSOC_INFO0_MAX_NSS_MASK 0xf0
+
+struct wmi_10_2_peer_assoc_complete_cmd {
+ struct wmi_common_peer_assoc_complete_cmd cmd;
+ __le32 info0; /* WMI_PEER_ASSOC_INFO0_ */
+} __packed;
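+
+/* Editor's sketch (not part of the original patch): composing info0 from
+ * the LSB/MASK pairs above.
+ */
+static inline __le32 wmi_10_2_peer_assoc_info0(u32 max_mcs_idx, u32 max_nss)
+{
+	u32 info0;
+
+	info0 = (max_mcs_idx << WMI_PEER_ASSOC_INFO0_MAX_MCS_IDX_LSB) &
+		WMI_PEER_ASSOC_INFO0_MAX_MCS_IDX_MASK;
+	info0 |= (max_nss << WMI_PEER_ASSOC_INFO0_MAX_NSS_LSB) &
+		 WMI_PEER_ASSOC_INFO0_MAX_NSS_MASK;
+
+	return __cpu_to_le32(info0);
+}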
+
+#define PEER_BW_RXNSS_OVERRIDE_OFFSET 31
+
+struct wmi_10_4_peer_assoc_complete_cmd {
+ struct wmi_10_2_peer_assoc_complete_cmd cmd;
+ __le32 peer_bw_rxnss_override;
+} __packed;
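+
+/* Editor's sketch, based on how the driver's mac code appears to use this
+ * field: bit 31 (the offset above) marks the override as valid, with the
+ * firmware-specific RX NSS encoding in the low bits.
+ */
+static inline __le32 wmi_10_4_bw_rxnss_override(u32 rxnss_bits)
+{
+	return __cpu_to_le32(BIT(PEER_BW_RXNSS_OVERRIDE_OFFSET) | rxnss_bits);
+}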
+
+struct wmi_peer_assoc_complete_arg {
+ u8 addr[ETH_ALEN];
+ u32 vdev_id;
+ bool peer_reassoc;
+ u16 peer_aid;
+ u32 peer_flags; /* see %WMI_PEER_ */
+ u16 peer_caps;
+ u32 peer_listen_intval;
+ u32 peer_ht_caps;
+ u32 peer_max_mpdu;
+ u32 peer_mpdu_density; /* 0..16 */
+ u32 peer_rate_caps; /* see %WMI_RC_ */
+ struct wmi_rate_set_arg peer_legacy_rates;
+ struct wmi_rate_set_arg peer_ht_rates;
+ u32 peer_num_spatial_streams;
+ u32 peer_vht_caps;
+ enum wmi_phy_mode peer_phymode;
+ struct wmi_vht_rate_set_arg peer_vht_rates;
+ u32 peer_bw_rxnss_override;
+};
+
+struct wmi_peer_add_wds_entry_cmd {
+ /* peer MAC address */
+ struct wmi_mac_addr peer_macaddr;
+ /* wds MAC addr */
+ struct wmi_mac_addr wds_macaddr;
+} __packed;
+
+struct wmi_peer_remove_wds_entry_cmd {
+ /* wds MAC addr */
+ struct wmi_mac_addr wds_macaddr;
+} __packed;
+
+struct wmi_peer_q_empty_callback_event {
+ /* peer MAC address */
+ struct wmi_mac_addr peer_macaddr;
+} __packed;
+
+/*
+ * Channel info WMI event
+ */
+struct wmi_chan_info_event {
+ __le32 err_code;
+ __le32 freq;
+ __le32 cmd_flags;
+ __le32 noise_floor;
+ __le32 rx_clear_count;
+ __le32 cycle_count;
+} __packed;
+
+struct wmi_10_4_chan_info_event {
+ __le32 err_code;
+ __le32 freq;
+ __le32 cmd_flags;
+ __le32 noise_floor;
+ __le32 rx_clear_count;
+ __le32 cycle_count;
+ __le32 chan_tx_pwr_range;
+ __le32 chan_tx_pwr_tp;
+ __le32 rx_frame_count;
+} __packed;
+
+struct wmi_peer_sta_kickout_event {
+ struct wmi_mac_addr peer_macaddr;
+} __packed;
+
+#define WMI_CHAN_INFO_FLAG_COMPLETE BIT(0)
+#define WMI_CHAN_INFO_FLAG_PRE_COMPLETE BIT(1)
+
+/* Beacon filter wmi command info */
+#define BCN_FLT_MAX_SUPPORTED_IES 256
+#define BCN_FLT_MAX_ELEMS_IE_LIST (BCN_FLT_MAX_SUPPORTED_IES / 32)
+
+struct bss_bcn_stats {
+ __le32 vdev_id;
+ __le32 bss_bcnsdropped;
+ __le32 bss_bcnsdelivered;
+} __packed;
+
+struct bcn_filter_stats {
+ __le32 bcns_dropped;
+ __le32 bcns_delivered;
+ __le32 activefilters;
+ struct bss_bcn_stats bss_stats;
+} __packed;
+
+struct wmi_add_bcn_filter_cmd {
+ u32 vdev_id;
+ u32 ie_map[BCN_FLT_MAX_ELEMS_IE_LIST];
+} __packed;
+
+enum wmi_sta_keepalive_method {
+ WMI_STA_KEEPALIVE_METHOD_NULL_FRAME = 1,
+ WMI_STA_KEEPALIVE_METHOD_UNSOLICITATED_ARP_RESPONSE = 2,
+};
+
+#define WMI_STA_KEEPALIVE_INTERVAL_DISABLE 0
+
+/* Firmware crashes if keepalive interval exceeds this limit */
+#define WMI_STA_KEEPALIVE_INTERVAL_MAX_SECONDS 0xffff
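+
+/* Editor's sketch (illustrative only): clamp a requested keepalive
+ * interval to the firmware-safe range described above; 0 disables
+ * keepalive entirely.
+ */
+static inline u32 wmi_sta_keepalive_clamp(u32 interval)
+{
+	return min_t(u32, interval, WMI_STA_KEEPALIVE_INTERVAL_MAX_SECONDS);
+}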
+
+/* note: ip4 addresses are in network byte order, i.e. big endian */
+struct wmi_sta_keepalive_arp_resp {
+ __be32 src_ip4_addr;
+ __be32 dest_ip4_addr;
+ struct wmi_mac_addr dest_mac_addr;
+} __packed;
+
+struct wmi_sta_keepalive_cmd {
+ __le32 vdev_id;
+ __le32 enabled;
+ __le32 method; /* WMI_STA_KEEPALIVE_METHOD_ */
+ __le32 interval; /* in seconds */
+ struct wmi_sta_keepalive_arp_resp arp_resp;
+} __packed;
+
+struct wmi_sta_keepalive_arg {
+ u32 vdev_id;
+ u32 enabled;
+ u32 method;
+ u32 interval;
+ __be32 src_ip4_addr;
+ __be32 dest_ip4_addr;
+ const u8 dest_mac_addr[ETH_ALEN];
+};
+
+enum wmi_force_fw_hang_type {
+ WMI_FORCE_FW_HANG_ASSERT = 1,
+ WMI_FORCE_FW_HANG_NO_DETECT,
+ WMI_FORCE_FW_HANG_CTRL_EP_FULL,
+ WMI_FORCE_FW_HANG_EMPTY_POINT,
+ WMI_FORCE_FW_HANG_STACK_OVERFLOW,
+ WMI_FORCE_FW_HANG_INFINITE_LOOP,
+};
+
+#define WMI_FORCE_FW_HANG_RANDOM_TIME 0xFFFFFFFF
+
+struct wmi_force_fw_hang_cmd {
+ __le32 type;
+ __le32 delay_ms;
+} __packed;
+
+enum wmi_pdev_reset_mode_type {
+ WMI_RST_MODE_TX_FLUSH = 1,
+ WMI_RST_MODE_WARM_RESET,
+ WMI_RST_MODE_COLD_RESET,
+ WMI_RST_MODE_WARM_RESET_RESTORE_CAL,
+ WMI_RST_MODE_COLD_RESET_RESTORE_CAL,
+ WMI_RST_MODE_MAX,
+};
+
+enum ath10k_dbglog_level {
+ ATH10K_DBGLOG_LEVEL_VERBOSE = 0,
+ ATH10K_DBGLOG_LEVEL_INFO = 1,
+ ATH10K_DBGLOG_LEVEL_WARN = 2,
+ ATH10K_DBGLOG_LEVEL_ERR = 3,
+};
+
+/* VAP ids to enable dbglog */
+#define ATH10K_DBGLOG_CFG_VAP_LOG_LSB 0
+#define ATH10K_DBGLOG_CFG_VAP_LOG_MASK 0x0000ffff
+
+/* to enable dbglog in the firmware */
+#define ATH10K_DBGLOG_CFG_REPORTING_ENABLE_LSB 16
+#define ATH10K_DBGLOG_CFG_REPORTING_ENABLE_MASK 0x00010000
+
+/* timestamp resolution */
+#define ATH10K_DBGLOG_CFG_RESOLUTION_LSB 17
+#define ATH10K_DBGLOG_CFG_RESOLUTION_MASK 0x000E0000
+
+/* number of queued messages before sending them to the host */
+#define ATH10K_DBGLOG_CFG_REPORT_SIZE_LSB 20
+#define ATH10K_DBGLOG_CFG_REPORT_SIZE_MASK 0x0ff00000
+
+/*
+ * Log levels to enable. This defines the minimum level to enable; this is
+ * not a bitmask. See enum ath10k_dbglog_level for the values.
+ */
+#define ATH10K_DBGLOG_CFG_LOG_LVL_LSB 28
+#define ATH10K_DBGLOG_CFG_LOG_LVL_MASK 0x70000000
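+
+/* Editor's sketch (not part of the original patch): a config_enable word
+ * built from the fields above -- reporting on, 8 queued messages per
+ * report, minimum level WARN.
+ */
+static inline u32 ath10k_dbglog_cfg_example(void)
+{
+	return (1 << ATH10K_DBGLOG_CFG_REPORTING_ENABLE_LSB) |
+	       ((8 << ATH10K_DBGLOG_CFG_REPORT_SIZE_LSB) &
+		ATH10K_DBGLOG_CFG_REPORT_SIZE_MASK) |
+	       ((ATH10K_DBGLOG_LEVEL_WARN << ATH10K_DBGLOG_CFG_LOG_LVL_LSB) &
+		ATH10K_DBGLOG_CFG_LOG_LVL_MASK);
+}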
+
+/*
+ * Note: this is a cleaned up version of a struct firmware uses. For
+ * example, config_valid was hidden inside an array.
+ */
+struct wmi_dbglog_cfg_cmd {
+ /* bitmask to hold mod id config*/
+ __le32 module_enable;
+
+ /* see ATH10K_DBGLOG_CFG_ */
+ __le32 config_enable;
+
+ /* mask of module id bits to be changed */
+ __le32 module_valid;
+
+ /* mask of config bits to be changed, see ATH10K_DBGLOG_CFG_ */
+ __le32 config_valid;
+} __packed;
+
+struct wmi_10_4_dbglog_cfg_cmd {
+ /* bitmask to hold mod id config*/
+ __le64 module_enable;
+
+ /* see ATH10K_DBGLOG_CFG_ */
+ __le32 config_enable;
+
+ /* mask of module id bits to be changed */
+ __le64 module_valid;
+
+ /* mask of config bits to be changed, see ATH10K_DBGLOG_CFG_ */
+ __le32 config_valid;
+} __packed;
+
+enum wmi_roam_reason {
+ WMI_ROAM_REASON_BETTER_AP = 1,
+ WMI_ROAM_REASON_BEACON_MISS = 2,
+ WMI_ROAM_REASON_LOW_RSSI = 3,
+ WMI_ROAM_REASON_SUITABLE_AP_FOUND = 4,
+ WMI_ROAM_REASON_HO_FAILED = 5,
+
+ /* keep last */
+ WMI_ROAM_REASON_MAX,
+};
+
+struct wmi_roam_ev {
+ __le32 vdev_id;
+ __le32 reason;
+} __packed;
+
+#define ATH10K_FRAGMT_THRESHOLD_MIN 540
+#define ATH10K_FRAGMT_THRESHOLD_MAX 2346
+
+#define WMI_MAX_EVENT 0x1000
+/* Headroom reserved in TXed WMI packets for the WMI command header */
+#define WMI_SKB_HEADROOM sizeof(struct wmi_cmd_hdr)
+
+/* By default disable power save for IBSS */
+#define ATH10K_DEFAULT_ATIM 0
+
+#define WMI_MAX_MEM_REQS 16
+
+struct wmi_scan_ev_arg {
+ __le32 event_type; /* %WMI_SCAN_EVENT_ */
+ __le32 reason; /* %WMI_SCAN_REASON_ */
+ __le32 channel_freq; /* only valid for WMI_SCAN_EVENT_FOREIGN_CHANNEL */
+ __le32 scan_req_id;
+ __le32 scan_id;
+ __le32 vdev_id;
+};
+
+struct wmi_tlv_mgmt_tx_compl_ev_arg {
+ __le32 desc_id;
+ __le32 status;
+ __le32 pdev_id;
+};
+
+struct wmi_mgmt_rx_ev_arg {
+ __le32 channel;
+ __le32 snr;
+ __le32 rate;
+ __le32 phy_mode;
+ __le32 buf_len;
+ __le32 status; /* %WMI_RX_STATUS_ */
+ struct wmi_mgmt_rx_ext_info ext_info;
+};
+
+struct wmi_ch_info_ev_arg {
+ __le32 err_code;
+ __le32 freq;
+ __le32 cmd_flags;
+ __le32 noise_floor;
+ __le32 rx_clear_count;
+ __le32 cycle_count;
+ __le32 chan_tx_pwr_range;
+ __le32 chan_tx_pwr_tp;
+ __le32 rx_frame_count;
+};
+
+/* From 10.4 firmware; other firmware branches may not use the same values. */
+enum wmi_vdev_start_status {
+ WMI_VDEV_START_OK = 0,
+ WMI_VDEV_START_CHAN_INVALID,
+};
+
+struct wmi_vdev_start_ev_arg {
+ __le32 vdev_id;
+ __le32 req_id;
+ __le32 resp_type; /* %WMI_VDEV_RESP_ */
+ __le32 status; /* See wmi_vdev_start_status enum above */
+};
+
+struct wmi_peer_kick_ev_arg {
+ const u8 *mac_addr;
+};
+
+struct wmi_swba_ev_arg {
+ __le32 vdev_map;
+ struct wmi_tim_info_arg tim_info[WMI_MAX_AP_VDEV];
+ const struct wmi_p2p_noa_info *noa_info[WMI_MAX_AP_VDEV];
+};
+
+struct wmi_phyerr_ev_arg {
+ u32 tsf_timestamp;
+ u16 freq1;
+ u16 freq2;
+ u8 rssi_combined;
+ u8 chan_width_mhz;
+ u8 phy_err_code;
+ u16 nf_chains[4];
+ u32 buf_len;
+ const u8 *buf;
+ u8 hdr_len;
+};
+
+struct wmi_phyerr_hdr_arg {
+ u32 num_phyerrs;
+ u32 tsf_l32;
+ u32 tsf_u32;
+ u32 buf_len;
+ const void *phyerrs;
+};
+
+struct wmi_dfs_status_ev_arg {
+ u32 status;
+};
+
+struct wmi_svc_rdy_ev_arg {
+ __le32 min_tx_power;
+ __le32 max_tx_power;
+ __le32 ht_cap;
+ __le32 vht_cap;
+ __le32 sw_ver0;
+ __le32 sw_ver1;
+ __le32 fw_build;
+ __le32 phy_capab;
+ __le32 num_rf_chains;
+ __le32 eeprom_rd;
+ __le32 num_mem_reqs;
+ __le32 low_5ghz_chan;
+ __le32 high_5ghz_chan;
+ const __le32 *service_map;
+ size_t service_map_len;
+ const struct wlan_host_mem_req *mem_reqs[WMI_MAX_MEM_REQS];
+};
+
+struct wmi_svc_avail_ev_arg {
+ bool service_map_ext_valid;
+ __le32 service_map_ext_len;
+ const __le32 *service_map_ext;
+};
+
+struct wmi_rdy_ev_arg {
+ __le32 sw_version;
+ __le32 abi_version;
+ __le32 status;
+ const u8 *mac_addr;
+};
+
+struct wmi_roam_ev_arg {
+ __le32 vdev_id;
+ __le32 reason;
+ __le32 rssi;
+};
+
+struct wmi_echo_ev_arg {
+ __le32 value;
+};
+
+struct wmi_pdev_temperature_event {
+	/* temperature value in degrees Celsius */
+ __le32 temperature;
+} __packed;
+
+struct wmi_pdev_bss_chan_info_event {
+ __le32 freq;
+ __le32 noise_floor;
+ __le64 cycle_busy;
+ __le64 cycle_total;
+ __le64 cycle_tx;
+ __le64 cycle_rx;
+ __le64 cycle_rx_bss;
+ __le32 reserved;
+} __packed;
+
+/* WOW structures */
+enum wmi_wow_wakeup_event {
+ WOW_BMISS_EVENT = 0,
+ WOW_BETTER_AP_EVENT,
+ WOW_DEAUTH_RECVD_EVENT,
+ WOW_MAGIC_PKT_RECVD_EVENT,
+ WOW_GTK_ERR_EVENT,
+ WOW_FOURWAY_HSHAKE_EVENT,
+ WOW_EAPOL_RECVD_EVENT,
+ WOW_NLO_DETECTED_EVENT,
+ WOW_DISASSOC_RECVD_EVENT,
+ WOW_PATTERN_MATCH_EVENT,
+ WOW_CSA_IE_EVENT,
+ WOW_PROBE_REQ_WPS_IE_EVENT,
+ WOW_AUTH_REQ_EVENT,
+ WOW_ASSOC_REQ_EVENT,
+ WOW_HTT_EVENT,
+ WOW_RA_MATCH_EVENT,
+ WOW_HOST_AUTO_SHUTDOWN_EVENT,
+ WOW_IOAC_MAGIC_EVENT,
+ WOW_IOAC_SHORT_EVENT,
+ WOW_IOAC_EXTEND_EVENT,
+ WOW_IOAC_TIMER_EVENT,
+ WOW_DFS_PHYERR_RADAR_EVENT,
+ WOW_BEACON_EVENT,
+ WOW_CLIENT_KICKOUT_EVENT,
+ WOW_EVENT_MAX,
+};
+
+#define C2S(x) case x: return #x
+
+static inline const char *wow_wakeup_event(enum wmi_wow_wakeup_event ev)
+{
+ switch (ev) {
+ C2S(WOW_BMISS_EVENT);
+ C2S(WOW_BETTER_AP_EVENT);
+ C2S(WOW_DEAUTH_RECVD_EVENT);
+ C2S(WOW_MAGIC_PKT_RECVD_EVENT);
+ C2S(WOW_GTK_ERR_EVENT);
+ C2S(WOW_FOURWAY_HSHAKE_EVENT);
+ C2S(WOW_EAPOL_RECVD_EVENT);
+ C2S(WOW_NLO_DETECTED_EVENT);
+ C2S(WOW_DISASSOC_RECVD_EVENT);
+ C2S(WOW_PATTERN_MATCH_EVENT);
+ C2S(WOW_CSA_IE_EVENT);
+ C2S(WOW_PROBE_REQ_WPS_IE_EVENT);
+ C2S(WOW_AUTH_REQ_EVENT);
+ C2S(WOW_ASSOC_REQ_EVENT);
+ C2S(WOW_HTT_EVENT);
+ C2S(WOW_RA_MATCH_EVENT);
+ C2S(WOW_HOST_AUTO_SHUTDOWN_EVENT);
+ C2S(WOW_IOAC_MAGIC_EVENT);
+ C2S(WOW_IOAC_SHORT_EVENT);
+ C2S(WOW_IOAC_EXTEND_EVENT);
+ C2S(WOW_IOAC_TIMER_EVENT);
+ C2S(WOW_DFS_PHYERR_RADAR_EVENT);
+ C2S(WOW_BEACON_EVENT);
+ C2S(WOW_CLIENT_KICKOUT_EVENT);
+ C2S(WOW_EVENT_MAX);
+ default:
+ return NULL;
+ }
+}
+
+enum wmi_wow_wake_reason {
+ WOW_REASON_UNSPECIFIED = -1,
+ WOW_REASON_NLOD = 0,
+ WOW_REASON_AP_ASSOC_LOST,
+ WOW_REASON_LOW_RSSI,
+ WOW_REASON_DEAUTH_RECVD,
+ WOW_REASON_DISASSOC_RECVD,
+ WOW_REASON_GTK_HS_ERR,
+ WOW_REASON_EAP_REQ,
+ WOW_REASON_FOURWAY_HS_RECV,
+ WOW_REASON_TIMER_INTR_RECV,
+ WOW_REASON_PATTERN_MATCH_FOUND,
+ WOW_REASON_RECV_MAGIC_PATTERN,
+ WOW_REASON_P2P_DISC,
+ WOW_REASON_WLAN_HB,
+ WOW_REASON_CSA_EVENT,
+ WOW_REASON_PROBE_REQ_WPS_IE_RECV,
+ WOW_REASON_AUTH_REQ_RECV,
+ WOW_REASON_ASSOC_REQ_RECV,
+ WOW_REASON_HTT_EVENT,
+ WOW_REASON_RA_MATCH,
+ WOW_REASON_HOST_AUTO_SHUTDOWN,
+ WOW_REASON_IOAC_MAGIC_EVENT,
+ WOW_REASON_IOAC_SHORT_EVENT,
+ WOW_REASON_IOAC_EXTEND_EVENT,
+ WOW_REASON_IOAC_TIMER_EVENT,
+ WOW_REASON_ROAM_HO,
+ WOW_REASON_DFS_PHYERR_RADADR_EVENT,
+ WOW_REASON_BEACON_RECV,
+ WOW_REASON_CLIENT_KICKOUT_EVENT,
+ WOW_REASON_DEBUG_TEST = 0xFF,
+};
+
+static inline const char *wow_reason(enum wmi_wow_wake_reason reason)
+{
+ switch (reason) {
+ C2S(WOW_REASON_UNSPECIFIED);
+ C2S(WOW_REASON_NLOD);
+ C2S(WOW_REASON_AP_ASSOC_LOST);
+ C2S(WOW_REASON_LOW_RSSI);
+ C2S(WOW_REASON_DEAUTH_RECVD);
+ C2S(WOW_REASON_DISASSOC_RECVD);
+ C2S(WOW_REASON_GTK_HS_ERR);
+ C2S(WOW_REASON_EAP_REQ);
+ C2S(WOW_REASON_FOURWAY_HS_RECV);
+ C2S(WOW_REASON_TIMER_INTR_RECV);
+ C2S(WOW_REASON_PATTERN_MATCH_FOUND);
+ C2S(WOW_REASON_RECV_MAGIC_PATTERN);
+ C2S(WOW_REASON_P2P_DISC);
+ C2S(WOW_REASON_WLAN_HB);
+ C2S(WOW_REASON_CSA_EVENT);
+ C2S(WOW_REASON_PROBE_REQ_WPS_IE_RECV);
+ C2S(WOW_REASON_AUTH_REQ_RECV);
+ C2S(WOW_REASON_ASSOC_REQ_RECV);
+ C2S(WOW_REASON_HTT_EVENT);
+ C2S(WOW_REASON_RA_MATCH);
+ C2S(WOW_REASON_HOST_AUTO_SHUTDOWN);
+ C2S(WOW_REASON_IOAC_MAGIC_EVENT);
+ C2S(WOW_REASON_IOAC_SHORT_EVENT);
+ C2S(WOW_REASON_IOAC_EXTEND_EVENT);
+ C2S(WOW_REASON_IOAC_TIMER_EVENT);
+ C2S(WOW_REASON_ROAM_HO);
+ C2S(WOW_REASON_DFS_PHYERR_RADADR_EVENT);
+ C2S(WOW_REASON_BEACON_RECV);
+ C2S(WOW_REASON_CLIENT_KICKOUT_EVENT);
+ C2S(WOW_REASON_DEBUG_TEST);
+ default:
+ return NULL;
+ }
+}
+
+#undef C2S
+
+struct wmi_wow_ev_arg {
+ u32 vdev_id;
+ u32 flag;
+ enum wmi_wow_wake_reason wake_reason;
+ u32 data_len;
+};
+
+#define WOW_MIN_PATTERN_SIZE 1
+#define WOW_MAX_PATTERN_SIZE 148
+#define WOW_MAX_PKT_OFFSET 128
+#define WOW_HDR_LEN (sizeof(struct ieee80211_hdr_3addr) + \
+ sizeof(struct rfc1042_hdr))
+#define WOW_MAX_REDUCE (WOW_HDR_LEN - sizeof(struct ethhdr) - \
+ offsetof(struct ieee80211_hdr_3addr, addr1))
+
+enum wmi_tdls_state {
+ WMI_TDLS_DISABLE,
+ WMI_TDLS_ENABLE_PASSIVE,
+ WMI_TDLS_ENABLE_ACTIVE,
+ WMI_TDLS_ENABLE_ACTIVE_EXTERNAL_CONTROL,
+};
+
+enum wmi_tdls_peer_state {
+ WMI_TDLS_PEER_STATE_PEERING,
+ WMI_TDLS_PEER_STATE_CONNECTED,
+ WMI_TDLS_PEER_STATE_TEARDOWN,
+};
+
+struct wmi_tdls_peer_update_cmd_arg {
+ u32 vdev_id;
+ enum wmi_tdls_peer_state peer_state;
+ u8 addr[ETH_ALEN];
+};
+
+#define WMI_TDLS_MAX_SUPP_OPER_CLASSES 32
+
+#define WMI_TDLS_PEER_SP_MASK 0x60
+#define WMI_TDLS_PEER_SP_LSB 5
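+
+/* Editor's sketch (illustrative only): extracting the max service period
+ * subfield from a peer's QoS/U-APSD byte with the mask/shift above.
+ */
+static inline u8 wmi_tdls_peer_max_sp(u8 peer_qos)
+{
+	return (peer_qos & WMI_TDLS_PEER_SP_MASK) >> WMI_TDLS_PEER_SP_LSB;
+}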
+
+enum wmi_tdls_options {
+ WMI_TDLS_OFFCHAN_EN = BIT(0),
+ WMI_TDLS_BUFFER_STA_EN = BIT(1),
+ WMI_TDLS_SLEEP_STA_EN = BIT(2),
+};
+
+enum {
+ WMI_TDLS_PEER_QOS_AC_VO = BIT(0),
+ WMI_TDLS_PEER_QOS_AC_VI = BIT(1),
+ WMI_TDLS_PEER_QOS_AC_BK = BIT(2),
+ WMI_TDLS_PEER_QOS_AC_BE = BIT(3),
+};
+
+struct wmi_tdls_peer_capab_arg {
+ u8 peer_uapsd_queues;
+ u8 peer_max_sp;
+ u32 buff_sta_support;
+ u32 off_chan_support;
+ u32 peer_curr_operclass;
+ u32 self_curr_operclass;
+ u32 peer_chan_len;
+ u32 peer_operclass_len;
+ u8 peer_operclass[WMI_TDLS_MAX_SUPP_OPER_CLASSES];
+ u32 is_peer_responder;
+ u32 pref_offchan_num;
+ u32 pref_offchan_bw;
+};
+
+struct wmi_10_4_tdls_set_state_cmd {
+ __le32 vdev_id;
+ __le32 state;
+ __le32 notification_interval_ms;
+ __le32 tx_discovery_threshold;
+ __le32 tx_teardown_threshold;
+ __le32 rssi_teardown_threshold;
+ __le32 rssi_delta;
+ __le32 tdls_options;
+ __le32 tdls_peer_traffic_ind_window;
+ __le32 tdls_peer_traffic_response_timeout_ms;
+ __le32 tdls_puapsd_mask;
+ __le32 tdls_puapsd_inactivity_time_ms;
+ __le32 tdls_puapsd_rx_frame_threshold;
+ __le32 teardown_notification_ms;
+ __le32 tdls_peer_kickout_threshold;
+} __packed;
+
+struct wmi_tdls_peer_capabilities {
+ __le32 peer_qos;
+ __le32 buff_sta_support;
+ __le32 off_chan_support;
+ __le32 peer_curr_operclass;
+ __le32 self_curr_operclass;
+ __le32 peer_chan_len;
+ __le32 peer_operclass_len;
+ u8 peer_operclass[WMI_TDLS_MAX_SUPP_OPER_CLASSES];
+ __le32 is_peer_responder;
+ __le32 pref_offchan_num;
+ __le32 pref_offchan_bw;
+ struct wmi_channel peer_chan_list[1];
+} __packed;
+
+struct wmi_10_4_tdls_peer_update_cmd {
+ __le32 vdev_id;
+ struct wmi_mac_addr peer_macaddr;
+ __le32 peer_state;
+ __le32 reserved[4];
+ struct wmi_tdls_peer_capabilities peer_capab;
+} __packed;
+
+enum wmi_tdls_peer_reason {
+ WMI_TDLS_TEARDOWN_REASON_TX,
+ WMI_TDLS_TEARDOWN_REASON_RSSI,
+ WMI_TDLS_TEARDOWN_REASON_SCAN,
+ WMI_TDLS_DISCONNECTED_REASON_PEER_DELETE,
+ WMI_TDLS_TEARDOWN_REASON_PTR_TIMEOUT,
+ WMI_TDLS_TEARDOWN_REASON_BAD_PTR,
+ WMI_TDLS_TEARDOWN_REASON_NO_RESPONSE,
+ WMI_TDLS_ENTER_BUF_STA,
+ WMI_TDLS_EXIT_BUF_STA,
+ WMI_TDLS_ENTER_BT_BUSY_MODE,
+ WMI_TDLS_EXIT_BT_BUSY_MODE,
+ WMI_TDLS_SCAN_STARTED_EVENT,
+ WMI_TDLS_SCAN_COMPLETED_EVENT,
+};
+
+enum wmi_tdls_peer_notification {
+ WMI_TDLS_SHOULD_DISCOVER,
+ WMI_TDLS_SHOULD_TEARDOWN,
+ WMI_TDLS_PEER_DISCONNECTED,
+ WMI_TDLS_CONNECTION_TRACKER_NOTIFICATION,
+};
+
+struct wmi_tdls_peer_event {
+ struct wmi_mac_addr peer_macaddr;
+	/* see enum wmi_tdls_peer_notification */
+ __le32 peer_status;
+ /* see enum wmi_tdls_peer_reason */
+ __le32 peer_reason;
+ __le32 vdev_id;
+} __packed;
+
+enum wmi_txbf_conf {
+ WMI_TXBF_CONF_UNSUPPORTED,
+ WMI_TXBF_CONF_BEFORE_ASSOC,
+ WMI_TXBF_CONF_AFTER_ASSOC,
+};
+
+#define WMI_CCA_DETECT_LEVEL_AUTO 0
+#define WMI_CCA_DETECT_MARGIN_AUTO 0
+
+struct wmi_pdev_set_adaptive_cca_params {
+ __le32 enable;
+ __le32 cca_detect_level;
+ __le32 cca_detect_margin;
+} __packed;
+
+enum wmi_host_platform_type {
+ WMI_HOST_PLATFORM_HIGH_PERF,
+ WMI_HOST_PLATFORM_LOW_PERF,
+};
+
+enum wmi_bss_survey_req_type {
+ WMI_BSS_SURVEY_REQ_TYPE_READ = 1,
+ WMI_BSS_SURVEY_REQ_TYPE_READ_CLEAR,
+};
+
+struct wmi_pdev_chan_info_req_cmd {
+ __le32 type;
+ __le32 reserved;
+} __packed;
+
+struct ath10k;
+struct ath10k_vif;
+struct ath10k_fw_stats_pdev;
+struct ath10k_fw_stats_peer;
+struct ath10k_fw_stats;
+
+int ath10k_wmi_attach(struct ath10k *ar);
+void ath10k_wmi_detach(struct ath10k *ar);
+void ath10k_wmi_free_host_mem(struct ath10k *ar);
+int ath10k_wmi_wait_for_service_ready(struct ath10k *ar);
+int ath10k_wmi_wait_for_unified_ready(struct ath10k *ar);
+
+struct sk_buff *ath10k_wmi_alloc_skb(struct ath10k *ar, u32 len);
+int ath10k_wmi_connect(struct ath10k *ar);
+
+int ath10k_wmi_cmd_send(struct ath10k *ar, struct sk_buff *skb, u32 cmd_id);
+int ath10k_wmi_cmd_send_nowait(struct ath10k *ar, struct sk_buff *skb,
+ u32 cmd_id);
+void ath10k_wmi_start_scan_init(struct ath10k *ar, struct wmi_start_scan_arg *arg);
+
+void ath10k_wmi_pull_pdev_stats_base(const struct wmi_pdev_stats_base *src,
+ struct ath10k_fw_stats_pdev *dst);
+void ath10k_wmi_pull_pdev_stats_tx(const struct wmi_pdev_stats_tx *src,
+ struct ath10k_fw_stats_pdev *dst);
+void ath10k_wmi_pull_pdev_stats_rx(const struct wmi_pdev_stats_rx *src,
+ struct ath10k_fw_stats_pdev *dst);
+void ath10k_wmi_pull_pdev_stats_extra(const struct wmi_pdev_stats_extra *src,
+ struct ath10k_fw_stats_pdev *dst);
+void ath10k_wmi_pull_peer_stats(const struct wmi_peer_stats *src,
+ struct ath10k_fw_stats_peer *dst);
+void ath10k_wmi_put_host_mem_chunks(struct ath10k *ar,
+ struct wmi_host_mem_chunks *chunks);
+void ath10k_wmi_put_start_scan_common(struct wmi_start_scan_common *cmn,
+ const struct wmi_start_scan_arg *arg);
+void ath10k_wmi_set_wmm_param(struct wmi_wmm_params *params,
+ const struct wmi_wmm_params_arg *arg);
+void ath10k_wmi_put_wmi_channel(struct wmi_channel *ch,
+ const struct wmi_channel_arg *arg);
+int ath10k_wmi_start_scan_verify(const struct wmi_start_scan_arg *arg);
+
+int ath10k_wmi_event_scan(struct ath10k *ar, struct sk_buff *skb);
+int ath10k_wmi_event_mgmt_rx(struct ath10k *ar, struct sk_buff *skb);
+int ath10k_wmi_event_mgmt_tx_compl(struct ath10k *ar, struct sk_buff *skb);
+void ath10k_wmi_event_chan_info(struct ath10k *ar, struct sk_buff *skb);
+void ath10k_wmi_event_echo(struct ath10k *ar, struct sk_buff *skb);
+int ath10k_wmi_event_debug_mesg(struct ath10k *ar, struct sk_buff *skb);
+void ath10k_wmi_event_update_stats(struct ath10k *ar, struct sk_buff *skb);
+void ath10k_wmi_event_vdev_start_resp(struct ath10k *ar, struct sk_buff *skb);
+void ath10k_wmi_event_vdev_stopped(struct ath10k *ar, struct sk_buff *skb);
+void ath10k_wmi_event_peer_sta_kickout(struct ath10k *ar, struct sk_buff *skb);
+void ath10k_wmi_event_host_swba(struct ath10k *ar, struct sk_buff *skb);
+void ath10k_wmi_event_tbttoffset_update(struct ath10k *ar, struct sk_buff *skb);
+void ath10k_wmi_event_dfs(struct ath10k *ar,
+ struct wmi_phyerr_ev_arg *phyerr, u64 tsf);
+void ath10k_wmi_event_spectral_scan(struct ath10k *ar,
+ struct wmi_phyerr_ev_arg *phyerr,
+ u64 tsf);
+void ath10k_wmi_event_phyerr(struct ath10k *ar, struct sk_buff *skb);
+void ath10k_wmi_event_roam(struct ath10k *ar, struct sk_buff *skb);
+void ath10k_wmi_event_profile_match(struct ath10k *ar, struct sk_buff *skb);
+void ath10k_wmi_event_debug_print(struct ath10k *ar, struct sk_buff *skb);
+void ath10k_wmi_event_pdev_qvit(struct ath10k *ar, struct sk_buff *skb);
+void ath10k_wmi_event_wlan_profile_data(struct ath10k *ar, struct sk_buff *skb);
+void ath10k_wmi_event_rtt_measurement_report(struct ath10k *ar,
+ struct sk_buff *skb);
+void ath10k_wmi_event_tsf_measurement_report(struct ath10k *ar,
+ struct sk_buff *skb);
+void ath10k_wmi_event_rtt_error_report(struct ath10k *ar, struct sk_buff *skb);
+void ath10k_wmi_event_wow_wakeup_host(struct ath10k *ar, struct sk_buff *skb);
+void ath10k_wmi_event_dcs_interference(struct ath10k *ar, struct sk_buff *skb);
+void ath10k_wmi_event_pdev_tpc_config(struct ath10k *ar, struct sk_buff *skb);
+void ath10k_wmi_event_pdev_ftm_intg(struct ath10k *ar, struct sk_buff *skb);
+void ath10k_wmi_event_gtk_offload_status(struct ath10k *ar,
+ struct sk_buff *skb);
+void ath10k_wmi_event_gtk_rekey_fail(struct ath10k *ar, struct sk_buff *skb);
+void ath10k_wmi_event_delba_complete(struct ath10k *ar, struct sk_buff *skb);
+void ath10k_wmi_event_addba_complete(struct ath10k *ar, struct sk_buff *skb);
+void ath10k_wmi_event_vdev_install_key_complete(struct ath10k *ar,
+ struct sk_buff *skb);
+void ath10k_wmi_event_inst_rssi_stats(struct ath10k *ar, struct sk_buff *skb);
+void ath10k_wmi_event_vdev_standby_req(struct ath10k *ar, struct sk_buff *skb);
+void ath10k_wmi_event_vdev_resume_req(struct ath10k *ar, struct sk_buff *skb);
+void ath10k_wmi_event_service_ready(struct ath10k *ar, struct sk_buff *skb);
+int ath10k_wmi_event_ready(struct ath10k *ar, struct sk_buff *skb);
+void ath10k_wmi_event_service_available(struct ath10k *ar, struct sk_buff *skb);
+int ath10k_wmi_op_pull_phyerr_ev(struct ath10k *ar, const void *phyerr_buf,
+ int left_len, struct wmi_phyerr_ev_arg *arg);
+void ath10k_wmi_main_op_fw_stats_fill(struct ath10k *ar,
+ struct ath10k_fw_stats *fw_stats,
+ char *buf);
+void ath10k_wmi_10x_op_fw_stats_fill(struct ath10k *ar,
+ struct ath10k_fw_stats *fw_stats,
+ char *buf);
+size_t ath10k_wmi_fw_stats_num_peers(struct list_head *head);
+size_t ath10k_wmi_fw_stats_num_vdevs(struct list_head *head);
+void ath10k_wmi_10_4_op_fw_stats_fill(struct ath10k *ar,
+ struct ath10k_fw_stats *fw_stats,
+ char *buf);
+int ath10k_wmi_op_get_vdev_subtype(struct ath10k *ar,
+ enum wmi_vdev_subtype subtype);
+int ath10k_wmi_barrier(struct ath10k *ar);
+void ath10k_wmi_tpc_config_get_rate_code(u8 *rate_code, u16 *pream_table,
+ u32 num_tx_chain);
+void ath10k_wmi_event_tpc_final_table(struct ath10k *ar, struct sk_buff *skb);
+
+#endif /* _WMI_H_ */
diff --git a/drivers/net/wireless/ath/ath10k/wow.c b/drivers/net/wireless/ath/ath10k/wow.c
new file mode 100644
index 000000000..1d44227d1
--- /dev/null
+++ b/drivers/net/wireless/ath/ath10k/wow.c
@@ -0,0 +1,483 @@
+/*
+ * Copyright (c) 2015-2017 Qualcomm Atheros, Inc.
+ * Copyright (c) 2018, The Linux Foundation. All rights reserved.
+ *
+ * Permission to use, copy, modify, and/or distribute this software for any
+ * purpose with or without fee is hereby granted, provided that the above
+ * copyright notice and this permission notice appear in all copies.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
+ * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
+ * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+ * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+ * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
+ * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+ */
+
+#include "mac.h"
+
+#include <net/mac80211.h>
+#include "hif.h"
+#include "core.h"
+#include "debug.h"
+#include "wmi.h"
+#include "wmi-ops.h"
+
+static const struct wiphy_wowlan_support ath10k_wowlan_support = {
+ .flags = WIPHY_WOWLAN_DISCONNECT |
+ WIPHY_WOWLAN_MAGIC_PKT,
+ .pattern_min_len = WOW_MIN_PATTERN_SIZE,
+ .pattern_max_len = WOW_MAX_PATTERN_SIZE,
+ .max_pkt_offset = WOW_MAX_PKT_OFFSET,
+};
+
+static int ath10k_wow_vif_cleanup(struct ath10k_vif *arvif)
+{
+ struct ath10k *ar = arvif->ar;
+ int i, ret;
+
+ for (i = 0; i < WOW_EVENT_MAX; i++) {
+ ret = ath10k_wmi_wow_add_wakeup_event(ar, arvif->vdev_id, i, 0);
+ if (ret) {
+ ath10k_warn(ar, "failed to issue wow wakeup for event %s on vdev %i: %d\n",
+ wow_wakeup_event(i), arvif->vdev_id, ret);
+ return ret;
+ }
+ }
+
+ for (i = 0; i < ar->wow.max_num_patterns; i++) {
+ ret = ath10k_wmi_wow_del_pattern(ar, arvif->vdev_id, i);
+ if (ret) {
+ ath10k_warn(ar, "failed to delete wow pattern %d for vdev %i: %d\n",
+ i, arvif->vdev_id, ret);
+ return ret;
+ }
+ }
+
+ return 0;
+}
+
+static int ath10k_wow_cleanup(struct ath10k *ar)
+{
+ struct ath10k_vif *arvif;
+ int ret;
+
+ lockdep_assert_held(&ar->conf_mutex);
+
+ list_for_each_entry(arvif, &ar->arvifs, list) {
+ ret = ath10k_wow_vif_cleanup(arvif);
+ if (ret) {
+ ath10k_warn(ar, "failed to clean wow wakeups on vdev %i: %d\n",
+ arvif->vdev_id, ret);
+ return ret;
+ }
+ }
+
+ return 0;
+}
+
+/*
+ * Convert an 802.3 format packet to an 802.11 format packet.
+ * +------------+-----------+--------+----------------+
+ * 802.3: |dest mac(6B)|src mac(6B)|type(2B)| body... |
+ * +------------+-----------+--------+----------------+
+ * |__ |_______ |____________ |________
+ * | | | |
+ * +--+------------+----+-----------+---------------+-----------+
+ * 802.11: |4B|dest mac(6B)| 6B |src mac(6B)| 8B |type(2B)| body... |
+ * +--+------------+----+-----------+---------------+-----------+
+ */
+static void ath10k_wow_convert_8023_to_80211
+ (struct cfg80211_pkt_pattern *new,
+ const struct cfg80211_pkt_pattern *old)
+{
+ u8 hdr_8023_pattern[ETH_HLEN] = {};
+ u8 hdr_8023_bit_mask[ETH_HLEN] = {};
+ u8 hdr_80211_pattern[WOW_HDR_LEN] = {};
+ u8 hdr_80211_bit_mask[WOW_HDR_LEN] = {};
+
+ int total_len = old->pkt_offset + old->pattern_len;
+ int hdr_80211_end_offset;
+
+ struct ieee80211_hdr_3addr *new_hdr_pattern =
+ (struct ieee80211_hdr_3addr *)hdr_80211_pattern;
+ struct ieee80211_hdr_3addr *new_hdr_mask =
+ (struct ieee80211_hdr_3addr *)hdr_80211_bit_mask;
+ struct ethhdr *old_hdr_pattern = (struct ethhdr *)hdr_8023_pattern;
+ struct ethhdr *old_hdr_mask = (struct ethhdr *)hdr_8023_bit_mask;
+ int hdr_len = sizeof(*new_hdr_pattern);
+
+ struct rfc1042_hdr *new_rfc_pattern =
+ (struct rfc1042_hdr *)(hdr_80211_pattern + hdr_len);
+ struct rfc1042_hdr *new_rfc_mask =
+ (struct rfc1042_hdr *)(hdr_80211_bit_mask + hdr_len);
+ int rfc_len = sizeof(*new_rfc_pattern);
+
+ memcpy(hdr_8023_pattern + old->pkt_offset,
+ old->pattern, ETH_HLEN - old->pkt_offset);
+ memcpy(hdr_8023_bit_mask + old->pkt_offset,
+ old->mask, ETH_HLEN - old->pkt_offset);
+
+ /* Copy destination address */
+ memcpy(new_hdr_pattern->addr1, old_hdr_pattern->h_dest, ETH_ALEN);
+ memcpy(new_hdr_mask->addr1, old_hdr_mask->h_dest, ETH_ALEN);
+
+ /* Copy source address */
+ memcpy(new_hdr_pattern->addr3, old_hdr_pattern->h_source, ETH_ALEN);
+ memcpy(new_hdr_mask->addr3, old_hdr_mask->h_source, ETH_ALEN);
+
+ /* Copy logic link type */
+ memcpy(&new_rfc_pattern->snap_type,
+ &old_hdr_pattern->h_proto,
+ sizeof(old_hdr_pattern->h_proto));
+ memcpy(&new_rfc_mask->snap_type,
+ &old_hdr_mask->h_proto,
+ sizeof(old_hdr_mask->h_proto));
+
+	/* Calculate new pkt_offset */
+ if (old->pkt_offset < ETH_ALEN)
+ new->pkt_offset = old->pkt_offset +
+ offsetof(struct ieee80211_hdr_3addr, addr1);
+ else if (old->pkt_offset < offsetof(struct ethhdr, h_proto))
+ new->pkt_offset = old->pkt_offset +
+ offsetof(struct ieee80211_hdr_3addr, addr3) -
+ offsetof(struct ethhdr, h_source);
+ else
+ new->pkt_offset = old->pkt_offset + hdr_len + rfc_len - ETH_HLEN;
+
+	/* Calculate new hdr end offset */
+ if (total_len > ETH_HLEN)
+ hdr_80211_end_offset = hdr_len + rfc_len;
+ else if (total_len > offsetof(struct ethhdr, h_proto))
+ hdr_80211_end_offset = hdr_len + rfc_len + total_len - ETH_HLEN;
+ else if (total_len > ETH_ALEN)
+ hdr_80211_end_offset = total_len - ETH_ALEN +
+ offsetof(struct ieee80211_hdr_3addr, addr3);
+ else
+ hdr_80211_end_offset = total_len +
+ offsetof(struct ieee80211_hdr_3addr, addr1);
+
+ new->pattern_len = hdr_80211_end_offset - new->pkt_offset;
+
+ memcpy((u8 *)new->pattern,
+ hdr_80211_pattern + new->pkt_offset,
+ new->pattern_len);
+ memcpy((u8 *)new->mask,
+ hdr_80211_bit_mask + new->pkt_offset,
+ new->pattern_len);
+
+ if (total_len > ETH_HLEN) {
+ /* Copy frame body */
+ memcpy((u8 *)new->pattern + new->pattern_len,
+ (void *)old->pattern + ETH_HLEN - old->pkt_offset,
+ total_len - ETH_HLEN);
+ memcpy((u8 *)new->mask + new->pattern_len,
+ (void *)old->mask + ETH_HLEN - old->pkt_offset,
+ total_len - ETH_HLEN);
+
+ new->pattern_len += total_len - ETH_HLEN;
+ }
+}
+
+static int ath10k_vif_wow_set_wakeups(struct ath10k_vif *arvif,
+ struct cfg80211_wowlan *wowlan)
+{
+ int ret, i;
+ unsigned long wow_mask = 0;
+ struct ath10k *ar = arvif->ar;
+ const struct cfg80211_pkt_pattern *patterns = wowlan->patterns;
+ int pattern_id = 0;
+
+ /* Setup requested WOW features */
+ switch (arvif->vdev_type) {
+ case WMI_VDEV_TYPE_IBSS:
+ __set_bit(WOW_BEACON_EVENT, &wow_mask);
+ /* fall through */
+ case WMI_VDEV_TYPE_AP:
+ __set_bit(WOW_DEAUTH_RECVD_EVENT, &wow_mask);
+ __set_bit(WOW_DISASSOC_RECVD_EVENT, &wow_mask);
+ __set_bit(WOW_PROBE_REQ_WPS_IE_EVENT, &wow_mask);
+ __set_bit(WOW_AUTH_REQ_EVENT, &wow_mask);
+ __set_bit(WOW_ASSOC_REQ_EVENT, &wow_mask);
+ __set_bit(WOW_HTT_EVENT, &wow_mask);
+ __set_bit(WOW_RA_MATCH_EVENT, &wow_mask);
+ break;
+ case WMI_VDEV_TYPE_STA:
+ if (wowlan->disconnect) {
+ __set_bit(WOW_DEAUTH_RECVD_EVENT, &wow_mask);
+ __set_bit(WOW_DISASSOC_RECVD_EVENT, &wow_mask);
+ __set_bit(WOW_BMISS_EVENT, &wow_mask);
+ __set_bit(WOW_CSA_IE_EVENT, &wow_mask);
+ }
+
+ if (wowlan->magic_pkt)
+ __set_bit(WOW_MAGIC_PKT_RECVD_EVENT, &wow_mask);
+ break;
+ default:
+ break;
+ }
+
+ for (i = 0; i < wowlan->n_patterns; i++) {
+ u8 bitmask[WOW_MAX_PATTERN_SIZE] = {};
+ u8 ath_pattern[WOW_MAX_PATTERN_SIZE] = {};
+ u8 ath_bitmask[WOW_MAX_PATTERN_SIZE] = {};
+ struct cfg80211_pkt_pattern new_pattern = {};
+ struct cfg80211_pkt_pattern old_pattern = patterns[i];
+ int j;
+
+ new_pattern.pattern = ath_pattern;
+ new_pattern.mask = ath_bitmask;
+ if (patterns[i].pattern_len > WOW_MAX_PATTERN_SIZE)
+ continue;
+ /* convert bytemask to bitmask */
+ for (j = 0; j < patterns[i].pattern_len; j++)
+ if (patterns[i].mask[j / 8] & BIT(j % 8))
+ bitmask[j] = 0xff;
+ old_pattern.mask = bitmask;
+
+ if (ar->wmi.rx_decap_mode == ATH10K_HW_TXRX_NATIVE_WIFI) {
+ if (patterns[i].pkt_offset < ETH_HLEN) {
+ ath10k_wow_convert_8023_to_80211(&new_pattern,
+ &old_pattern);
+ } else {
+ new_pattern = old_pattern;
+ new_pattern.pkt_offset += WOW_HDR_LEN - ETH_HLEN;
+ }
+ }
+
+ if (WARN_ON(new_pattern.pattern_len > WOW_MAX_PATTERN_SIZE))
+ return -EINVAL;
+
+ ret = ath10k_wmi_wow_add_pattern(ar, arvif->vdev_id,
+ pattern_id,
+ new_pattern.pattern,
+ new_pattern.mask,
+ new_pattern.pattern_len,
+ new_pattern.pkt_offset);
+ if (ret) {
+ ath10k_warn(ar, "failed to add pattern %i to vdev %i: %d\n",
+ pattern_id,
+ arvif->vdev_id, ret);
+ return ret;
+ }
+
+ pattern_id++;
+ __set_bit(WOW_PATTERN_MATCH_EVENT, &wow_mask);
+ }
+
+ for (i = 0; i < WOW_EVENT_MAX; i++) {
+ if (!test_bit(i, &wow_mask))
+ continue;
+ ret = ath10k_wmi_wow_add_wakeup_event(ar, arvif->vdev_id, i, 1);
+ if (ret) {
+ ath10k_warn(ar, "failed to enable wakeup event %s on vdev %i: %d\n",
+ wow_wakeup_event(i), arvif->vdev_id, ret);
+ return ret;
+ }
+ }
+
+ return 0;
+}
+
+static int ath10k_wow_set_wakeups(struct ath10k *ar,
+ struct cfg80211_wowlan *wowlan)
+{
+ struct ath10k_vif *arvif;
+ int ret;
+
+ lockdep_assert_held(&ar->conf_mutex);
+
+ list_for_each_entry(arvif, &ar->arvifs, list) {
+ ret = ath10k_vif_wow_set_wakeups(arvif, wowlan);
+ if (ret) {
+ ath10k_warn(ar, "failed to set wow wakeups on vdev %i: %d\n",
+ arvif->vdev_id, ret);
+ return ret;
+ }
+ }
+
+ return 0;
+}
+
+static int ath10k_wow_enable(struct ath10k *ar)
+{
+ int ret;
+
+ lockdep_assert_held(&ar->conf_mutex);
+
+ reinit_completion(&ar->target_suspend);
+
+ ret = ath10k_wmi_wow_enable(ar);
+ if (ret) {
+ ath10k_warn(ar, "failed to issue wow enable: %d\n", ret);
+ return ret;
+ }
+
+ ret = wait_for_completion_timeout(&ar->target_suspend, 3 * HZ);
+ if (ret == 0) {
+ ath10k_warn(ar, "timed out while waiting for suspend completion\n");
+ return -ETIMEDOUT;
+ }
+
+ return 0;
+}
+
+static int ath10k_wow_wakeup(struct ath10k *ar)
+{
+ int ret;
+
+ lockdep_assert_held(&ar->conf_mutex);
+
+ reinit_completion(&ar->wow.wakeup_completed);
+
+ ret = ath10k_wmi_wow_host_wakeup_ind(ar);
+ if (ret) {
+ ath10k_warn(ar, "failed to send wow wakeup indication: %d\n",
+ ret);
+ return ret;
+ }
+
+ ret = wait_for_completion_timeout(&ar->wow.wakeup_completed, 3 * HZ);
+ if (ret == 0) {
+ ath10k_warn(ar, "timed out while waiting for wow wakeup completion\n");
+ return -ETIMEDOUT;
+ }
+
+ return 0;
+}
+
+int ath10k_wow_op_suspend(struct ieee80211_hw *hw,
+ struct cfg80211_wowlan *wowlan)
+{
+ struct ath10k *ar = hw->priv;
+ int ret;
+
+ mutex_lock(&ar->conf_mutex);
+
+ if (WARN_ON(!test_bit(ATH10K_FW_FEATURE_WOWLAN_SUPPORT,
+ ar->running_fw->fw_file.fw_features))) {
+ ret = 1;
+ goto exit;
+ }
+
+ ret = ath10k_wow_cleanup(ar);
+ if (ret) {
+ ath10k_warn(ar, "failed to clear wow wakeup events: %d\n",
+ ret);
+ goto exit;
+ }
+
+ ret = ath10k_wow_set_wakeups(ar, wowlan);
+ if (ret) {
+ ath10k_warn(ar, "failed to set wow wakeup events: %d\n",
+ ret);
+ goto cleanup;
+ }
+
+ ret = ath10k_wow_enable(ar);
+ if (ret) {
+ ath10k_warn(ar, "failed to start wow: %d\n", ret);
+ goto cleanup;
+ }
+
+ ret = ath10k_hif_suspend(ar);
+ if (ret) {
+ ath10k_warn(ar, "failed to suspend hif: %d\n", ret);
+ goto wakeup;
+ }
+
+ goto exit;
+
+wakeup:
+ ath10k_wow_wakeup(ar);
+
+cleanup:
+ ath10k_wow_cleanup(ar);
+
+exit:
+ mutex_unlock(&ar->conf_mutex);
+ return ret ? 1 : 0;
+}
+
+void ath10k_wow_op_set_wakeup(struct ieee80211_hw *hw, bool enabled)
+{
+ struct ath10k *ar = hw->priv;
+
+ mutex_lock(&ar->conf_mutex);
+ if (test_bit(ATH10K_FW_FEATURE_WOWLAN_SUPPORT,
+ ar->running_fw->fw_file.fw_features)) {
+ device_set_wakeup_enable(ar->dev, enabled);
+ }
+ mutex_unlock(&ar->conf_mutex);
+}
+
+int ath10k_wow_op_resume(struct ieee80211_hw *hw)
+{
+ struct ath10k *ar = hw->priv;
+ int ret;
+
+ mutex_lock(&ar->conf_mutex);
+
+ if (WARN_ON(!test_bit(ATH10K_FW_FEATURE_WOWLAN_SUPPORT,
+ ar->running_fw->fw_file.fw_features))) {
+ ret = 1;
+ goto exit;
+ }
+
+ ret = ath10k_hif_resume(ar);
+ if (ret) {
+ ath10k_warn(ar, "failed to resume hif: %d\n", ret);
+ goto exit;
+ }
+
+ ret = ath10k_wow_wakeup(ar);
+ if (ret)
+ ath10k_warn(ar, "failed to wakeup from wow: %d\n", ret);
+
+exit:
+ if (ret) {
+ switch (ar->state) {
+ case ATH10K_STATE_ON:
+ ar->state = ATH10K_STATE_RESTARTING;
+ ret = 1;
+ break;
+ case ATH10K_STATE_OFF:
+ case ATH10K_STATE_RESTARTING:
+ case ATH10K_STATE_RESTARTED:
+ case ATH10K_STATE_UTF:
+ case ATH10K_STATE_WEDGED:
+ ath10k_warn(ar, "encountered unexpected device state %d on resume, cannot recover\n",
+ ar->state);
+ ret = -EIO;
+ break;
+ }
+ }
+
+ mutex_unlock(&ar->conf_mutex);
+ return ret;
+}
+
+int ath10k_wow_init(struct ath10k *ar)
+{
+ if (!test_bit(ATH10K_FW_FEATURE_WOWLAN_SUPPORT,
+ ar->running_fw->fw_file.fw_features))
+ return 0;
+
+ if (WARN_ON(!test_bit(WMI_SERVICE_WOW, ar->wmi.svc_map)))
+ return -EINVAL;
+
+ ar->wow.wowlan_support = ath10k_wowlan_support;
+
+ if (ar->wmi.rx_decap_mode == ATH10K_HW_TXRX_NATIVE_WIFI) {
+ ar->wow.wowlan_support.pattern_max_len -= WOW_MAX_REDUCE;
+ ar->wow.wowlan_support.max_pkt_offset -= WOW_MAX_REDUCE;
+ }
+
+ ar->wow.wowlan_support.n_patterns = ar->wow.max_num_patterns;
+ ar->hw->wiphy->wowlan = &ar->wow.wowlan_support;
+
+ device_set_wakeup_capable(ar->dev, true);
+
+ return 0;
+}
diff --git a/drivers/net/wireless/ath/ath10k/wow.h b/drivers/net/wireless/ath/ath10k/wow.h
new file mode 100644
index 000000000..6e810105b
--- /dev/null
+++ b/drivers/net/wireless/ath/ath10k/wow.h
@@ -0,0 +1,41 @@
+/*
+ * Copyright (c) 2015,2017 Qualcomm Atheros, Inc.
+ *
+ * Permission to use, copy, modify, and/or distribute this software for any
+ * purpose with or without fee is hereby granted, provided that the above
+ * copyright notice and this permission notice appear in all copies.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
+ * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
+ * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+ * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+ * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
+ * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+ */
+#ifndef _WOW_H_
+#define _WOW_H_
+
+struct ath10k_wow {
+ u32 max_num_patterns;
+ struct completion wakeup_completed;
+ struct wiphy_wowlan_support wowlan_support;
+};
+
+#ifdef CONFIG_PM
+
+int ath10k_wow_init(struct ath10k *ar);
+int ath10k_wow_op_suspend(struct ieee80211_hw *hw,
+ struct cfg80211_wowlan *wowlan);
+int ath10k_wow_op_resume(struct ieee80211_hw *hw);
+void ath10k_wow_op_set_wakeup(struct ieee80211_hw *hw, bool enabled);
+
+#else
+
+static inline int ath10k_wow_init(struct ath10k *ar)
+{
+ return 0;
+}
+
+#endif /* CONFIG_PM */
+#endif /* _WOW_H_ */