path: root/drivers/net/wireless/ath/ath11k
author    Daniel Baumann <daniel.baumann@progress-linux.org>  2024-04-11 08:27:49 +0000
committer Daniel Baumann <daniel.baumann@progress-linux.org>  2024-04-11 08:27:49 +0000
commit    ace9429bb58fd418f0c81d4c2835699bddf6bde6 (patch)
tree      b2d64bc10158fdd5497876388cd68142ca374ed3 /drivers/net/wireless/ath/ath11k
parent    Initial commit. (diff)
Adding upstream version 6.6.15. (tag: upstream/6.6.15)
Signed-off-by: Daniel Baumann <daniel.baumann@progress-linux.org>
Diffstat (limited to 'drivers/net/wireless/ath/ath11k')
-rw-r--r--  drivers/net/wireless/ath/ath11k/Kconfig              |    59
-rw-r--r--  drivers/net/wireless/ath/ath11k/Makefile             |    36
-rw-r--r--  drivers/net/wireless/ath/ath11k/ahb.c                |  1312
-rw-r--r--  drivers/net/wireless/ath/ath11k/ahb.h                |    46
-rw-r--r--  drivers/net/wireless/ath/ath11k/ce.c                 |  1081
-rw-r--r--  drivers/net/wireless/ath/ath11k/ce.h                 |   210
-rw-r--r--  drivers/net/wireless/ath/ath11k/core.c               |  2102
-rw-r--r--  drivers/net/wireless/ath/ath11k/core.h               |  1254
-rw-r--r--  drivers/net/wireless/ath/ath11k/dbring.c             |   399
-rw-r--r--  drivers/net/wireless/ath/ath11k/dbring.h             |    81
-rw-r--r--  drivers/net/wireless/ath/ath11k/debug.c              |   110
-rw-r--r--  drivers/net/wireless/ath/ath11k/debug.h              |   113
-rw-r--r--  drivers/net/wireless/ath/ath11k/debugfs.c            |  1927
-rw-r--r--  drivers/net/wireless/ath/ath11k/debugfs.h            |   406
-rw-r--r--  drivers/net/wireless/ath/ath11k/debugfs_htt_stats.c  |  4904
-rw-r--r--  drivers/net/wireless/ath/ath11k/debugfs_htt_stats.h  |  2045
-rw-r--r--  drivers/net/wireless/ath/ath11k/debugfs_sta.c        |   888
-rw-r--r--  drivers/net/wireless/ath/ath11k/debugfs_sta.h        |    42
-rw-r--r--  drivers/net/wireless/ath/ath11k/dp.c                 |  1190
-rw-r--r--  drivers/net/wireless/ath/ath11k/dp.h                 |  1716
-rw-r--r--  drivers/net/wireless/ath/ath11k/dp_rx.c              |  5745
-rw-r--r--  drivers/net/wireless/ath/ath11k/dp_rx.h              |    98
-rw-r--r--  drivers/net/wireless/ath/ath11k/dp_tx.c              |  1297
-rw-r--r--  drivers/net/wireless/ath/ath11k/dp_tx.h              |    44
-rw-r--r--  drivers/net/wireless/ath/ath11k/hal.c                |  1380
-rw-r--r--  drivers/net/wireless/ath/ath11k/hal.h                |   974
-rw-r--r--  drivers/net/wireless/ath/ath11k/hal_desc.h           |  2493
-rw-r--r--  drivers/net/wireless/ath/ath11k/hal_rx.c             |  1611
-rw-r--r--  drivers/net/wireless/ath/ath11k/hal_rx.h             |   513
-rw-r--r--  drivers/net/wireless/ath/ath11k/hal_tx.c             |   160
-rw-r--r--  drivers/net/wireless/ath/ath11k/hal_tx.h             |    74
-rw-r--r--  drivers/net/wireless/ath/ath11k/hif.h                |   148
-rw-r--r--  drivers/net/wireless/ath/ath11k/htc.c                |   844
-rw-r--r--  drivers/net/wireless/ath/ath11k/htc.h                |   312
-rw-r--r--  drivers/net/wireless/ath/ath11k/hw.c                 |  2858
-rw-r--r--  drivers/net/wireless/ath/ath11k/hw.h                 |   437
-rw-r--r--  drivers/net/wireless/ath/ath11k/mac.c                |  9838
-rw-r--r--  drivers/net/wireless/ath/ath11k/mac.h                |   178
-rw-r--r--  drivers/net/wireless/ath/ath11k/mhi.c                |   527
-rw-r--r--  drivers/net/wireless/ath/ath11k/mhi.h                |    29
-rw-r--r--  drivers/net/wireless/ath/ath11k/pci.c                |  1048
-rw-r--r--  drivers/net/wireless/ath/ath11k/pci.h                |    83
-rw-r--r--  drivers/net/wireless/ath/ath11k/pcic.c               |   819
-rw-r--r--  drivers/net/wireless/ath/ath11k/pcic.h               |    54
-rw-r--r--  drivers/net/wireless/ath/ath11k/peer.c               |   670
-rw-r--r--  drivers/net/wireless/ath/ath11k/peer.h               |    60
-rw-r--r--  drivers/net/wireless/ath/ath11k/qmi.c                |  3335
-rw-r--r--  drivers/net/wireless/ath/ath11k/qmi.h                |   522
-rw-r--r--  drivers/net/wireless/ath/ath11k/reg.c                |   780
-rw-r--r--  drivers/net/wireless/ath/ath11k/reg.h                |    36
-rw-r--r--  drivers/net/wireless/ath/ath11k/rx_desc.h            |  1505
-rw-r--r--  drivers/net/wireless/ath/ath11k/spectral.c           |  1064
-rw-r--r--  drivers/net/wireless/ath/ath11k/spectral.h           |    83
-rw-r--r--  drivers/net/wireless/ath/ath11k/testmode.c           |   502
-rw-r--r--  drivers/net/wireless/ath/ath11k/testmode.h           |    29
-rw-r--r--  drivers/net/wireless/ath/ath11k/testmode_i.h         |    66
-rw-r--r--  drivers/net/wireless/ath/ath11k/thermal.c            |   226
-rw-r--r--  drivers/net/wireless/ath/ath11k/thermal.h            |    54
-rw-r--r--  drivers/net/wireless/ath/ath11k/trace.c              |    10
-rw-r--r--  drivers/net/wireless/ath/ath11k/trace.h              |   345
-rw-r--r--  drivers/net/wireless/ath/ath11k/wmi.c                |  9796
-rw-r--r--  drivers/net/wireless/ath/ath11k/wmi.h                |  6508
-rw-r--r--  drivers/net/wireless/ath/ath11k/wow.c                |   878
-rw-r--r--  drivers/net/wireless/ath/ath11k/wow.h                |    55
64 files changed, 78009 insertions(+), 0 deletions(-)
diff --git a/drivers/net/wireless/ath/ath11k/Kconfig b/drivers/net/wireless/ath/ath11k/Kconfig
new file mode 100644
index 0000000000..ad5cc6cac0
--- /dev/null
+++ b/drivers/net/wireless/ath/ath11k/Kconfig
@@ -0,0 +1,59 @@
+# SPDX-License-Identifier: BSD-3-Clause-Clear
+config ATH11K
+ tristate "Qualcomm Technologies 802.11ax chipset support"
+ depends on MAC80211 && HAS_DMA
+ depends on CRYPTO_MICHAEL_MIC
+ select ATH_COMMON
+ select QCOM_QMI_HELPERS
+ help
+ This module adds support for the Qualcomm Technologies 802.11ax family of
+ chipsets.
+
+ If you choose to build a module, it'll be called ath11k.
+
+config ATH11K_AHB
+ tristate "Atheros ath11k AHB support"
+ depends on ATH11K
+ depends on REMOTEPROC
+ help
+ This module adds support for the AHB bus.
+
+config ATH11K_PCI
+ tristate "Atheros ath11k PCI support"
+ depends on ATH11K && PCI
+ select MHI_BUS
+ select QRTR
+ select QRTR_MHI
+ help
+ This module adds support for the PCIe bus.
+
+config ATH11K_DEBUG
+ bool "QCA ath11k debugging"
+ depends on ATH11K
+ help
+ Enables debug support.
+
+ If unsure, say Y to make it easier to debug problems.
+
+config ATH11K_DEBUGFS
+ bool "QCA ath11k debugfs support"
+ depends on ATH11K && DEBUG_FS && MAC80211_DEBUGFS
+ help
+ Enable ath11k debugfs support.
+
+ If unsure, say Y to make it easier to debug problems.
+
+config ATH11K_TRACING
+ bool "ath11k tracing support"
+ depends on ATH11K && EVENT_TRACING
+ help
+ Select this to use ath11k tracing infrastructure.
+
+config ATH11K_SPECTRAL
+ bool "QCA ath11k spectral scan support"
+ depends on ATH11K_DEBUGFS
+ depends on RELAY
+ help
+ Enable ath11k spectral scan support.
+
+ Say Y to enable access to the FFT/spectral data via debugfs.
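+
+# Example (illustrative only, not part of this commit): a .config fragment
+# that builds the driver and its PCI bus glue as modules, using only the
+# symbols defined above:
+#   CONFIG_ATH11K=m
+#   CONFIG_ATH11K_PCI=m
+#   CONFIG_ATH11K_DEBUGFS=y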
diff --git a/drivers/net/wireless/ath/ath11k/Makefile b/drivers/net/wireless/ath/ath11k/Makefile
new file mode 100644
index 0000000000..cc47e01145
--- /dev/null
+++ b/drivers/net/wireless/ath/ath11k/Makefile
@@ -0,0 +1,36 @@
+# SPDX-License-Identifier: BSD-3-Clause-Clear
+obj-$(CONFIG_ATH11K) += ath11k.o
+ath11k-y += core.o \
+ hal.o \
+ hal_tx.o \
+ hal_rx.o \
+ wmi.o \
+ mac.o \
+ reg.o \
+ htc.o \
+ qmi.o \
+ dp.o \
+ dp_tx.o \
+ dp_rx.o \
+ debug.o \
+ ce.o \
+ peer.o \
+ dbring.o \
+ hw.o \
+ pcic.o
+
+ath11k-$(CONFIG_ATH11K_DEBUGFS) += debugfs.o debugfs_htt_stats.o debugfs_sta.o
+ath11k-$(CONFIG_NL80211_TESTMODE) += testmode.o
+ath11k-$(CONFIG_ATH11K_TRACING) += trace.o
+ath11k-$(CONFIG_THERMAL) += thermal.o
+ath11k-$(CONFIG_ATH11K_SPECTRAL) += spectral.o
+ath11k-$(CONFIG_PM) += wow.o
+
+obj-$(CONFIG_ATH11K_AHB) += ath11k_ahb.o
+ath11k_ahb-y += ahb.o
+
+obj-$(CONFIG_ATH11K_PCI) += ath11k_pci.o
+ath11k_pci-y += mhi.o pci.o
+
+# for tracing framework to find trace.h
+CFLAGS_trace.o := -I$(src)
diff --git a/drivers/net/wireless/ath/ath11k/ahb.c b/drivers/net/wireless/ath/ath11k/ahb.c
new file mode 100644
index 0000000000..ef11c138bf
--- /dev/null
+++ b/drivers/net/wireless/ath/ath11k/ahb.c
@@ -0,0 +1,1312 @@
+// SPDX-License-Identifier: BSD-3-Clause-Clear
+/*
+ * Copyright (c) 2018-2019 The Linux Foundation. All rights reserved.
+ * Copyright (c) 2022-2023 Qualcomm Innovation Center, Inc. All rights reserved.
+ */
+
+#include <linux/module.h>
+#include <linux/platform_device.h>
+#include <linux/of_device.h>
+#include <linux/of.h>
+#include <linux/dma-mapping.h>
+#include <linux/of_address.h>
+#include <linux/iommu.h>
+#include "ahb.h"
+#include "debug.h"
+#include "hif.h"
+#include "qmi.h"
+#include <linux/remoteproc.h>
+#include "pcic.h"
+#include <linux/soc/qcom/smem.h>
+#include <linux/soc/qcom/smem_state.h>
+
+static const struct of_device_id ath11k_ahb_of_match[] = {
+ /* TODO: Should we change the compatible string to something similar
+ * to one that ath10k uses?
+ */
+ { .compatible = "qcom,ipq8074-wifi",
+ .data = (void *)ATH11K_HW_IPQ8074,
+ },
+ { .compatible = "qcom,ipq6018-wifi",
+ .data = (void *)ATH11K_HW_IPQ6018_HW10,
+ },
+ { .compatible = "qcom,wcn6750-wifi",
+ .data = (void *)ATH11K_HW_WCN6750_HW10,
+ },
+ { .compatible = "qcom,ipq5018-wifi",
+ .data = (void *)ATH11K_HW_IPQ5018_HW10,
+ },
+ { }
+};
+
+MODULE_DEVICE_TABLE(of, ath11k_ahb_of_match);
+
+#define ATH11K_IRQ_CE0_OFFSET 4
+
+static const char *irq_name[ATH11K_IRQ_NUM_MAX] = {
+ "misc-pulse1",
+ "misc-latch",
+ "sw-exception",
+ "watchdog",
+ "ce0",
+ "ce1",
+ "ce2",
+ "ce3",
+ "ce4",
+ "ce5",
+ "ce6",
+ "ce7",
+ "ce8",
+ "ce9",
+ "ce10",
+ "ce11",
+ "host2wbm-desc-feed",
+ "host2reo-re-injection",
+ "host2reo-command",
+ "host2rxdma-monitor-ring3",
+ "host2rxdma-monitor-ring2",
+ "host2rxdma-monitor-ring1",
+ "reo2ost-exception",
+ "wbm2host-rx-release",
+ "reo2host-status",
+ "reo2host-destination-ring4",
+ "reo2host-destination-ring3",
+ "reo2host-destination-ring2",
+ "reo2host-destination-ring1",
+ "rxdma2host-monitor-destination-mac3",
+ "rxdma2host-monitor-destination-mac2",
+ "rxdma2host-monitor-destination-mac1",
+ "ppdu-end-interrupts-mac3",
+ "ppdu-end-interrupts-mac2",
+ "ppdu-end-interrupts-mac1",
+ "rxdma2host-monitor-status-ring-mac3",
+ "rxdma2host-monitor-status-ring-mac2",
+ "rxdma2host-monitor-status-ring-mac1",
+ "host2rxdma-host-buf-ring-mac3",
+ "host2rxdma-host-buf-ring-mac2",
+ "host2rxdma-host-buf-ring-mac1",
+ "rxdma2host-destination-ring-mac3",
+ "rxdma2host-destination-ring-mac2",
+ "rxdma2host-destination-ring-mac1",
+ "host2tcl-input-ring4",
+ "host2tcl-input-ring3",
+ "host2tcl-input-ring2",
+ "host2tcl-input-ring1",
+ "wbm2host-tx-completions-ring3",
+ "wbm2host-tx-completions-ring2",
+ "wbm2host-tx-completions-ring1",
+ "tcl2host-status-ring",
+};
+
+/* enum ext_irq_num - irq numbers that can be used by external modules
+ * like datapath
+ */
+enum ext_irq_num {
+ host2wbm_desc_feed = 16,
+ host2reo_re_injection,
+ host2reo_command,
+ host2rxdma_monitor_ring3,
+ host2rxdma_monitor_ring2,
+ host2rxdma_monitor_ring1,
+ reo2host_exception,
+ wbm2host_rx_release,
+ reo2host_status,
+ reo2host_destination_ring4,
+ reo2host_destination_ring3,
+ reo2host_destination_ring2,
+ reo2host_destination_ring1,
+ rxdma2host_monitor_destination_mac3,
+ rxdma2host_monitor_destination_mac2,
+ rxdma2host_monitor_destination_mac1,
+ ppdu_end_interrupts_mac3,
+ ppdu_end_interrupts_mac2,
+ ppdu_end_interrupts_mac1,
+ rxdma2host_monitor_status_ring_mac3,
+ rxdma2host_monitor_status_ring_mac2,
+ rxdma2host_monitor_status_ring_mac1,
+ host2rxdma_host_buf_ring_mac3,
+ host2rxdma_host_buf_ring_mac2,
+ host2rxdma_host_buf_ring_mac1,
+ rxdma2host_destination_ring_mac3,
+ rxdma2host_destination_ring_mac2,
+ rxdma2host_destination_ring_mac1,
+ host2tcl_input_ring4,
+ host2tcl_input_ring3,
+ host2tcl_input_ring2,
+ host2tcl_input_ring1,
+ wbm2host_tx_completions_ring3,
+ wbm2host_tx_completions_ring2,
+ wbm2host_tx_completions_ring1,
+ tcl2host_status_ring,
+};
+
+static int
+ath11k_ahb_get_msi_irq_wcn6750(struct ath11k_base *ab, unsigned int vector)
+{
+ return ab->pci.msi.irqs[vector];
+}
+
+static inline u32
+ath11k_ahb_get_window_start_wcn6750(struct ath11k_base *ab, u32 offset)
+{
+ u32 window_start = 0;
+
+ /* If offset lies within DP register range, use 1st window */
+ if ((offset ^ HAL_SEQ_WCSS_UMAC_OFFSET) < ATH11K_PCI_WINDOW_RANGE_MASK)
+ window_start = ATH11K_PCI_WINDOW_START;
+ /* If offset lies within CE register range, use 2nd window */
+ else if ((offset ^ HAL_SEQ_WCSS_UMAC_CE0_SRC_REG(ab)) <
+ ATH11K_PCI_WINDOW_RANGE_MASK)
+ window_start = 2 * ATH11K_PCI_WINDOW_START;
+
+ return window_start;
+}
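+
+/* Illustrative note (not part of the driver): the XOR test above assumes
+ * each register window is a naturally aligned block of size
+ * ATH11K_PCI_WINDOW_RANGE_MASK + 1. For an aligned base, offset ^ base
+ * clears the shared high bits exactly when offset lies inside the block,
+ * e.g. base 0x00a00000 and offset 0x00a01234 give 0x00001234, below the
+ * mask, while any offset outside the window keeps a high bit set and
+ * fails the compare.
+ */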
+
+static void
+ath11k_ahb_window_write32_wcn6750(struct ath11k_base *ab, u32 offset, u32 value)
+{
+ u32 window_start;
+
+ /* WCN6750 uses static window based register access */
+ window_start = ath11k_ahb_get_window_start_wcn6750(ab, offset);
+
+ iowrite32(value, ab->mem + window_start +
+ (offset & ATH11K_PCI_WINDOW_RANGE_MASK));
+}
+
+static u32 ath11k_ahb_window_read32_wcn6750(struct ath11k_base *ab, u32 offset)
+{
+ u32 window_start;
+ u32 val;
+
+ /* WCN6750 uses static window based register access */
+ window_start = ath11k_ahb_get_window_start_wcn6750(ab, offset);
+
+ val = ioread32(ab->mem + window_start +
+ (offset & ATH11K_PCI_WINDOW_RANGE_MASK));
+ return val;
+}
+
+static const struct ath11k_pci_ops ath11k_ahb_pci_ops_wcn6750 = {
+ .wakeup = NULL,
+ .release = NULL,
+ .get_msi_irq = ath11k_ahb_get_msi_irq_wcn6750,
+ .window_write32 = ath11k_ahb_window_write32_wcn6750,
+ .window_read32 = ath11k_ahb_window_read32_wcn6750,
+};
+
+static inline u32 ath11k_ahb_read32(struct ath11k_base *ab, u32 offset)
+{
+ return ioread32(ab->mem + offset);
+}
+
+static inline void ath11k_ahb_write32(struct ath11k_base *ab, u32 offset, u32 value)
+{
+ iowrite32(value, ab->mem + offset);
+}
+
+static void ath11k_ahb_kill_tasklets(struct ath11k_base *ab)
+{
+ int i;
+
+ for (i = 0; i < ab->hw_params.ce_count; i++) {
+ struct ath11k_ce_pipe *ce_pipe = &ab->ce.ce_pipe[i];
+
+ if (ath11k_ce_get_attr_flags(ab, i) & CE_ATTR_DIS_INTR)
+ continue;
+
+ tasklet_kill(&ce_pipe->intr_tq);
+ }
+}
+
+static void ath11k_ahb_ext_grp_disable(struct ath11k_ext_irq_grp *irq_grp)
+{
+ int i;
+
+ for (i = 0; i < irq_grp->num_irq; i++)
+ disable_irq_nosync(irq_grp->ab->irq_num[irq_grp->irqs[i]]);
+}
+
+static void __ath11k_ahb_ext_irq_disable(struct ath11k_base *ab)
+{
+ int i;
+
+ for (i = 0; i < ATH11K_EXT_IRQ_GRP_NUM_MAX; i++) {
+ struct ath11k_ext_irq_grp *irq_grp = &ab->ext_irq_grp[i];
+
+ ath11k_ahb_ext_grp_disable(irq_grp);
+
+ if (irq_grp->napi_enabled) {
+ napi_synchronize(&irq_grp->napi);
+ napi_disable(&irq_grp->napi);
+ irq_grp->napi_enabled = false;
+ }
+ }
+}
+
+static void ath11k_ahb_ext_grp_enable(struct ath11k_ext_irq_grp *irq_grp)
+{
+ int i;
+
+ for (i = 0; i < irq_grp->num_irq; i++)
+ enable_irq(irq_grp->ab->irq_num[irq_grp->irqs[i]]);
+}
+
+static void ath11k_ahb_setbit32(struct ath11k_base *ab, u8 bit, u32 offset)
+{
+ u32 val;
+
+ val = ath11k_ahb_read32(ab, offset);
+ ath11k_ahb_write32(ab, offset, val | BIT(bit));
+}
+
+static void ath11k_ahb_clearbit32(struct ath11k_base *ab, u8 bit, u32 offset)
+{
+ u32 val;
+
+ val = ath11k_ahb_read32(ab, offset);
+ ath11k_ahb_write32(ab, offset, val & ~BIT(bit));
+}
+
+static void ath11k_ahb_ce_irq_enable(struct ath11k_base *ab, u16 ce_id)
+{
+ const struct ce_attr *ce_attr;
+ const struct ce_ie_addr *ce_ie_addr = ab->hw_params.ce_ie_addr;
+ u32 ie1_reg_addr, ie2_reg_addr, ie3_reg_addr;
+
+ ie1_reg_addr = ce_ie_addr->ie1_reg_addr + ATH11K_CE_OFFSET(ab);
+ ie2_reg_addr = ce_ie_addr->ie2_reg_addr + ATH11K_CE_OFFSET(ab);
+ ie3_reg_addr = ce_ie_addr->ie3_reg_addr + ATH11K_CE_OFFSET(ab);
+
+ ce_attr = &ab->hw_params.host_ce_config[ce_id];
+ if (ce_attr->src_nentries)
+ ath11k_ahb_setbit32(ab, ce_id, ie1_reg_addr);
+
+ if (ce_attr->dest_nentries) {
+ ath11k_ahb_setbit32(ab, ce_id, ie2_reg_addr);
+ ath11k_ahb_setbit32(ab, ce_id + CE_HOST_IE_3_SHIFT,
+ ie3_reg_addr);
+ }
+}
+
+static void ath11k_ahb_ce_irq_disable(struct ath11k_base *ab, u16 ce_id)
+{
+ const struct ce_attr *ce_attr;
+ const struct ce_ie_addr *ce_ie_addr = ab->hw_params.ce_ie_addr;
+ u32 ie1_reg_addr, ie2_reg_addr, ie3_reg_addr;
+
+ ie1_reg_addr = ce_ie_addr->ie1_reg_addr + ATH11K_CE_OFFSET(ab);
+ ie2_reg_addr = ce_ie_addr->ie2_reg_addr + ATH11K_CE_OFFSET(ab);
+ ie3_reg_addr = ce_ie_addr->ie3_reg_addr + ATH11K_CE_OFFSET(ab);
+
+ ce_attr = &ab->hw_params.host_ce_config[ce_id];
+ if (ce_attr->src_nentries)
+ ath11k_ahb_clearbit32(ab, ce_id, ie1_reg_addr);
+
+ if (ce_attr->dest_nentries) {
+ ath11k_ahb_clearbit32(ab, ce_id, ie2_reg_addr);
+ ath11k_ahb_clearbit32(ab, ce_id + CE_HOST_IE_3_SHIFT,
+ ie3_reg_addr);
+ }
+}
+
+static void ath11k_ahb_sync_ce_irqs(struct ath11k_base *ab)
+{
+ int i;
+ int irq_idx;
+
+ for (i = 0; i < ab->hw_params.ce_count; i++) {
+ if (ath11k_ce_get_attr_flags(ab, i) & CE_ATTR_DIS_INTR)
+ continue;
+
+ irq_idx = ATH11K_IRQ_CE0_OFFSET + i;
+ synchronize_irq(ab->irq_num[irq_idx]);
+ }
+}
+
+static void ath11k_ahb_sync_ext_irqs(struct ath11k_base *ab)
+{
+ int i, j;
+ int irq_idx;
+
+ for (i = 0; i < ATH11K_EXT_IRQ_GRP_NUM_MAX; i++) {
+ struct ath11k_ext_irq_grp *irq_grp = &ab->ext_irq_grp[i];
+
+ for (j = 0; j < irq_grp->num_irq; j++) {
+ irq_idx = irq_grp->irqs[j];
+ synchronize_irq(ab->irq_num[irq_idx]);
+ }
+ }
+}
+
+static void ath11k_ahb_ce_irqs_enable(struct ath11k_base *ab)
+{
+ int i;
+
+ for (i = 0; i < ab->hw_params.ce_count; i++) {
+ if (ath11k_ce_get_attr_flags(ab, i) & CE_ATTR_DIS_INTR)
+ continue;
+ ath11k_ahb_ce_irq_enable(ab, i);
+ }
+}
+
+static void ath11k_ahb_ce_irqs_disable(struct ath11k_base *ab)
+{
+ int i;
+
+ for (i = 0; i < ab->hw_params.ce_count; i++) {
+ if (ath11k_ce_get_attr_flags(ab, i) & CE_ATTR_DIS_INTR)
+ continue;
+ ath11k_ahb_ce_irq_disable(ab, i);
+ }
+}
+
+static int ath11k_ahb_start(struct ath11k_base *ab)
+{
+ ath11k_ahb_ce_irqs_enable(ab);
+ ath11k_ce_rx_post_buf(ab);
+
+ return 0;
+}
+
+static void ath11k_ahb_ext_irq_enable(struct ath11k_base *ab)
+{
+ int i;
+
+ for (i = 0; i < ATH11K_EXT_IRQ_GRP_NUM_MAX; i++) {
+ struct ath11k_ext_irq_grp *irq_grp = &ab->ext_irq_grp[i];
+
+ if (!irq_grp->napi_enabled) {
+ napi_enable(&irq_grp->napi);
+ irq_grp->napi_enabled = true;
+ }
+ ath11k_ahb_ext_grp_enable(irq_grp);
+ }
+}
+
+static void ath11k_ahb_ext_irq_disable(struct ath11k_base *ab)
+{
+ __ath11k_ahb_ext_irq_disable(ab);
+ ath11k_ahb_sync_ext_irqs(ab);
+}
+
+static void ath11k_ahb_stop(struct ath11k_base *ab)
+{
+ if (!test_bit(ATH11K_FLAG_CRASH_FLUSH, &ab->dev_flags))
+ ath11k_ahb_ce_irqs_disable(ab);
+ ath11k_ahb_sync_ce_irqs(ab);
+ ath11k_ahb_kill_tasklets(ab);
+ del_timer_sync(&ab->rx_replenish_retry);
+ ath11k_ce_cleanup_pipes(ab);
+}
+
+static int ath11k_ahb_power_up(struct ath11k_base *ab)
+{
+ struct ath11k_ahb *ab_ahb = ath11k_ahb_priv(ab);
+ int ret;
+
+ ret = rproc_boot(ab_ahb->tgt_rproc);
+ if (ret)
+ ath11k_err(ab, "failed to boot the remote processor Q6\n");
+
+ return ret;
+}
+
+static void ath11k_ahb_power_down(struct ath11k_base *ab)
+{
+ struct ath11k_ahb *ab_ahb = ath11k_ahb_priv(ab);
+
+ rproc_shutdown(ab_ahb->tgt_rproc);
+}
+
+static void ath11k_ahb_init_qmi_ce_config(struct ath11k_base *ab)
+{
+ struct ath11k_qmi_ce_cfg *cfg = &ab->qmi.ce_cfg;
+
+ cfg->tgt_ce_len = ab->hw_params.target_ce_count;
+ cfg->tgt_ce = ab->hw_params.target_ce_config;
+ cfg->svc_to_ce_map_len = ab->hw_params.svc_to_ce_map_len;
+ cfg->svc_to_ce_map = ab->hw_params.svc_to_ce_map;
+ ab->qmi.service_ins_id = ab->hw_params.qmi_service_ins_id;
+}
+
+static void ath11k_ahb_free_ext_irq(struct ath11k_base *ab)
+{
+ int i, j;
+
+ for (i = 0; i < ATH11K_EXT_IRQ_GRP_NUM_MAX; i++) {
+ struct ath11k_ext_irq_grp *irq_grp = &ab->ext_irq_grp[i];
+
+ for (j = 0; j < irq_grp->num_irq; j++)
+ free_irq(ab->irq_num[irq_grp->irqs[j]], irq_grp);
+
+ netif_napi_del(&irq_grp->napi);
+ }
+}
+
+static void ath11k_ahb_free_irq(struct ath11k_base *ab)
+{
+ int irq_idx;
+ int i;
+
+ if (ab->hw_params.hybrid_bus_type)
+ return ath11k_pcic_free_irq(ab);
+
+ for (i = 0; i < ab->hw_params.ce_count; i++) {
+ if (ath11k_ce_get_attr_flags(ab, i) & CE_ATTR_DIS_INTR)
+ continue;
+ irq_idx = ATH11K_IRQ_CE0_OFFSET + i;
+ free_irq(ab->irq_num[irq_idx], &ab->ce.ce_pipe[i]);
+ }
+
+ ath11k_ahb_free_ext_irq(ab);
+}
+
+static void ath11k_ahb_ce_tasklet(struct tasklet_struct *t)
+{
+ struct ath11k_ce_pipe *ce_pipe = from_tasklet(ce_pipe, t, intr_tq);
+
+ ath11k_ce_per_engine_service(ce_pipe->ab, ce_pipe->pipe_num);
+
+ ath11k_ahb_ce_irq_enable(ce_pipe->ab, ce_pipe->pipe_num);
+}
+
+static irqreturn_t ath11k_ahb_ce_interrupt_handler(int irq, void *arg)
+{
+ struct ath11k_ce_pipe *ce_pipe = arg;
+
+ /* last interrupt received for this CE */
+ ce_pipe->timestamp = jiffies;
+
+ ath11k_ahb_ce_irq_disable(ce_pipe->ab, ce_pipe->pipe_num);
+
+ tasklet_schedule(&ce_pipe->intr_tq);
+
+ return IRQ_HANDLED;
+}
+
+static int ath11k_ahb_ext_grp_napi_poll(struct napi_struct *napi, int budget)
+{
+ struct ath11k_ext_irq_grp *irq_grp = container_of(napi,
+ struct ath11k_ext_irq_grp,
+ napi);
+ struct ath11k_base *ab = irq_grp->ab;
+ int work_done;
+
+ work_done = ath11k_dp_service_srng(ab, irq_grp, budget);
+ if (work_done < budget) {
+ napi_complete_done(napi, work_done);
+ ath11k_ahb_ext_grp_enable(irq_grp);
+ }
+
+ if (work_done > budget)
+ work_done = budget;
+
+ return work_done;
+}
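+
+/* Illustrative note (not part of the driver): the hard-irq handler below
+ * and the poll function above form the usual NAPI handshake: the handler
+ * masks the group's interrupts and schedules NAPI, the poll function
+ * services up to budget entries, and only when it consumes less than the
+ * budget does it call napi_complete_done() and unmask the interrupts again.
+ */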
+
+static irqreturn_t ath11k_ahb_ext_interrupt_handler(int irq, void *arg)
+{
+ struct ath11k_ext_irq_grp *irq_grp = arg;
+
+ /* last interrupt received for this group */
+ irq_grp->timestamp = jiffies;
+
+ ath11k_ahb_ext_grp_disable(irq_grp);
+
+ napi_schedule(&irq_grp->napi);
+
+ return IRQ_HANDLED;
+}
+
+static int ath11k_ahb_config_ext_irq(struct ath11k_base *ab)
+{
+ struct ath11k_hw_params *hw = &ab->hw_params;
+ int i, j;
+ int irq;
+ int ret;
+
+ for (i = 0; i < ATH11K_EXT_IRQ_GRP_NUM_MAX; i++) {
+ struct ath11k_ext_irq_grp *irq_grp = &ab->ext_irq_grp[i];
+ u32 num_irq = 0;
+
+ irq_grp->ab = ab;
+ irq_grp->grp_id = i;
+ init_dummy_netdev(&irq_grp->napi_ndev);
+ netif_napi_add(&irq_grp->napi_ndev, &irq_grp->napi,
+ ath11k_ahb_ext_grp_napi_poll);
+
+ for (j = 0; j < ATH11K_EXT_IRQ_NUM_MAX; j++) {
+ if (ab->hw_params.ring_mask->tx[i] & BIT(j)) {
+ irq_grp->irqs[num_irq++] =
+ wbm2host_tx_completions_ring1 - j;
+ }
+
+ if (ab->hw_params.ring_mask->rx[i] & BIT(j)) {
+ irq_grp->irqs[num_irq++] =
+ reo2host_destination_ring1 - j;
+ }
+
+ if (ab->hw_params.ring_mask->rx_err[i] & BIT(j))
+ irq_grp->irqs[num_irq++] = reo2host_exception;
+
+ if (ab->hw_params.ring_mask->rx_wbm_rel[i] & BIT(j))
+ irq_grp->irqs[num_irq++] = wbm2host_rx_release;
+
+ if (ab->hw_params.ring_mask->reo_status[i] & BIT(j))
+ irq_grp->irqs[num_irq++] = reo2host_status;
+
+ if (j < ab->hw_params.max_radios) {
+ if (ab->hw_params.ring_mask->rxdma2host[i] & BIT(j)) {
+ irq_grp->irqs[num_irq++] =
+ rxdma2host_destination_ring_mac1 -
+ ath11k_hw_get_mac_from_pdev_id(hw, j);
+ }
+
+ if (ab->hw_params.ring_mask->host2rxdma[i] & BIT(j)) {
+ irq_grp->irqs[num_irq++] =
+ host2rxdma_host_buf_ring_mac1 -
+ ath11k_hw_get_mac_from_pdev_id(hw, j);
+ }
+
+ if (ab->hw_params.ring_mask->rx_mon_status[i] & BIT(j)) {
+ irq_grp->irqs[num_irq++] =
+ ppdu_end_interrupts_mac1 -
+ ath11k_hw_get_mac_from_pdev_id(hw, j);
+ irq_grp->irqs[num_irq++] =
+ rxdma2host_monitor_status_ring_mac1 -
+ ath11k_hw_get_mac_from_pdev_id(hw, j);
+ }
+ }
+ }
+ irq_grp->num_irq = num_irq;
+
+ for (j = 0; j < irq_grp->num_irq; j++) {
+ int irq_idx = irq_grp->irqs[j];
+
+ irq = platform_get_irq_byname(ab->pdev,
+ irq_name[irq_idx]);
+ ab->irq_num[irq_idx] = irq;
+ irq_set_status_flags(irq, IRQ_NOAUTOEN | IRQ_DISABLE_UNLAZY);
+ ret = request_irq(irq, ath11k_ahb_ext_interrupt_handler,
+ IRQF_TRIGGER_RISING,
+ irq_name[irq_idx], irq_grp);
+ if (ret) {
+ ath11k_err(ab, "failed request_irq for %d\n",
+ irq);
+ }
+ }
+ }
+
+ return 0;
+}
+
+static int ath11k_ahb_config_irq(struct ath11k_base *ab)
+{
+ int irq, irq_idx, i;
+ int ret;
+
+ if (ab->hw_params.hybrid_bus_type)
+ return ath11k_pcic_config_irq(ab);
+
+ /* Configure CE irqs */
+ for (i = 0; i < ab->hw_params.ce_count; i++) {
+ struct ath11k_ce_pipe *ce_pipe = &ab->ce.ce_pipe[i];
+
+ if (ath11k_ce_get_attr_flags(ab, i) & CE_ATTR_DIS_INTR)
+ continue;
+
+ irq_idx = ATH11K_IRQ_CE0_OFFSET + i;
+
+ tasklet_setup(&ce_pipe->intr_tq, ath11k_ahb_ce_tasklet);
+ irq = platform_get_irq_byname(ab->pdev, irq_name[irq_idx]);
+ ret = request_irq(irq, ath11k_ahb_ce_interrupt_handler,
+ IRQF_TRIGGER_RISING, irq_name[irq_idx],
+ ce_pipe);
+ if (ret)
+ return ret;
+
+ ab->irq_num[irq_idx] = irq;
+ }
+
+ /* Configure external interrupts */
+ ret = ath11k_ahb_config_ext_irq(ab);
+
+ return ret;
+}
+
+static int ath11k_ahb_map_service_to_pipe(struct ath11k_base *ab, u16 service_id,
+ u8 *ul_pipe, u8 *dl_pipe)
+{
+ const struct service_to_pipe *entry;
+ bool ul_set = false, dl_set = false;
+ int i;
+
+ for (i = 0; i < ab->hw_params.svc_to_ce_map_len; i++) {
+ entry = &ab->hw_params.svc_to_ce_map[i];
+
+ if (__le32_to_cpu(entry->service_id) != service_id)
+ continue;
+
+ switch (__le32_to_cpu(entry->pipedir)) {
+ case PIPEDIR_NONE:
+ break;
+ case PIPEDIR_IN:
+ WARN_ON(dl_set);
+ *dl_pipe = __le32_to_cpu(entry->pipenum);
+ dl_set = true;
+ break;
+ case PIPEDIR_OUT:
+ WARN_ON(ul_set);
+ *ul_pipe = __le32_to_cpu(entry->pipenum);
+ ul_set = true;
+ break;
+ case PIPEDIR_INOUT:
+ WARN_ON(dl_set);
+ WARN_ON(ul_set);
+ *dl_pipe = __le32_to_cpu(entry->pipenum);
+ *ul_pipe = __le32_to_cpu(entry->pipenum);
+ dl_set = true;
+ ul_set = true;
+ break;
+ }
+ }
+
+ if (WARN_ON(!ul_set || !dl_set))
+ return -ENOENT;
+
+ return 0;
+}
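+
+/* Illustrative note (not part of the driver): as used above, PIPEDIR_IN
+ * denotes target->host traffic and fills the download (dl) pipe,
+ * PIPEDIR_OUT denotes host->target traffic and fills the upload (ul) pipe,
+ * and PIPEDIR_INOUT assigns the same CE number to both directions.
+ */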
+
+static int ath11k_ahb_hif_suspend(struct ath11k_base *ab)
+{
+ struct ath11k_ahb *ab_ahb = ath11k_ahb_priv(ab);
+ u32 wake_irq;
+ u32 value = 0;
+ int ret;
+
+ if (!device_may_wakeup(ab->dev))
+ return -EPERM;
+
+ wake_irq = ab->irq_num[ATH11K_PCI_IRQ_CE0_OFFSET + ATH11K_PCI_CE_WAKE_IRQ];
+
+ ret = enable_irq_wake(wake_irq);
+ if (ret) {
+ ath11k_err(ab, "failed to enable wakeup irq :%d\n", ret);
+ return ret;
+ }
+
+ value = u32_encode_bits(ab_ahb->smp2p_info.seq_no++,
+ ATH11K_AHB_SMP2P_SMEM_SEQ_NO);
+ value |= u32_encode_bits(ATH11K_AHB_POWER_SAVE_ENTER,
+ ATH11K_AHB_SMP2P_SMEM_MSG);
+
+ ret = qcom_smem_state_update_bits(ab_ahb->smp2p_info.smem_state,
+ ATH11K_AHB_SMP2P_SMEM_VALUE_MASK, value);
+ if (ret) {
+ ath11k_err(ab, "failed to send smp2p power save enter cmd :%d\n", ret);
+ return ret;
+ }
+
+ ath11k_dbg(ab, ATH11K_DBG_AHB, "device suspended\n");
+
+ return ret;
+}
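+
+/* Worked example (illustrative only): with the masks from ahb.h,
+ * ATH11K_AHB_SMP2P_SMEM_MSG = GENMASK(15, 0) and
+ * ATH11K_AHB_SMP2P_SMEM_SEQ_NO = GENMASK(31, 16), sequence number 5 and
+ * ATH11K_AHB_POWER_SAVE_ENTER (1) pack into
+ * value = (5 << 16) | 1 = 0x00050001: the message id occupies the low
+ * half-word and the sequence number the high half-word.
+ */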
+
+static int ath11k_ahb_hif_resume(struct ath11k_base *ab)
+{
+ struct ath11k_ahb *ab_ahb = ath11k_ahb_priv(ab);
+ u32 wake_irq;
+ u32 value = 0;
+ int ret;
+
+ if (!device_may_wakeup(ab->dev))
+ return -EPERM;
+
+ wake_irq = ab->irq_num[ATH11K_PCI_IRQ_CE0_OFFSET + ATH11K_PCI_CE_WAKE_IRQ];
+
+ ret = disable_irq_wake(wake_irq);
+ if (ret) {
+ ath11k_err(ab, "failed to disable wakeup irq: %d\n", ret);
+ return ret;
+ }
+
+ reinit_completion(&ab->wow.wakeup_completed);
+
+ value = u32_encode_bits(ab_ahb->smp2p_info.seq_no++,
+ ATH11K_AHB_SMP2P_SMEM_SEQ_NO);
+ value |= u32_encode_bits(ATH11K_AHB_POWER_SAVE_EXIT,
+ ATH11K_AHB_SMP2P_SMEM_MSG);
+
+ ret = qcom_smem_state_update_bits(ab_ahb->smp2p_info.smem_state,
+ ATH11K_AHB_SMP2P_SMEM_VALUE_MASK, value);
+ if (ret) {
+ ath11k_err(ab, "failed to send smp2p power save enter cmd :%d\n", ret);
+ return ret;
+ }
+
+ ret = wait_for_completion_timeout(&ab->wow.wakeup_completed, 3 * HZ);
+ if (ret == 0) {
+ ath11k_warn(ab, "timed out while waiting for wow wakeup completion\n");
+ return -ETIMEDOUT;
+ }
+
+ ath11k_dbg(ab, ATH11K_DBG_AHB, "device resumed\n");
+
+ return 0;
+}
+
+static const struct ath11k_hif_ops ath11k_ahb_hif_ops_ipq8074 = {
+ .start = ath11k_ahb_start,
+ .stop = ath11k_ahb_stop,
+ .read32 = ath11k_ahb_read32,
+ .write32 = ath11k_ahb_write32,
+ .read = NULL,
+ .irq_enable = ath11k_ahb_ext_irq_enable,
+ .irq_disable = ath11k_ahb_ext_irq_disable,
+ .map_service_to_pipe = ath11k_ahb_map_service_to_pipe,
+ .power_down = ath11k_ahb_power_down,
+ .power_up = ath11k_ahb_power_up,
+};
+
+static const struct ath11k_hif_ops ath11k_ahb_hif_ops_wcn6750 = {
+ .start = ath11k_pcic_start,
+ .stop = ath11k_pcic_stop,
+ .read32 = ath11k_pcic_read32,
+ .write32 = ath11k_pcic_write32,
+ .read = NULL,
+ .irq_enable = ath11k_pcic_ext_irq_enable,
+ .irq_disable = ath11k_pcic_ext_irq_disable,
+ .get_msi_address = ath11k_pcic_get_msi_address,
+ .get_user_msi_vector = ath11k_pcic_get_user_msi_assignment,
+ .map_service_to_pipe = ath11k_pcic_map_service_to_pipe,
+ .power_down = ath11k_ahb_power_down,
+ .power_up = ath11k_ahb_power_up,
+ .suspend = ath11k_ahb_hif_suspend,
+ .resume = ath11k_ahb_hif_resume,
+ .ce_irq_enable = ath11k_pci_enable_ce_irqs_except_wake_irq,
+ .ce_irq_disable = ath11k_pci_disable_ce_irqs_except_wake_irq,
+};
+
+static int ath11k_core_get_rproc(struct ath11k_base *ab)
+{
+ struct ath11k_ahb *ab_ahb = ath11k_ahb_priv(ab);
+ struct device *dev = ab->dev;
+ struct rproc *prproc;
+ phandle rproc_phandle;
+
+ if (of_property_read_u32(dev->of_node, "qcom,rproc", &rproc_phandle)) {
+ ath11k_err(ab, "failed to get q6_rproc handle\n");
+ return -ENOENT;
+ }
+
+ prproc = rproc_get_by_phandle(rproc_phandle);
+ if (!prproc) {
+ ath11k_dbg(ab, ATH11K_DBG_AHB, "failed to get rproc, deferring\n");
+ return -EPROBE_DEFER;
+ }
+ ab_ahb->tgt_rproc = prproc;
+
+ return 0;
+}
+
+static int ath11k_ahb_setup_msi_resources(struct ath11k_base *ab)
+{
+ struct platform_device *pdev = ab->pdev;
+ phys_addr_t msi_addr_pa;
+ dma_addr_t msi_addr_iova;
+ struct resource *res;
+ int int_prop;
+ int ret;
+ int i;
+
+ ret = ath11k_pcic_init_msi_config(ab);
+ if (ret) {
+ ath11k_err(ab, "failed to init msi config: %d\n", ret);
+ return ret;
+ }
+
+ res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ if (!res) {
+ ath11k_err(ab, "failed to fetch msi_addr\n");
+ return -ENOENT;
+ }
+
+ msi_addr_pa = res->start;
+ msi_addr_iova = dma_map_resource(ab->dev, msi_addr_pa, PAGE_SIZE,
+ DMA_FROM_DEVICE, 0);
+ if (dma_mapping_error(ab->dev, msi_addr_iova))
+ return -ENOMEM;
+
+ ab->pci.msi.addr_lo = lower_32_bits(msi_addr_iova);
+ ab->pci.msi.addr_hi = upper_32_bits(msi_addr_iova);
+
+ ret = of_property_read_u32_index(ab->dev->of_node, "interrupts", 1, &int_prop);
+ if (ret)
+ return ret;
+
+ ab->pci.msi.ep_base_data = int_prop + 32;
+
+ for (i = 0; i < ab->pci.msi.config->total_vectors; i++) {
+ ret = platform_get_irq(pdev, i);
+ if (ret < 0)
+ return ret;
+
+ ab->pci.msi.irqs[i] = ret;
+ }
+
+ set_bit(ATH11K_FLAG_MULTI_MSI_VECTORS, &ab->dev_flags);
+
+ return 0;
+}
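+
+/* Illustrative note (not part of the driver): the second cell of the
+ * devicetree "interrupts" property read above is a GIC SPI number, and
+ * SPIs occupy hardware IRQ IDs 32 and up, hence the "+ 32" when deriving
+ * the MSI endpoint base data.
+ */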
+
+static int ath11k_ahb_setup_smp2p_handle(struct ath11k_base *ab)
+{
+ struct ath11k_ahb *ab_ahb = ath11k_ahb_priv(ab);
+
+ if (!ab->hw_params.smp2p_wow_exit)
+ return 0;
+
+ ab_ahb->smp2p_info.smem_state = qcom_smem_state_get(ab->dev, "wlan-smp2p-out",
+ &ab_ahb->smp2p_info.smem_bit);
+ if (IS_ERR(ab_ahb->smp2p_info.smem_state)) {
+ ath11k_err(ab, "failed to fetch smem state: %ld\n",
+ PTR_ERR(ab_ahb->smp2p_info.smem_state));
+ return PTR_ERR(ab_ahb->smp2p_info.smem_state);
+ }
+
+ return 0;
+}
+
+static void ath11k_ahb_release_smp2p_handle(struct ath11k_base *ab)
+{
+ struct ath11k_ahb *ab_ahb = ath11k_ahb_priv(ab);
+
+ if (!ab->hw_params.smp2p_wow_exit)
+ return;
+
+ qcom_smem_state_put(ab_ahb->smp2p_info.smem_state);
+}
+
+static int ath11k_ahb_setup_resources(struct ath11k_base *ab)
+{
+ struct platform_device *pdev = ab->pdev;
+ struct resource *mem_res;
+ void __iomem *mem;
+
+ if (ab->hw_params.hybrid_bus_type)
+ return ath11k_ahb_setup_msi_resources(ab);
+
+ mem = devm_platform_get_and_ioremap_resource(pdev, 0, &mem_res);
+ if (IS_ERR(mem)) {
+ dev_err(&pdev->dev, "ioremap error\n");
+ return PTR_ERR(mem);
+ }
+
+ ab->mem = mem;
+ ab->mem_len = resource_size(mem_res);
+
+ return 0;
+}
+
+static int ath11k_ahb_setup_msa_resources(struct ath11k_base *ab)
+{
+ struct ath11k_ahb *ab_ahb = ath11k_ahb_priv(ab);
+ struct device *dev = ab->dev;
+ struct device_node *node;
+ struct resource r;
+ int ret;
+
+ node = of_parse_phandle(dev->of_node, "memory-region", 0);
+ if (!node)
+ return -ENOENT;
+
+ ret = of_address_to_resource(node, 0, &r);
+ of_node_put(node);
+ if (ret) {
+ dev_err(dev, "failed to resolve msa fixed region\n");
+ return ret;
+ }
+
+ ab_ahb->fw.msa_paddr = r.start;
+ ab_ahb->fw.msa_size = resource_size(&r);
+
+ node = of_parse_phandle(dev->of_node, "memory-region", 1);
+ if (!node)
+ return -ENOENT;
+
+ ret = of_address_to_resource(node, 0, &r);
+ of_node_put(node);
+ if (ret) {
+ dev_err(dev, "failed to resolve ce fixed region\n");
+ return ret;
+ }
+
+ ab_ahb->fw.ce_paddr = r.start;
+ ab_ahb->fw.ce_size = resource_size(&r);
+
+ return 0;
+}
+
+static int ath11k_ahb_fw_resources_init(struct ath11k_base *ab)
+{
+ struct ath11k_ahb *ab_ahb = ath11k_ahb_priv(ab);
+ struct device *host_dev = ab->dev;
+ struct platform_device_info info = {0};
+ struct iommu_domain *iommu_dom;
+ struct platform_device *pdev;
+ struct device_node *node;
+ int ret;
+
+ /* Chipsets not requiring MSA need not initialize
+ * MSA resources; return success in such cases.
+ */
+ if (!ab->hw_params.fixed_fw_mem)
+ return 0;
+
+ ret = ath11k_ahb_setup_msa_resources(ab);
+ if (ret) {
+ ath11k_err(ab, "failed to setup msa resources\n");
+ return ret;
+ }
+
+ node = of_get_child_by_name(host_dev->of_node, "wifi-firmware");
+ if (!node) {
+ ab_ahb->fw.use_tz = true;
+ return 0;
+ }
+
+ info.fwnode = &node->fwnode;
+ info.parent = host_dev;
+ info.name = node->name;
+ info.dma_mask = DMA_BIT_MASK(32);
+
+ pdev = platform_device_register_full(&info);
+ if (IS_ERR(pdev)) {
+ of_node_put(node);
+ return PTR_ERR(pdev);
+ }
+
+ ret = of_dma_configure(&pdev->dev, node, true);
+ if (ret) {
+ ath11k_err(ab, "dma configure fail: %d\n", ret);
+ goto err_unregister;
+ }
+
+ ab_ahb->fw.dev = &pdev->dev;
+
+ iommu_dom = iommu_domain_alloc(&platform_bus_type);
+ if (!iommu_dom) {
+ ath11k_err(ab, "failed to allocate iommu domain\n");
+ ret = -ENOMEM;
+ goto err_unregister;
+ }
+
+ ret = iommu_attach_device(iommu_dom, ab_ahb->fw.dev);
+ if (ret) {
+ ath11k_err(ab, "could not attach device: %d\n", ret);
+ goto err_iommu_free;
+ }
+
+ ret = iommu_map(iommu_dom, ab_ahb->fw.msa_paddr,
+ ab_ahb->fw.msa_paddr, ab_ahb->fw.msa_size,
+ IOMMU_READ | IOMMU_WRITE, GFP_KERNEL);
+ if (ret) {
+ ath11k_err(ab, "failed to map firmware region: %d\n", ret);
+ goto err_iommu_detach;
+ }
+
+ ret = iommu_map(iommu_dom, ab_ahb->fw.ce_paddr,
+ ab_ahb->fw.ce_paddr, ab_ahb->fw.ce_size,
+ IOMMU_READ | IOMMU_WRITE, GFP_KERNEL);
+ if (ret) {
+ ath11k_err(ab, "failed to map firmware CE region: %d\n", ret);
+ goto err_iommu_unmap;
+ }
+
+ ab_ahb->fw.use_tz = false;
+ ab_ahb->fw.iommu_domain = iommu_dom;
+ of_node_put(node);
+
+ return 0;
+
+err_iommu_unmap:
+ iommu_unmap(iommu_dom, ab_ahb->fw.msa_paddr, ab_ahb->fw.msa_size);
+
+err_iommu_detach:
+ iommu_detach_device(iommu_dom, ab_ahb->fw.dev);
+
+err_iommu_free:
+ iommu_domain_free(iommu_dom);
+
+err_unregister:
+ platform_device_unregister(pdev);
+ of_node_put(node);
+
+ return ret;
+}
+
+static int ath11k_ahb_fw_resource_deinit(struct ath11k_base *ab)
+{
+ struct ath11k_ahb *ab_ahb = ath11k_ahb_priv(ab);
+ struct iommu_domain *iommu;
+ size_t unmapped_size;
+
+ /* Chipsets not requiring MSA would not have initialized
+ * MSA resources; return success in such cases.
+ */
+ if (!ab->hw_params.fixed_fw_mem)
+ return 0;
+
+ if (ab_ahb->fw.use_tz)
+ return 0;
+
+ iommu = ab_ahb->fw.iommu_domain;
+
+ unmapped_size = iommu_unmap(iommu, ab_ahb->fw.msa_paddr, ab_ahb->fw.msa_size);
+ if (unmapped_size != ab_ahb->fw.msa_size)
+ ath11k_err(ab, "failed to unmap firmware: %zu\n",
+ unmapped_size);
+
+ unmapped_size = iommu_unmap(iommu, ab_ahb->fw.ce_paddr, ab_ahb->fw.ce_size);
+ if (unmapped_size != ab_ahb->fw.ce_size)
+ ath11k_err(ab, "failed to unmap firmware CE memory: %zu\n",
+ unmapped_size);
+
+ iommu_detach_device(iommu, ab_ahb->fw.dev);
+ iommu_domain_free(iommu);
+
+ platform_device_unregister(to_platform_device(ab_ahb->fw.dev));
+
+ return 0;
+}
+
+static int ath11k_ahb_probe(struct platform_device *pdev)
+{
+ struct ath11k_base *ab;
+ const struct of_device_id *of_id;
+ const struct ath11k_hif_ops *hif_ops;
+ const struct ath11k_pci_ops *pci_ops;
+ enum ath11k_hw_rev hw_rev;
+ int ret;
+
+ of_id = of_match_device(ath11k_ahb_of_match, &pdev->dev);
+ if (!of_id) {
+ dev_err(&pdev->dev, "failed to find matching device tree id\n");
+ return -EINVAL;
+ }
+
+ hw_rev = (uintptr_t)of_id->data;
+
+ switch (hw_rev) {
+ case ATH11K_HW_IPQ8074:
+ case ATH11K_HW_IPQ6018_HW10:
+ case ATH11K_HW_IPQ5018_HW10:
+ hif_ops = &ath11k_ahb_hif_ops_ipq8074;
+ pci_ops = NULL;
+ break;
+ case ATH11K_HW_WCN6750_HW10:
+ hif_ops = &ath11k_ahb_hif_ops_wcn6750;
+ pci_ops = &ath11k_ahb_pci_ops_wcn6750;
+ break;
+ default:
+ dev_err(&pdev->dev, "unsupported device type %d\n", hw_rev);
+ return -EOPNOTSUPP;
+ }
+
+ ret = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32));
+ if (ret) {
+ dev_err(&pdev->dev, "failed to set 32-bit consistent dma\n");
+ return ret;
+ }
+
+ ab = ath11k_core_alloc(&pdev->dev, sizeof(struct ath11k_ahb),
+ ATH11K_BUS_AHB);
+ if (!ab) {
+ dev_err(&pdev->dev, "failed to allocate ath11k base\n");
+ return -ENOMEM;
+ }
+
+ ab->hif.ops = hif_ops;
+ ab->pdev = pdev;
+ ab->hw_rev = hw_rev;
+ ab->fw_mode = ATH11K_FIRMWARE_MODE_NORMAL;
+ platform_set_drvdata(pdev, ab);
+
+ ret = ath11k_pcic_register_pci_ops(ab, pci_ops);
+ if (ret) {
+ ath11k_err(ab, "failed to register PCI ops: %d\n", ret);
+ goto err_core_free;
+ }
+
+ ret = ath11k_core_pre_init(ab);
+ if (ret)
+ goto err_core_free;
+
+ ret = ath11k_ahb_setup_resources(ab);
+ if (ret)
+ goto err_core_free;
+
+ ab->mem_ce = ab->mem;
+
+ if (ab->hw_params.ce_remap) {
+ const struct ce_remap *ce_remap = ab->hw_params.ce_remap;
+ /* CE register space is moved out of wcss, unlike on ipq8074 and ipq6018,
+ * and is not contiguous, hence the CE registers are remapped to a new
+ * space for access.
+ */
+ ab->mem_ce = ioremap(ce_remap->base, ce_remap->size);
+ if (!ab->mem_ce) {
+ dev_err(&pdev->dev, "ce ioremap error\n");
+ ret = -ENOMEM;
+ goto err_core_free;
+ }
+ }
+
+ ret = ath11k_ahb_fw_resources_init(ab);
+ if (ret)
+ goto err_core_free;
+
+ ret = ath11k_ahb_setup_smp2p_handle(ab);
+ if (ret)
+ goto err_fw_deinit;
+
+ ret = ath11k_hal_srng_init(ab);
+ if (ret)
+ goto err_release_smp2p_handle;
+
+ ret = ath11k_ce_alloc_pipes(ab);
+ if (ret) {
+ ath11k_err(ab, "failed to allocate ce pipes: %d\n", ret);
+ goto err_hal_srng_deinit;
+ }
+
+ ath11k_ahb_init_qmi_ce_config(ab);
+
+ ret = ath11k_core_get_rproc(ab);
+ if (ret) {
+ ath11k_err(ab, "failed to get rproc: %d\n", ret);
+ goto err_ce_free;
+ }
+
+ ret = ath11k_core_init(ab);
+ if (ret) {
+ ath11k_err(ab, "failed to init core: %d\n", ret);
+ goto err_ce_free;
+ }
+
+ ret = ath11k_ahb_config_irq(ab);
+ if (ret) {
+ ath11k_err(ab, "failed to configure irq: %d\n", ret);
+ goto err_ce_free;
+ }
+
+ ath11k_qmi_fwreset_from_cold_boot(ab);
+
+ return 0;
+
+err_ce_free:
+ ath11k_ce_free_pipes(ab);
+
+err_hal_srng_deinit:
+ ath11k_hal_srng_deinit(ab);
+
+err_release_smp2p_handle:
+ ath11k_ahb_release_smp2p_handle(ab);
+
+err_fw_deinit:
+ ath11k_ahb_fw_resource_deinit(ab);
+
+err_core_free:
+ ath11k_core_free(ab);
+ platform_set_drvdata(pdev, NULL);
+
+ return ret;
+}
+
+static void ath11k_ahb_remove_prepare(struct ath11k_base *ab)
+{
+ unsigned long left;
+
+ if (test_bit(ATH11K_FLAG_RECOVERY, &ab->dev_flags)) {
+ left = wait_for_completion_timeout(&ab->driver_recovery,
+ ATH11K_AHB_RECOVERY_TIMEOUT);
+ if (!left)
+ ath11k_warn(ab, "failed to receive recovery response completion\n");
+ }
+
+ set_bit(ATH11K_FLAG_UNREGISTERING, &ab->dev_flags);
+ cancel_work_sync(&ab->restart_work);
+ cancel_work_sync(&ab->qmi.event_work);
+}
+
+static void ath11k_ahb_free_resources(struct ath11k_base *ab)
+{
+ struct platform_device *pdev = ab->pdev;
+
+ ath11k_ahb_free_irq(ab);
+ ath11k_hal_srng_deinit(ab);
+ ath11k_ahb_release_smp2p_handle(ab);
+ ath11k_ahb_fw_resource_deinit(ab);
+ ath11k_ce_free_pipes(ab);
+
+ if (ab->hw_params.ce_remap)
+ iounmap(ab->mem_ce);
+
+ ath11k_core_free(ab);
+ platform_set_drvdata(pdev, NULL);
+}
+
+static int ath11k_ahb_remove(struct platform_device *pdev)
+{
+ struct ath11k_base *ab = platform_get_drvdata(pdev);
+
+ if (test_bit(ATH11K_FLAG_QMI_FAIL, &ab->dev_flags)) {
+ ath11k_ahb_power_down(ab);
+ ath11k_debugfs_soc_destroy(ab);
+ ath11k_qmi_deinit_service(ab);
+ goto qmi_fail;
+ }
+
+ ath11k_ahb_remove_prepare(ab);
+ ath11k_core_deinit(ab);
+
+qmi_fail:
+ ath11k_ahb_free_resources(ab);
+
+ return 0;
+}
+
+static void ath11k_ahb_shutdown(struct platform_device *pdev)
+{
+ struct ath11k_base *ab = platform_get_drvdata(pdev);
+
+ /* platform shutdown() & remove() are mutually exclusive.
+ * remove() is invoked during rmmod & shutdown() during
+ * system reboot/shutdown.
+ */
+ ath11k_ahb_remove_prepare(ab);
+
+ if (!(test_bit(ATH11K_FLAG_REGISTERED, &ab->dev_flags)))
+ goto free_resources;
+
+ ath11k_core_deinit(ab);
+
+free_resources:
+ ath11k_ahb_free_resources(ab);
+}
+
+static struct platform_driver ath11k_ahb_driver = {
+ .driver = {
+ .name = "ath11k",
+ .of_match_table = ath11k_ahb_of_match,
+ },
+ .probe = ath11k_ahb_probe,
+ .remove = ath11k_ahb_remove,
+ .shutdown = ath11k_ahb_shutdown,
+};
+
+module_platform_driver(ath11k_ahb_driver);
+
+MODULE_DESCRIPTION("Driver support for Qualcomm Technologies 802.11ax WLAN AHB devices");
+MODULE_LICENSE("Dual BSD/GPL");
diff --git a/drivers/net/wireless/ath/ath11k/ahb.h b/drivers/net/wireless/ath/ath11k/ahb.h
new file mode 100644
index 0000000000..415ddfd266
--- /dev/null
+++ b/drivers/net/wireless/ath/ath11k/ahb.h
@@ -0,0 +1,46 @@
+/* SPDX-License-Identifier: BSD-3-Clause-Clear */
+/*
+ * Copyright (c) 2018-2019 The Linux Foundation. All rights reserved.
+ * Copyright (c) 2022, Qualcomm Innovation Center, Inc. All rights reserved.
+ */
+#ifndef ATH11K_AHB_H
+#define ATH11K_AHB_H
+
+#include "core.h"
+
+#define ATH11K_AHB_RECOVERY_TIMEOUT (3 * HZ)
+
+#define ATH11K_AHB_SMP2P_SMEM_MSG GENMASK(15, 0)
+#define ATH11K_AHB_SMP2P_SMEM_SEQ_NO GENMASK(31, 16)
+#define ATH11K_AHB_SMP2P_SMEM_VALUE_MASK 0xFFFFFFFF
+
+enum ath11k_ahb_smp2p_msg_id {
+ ATH11K_AHB_POWER_SAVE_ENTER = 1,
+ ATH11K_AHB_POWER_SAVE_EXIT,
+};
+
+struct ath11k_base;
+
+struct ath11k_ahb {
+ struct rproc *tgt_rproc;
+ struct {
+ struct device *dev;
+ struct iommu_domain *iommu_domain;
+ dma_addr_t msa_paddr;
+ u32 msa_size;
+ dma_addr_t ce_paddr;
+ u32 ce_size;
+ bool use_tz;
+ } fw;
+ struct {
+ unsigned short seq_no;
+ unsigned int smem_bit;
+ struct qcom_smem_state *smem_state;
+ } smp2p_info;
+};
+
+static inline struct ath11k_ahb *ath11k_ahb_priv(struct ath11k_base *ab)
+{
+ return (struct ath11k_ahb *)ab->drv_priv;
+}
+#endif
diff --git a/drivers/net/wireless/ath/ath11k/ce.c b/drivers/net/wireless/ath/ath11k/ce.c
new file mode 100644
index 0000000000..289d47ae92
--- /dev/null
+++ b/drivers/net/wireless/ath/ath11k/ce.c
@@ -0,0 +1,1081 @@
+// SPDX-License-Identifier: BSD-3-Clause-Clear
+/*
+ * Copyright (c) 2018-2019 The Linux Foundation. All rights reserved.
+ * Copyright (c) 2021, Qualcomm Innovation Center, Inc. All rights reserved.
+ */
+
+#include "dp_rx.h"
+#include "debug.h"
+#include "hif.h"
+
+const struct ce_attr ath11k_host_ce_config_ipq8074[] = {
+ /* CE0: host->target HTC control and raw streams */
+ {
+ .flags = CE_ATTR_FLAGS,
+ .src_nentries = 16,
+ .src_sz_max = 2048,
+ .dest_nentries = 0,
+ .send_cb = ath11k_htc_tx_completion_handler,
+ },
+
+ /* CE1: target->host HTT + HTC control */
+ {
+ .flags = CE_ATTR_FLAGS,
+ .src_nentries = 0,
+ .src_sz_max = 2048,
+ .dest_nentries = 512,
+ .recv_cb = ath11k_htc_rx_completion_handler,
+ },
+
+ /* CE2: target->host WMI */
+ {
+ .flags = CE_ATTR_FLAGS,
+ .src_nentries = 0,
+ .src_sz_max = 2048,
+ .dest_nentries = 512,
+ .recv_cb = ath11k_htc_rx_completion_handler,
+ },
+
+ /* CE3: host->target WMI (mac0) */
+ {
+ .flags = CE_ATTR_FLAGS,
+ .src_nentries = 32,
+ .src_sz_max = 2048,
+ .dest_nentries = 0,
+ .send_cb = ath11k_htc_tx_completion_handler,
+ },
+
+ /* CE4: host->target HTT */
+ {
+ .flags = CE_ATTR_FLAGS | CE_ATTR_DIS_INTR,
+ .src_nentries = 2048,
+ .src_sz_max = 256,
+ .dest_nentries = 0,
+ },
+
+ /* CE5: target->host pktlog */
+ {
+ .flags = CE_ATTR_FLAGS,
+ .src_nentries = 0,
+ .src_sz_max = 2048,
+ .dest_nentries = 512,
+ .recv_cb = ath11k_dp_htt_htc_t2h_msg_handler,
+ },
+
+ /* CE6: target autonomous hif_memcpy */
+ {
+ .flags = CE_ATTR_FLAGS | CE_ATTR_DIS_INTR,
+ .src_nentries = 0,
+ .src_sz_max = 0,
+ .dest_nentries = 0,
+ },
+
+ /* CE7: host->target WMI (mac1) */
+ {
+ .flags = CE_ATTR_FLAGS,
+ .src_nentries = 32,
+ .src_sz_max = 2048,
+ .dest_nentries = 0,
+ .send_cb = ath11k_htc_tx_completion_handler,
+ },
+
+ /* CE8: target autonomous hif_memcpy */
+ {
+ .flags = CE_ATTR_FLAGS | CE_ATTR_DIS_INTR,
+ .src_nentries = 0,
+ .src_sz_max = 0,
+ .dest_nentries = 0,
+ },
+
+ /* CE9: host->target WMI (mac2) */
+ {
+ .flags = CE_ATTR_FLAGS,
+ .src_nentries = 32,
+ .src_sz_max = 2048,
+ .dest_nentries = 0,
+ .send_cb = ath11k_htc_tx_completion_handler,
+ },
+
+ /* CE10: target->host HTT */
+ {
+ .flags = CE_ATTR_FLAGS,
+ .src_nentries = 0,
+ .src_sz_max = 2048,
+ .dest_nentries = 512,
+ .recv_cb = ath11k_htc_rx_completion_handler,
+ },
+
+ /* CE11: Not used */
+ {
+ .flags = CE_ATTR_FLAGS,
+ .src_nentries = 0,
+ .src_sz_max = 0,
+ .dest_nentries = 0,
+ },
+};
+
+const struct ce_attr ath11k_host_ce_config_qca6390[] = {
+ /* CE0: host->target HTC control and raw streams */
+ {
+ .flags = CE_ATTR_FLAGS,
+ .src_nentries = 16,
+ .src_sz_max = 2048,
+ .dest_nentries = 0,
+ },
+
+ /* CE1: target->host HTT + HTC control */
+ {
+ .flags = CE_ATTR_FLAGS,
+ .src_nentries = 0,
+ .src_sz_max = 2048,
+ .dest_nentries = 512,
+ .recv_cb = ath11k_htc_rx_completion_handler,
+ },
+
+ /* CE2: target->host WMI */
+ {
+ .flags = CE_ATTR_FLAGS,
+ .src_nentries = 0,
+ .src_sz_max = 2048,
+ .dest_nentries = 512,
+ .recv_cb = ath11k_htc_rx_completion_handler,
+ },
+
+ /* CE3: host->target WMI (mac0) */
+ {
+ .flags = CE_ATTR_FLAGS,
+ .src_nentries = 32,
+ .src_sz_max = 2048,
+ .dest_nentries = 0,
+ .send_cb = ath11k_htc_tx_completion_handler,
+ },
+
+ /* CE4: host->target HTT */
+ {
+ .flags = CE_ATTR_FLAGS | CE_ATTR_DIS_INTR,
+ .src_nentries = 2048,
+ .src_sz_max = 256,
+ .dest_nentries = 0,
+ },
+
+ /* CE5: target->host pktlog */
+ {
+ .flags = CE_ATTR_FLAGS,
+ .src_nentries = 0,
+ .src_sz_max = 2048,
+ .dest_nentries = 512,
+ .recv_cb = ath11k_dp_htt_htc_t2h_msg_handler,
+ },
+
+ /* CE6: target autonomous hif_memcpy */
+ {
+ .flags = CE_ATTR_FLAGS | CE_ATTR_DIS_INTR,
+ .src_nentries = 0,
+ .src_sz_max = 0,
+ .dest_nentries = 0,
+ },
+
+ /* CE7: host->target WMI (mac1) */
+ {
+ .flags = CE_ATTR_FLAGS,
+ .src_nentries = 32,
+ .src_sz_max = 2048,
+ .dest_nentries = 0,
+ .send_cb = ath11k_htc_tx_completion_handler,
+ },
+
+ /* CE8: target autonomous hif_memcpy */
+ {
+ .flags = CE_ATTR_FLAGS,
+ .src_nentries = 0,
+ .src_sz_max = 0,
+ .dest_nentries = 0,
+ },
+
+};
+
+const struct ce_attr ath11k_host_ce_config_qcn9074[] = {
+ /* CE0: host->target HTC control and raw streams */
+ {
+ .flags = CE_ATTR_FLAGS,
+ .src_nentries = 16,
+ .src_sz_max = 2048,
+ .dest_nentries = 0,
+ },
+
+ /* CE1: target->host HTT + HTC control */
+ {
+ .flags = CE_ATTR_FLAGS,
+ .src_nentries = 0,
+ .src_sz_max = 2048,
+ .dest_nentries = 512,
+ .recv_cb = ath11k_htc_rx_completion_handler,
+ },
+
+ /* CE2: target->host WMI */
+ {
+ .flags = CE_ATTR_FLAGS,
+ .src_nentries = 0,
+ .src_sz_max = 2048,
+ .dest_nentries = 32,
+ .recv_cb = ath11k_htc_rx_completion_handler,
+ },
+
+ /* CE3: host->target WMI (mac0) */
+ {
+ .flags = CE_ATTR_FLAGS,
+ .src_nentries = 32,
+ .src_sz_max = 2048,
+ .dest_nentries = 0,
+ .send_cb = ath11k_htc_tx_completion_handler,
+ },
+
+ /* CE4: host->target HTT */
+ {
+ .flags = CE_ATTR_FLAGS | CE_ATTR_DIS_INTR,
+ .src_nentries = 2048,
+ .src_sz_max = 256,
+ .dest_nentries = 0,
+ },
+
+ /* CE5: target->host pktlog */
+ {
+ .flags = CE_ATTR_FLAGS,
+ .src_nentries = 0,
+ .src_sz_max = 2048,
+ .dest_nentries = 512,
+ .recv_cb = ath11k_dp_htt_htc_t2h_msg_handler,
+ },
+};
+
+static bool ath11k_ce_need_shadow_fix(int ce_id)
+{
+ /* only ce4 needs shadow workaround */
+ if (ce_id == 4)
+ return true;
+ return false;
+}
+
+void ath11k_ce_stop_shadow_timers(struct ath11k_base *ab)
+{
+ int i;
+
+ if (!ab->hw_params.supports_shadow_regs)
+ return;
+
+ for (i = 0; i < ab->hw_params.ce_count; i++)
+ if (ath11k_ce_need_shadow_fix(i))
+ ath11k_dp_shadow_stop_timer(ab, &ab->ce.hp_timer[i]);
+}
+
+static int ath11k_ce_rx_buf_enqueue_pipe(struct ath11k_ce_pipe *pipe,
+ struct sk_buff *skb, dma_addr_t paddr)
+{
+ struct ath11k_base *ab = pipe->ab;
+ struct ath11k_ce_ring *ring = pipe->dest_ring;
+ struct hal_srng *srng;
+ unsigned int write_index;
+ unsigned int nentries_mask = ring->nentries_mask;
+ u32 *desc;
+ int ret;
+
+ lockdep_assert_held(&ab->ce.ce_lock);
+
+ write_index = ring->write_index;
+
+ srng = &ab->hal.srng_list[ring->hal_ring_id];
+
+ spin_lock_bh(&srng->lock);
+
+ ath11k_hal_srng_access_begin(ab, srng);
+
+ if (unlikely(ath11k_hal_srng_src_num_free(ab, srng, false) < 1)) {
+ ret = -ENOSPC;
+ goto exit;
+ }
+
+ desc = ath11k_hal_srng_src_get_next_entry(ab, srng);
+ if (!desc) {
+ ret = -ENOSPC;
+ goto exit;
+ }
+
+ ath11k_hal_ce_dst_set_desc(desc, paddr);
+
+ ring->skb[write_index] = skb;
+ write_index = CE_RING_IDX_INCR(nentries_mask, write_index);
+ ring->write_index = write_index;
+
+ pipe->rx_buf_needed--;
+
+ ret = 0;
+exit:
+ ath11k_hal_srng_access_end(ab, srng);
+
+ spin_unlock_bh(&srng->lock);
+
+ return ret;
+}
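+
+/* Illustrative note (not part of the driver): the enqueue above runs under
+ * two locks -- the caller holds ab->ce.ce_lock (checked by the lockdep
+ * assert) to serialize ring->write_index updates, while srng->lock is taken
+ * locally around the access_begin/access_end window so the HAL head/tail
+ * pointers are read and published atomically with respect to other users
+ * of the ring.
+ */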
+
+static int ath11k_ce_rx_post_pipe(struct ath11k_ce_pipe *pipe)
+{
+ struct ath11k_base *ab = pipe->ab;
+ struct sk_buff *skb;
+ dma_addr_t paddr;
+ int ret = 0;
+
+ if (!(pipe->dest_ring || pipe->status_ring))
+ return 0;
+
+ spin_lock_bh(&ab->ce.ce_lock);
+ while (pipe->rx_buf_needed) {
+ skb = dev_alloc_skb(pipe->buf_sz);
+ if (!skb) {
+ ret = -ENOMEM;
+ goto exit;
+ }
+
+ WARN_ON_ONCE(!IS_ALIGNED((unsigned long)skb->data, 4));
+
+ paddr = dma_map_single(ab->dev, skb->data,
+ skb->len + skb_tailroom(skb),
+ DMA_FROM_DEVICE);
+ if (unlikely(dma_mapping_error(ab->dev, paddr))) {
+ ath11k_warn(ab, "failed to dma map ce rx buf\n");
+ dev_kfree_skb_any(skb);
+ ret = -EIO;
+ goto exit;
+ }
+
+ ATH11K_SKB_RXCB(skb)->paddr = paddr;
+
+ ret = ath11k_ce_rx_buf_enqueue_pipe(pipe, skb, paddr);
+
+ if (ret) {
+ ath11k_warn(ab, "failed to enqueue rx buf: %d\n", ret);
+ dma_unmap_single(ab->dev, paddr,
+ skb->len + skb_tailroom(skb),
+ DMA_FROM_DEVICE);
+ dev_kfree_skb_any(skb);
+ goto exit;
+ }
+ }
+
+exit:
+ spin_unlock_bh(&ab->ce.ce_lock);
+ return ret;
+}
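+
+/* Illustrative note (not part of the driver): each rx buffer is mapped
+ * DMA_FROM_DEVICE for its full tailroom and the resulting bus address is
+ * stashed in the skb control block via ATH11K_SKB_RXCB(); the completion
+ * path later reads it back to dma_unmap_single() the buffer before
+ * handing the skb up the stack.
+ */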
+
+static int ath11k_ce_completed_recv_next(struct ath11k_ce_pipe *pipe,
+ struct sk_buff **skb, int *nbytes)
+{
+ struct ath11k_base *ab = pipe->ab;
+ struct hal_srng *srng;
+ unsigned int sw_index;
+ unsigned int nentries_mask;
+ u32 *desc;
+ int ret = 0;
+
+ spin_lock_bh(&ab->ce.ce_lock);
+
+ sw_index = pipe->dest_ring->sw_index;
+ nentries_mask = pipe->dest_ring->nentries_mask;
+
+ srng = &ab->hal.srng_list[pipe->status_ring->hal_ring_id];
+
+ spin_lock_bh(&srng->lock);
+
+ ath11k_hal_srng_access_begin(ab, srng);
+
+ desc = ath11k_hal_srng_dst_get_next_entry(ab, srng);
+ if (!desc) {
+ ret = -EIO;
+ goto err;
+ }
+
+ *nbytes = ath11k_hal_ce_dst_status_get_length(desc);
+ if (*nbytes == 0) {
+ ret = -EIO;
+ goto err;
+ }
+
+ *skb = pipe->dest_ring->skb[sw_index];
+ pipe->dest_ring->skb[sw_index] = NULL;
+
+ sw_index = CE_RING_IDX_INCR(nentries_mask, sw_index);
+ pipe->dest_ring->sw_index = sw_index;
+
+ pipe->rx_buf_needed++;
+err:
+ ath11k_hal_srng_access_end(ab, srng);
+
+ spin_unlock_bh(&srng->lock);
+
+ spin_unlock_bh(&ab->ce.ce_lock);
+
+ return ret;
+}
+
+static void ath11k_ce_recv_process_cb(struct ath11k_ce_pipe *pipe)
+{
+ struct ath11k_base *ab = pipe->ab;
+ struct sk_buff *skb;
+ struct sk_buff_head list;
+ unsigned int nbytes, max_nbytes;
+ int ret;
+
+ __skb_queue_head_init(&list);
+ while (ath11k_ce_completed_recv_next(pipe, &skb, &nbytes) == 0) {
+ max_nbytes = skb->len + skb_tailroom(skb);
+ dma_unmap_single(ab->dev, ATH11K_SKB_RXCB(skb)->paddr,
+ max_nbytes, DMA_FROM_DEVICE);
+
+ if (unlikely(max_nbytes < nbytes)) {
+ ath11k_warn(ab, "rxed more than expected (nbytes %d, max %d)",
+ nbytes, max_nbytes);
+ dev_kfree_skb_any(skb);
+ continue;
+ }
+
+ skb_put(skb, nbytes);
+ __skb_queue_tail(&list, skb);
+ }
+
+ while ((skb = __skb_dequeue(&list))) {
+ ath11k_dbg(ab, ATH11K_DBG_CE, "rx ce pipe %d len %d\n",
+ pipe->pipe_num, skb->len);
+ pipe->recv_cb(ab, skb);
+ }
+
+ ret = ath11k_ce_rx_post_pipe(pipe);
+ if (ret && ret != -ENOSPC) {
+ ath11k_warn(ab, "failed to post rx buf to pipe: %d err: %d\n",
+ pipe->pipe_num, ret);
+ mod_timer(&ab->rx_replenish_retry,
+ jiffies + ATH11K_CE_RX_POST_RETRY_JIFFIES);
+ }
+}
+
+static struct sk_buff *ath11k_ce_completed_send_next(struct ath11k_ce_pipe *pipe)
+{
+ struct ath11k_base *ab = pipe->ab;
+ struct hal_srng *srng;
+ unsigned int sw_index;
+ unsigned int nentries_mask;
+ struct sk_buff *skb;
+ u32 *desc;
+
+ spin_lock_bh(&ab->ce.ce_lock);
+
+ sw_index = pipe->src_ring->sw_index;
+ nentries_mask = pipe->src_ring->nentries_mask;
+
+ srng = &ab->hal.srng_list[pipe->src_ring->hal_ring_id];
+
+ spin_lock_bh(&srng->lock);
+
+ ath11k_hal_srng_access_begin(ab, srng);
+
+ desc = ath11k_hal_srng_src_reap_next(ab, srng);
+ if (!desc) {
+ skb = ERR_PTR(-EIO);
+ goto err_unlock;
+ }
+
+ skb = pipe->src_ring->skb[sw_index];
+
+ pipe->src_ring->skb[sw_index] = NULL;
+
+ sw_index = CE_RING_IDX_INCR(nentries_mask, sw_index);
+ pipe->src_ring->sw_index = sw_index;
+
+err_unlock:
+ spin_unlock_bh(&srng->lock);
+
+ spin_unlock_bh(&ab->ce.ce_lock);
+
+ return skb;
+}
+
+static void ath11k_ce_tx_process_cb(struct ath11k_ce_pipe *pipe)
+{
+ struct ath11k_base *ab = pipe->ab;
+ struct sk_buff *skb;
+ struct sk_buff_head list;
+
+ __skb_queue_head_init(&list);
+ while (!IS_ERR(skb = ath11k_ce_completed_send_next(pipe))) {
+ if (!skb)
+ continue;
+
+ dma_unmap_single(ab->dev, ATH11K_SKB_CB(skb)->paddr, skb->len,
+ DMA_TO_DEVICE);
+
+ if ((!pipe->send_cb) || ab->hw_params.credit_flow) {
+ dev_kfree_skb_any(skb);
+ continue;
+ }
+
+ __skb_queue_tail(&list, skb);
+ }
+
+ while ((skb = __skb_dequeue(&list))) {
+ ath11k_dbg(ab, ATH11K_DBG_CE, "tx ce pipe %d len %d\n",
+ pipe->pipe_num, skb->len);
+ pipe->send_cb(ab, skb);
+ }
+}
+
+static void ath11k_ce_srng_msi_ring_params_setup(struct ath11k_base *ab, u32 ce_id,
+ struct hal_srng_params *ring_params)
+{
+ u32 msi_data_start;
+ u32 msi_data_count, msi_data_idx;
+ u32 msi_irq_start;
+ u32 addr_lo;
+ u32 addr_hi;
+ int ret;
+
+ ret = ath11k_get_user_msi_vector(ab, "CE",
+ &msi_data_count, &msi_data_start,
+ &msi_irq_start);
+
+ if (ret)
+ return;
+
+ ath11k_get_msi_address(ab, &addr_lo, &addr_hi);
+ ath11k_get_ce_msi_idx(ab, ce_id, &msi_data_idx);
+
+ ring_params->msi_addr = addr_lo;
+ ring_params->msi_addr |= (dma_addr_t)(((uint64_t)addr_hi) << 32);
+ ring_params->msi_data = (msi_data_idx % msi_data_count) + msi_data_start;
+ ring_params->flags |= HAL_SRNG_FLAGS_MSI_INTR;
+}
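+
+/* Worked example (illustrative only): the 64-bit MSI doorbell address is
+ * assembled from the two 32-bit halves above, e.g. addr_hi = 0x0000000f
+ * and addr_lo = 0x12340000 yield
+ * msi_addr = (0xfULL << 32) | 0x12340000 = 0xf12340000.
+ */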
+
+static int ath11k_ce_init_ring(struct ath11k_base *ab,
+ struct ath11k_ce_ring *ce_ring,
+ int ce_id, enum hal_ring_type type)
+{
+ struct hal_srng_params params = { 0 };
+ int ret;
+
+ params.ring_base_paddr = ce_ring->base_addr_ce_space;
+ params.ring_base_vaddr = ce_ring->base_addr_owner_space;
+ params.num_entries = ce_ring->nentries;
+
+ if (!(CE_ATTR_DIS_INTR & ab->hw_params.host_ce_config[ce_id].flags))
+ ath11k_ce_srng_msi_ring_params_setup(ab, ce_id, &params);
+
+ switch (type) {
+ case HAL_CE_SRC:
+ if (!(CE_ATTR_DIS_INTR & ab->hw_params.host_ce_config[ce_id].flags))
+ params.intr_batch_cntr_thres_entries = 1;
+ break;
+ case HAL_CE_DST:
+ params.max_buffer_len = ab->hw_params.host_ce_config[ce_id].src_sz_max;
+ if (!(ab->hw_params.host_ce_config[ce_id].flags & CE_ATTR_DIS_INTR)) {
+ params.intr_timer_thres_us = 1024;
+ params.flags |= HAL_SRNG_FLAGS_LOW_THRESH_INTR_EN;
+ params.low_threshold = ce_ring->nentries - 3;
+ }
+ break;
+ case HAL_CE_DST_STATUS:
+ if (!(ab->hw_params.host_ce_config[ce_id].flags & CE_ATTR_DIS_INTR)) {
+ params.intr_batch_cntr_thres_entries = 1;
+ params.intr_timer_thres_us = 0x1000;
+ }
+ break;
+ default:
+ ath11k_warn(ab, "Invalid CE ring type %d\n", type);
+ return -EINVAL;
+ }
+
+ /* TODO: Init other params needed by HAL to init the ring */
+
+ ret = ath11k_hal_srng_setup(ab, type, ce_id, 0, &params);
+ if (ret < 0) {
+ ath11k_warn(ab, "failed to setup srng: %d ring_id %d\n",
+ ret, ce_id);
+ return ret;
+ }
+
+ ce_ring->hal_ring_id = ret;
+
+ if (ab->hw_params.supports_shadow_regs &&
+ ath11k_ce_need_shadow_fix(ce_id))
+ ath11k_dp_shadow_init_timer(ab, &ab->ce.hp_timer[ce_id],
+ ATH11K_SHADOW_CTRL_TIMER_INTERVAL,
+ ce_ring->hal_ring_id);
+
+ return 0;
+}
+
+static struct ath11k_ce_ring *
+ath11k_ce_alloc_ring(struct ath11k_base *ab, int nentries, int desc_sz)
+{
+ struct ath11k_ce_ring *ce_ring;
+ dma_addr_t base_addr;
+
+ ce_ring = kzalloc(struct_size(ce_ring, skb, nentries), GFP_KERNEL);
+ if (ce_ring == NULL)
+ return ERR_PTR(-ENOMEM);
+
+ ce_ring->nentries = nentries;
+ ce_ring->nentries_mask = nentries - 1;
+
+ /* Legacy platforms that lack cache-coherent
+ * DMA are not supported.
+ */
+ ce_ring->base_addr_owner_space_unaligned =
+ dma_alloc_coherent(ab->dev,
+ nentries * desc_sz + CE_DESC_RING_ALIGN,
+ &base_addr, GFP_KERNEL);
+ if (!ce_ring->base_addr_owner_space_unaligned) {
+ kfree(ce_ring);
+ return ERR_PTR(-ENOMEM);
+ }
+
+ ce_ring->base_addr_ce_space_unaligned = base_addr;
+
+ ce_ring->base_addr_owner_space = PTR_ALIGN(
+ ce_ring->base_addr_owner_space_unaligned,
+ CE_DESC_RING_ALIGN);
+ ce_ring->base_addr_ce_space = ALIGN(
+ ce_ring->base_addr_ce_space_unaligned,
+ CE_DESC_RING_ALIGN);
+
+ return ce_ring;
+}
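+
+/* Worked example (illustrative only): callers round nentries up to a power
+ * of two, so nentries_mask = nentries - 1 is an all-ones mask and
+ * CE_RING_IDX_INCR() can wrap an index with a single AND instead of a
+ * modulo, e.g. for nentries = 512, mask = 0x1ff and (511 + 1) & 0x1ff = 0.
+ */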
+
+static int ath11k_ce_alloc_pipe(struct ath11k_base *ab, int ce_id)
+{
+ struct ath11k_ce_pipe *pipe = &ab->ce.ce_pipe[ce_id];
+ const struct ce_attr *attr = &ab->hw_params.host_ce_config[ce_id];
+ struct ath11k_ce_ring *ring;
+ int nentries;
+ int desc_sz;
+
+ pipe->attr_flags = attr->flags;
+
+ if (attr->src_nentries) {
+ pipe->send_cb = attr->send_cb;
+ nentries = roundup_pow_of_two(attr->src_nentries);
+ desc_sz = ath11k_hal_ce_get_desc_size(HAL_CE_DESC_SRC);
+ ring = ath11k_ce_alloc_ring(ab, nentries, desc_sz);
+ if (IS_ERR(ring))
+ return PTR_ERR(ring);
+ pipe->src_ring = ring;
+ }
+
+ if (attr->dest_nentries) {
+ pipe->recv_cb = attr->recv_cb;
+ nentries = roundup_pow_of_two(attr->dest_nentries);
+ desc_sz = ath11k_hal_ce_get_desc_size(HAL_CE_DESC_DST);
+ ring = ath11k_ce_alloc_ring(ab, nentries, desc_sz);
+ if (IS_ERR(ring))
+ return PTR_ERR(ring);
+ pipe->dest_ring = ring;
+
+ desc_sz = ath11k_hal_ce_get_desc_size(HAL_CE_DESC_DST_STATUS);
+ ring = ath11k_ce_alloc_ring(ab, nentries, desc_sz);
+ if (IS_ERR(ring))
+ return PTR_ERR(ring);
+ pipe->status_ring = ring;
+ }
+
+ return 0;
+}
+
+void ath11k_ce_per_engine_service(struct ath11k_base *ab, u16 ce_id)
+{
+ struct ath11k_ce_pipe *pipe = &ab->ce.ce_pipe[ce_id];
+ const struct ce_attr *attr = &ab->hw_params.host_ce_config[ce_id];
+
+ if (attr->src_nentries)
+ ath11k_ce_tx_process_cb(pipe);
+
+ if (pipe->recv_cb)
+ ath11k_ce_recv_process_cb(pipe);
+}
+EXPORT_SYMBOL(ath11k_ce_per_engine_service);
+
+void ath11k_ce_poll_send_completed(struct ath11k_base *ab, u8 pipe_id)
+{
+ struct ath11k_ce_pipe *pipe = &ab->ce.ce_pipe[pipe_id];
+ const struct ce_attr *attr = &ab->hw_params.host_ce_config[pipe_id];
+
+ if ((pipe->attr_flags & CE_ATTR_DIS_INTR) && attr->src_nentries)
+ ath11k_ce_tx_process_cb(pipe);
+}
+
+int ath11k_ce_send(struct ath11k_base *ab, struct sk_buff *skb, u8 pipe_id,
+ u16 transfer_id)
+{
+ struct ath11k_ce_pipe *pipe = &ab->ce.ce_pipe[pipe_id];
+ struct hal_srng *srng;
+ u32 *desc;
+ unsigned int write_index, sw_index;
+ unsigned int nentries_mask;
+ int ret = 0;
+ u8 byte_swap_data = 0;
+ int num_used;
+
+	/* Check if some entries could be regained by handling tx completion if
+	 * the CE has interrupts disabled and the number of used entries is
+	 * above the defined usage threshold.
+	 */
+ if (pipe->attr_flags & CE_ATTR_DIS_INTR) {
+ spin_lock_bh(&ab->ce.ce_lock);
+ write_index = pipe->src_ring->write_index;
+
+ sw_index = pipe->src_ring->sw_index;
+
+ if (write_index >= sw_index)
+ num_used = write_index - sw_index;
+ else
+ num_used = pipe->src_ring->nentries - sw_index +
+ write_index;
+
+ spin_unlock_bh(&ab->ce.ce_lock);
+
+ if (num_used > ATH11K_CE_USAGE_THRESHOLD)
+ ath11k_ce_poll_send_completed(ab, pipe->pipe_num);
+ }
+
+ if (test_bit(ATH11K_FLAG_CRASH_FLUSH, &ab->dev_flags))
+ return -ESHUTDOWN;
+
+ spin_lock_bh(&ab->ce.ce_lock);
+
+ write_index = pipe->src_ring->write_index;
+ nentries_mask = pipe->src_ring->nentries_mask;
+
+ srng = &ab->hal.srng_list[pipe->src_ring->hal_ring_id];
+
+ spin_lock_bh(&srng->lock);
+
+ ath11k_hal_srng_access_begin(ab, srng);
+
+ if (unlikely(ath11k_hal_srng_src_num_free(ab, srng, false) < 1)) {
+ ath11k_hal_srng_access_end(ab, srng);
+ ret = -ENOBUFS;
+ goto err_unlock;
+ }
+
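+	/* Claim the next source descriptor whose previous completion has
+	 * already been reaped; NULL means the ring is effectively full.
+	 */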
+ desc = ath11k_hal_srng_src_get_next_reaped(ab, srng);
+ if (!desc) {
+ ath11k_hal_srng_access_end(ab, srng);
+ ret = -ENOBUFS;
+ goto err_unlock;
+ }
+
+ if (pipe->attr_flags & CE_ATTR_BYTE_SWAP_DATA)
+ byte_swap_data = 1;
+
+ ath11k_hal_ce_src_set_desc(desc, ATH11K_SKB_CB(skb)->paddr,
+ skb->len, transfer_id, byte_swap_data);
+
+ pipe->src_ring->skb[write_index] = skb;
+ pipe->src_ring->write_index = CE_RING_IDX_INCR(nentries_mask,
+ write_index);
+
+ ath11k_hal_srng_access_end(ab, srng);
+
+ if (ath11k_ce_need_shadow_fix(pipe_id))
+ ath11k_dp_shadow_start_timer(ab, srng, &ab->ce.hp_timer[pipe_id]);
+
+ spin_unlock_bh(&srng->lock);
+
+ spin_unlock_bh(&ab->ce.ce_lock);
+
+ return 0;
+
+err_unlock:
+ spin_unlock_bh(&srng->lock);
+
+ spin_unlock_bh(&ab->ce.ce_lock);
+
+ return ret;
+}
+
+static void ath11k_ce_rx_pipe_cleanup(struct ath11k_ce_pipe *pipe)
+{
+ struct ath11k_base *ab = pipe->ab;
+ struct ath11k_ce_ring *ring = pipe->dest_ring;
+ struct sk_buff *skb;
+ int i;
+
+ if (!(ring && pipe->buf_sz))
+ return;
+
+ for (i = 0; i < ring->nentries; i++) {
+ skb = ring->skb[i];
+ if (!skb)
+ continue;
+
+ ring->skb[i] = NULL;
+ dma_unmap_single(ab->dev, ATH11K_SKB_RXCB(skb)->paddr,
+ skb->len + skb_tailroom(skb), DMA_FROM_DEVICE);
+ dev_kfree_skb_any(skb);
+ }
+}
+
+static void ath11k_ce_shadow_config(struct ath11k_base *ab)
+{
+ int i;
+
+ for (i = 0; i < ab->hw_params.ce_count; i++) {
+ if (ab->hw_params.host_ce_config[i].src_nentries)
+ ath11k_hal_srng_update_shadow_config(ab,
+ HAL_CE_SRC, i);
+
+ if (ab->hw_params.host_ce_config[i].dest_nentries) {
+ ath11k_hal_srng_update_shadow_config(ab,
+ HAL_CE_DST, i);
+
+ ath11k_hal_srng_update_shadow_config(ab,
+ HAL_CE_DST_STATUS, i);
+ }
+ }
+}
+
+void ath11k_ce_get_shadow_config(struct ath11k_base *ab,
+ u32 **shadow_cfg, u32 *shadow_cfg_len)
+{
+ if (!ab->hw_params.supports_shadow_regs)
+ return;
+
+ ath11k_hal_srng_get_shadow_config(ab, shadow_cfg, shadow_cfg_len);
+
+ /* shadow is already configured */
+ if (*shadow_cfg_len)
+ return;
+
+	/* shadow isn't configured yet: set up the non-CE srngs first,
+	 * then all CE srngs
+	 */
+ ath11k_hal_srng_shadow_config(ab);
+ ath11k_ce_shadow_config(ab);
+
+ /* get the shadow configuration */
+ ath11k_hal_srng_get_shadow_config(ab, shadow_cfg, shadow_cfg_len);
+}
+EXPORT_SYMBOL(ath11k_ce_get_shadow_config);
+
+void ath11k_ce_cleanup_pipes(struct ath11k_base *ab)
+{
+ struct ath11k_ce_pipe *pipe;
+ int pipe_num;
+
+ ath11k_ce_stop_shadow_timers(ab);
+
+ for (pipe_num = 0; pipe_num < ab->hw_params.ce_count; pipe_num++) {
+ pipe = &ab->ce.ce_pipe[pipe_num];
+ ath11k_ce_rx_pipe_cleanup(pipe);
+
+		/* Cleanup any src CEs which have interrupts disabled */
+ ath11k_ce_poll_send_completed(ab, pipe_num);
+
+		/* NOTE: Should we also clean up tx buffers in all pipes? */
+ }
+}
+EXPORT_SYMBOL(ath11k_ce_cleanup_pipes);
+
+void ath11k_ce_rx_post_buf(struct ath11k_base *ab)
+{
+ struct ath11k_ce_pipe *pipe;
+ int i;
+ int ret;
+
+ for (i = 0; i < ab->hw_params.ce_count; i++) {
+ pipe = &ab->ce.ce_pipe[i];
+ ret = ath11k_ce_rx_post_pipe(pipe);
+ if (ret) {
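+			/* a full ring (-ENOSPC) is not a failure; arm the
+			 * retry timer only for real post errors
+			 */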
+ if (ret == -ENOSPC)
+ continue;
+
+ ath11k_warn(ab, "failed to post rx buf to pipe: %d err: %d\n",
+ i, ret);
+ mod_timer(&ab->rx_replenish_retry,
+ jiffies + ATH11K_CE_RX_POST_RETRY_JIFFIES);
+
+ return;
+ }
+ }
+}
+EXPORT_SYMBOL(ath11k_ce_rx_post_buf);
+
+void ath11k_ce_rx_replenish_retry(struct timer_list *t)
+{
+ struct ath11k_base *ab = from_timer(ab, t, rx_replenish_retry);
+
+ ath11k_ce_rx_post_buf(ab);
+}
+
+int ath11k_ce_init_pipes(struct ath11k_base *ab)
+{
+ struct ath11k_ce_pipe *pipe;
+ int i;
+ int ret;
+
+ for (i = 0; i < ab->hw_params.ce_count; i++) {
+ pipe = &ab->ce.ce_pipe[i];
+
+ if (pipe->src_ring) {
+ ret = ath11k_ce_init_ring(ab, pipe->src_ring, i,
+ HAL_CE_SRC);
+ if (ret) {
+ ath11k_warn(ab, "failed to init src ring: %d\n",
+ ret);
+				/* Should we clear any partial init? */
+ return ret;
+ }
+
+ pipe->src_ring->write_index = 0;
+ pipe->src_ring->sw_index = 0;
+ }
+
+ if (pipe->dest_ring) {
+ ret = ath11k_ce_init_ring(ab, pipe->dest_ring, i,
+ HAL_CE_DST);
+ if (ret) {
+ ath11k_warn(ab, "failed to init dest ring: %d\n",
+ ret);
+				/* Should we clear any partial init? */
+ return ret;
+ }
+
+ pipe->rx_buf_needed = pipe->dest_ring->nentries ?
+ pipe->dest_ring->nentries - 2 : 0;
+
+ pipe->dest_ring->write_index = 0;
+ pipe->dest_ring->sw_index = 0;
+ }
+
+ if (pipe->status_ring) {
+ ret = ath11k_ce_init_ring(ab, pipe->status_ring, i,
+ HAL_CE_DST_STATUS);
+ if (ret) {
+				ath11k_warn(ab, "failed to init dest status ring: %d\n",
+					    ret);
+				/* Should we clear any partial init? */
+ return ret;
+ }
+
+ pipe->status_ring->write_index = 0;
+ pipe->status_ring->sw_index = 0;
+ }
+ }
+
+ return 0;
+}
+
+void ath11k_ce_free_pipes(struct ath11k_base *ab)
+{
+ struct ath11k_ce_pipe *pipe;
+ struct ath11k_ce_ring *ce_ring;
+ int desc_sz;
+ int i;
+
+ for (i = 0; i < ab->hw_params.ce_count; i++) {
+ pipe = &ab->ce.ce_pipe[i];
+
+ if (ath11k_ce_need_shadow_fix(i))
+ ath11k_dp_shadow_stop_timer(ab, &ab->ce.hp_timer[i]);
+
+ if (pipe->src_ring) {
+ desc_sz = ath11k_hal_ce_get_desc_size(HAL_CE_DESC_SRC);
+ ce_ring = pipe->src_ring;
+ dma_free_coherent(ab->dev,
+ pipe->src_ring->nentries * desc_sz +
+ CE_DESC_RING_ALIGN,
+ ce_ring->base_addr_owner_space_unaligned,
+ ce_ring->base_addr_ce_space_unaligned);
+ kfree(pipe->src_ring);
+ pipe->src_ring = NULL;
+ }
+
+ if (pipe->dest_ring) {
+ desc_sz = ath11k_hal_ce_get_desc_size(HAL_CE_DESC_DST);
+ ce_ring = pipe->dest_ring;
+ dma_free_coherent(ab->dev,
+ pipe->dest_ring->nentries * desc_sz +
+ CE_DESC_RING_ALIGN,
+ ce_ring->base_addr_owner_space_unaligned,
+ ce_ring->base_addr_ce_space_unaligned);
+ kfree(pipe->dest_ring);
+ pipe->dest_ring = NULL;
+ }
+
+ if (pipe->status_ring) {
+ desc_sz =
+ ath11k_hal_ce_get_desc_size(HAL_CE_DESC_DST_STATUS);
+ ce_ring = pipe->status_ring;
+ dma_free_coherent(ab->dev,
+ pipe->status_ring->nentries * desc_sz +
+ CE_DESC_RING_ALIGN,
+ ce_ring->base_addr_owner_space_unaligned,
+ ce_ring->base_addr_ce_space_unaligned);
+ kfree(pipe->status_ring);
+ pipe->status_ring = NULL;
+ }
+ }
+}
+EXPORT_SYMBOL(ath11k_ce_free_pipes);
+
+int ath11k_ce_alloc_pipes(struct ath11k_base *ab)
+{
+ struct ath11k_ce_pipe *pipe;
+ int i;
+ int ret;
+ const struct ce_attr *attr;
+
+ spin_lock_init(&ab->ce.ce_lock);
+
+ for (i = 0; i < ab->hw_params.ce_count; i++) {
+ attr = &ab->hw_params.host_ce_config[i];
+ pipe = &ab->ce.ce_pipe[i];
+ pipe->pipe_num = i;
+ pipe->ab = ab;
+ pipe->buf_sz = attr->src_sz_max;
+
+ ret = ath11k_ce_alloc_pipe(ab, i);
+ if (ret) {
+ /* Free any partial successful allocation */
+ ath11k_ce_free_pipes(ab);
+ return ret;
+ }
+ }
+
+ return 0;
+}
+EXPORT_SYMBOL(ath11k_ce_alloc_pipes);
+
+/* For big endian hosts the Copy Engine byte_swap is enabled.
+ * When the Copy Engine does a byte_swap, the host needs to byte swap
+ * again for the buffer content to end up in the correct byte order.
+ */
+void ath11k_ce_byte_swap(void *mem, u32 len)
+{
+ int i;
+
+ if (IS_ENABLED(CONFIG_CPU_BIG_ENDIAN)) {
+ if (!mem)
+ return;
+
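+		/* swap in place, one 32-bit word at a time; any tail bytes
+		 * beyond a multiple of 4 are left untouched
+		 */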
+ for (i = 0; i < (len / 4); i++) {
+ *(u32 *)mem = swab32(*(u32 *)mem);
+ mem += 4;
+ }
+ }
+}
+
+int ath11k_ce_get_attr_flags(struct ath11k_base *ab, int ce_id)
+{
+ if (ce_id >= ab->hw_params.ce_count)
+ return -EINVAL;
+
+ return ab->hw_params.host_ce_config[ce_id].flags;
+}
+EXPORT_SYMBOL(ath11k_ce_get_attr_flags);
diff --git a/drivers/net/wireless/ath/ath11k/ce.h b/drivers/net/wireless/ath/ath11k/ce.h
new file mode 100644
index 0000000000..c0f6a0ba86
--- /dev/null
+++ b/drivers/net/wireless/ath/ath11k/ce.h
@@ -0,0 +1,210 @@
+/* SPDX-License-Identifier: BSD-3-Clause-Clear */
+/*
+ * Copyright (c) 2018-2019 The Linux Foundation. All rights reserved.
+ */
+
+#ifndef ATH11K_CE_H
+#define ATH11K_CE_H
+
+#define CE_COUNT_MAX 12
+
+/* Byte swap data words */
+#define CE_ATTR_BYTE_SWAP_DATA 2
+
+/* no interrupt on copy completion */
+#define CE_ATTR_DIS_INTR 8
+
+/* Host software's Copy Engine configuration. */
+#ifdef __BIG_ENDIAN
+#define CE_ATTR_FLAGS CE_ATTR_BYTE_SWAP_DATA
+#else
+#define CE_ATTR_FLAGS 0
+#endif
+
+/* Threshold to poll for tx completion in case of interrupt-disabled CEs */
+#define ATH11K_CE_USAGE_THRESHOLD 32
+
+void ath11k_ce_byte_swap(void *mem, u32 len);
+
+/*
+ * Directions for interconnect pipe configuration.
+ * These definitions may be used during configuration and are shared
+ * between Host and Target.
+ *
+ * Pipe Directions are relative to the Host, so PIPEDIR_IN means
+ * "coming IN over air through Target to Host" as with a WiFi Rx operation.
+ * Conversely, PIPEDIR_OUT means "going OUT from Host through Target over air"
+ * as with a WiFi Tx operation. This is somewhat awkward for the "middle-man"
+ * Target since things that are "PIPEDIR_OUT" are coming IN to the Target
+ * over the interconnect.
+ */
+#define PIPEDIR_NONE 0
+#define PIPEDIR_IN 1 /* Target-->Host, WiFi Rx direction */
+#define PIPEDIR_OUT 2 /* Host->Target, WiFi Tx direction */
+#define PIPEDIR_INOUT 3 /* bidirectional */
+#define PIPEDIR_INOUT_H2H 4 /* bidirectional, host to host */
+
+/* CE address/mask */
+#define CE_HOST_IE_ADDRESS 0x00A1803C
+#define CE_HOST_IE_2_ADDRESS 0x00A18040
+#define CE_HOST_IE_3_ADDRESS CE_HOST_IE_ADDRESS
+
+/* CE IE registers are different for IPQ5018 */
+#define CE_HOST_IPQ5018_IE_ADDRESS 0x0841804C
+#define CE_HOST_IPQ5018_IE_2_ADDRESS 0x08418050
+#define CE_HOST_IPQ5018_IE_3_ADDRESS CE_HOST_IPQ5018_IE_ADDRESS
+
+#define CE_HOST_IE_3_SHIFT 0xC
+
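+/* Advance a ring index by one, wrapping at the (power-of-2) ring size */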
+#define CE_RING_IDX_INCR(nentries_mask, idx) (((idx) + 1) & (nentries_mask))
+
+#define ATH11K_CE_RX_POST_RETRY_JIFFIES 50
+
+struct ath11k_base;
+
+/*
+ * Establish a mapping between a service/direction and a pipe.
+ * Configuration information for a Copy Engine pipe and services.
+ * Passed from Host to Target through QMI message and must be in
+ * little endian format.
+ */
+struct service_to_pipe {
+ __le32 service_id;
+ __le32 pipedir;
+ __le32 pipenum;
+};
+
+/*
+ * Configuration information for a Copy Engine pipe.
+ * Passed from Host to Target through QMI message during startup (one per CE).
+ *
+ * NOTE: Structure is shared between Host software and Target firmware!
+ */
+struct ce_pipe_config {
+ __le32 pipenum;
+ __le32 pipedir;
+ __le32 nentries;
+ __le32 nbytes_max;
+ __le32 flags;
+ __le32 reserved;
+};
+
+struct ce_ie_addr {
+ u32 ie1_reg_addr;
+ u32 ie2_reg_addr;
+ u32 ie3_reg_addr;
+};
+
+struct ce_remap {
+ u32 base;
+ u32 size;
+};
+
+struct ce_attr {
+ /* CE_ATTR_* values */
+ unsigned int flags;
+
+ /* #entries in source ring - Must be a power of 2 */
+ unsigned int src_nentries;
+
+ /*
+ * Max source send size for this CE.
+ * This is also the minimum size of a destination buffer.
+ */
+ unsigned int src_sz_max;
+
+ /* #entries in destination ring - Must be a power of 2 */
+ unsigned int dest_nentries;
+
+ void (*recv_cb)(struct ath11k_base *, struct sk_buff *);
+ void (*send_cb)(struct ath11k_base *, struct sk_buff *);
+};
+
+#define CE_DESC_RING_ALIGN 8
+
+struct ath11k_ce_ring {
+ /* Number of entries in this ring; must be power of 2 */
+ unsigned int nentries;
+ unsigned int nentries_mask;
+
+	/* For the dest ring, this is the next index software will
+	 * process after data has been received into it.
+	 *
+	 * For the src ring, this is the last descriptor that was sent
+	 * and whose completion has been processed by software.
+	 *
+	 * Regardless of src or dest ring, this is an invariant
+	 * (modulo ring size):
+	 *	write index >= read index >= sw_index
+	 */
+ unsigned int sw_index;
+ /* cached copy */
+ unsigned int write_index;
+
+ /* Start of DMA-coherent area reserved for descriptors */
+ /* Host address space */
+ void *base_addr_owner_space_unaligned;
+ /* CE address space */
+ u32 base_addr_ce_space_unaligned;
+
+ /* Actual start of descriptors.
+ * Aligned to descriptor-size boundary.
+ * Points into reserved DMA-coherent area, above.
+ */
+ /* Host address space */
+ void *base_addr_owner_space;
+
+ /* CE address space */
+ u32 base_addr_ce_space;
+
+ /* HAL ring id */
+ u32 hal_ring_id;
+
+ /* keep last */
+ struct sk_buff *skb[];
+};
+
+struct ath11k_ce_pipe {
+ struct ath11k_base *ab;
+ u16 pipe_num;
+ unsigned int attr_flags;
+ unsigned int buf_sz;
+ unsigned int rx_buf_needed;
+
+ void (*send_cb)(struct ath11k_base *, struct sk_buff *);
+ void (*recv_cb)(struct ath11k_base *, struct sk_buff *);
+
+ struct tasklet_struct intr_tq;
+ struct ath11k_ce_ring *src_ring;
+ struct ath11k_ce_ring *dest_ring;
+ struct ath11k_ce_ring *status_ring;
+ u64 timestamp;
+};
+
+struct ath11k_ce {
+ struct ath11k_ce_pipe ce_pipe[CE_COUNT_MAX];
+ /* Protects rings of all ce pipes */
+ spinlock_t ce_lock;
+ struct ath11k_hp_update_timer hp_timer[CE_COUNT_MAX];
+};
+
+extern const struct ce_attr ath11k_host_ce_config_ipq8074[];
+extern const struct ce_attr ath11k_host_ce_config_qca6390[];
+extern const struct ce_attr ath11k_host_ce_config_qcn9074[];
+
+void ath11k_ce_cleanup_pipes(struct ath11k_base *ab);
+void ath11k_ce_rx_replenish_retry(struct timer_list *t);
+void ath11k_ce_per_engine_service(struct ath11k_base *ab, u16 ce_id);
+int ath11k_ce_send(struct ath11k_base *ab, struct sk_buff *skb, u8 pipe_id,
+ u16 transfer_id);
+void ath11k_ce_rx_post_buf(struct ath11k_base *ab);
+int ath11k_ce_init_pipes(struct ath11k_base *ab);
+int ath11k_ce_alloc_pipes(struct ath11k_base *ab);
+void ath11k_ce_free_pipes(struct ath11k_base *ab);
+int ath11k_ce_get_attr_flags(struct ath11k_base *ab, int ce_id);
+void ath11k_ce_poll_send_completed(struct ath11k_base *ab, u8 pipe_id);
+void ath11k_ce_get_shadow_config(struct ath11k_base *ab,
+ u32 **shadow_cfg, u32 *shadow_cfg_len);
+void ath11k_ce_stop_shadow_timers(struct ath11k_base *ab);
+
+#endif
diff --git a/drivers/net/wireless/ath/ath11k/core.c b/drivers/net/wireless/ath/ath11k/core.c
new file mode 100644
index 0000000000..fc7c4564a7
--- /dev/null
+++ b/drivers/net/wireless/ath/ath11k/core.c
@@ -0,0 +1,2102 @@
+// SPDX-License-Identifier: BSD-3-Clause-Clear
+/*
+ * Copyright (c) 2018-2019 The Linux Foundation. All rights reserved.
+ * Copyright (c) 2021-2023 Qualcomm Innovation Center, Inc. All rights reserved.
+ */
+
+#include <linux/module.h>
+#include <linux/slab.h>
+#include <linux/remoteproc.h>
+#include <linux/firmware.h>
+#include <linux/of.h>
+
+#include "core.h"
+#include "dp_tx.h"
+#include "dp_rx.h"
+#include "debug.h"
+#include "hif.h"
+#include "wow.h"
+
+unsigned int ath11k_debug_mask;
+EXPORT_SYMBOL(ath11k_debug_mask);
+module_param_named(debug_mask, ath11k_debug_mask, uint, 0644);
+MODULE_PARM_DESC(debug_mask, "Debugging mask");
+
+static unsigned int ath11k_crypto_mode;
+module_param_named(crypto_mode, ath11k_crypto_mode, uint, 0644);
+MODULE_PARM_DESC(crypto_mode, "crypto mode: 0-hardware, 1-software");
+
+/* frame mode values are mapped as per enum ath11k_hw_txrx_mode */
+unsigned int ath11k_frame_mode = ATH11K_HW_TXRX_NATIVE_WIFI;
+module_param_named(frame_mode, ath11k_frame_mode, uint, 0644);
+MODULE_PARM_DESC(frame_mode,
+ "Datapath frame mode (0: raw, 1: native wifi (default), 2: ethernet)");
+
+bool ath11k_ftm_mode;
+module_param_named(ftm_mode, ath11k_ftm_mode, bool, 0444);
+MODULE_PARM_DESC(ftm_mode, "Boots up in factory test mode");
+
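+/* Per-chip feature and resource description; at probe time the core
+ * selects the entry whose hw_rev matches the detected device.
+ */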
+static const struct ath11k_hw_params ath11k_hw_params[] = {
+ {
+ .hw_rev = ATH11K_HW_IPQ8074,
+ .name = "ipq8074 hw2.0",
+ .fw = {
+ .dir = "IPQ8074/hw2.0",
+ .board_size = 256 * 1024,
+ .cal_offset = 128 * 1024,
+ },
+ .max_radios = 3,
+ .bdf_addr = 0x4B0C0000,
+ .hw_ops = &ipq8074_ops,
+ .ring_mask = &ath11k_hw_ring_mask_ipq8074,
+ .internal_sleep_clock = false,
+ .regs = &ipq8074_regs,
+ .qmi_service_ins_id = ATH11K_QMI_WLFW_SERVICE_INS_ID_V01_IPQ8074,
+ .host_ce_config = ath11k_host_ce_config_ipq8074,
+ .ce_count = 12,
+ .target_ce_config = ath11k_target_ce_config_wlan_ipq8074,
+ .target_ce_count = 11,
+ .svc_to_ce_map = ath11k_target_service_to_ce_map_wlan_ipq8074,
+ .svc_to_ce_map_len = 21,
+ .ce_ie_addr = &ath11k_ce_ie_addr_ipq8074,
+ .single_pdev_only = false,
+ .rxdma1_enable = true,
+ .num_rxmda_per_pdev = 1,
+ .rx_mac_buf_ring = false,
+ .vdev_start_delay = false,
+ .htt_peer_map_v2 = true,
+
+ .spectral = {
+ .fft_sz = 2,
+			/* HW bug: the expected bin size is 2 bytes but the HW
+			 * reports it as 4 bytes, so a 2 byte pad is added to
+			 * compensate
+			 */
+ .fft_pad_sz = 2,
+ .summary_pad_sz = 0,
+ .fft_hdr_len = 16,
+ .max_fft_bins = 512,
+ .fragment_160mhz = true,
+ },
+
+ .interface_modes = BIT(NL80211_IFTYPE_STATION) |
+ BIT(NL80211_IFTYPE_AP) |
+ BIT(NL80211_IFTYPE_MESH_POINT),
+ .supports_monitor = true,
+ .full_monitor_mode = false,
+ .supports_shadow_regs = false,
+ .idle_ps = false,
+ .supports_sta_ps = false,
+ .coldboot_cal_mm = true,
+ .coldboot_cal_ftm = true,
+ .cbcal_restart_fw = true,
+ .fw_mem_mode = 0,
+ .num_vdevs = 16 + 1,
+ .num_peers = 512,
+ .supports_suspend = false,
+ .hal_desc_sz = sizeof(struct hal_rx_desc_ipq8074),
+ .supports_regdb = false,
+ .fix_l1ss = true,
+ .credit_flow = false,
+ .max_tx_ring = DP_TCL_NUM_RING_MAX,
+ .hal_params = &ath11k_hw_hal_params_ipq8074,
+ .supports_dynamic_smps_6ghz = false,
+ .alloc_cacheable_memory = true,
+ .supports_rssi_stats = false,
+ .fw_wmi_diag_event = false,
+ .current_cc_support = false,
+ .dbr_debug_support = true,
+ .global_reset = false,
+ .bios_sar_capa = NULL,
+ .m3_fw_support = false,
+ .fixed_bdf_addr = true,
+ .fixed_mem_region = true,
+ .static_window_map = false,
+ .hybrid_bus_type = false,
+ .fixed_fw_mem = false,
+ .support_off_channel_tx = false,
+ .supports_multi_bssid = false,
+
+ .sram_dump = {},
+
+ .tcl_ring_retry = true,
+ .tx_ring_size = DP_TCL_DATA_RING_SIZE,
+		.smp2p_wow_exit = false,
+		.support_fw_mac_sequence = false,
+ },
+ {
+ .hw_rev = ATH11K_HW_IPQ6018_HW10,
+ .name = "ipq6018 hw1.0",
+ .fw = {
+ .dir = "IPQ6018/hw1.0",
+ .board_size = 256 * 1024,
+ .cal_offset = 128 * 1024,
+ },
+ .max_radios = 2,
+ .bdf_addr = 0x4ABC0000,
+ .hw_ops = &ipq6018_ops,
+ .ring_mask = &ath11k_hw_ring_mask_ipq8074,
+ .internal_sleep_clock = false,
+ .regs = &ipq8074_regs,
+ .qmi_service_ins_id = ATH11K_QMI_WLFW_SERVICE_INS_ID_V01_IPQ8074,
+ .host_ce_config = ath11k_host_ce_config_ipq8074,
+ .ce_count = 12,
+ .target_ce_config = ath11k_target_ce_config_wlan_ipq8074,
+ .target_ce_count = 11,
+ .svc_to_ce_map = ath11k_target_service_to_ce_map_wlan_ipq6018,
+ .svc_to_ce_map_len = 19,
+ .ce_ie_addr = &ath11k_ce_ie_addr_ipq8074,
+ .single_pdev_only = false,
+ .rxdma1_enable = true,
+ .num_rxmda_per_pdev = 1,
+ .rx_mac_buf_ring = false,
+ .vdev_start_delay = false,
+ .htt_peer_map_v2 = true,
+
+ .spectral = {
+ .fft_sz = 4,
+ .fft_pad_sz = 0,
+ .summary_pad_sz = 0,
+ .fft_hdr_len = 16,
+ .max_fft_bins = 512,
+ .fragment_160mhz = true,
+ },
+
+ .interface_modes = BIT(NL80211_IFTYPE_STATION) |
+ BIT(NL80211_IFTYPE_AP) |
+ BIT(NL80211_IFTYPE_MESH_POINT),
+ .supports_monitor = true,
+ .full_monitor_mode = false,
+ .supports_shadow_regs = false,
+ .idle_ps = false,
+ .supports_sta_ps = false,
+ .coldboot_cal_mm = true,
+ .coldboot_cal_ftm = true,
+ .cbcal_restart_fw = true,
+ .fw_mem_mode = 0,
+ .num_vdevs = 16 + 1,
+ .num_peers = 512,
+ .supports_suspend = false,
+ .hal_desc_sz = sizeof(struct hal_rx_desc_ipq8074),
+ .supports_regdb = false,
+ .fix_l1ss = true,
+ .credit_flow = false,
+ .max_tx_ring = DP_TCL_NUM_RING_MAX,
+ .hal_params = &ath11k_hw_hal_params_ipq8074,
+ .supports_dynamic_smps_6ghz = false,
+ .alloc_cacheable_memory = true,
+ .supports_rssi_stats = false,
+ .fw_wmi_diag_event = false,
+ .current_cc_support = false,
+ .dbr_debug_support = true,
+ .global_reset = false,
+ .bios_sar_capa = NULL,
+ .m3_fw_support = false,
+ .fixed_bdf_addr = true,
+ .fixed_mem_region = true,
+ .static_window_map = false,
+ .hybrid_bus_type = false,
+ .fixed_fw_mem = false,
+ .support_off_channel_tx = false,
+ .supports_multi_bssid = false,
+
+ .sram_dump = {},
+
+ .tcl_ring_retry = true,
+ .tx_ring_size = DP_TCL_DATA_RING_SIZE,
+ .smp2p_wow_exit = false,
+ .support_fw_mac_sequence = false,
+ },
+ {
+ .name = "qca6390 hw2.0",
+ .hw_rev = ATH11K_HW_QCA6390_HW20,
+ .fw = {
+ .dir = "QCA6390/hw2.0",
+ .board_size = 256 * 1024,
+ .cal_offset = 128 * 1024,
+ },
+ .max_radios = 3,
+ .bdf_addr = 0x4B0C0000,
+ .hw_ops = &qca6390_ops,
+ .ring_mask = &ath11k_hw_ring_mask_qca6390,
+ .internal_sleep_clock = true,
+ .regs = &qca6390_regs,
+ .qmi_service_ins_id = ATH11K_QMI_WLFW_SERVICE_INS_ID_V01_QCA6390,
+ .host_ce_config = ath11k_host_ce_config_qca6390,
+ .ce_count = 9,
+ .target_ce_config = ath11k_target_ce_config_wlan_qca6390,
+ .target_ce_count = 9,
+ .svc_to_ce_map = ath11k_target_service_to_ce_map_wlan_qca6390,
+ .svc_to_ce_map_len = 14,
+ .ce_ie_addr = &ath11k_ce_ie_addr_ipq8074,
+ .single_pdev_only = true,
+ .rxdma1_enable = false,
+ .num_rxmda_per_pdev = 2,
+ .rx_mac_buf_ring = true,
+ .vdev_start_delay = true,
+ .htt_peer_map_v2 = false,
+
+ .spectral = {
+ .fft_sz = 0,
+ .fft_pad_sz = 0,
+ .summary_pad_sz = 0,
+ .fft_hdr_len = 0,
+ .max_fft_bins = 0,
+ .fragment_160mhz = false,
+ },
+
+ .interface_modes = BIT(NL80211_IFTYPE_STATION) |
+ BIT(NL80211_IFTYPE_AP),
+ .supports_monitor = false,
+ .full_monitor_mode = false,
+ .supports_shadow_regs = true,
+ .idle_ps = true,
+ .supports_sta_ps = true,
+ .coldboot_cal_mm = false,
+ .coldboot_cal_ftm = false,
+ .cbcal_restart_fw = false,
+ .fw_mem_mode = 0,
+ .num_vdevs = 16 + 1,
+ .num_peers = 512,
+ .supports_suspend = true,
+ .hal_desc_sz = sizeof(struct hal_rx_desc_ipq8074),
+ .supports_regdb = false,
+ .fix_l1ss = true,
+ .credit_flow = true,
+ .max_tx_ring = DP_TCL_NUM_RING_MAX_QCA6390,
+ .hal_params = &ath11k_hw_hal_params_qca6390,
+ .supports_dynamic_smps_6ghz = false,
+ .alloc_cacheable_memory = false,
+ .supports_rssi_stats = true,
+ .fw_wmi_diag_event = true,
+ .current_cc_support = true,
+ .dbr_debug_support = false,
+ .global_reset = true,
+ .bios_sar_capa = NULL,
+ .m3_fw_support = true,
+ .fixed_bdf_addr = false,
+ .fixed_mem_region = false,
+ .static_window_map = false,
+ .hybrid_bus_type = false,
+ .fixed_fw_mem = false,
+ .support_off_channel_tx = true,
+ .supports_multi_bssid = true,
+
+ .sram_dump = {
+ .start = 0x01400000,
+ .end = 0x0171ffff,
+ },
+
+ .tcl_ring_retry = true,
+ .tx_ring_size = DP_TCL_DATA_RING_SIZE,
+ .smp2p_wow_exit = false,
+ .support_fw_mac_sequence = true,
+ },
+ {
+ .name = "qcn9074 hw1.0",
+ .hw_rev = ATH11K_HW_QCN9074_HW10,
+ .fw = {
+ .dir = "QCN9074/hw1.0",
+ .board_size = 256 * 1024,
+ .cal_offset = 128 * 1024,
+ },
+ .max_radios = 1,
+ .single_pdev_only = false,
+ .qmi_service_ins_id = ATH11K_QMI_WLFW_SERVICE_INS_ID_V01_QCN9074,
+ .hw_ops = &qcn9074_ops,
+ .ring_mask = &ath11k_hw_ring_mask_qcn9074,
+ .internal_sleep_clock = false,
+ .regs = &qcn9074_regs,
+ .host_ce_config = ath11k_host_ce_config_qcn9074,
+ .ce_count = 6,
+ .target_ce_config = ath11k_target_ce_config_wlan_qcn9074,
+ .target_ce_count = 9,
+ .svc_to_ce_map = ath11k_target_service_to_ce_map_wlan_qcn9074,
+ .svc_to_ce_map_len = 18,
+ .ce_ie_addr = &ath11k_ce_ie_addr_ipq8074,
+ .rxdma1_enable = true,
+ .num_rxmda_per_pdev = 1,
+ .rx_mac_buf_ring = false,
+ .vdev_start_delay = false,
+ .htt_peer_map_v2 = true,
+
+ .spectral = {
+ .fft_sz = 2,
+ .fft_pad_sz = 0,
+ .summary_pad_sz = 16,
+ .fft_hdr_len = 24,
+ .max_fft_bins = 1024,
+ .fragment_160mhz = false,
+ },
+
+ .interface_modes = BIT(NL80211_IFTYPE_STATION) |
+ BIT(NL80211_IFTYPE_AP) |
+ BIT(NL80211_IFTYPE_MESH_POINT),
+ .supports_monitor = true,
+ .full_monitor_mode = true,
+ .supports_shadow_regs = false,
+ .idle_ps = false,
+ .supports_sta_ps = false,
+ .coldboot_cal_mm = false,
+ .coldboot_cal_ftm = true,
+ .cbcal_restart_fw = true,
+ .fw_mem_mode = 2,
+ .num_vdevs = 8,
+ .num_peers = 128,
+ .supports_suspend = false,
+ .hal_desc_sz = sizeof(struct hal_rx_desc_qcn9074),
+ .supports_regdb = false,
+ .fix_l1ss = true,
+ .credit_flow = false,
+ .max_tx_ring = DP_TCL_NUM_RING_MAX,
+ .hal_params = &ath11k_hw_hal_params_ipq8074,
+ .supports_dynamic_smps_6ghz = true,
+ .alloc_cacheable_memory = true,
+ .supports_rssi_stats = false,
+ .fw_wmi_diag_event = false,
+ .current_cc_support = false,
+ .dbr_debug_support = true,
+ .global_reset = false,
+ .bios_sar_capa = NULL,
+ .m3_fw_support = true,
+ .fixed_bdf_addr = false,
+ .fixed_mem_region = false,
+ .static_window_map = true,
+ .hybrid_bus_type = false,
+ .fixed_fw_mem = false,
+ .support_off_channel_tx = false,
+ .supports_multi_bssid = false,
+
+ .sram_dump = {},
+
+ .tcl_ring_retry = true,
+ .tx_ring_size = DP_TCL_DATA_RING_SIZE,
+ .smp2p_wow_exit = false,
+ .support_fw_mac_sequence = false,
+ },
+ {
+ .name = "wcn6855 hw2.0",
+ .hw_rev = ATH11K_HW_WCN6855_HW20,
+ .fw = {
+ .dir = "WCN6855/hw2.0",
+ .board_size = 256 * 1024,
+ .cal_offset = 128 * 1024,
+ },
+ .max_radios = 3,
+ .bdf_addr = 0x4B0C0000,
+ .hw_ops = &wcn6855_ops,
+ .ring_mask = &ath11k_hw_ring_mask_qca6390,
+ .internal_sleep_clock = true,
+ .regs = &wcn6855_regs,
+ .qmi_service_ins_id = ATH11K_QMI_WLFW_SERVICE_INS_ID_V01_QCA6390,
+ .host_ce_config = ath11k_host_ce_config_qca6390,
+ .ce_count = 9,
+ .target_ce_config = ath11k_target_ce_config_wlan_qca6390,
+ .target_ce_count = 9,
+ .svc_to_ce_map = ath11k_target_service_to_ce_map_wlan_qca6390,
+ .svc_to_ce_map_len = 14,
+ .ce_ie_addr = &ath11k_ce_ie_addr_ipq8074,
+ .single_pdev_only = true,
+ .rxdma1_enable = false,
+ .num_rxmda_per_pdev = 2,
+ .rx_mac_buf_ring = true,
+ .vdev_start_delay = true,
+ .htt_peer_map_v2 = false,
+
+ .spectral = {
+ .fft_sz = 0,
+ .fft_pad_sz = 0,
+ .summary_pad_sz = 0,
+ .fft_hdr_len = 0,
+ .max_fft_bins = 0,
+ .fragment_160mhz = false,
+ },
+
+ .interface_modes = BIT(NL80211_IFTYPE_STATION) |
+ BIT(NL80211_IFTYPE_AP),
+ .supports_monitor = false,
+ .full_monitor_mode = false,
+ .supports_shadow_regs = true,
+ .idle_ps = true,
+ .supports_sta_ps = true,
+ .coldboot_cal_mm = false,
+ .coldboot_cal_ftm = false,
+ .cbcal_restart_fw = false,
+ .fw_mem_mode = 0,
+ .num_vdevs = 16 + 1,
+ .num_peers = 512,
+ .supports_suspend = true,
+ .hal_desc_sz = sizeof(struct hal_rx_desc_wcn6855),
+ .supports_regdb = true,
+ .fix_l1ss = false,
+ .credit_flow = true,
+ .max_tx_ring = DP_TCL_NUM_RING_MAX_QCA6390,
+ .hal_params = &ath11k_hw_hal_params_qca6390,
+ .supports_dynamic_smps_6ghz = false,
+ .alloc_cacheable_memory = false,
+ .supports_rssi_stats = true,
+ .fw_wmi_diag_event = true,
+ .current_cc_support = true,
+ .dbr_debug_support = false,
+ .global_reset = true,
+ .bios_sar_capa = &ath11k_hw_sar_capa_wcn6855,
+ .m3_fw_support = true,
+ .fixed_bdf_addr = false,
+ .fixed_mem_region = false,
+ .static_window_map = false,
+ .hybrid_bus_type = false,
+ .fixed_fw_mem = false,
+ .support_off_channel_tx = true,
+ .supports_multi_bssid = true,
+
+ .sram_dump = {
+ .start = 0x01400000,
+ .end = 0x0177ffff,
+ },
+
+ .tcl_ring_retry = true,
+ .tx_ring_size = DP_TCL_DATA_RING_SIZE,
+ .smp2p_wow_exit = false,
+ .support_fw_mac_sequence = true,
+ },
+ {
+ .name = "wcn6855 hw2.1",
+ .hw_rev = ATH11K_HW_WCN6855_HW21,
+ .fw = {
+ .dir = "WCN6855/hw2.1",
+ .board_size = 256 * 1024,
+ .cal_offset = 128 * 1024,
+ },
+ .max_radios = 3,
+ .bdf_addr = 0x4B0C0000,
+ .hw_ops = &wcn6855_ops,
+ .ring_mask = &ath11k_hw_ring_mask_qca6390,
+ .internal_sleep_clock = true,
+ .regs = &wcn6855_regs,
+ .qmi_service_ins_id = ATH11K_QMI_WLFW_SERVICE_INS_ID_V01_QCA6390,
+ .host_ce_config = ath11k_host_ce_config_qca6390,
+ .ce_count = 9,
+ .target_ce_config = ath11k_target_ce_config_wlan_qca6390,
+ .target_ce_count = 9,
+ .svc_to_ce_map = ath11k_target_service_to_ce_map_wlan_qca6390,
+ .svc_to_ce_map_len = 14,
+ .single_pdev_only = true,
+ .rxdma1_enable = false,
+ .num_rxmda_per_pdev = 2,
+ .rx_mac_buf_ring = true,
+ .vdev_start_delay = true,
+ .htt_peer_map_v2 = false,
+
+ .spectral = {
+ .fft_sz = 0,
+ .fft_pad_sz = 0,
+ .summary_pad_sz = 0,
+ .fft_hdr_len = 0,
+ .max_fft_bins = 0,
+ .fragment_160mhz = false,
+ },
+
+ .interface_modes = BIT(NL80211_IFTYPE_STATION) |
+ BIT(NL80211_IFTYPE_AP),
+ .supports_monitor = false,
+ .supports_shadow_regs = true,
+ .idle_ps = true,
+ .supports_sta_ps = true,
+ .coldboot_cal_mm = false,
+ .coldboot_cal_ftm = false,
+ .cbcal_restart_fw = false,
+ .fw_mem_mode = 0,
+ .num_vdevs = 16 + 1,
+ .num_peers = 512,
+ .supports_suspend = true,
+ .hal_desc_sz = sizeof(struct hal_rx_desc_wcn6855),
+ .supports_regdb = true,
+ .fix_l1ss = false,
+ .credit_flow = true,
+ .max_tx_ring = DP_TCL_NUM_RING_MAX_QCA6390,
+ .hal_params = &ath11k_hw_hal_params_qca6390,
+ .supports_dynamic_smps_6ghz = false,
+ .alloc_cacheable_memory = false,
+ .supports_rssi_stats = true,
+ .fw_wmi_diag_event = true,
+ .current_cc_support = true,
+ .dbr_debug_support = false,
+ .global_reset = true,
+ .bios_sar_capa = &ath11k_hw_sar_capa_wcn6855,
+ .m3_fw_support = true,
+ .fixed_bdf_addr = false,
+ .fixed_mem_region = false,
+ .static_window_map = false,
+ .hybrid_bus_type = false,
+ .fixed_fw_mem = false,
+ .support_off_channel_tx = true,
+ .supports_multi_bssid = true,
+
+ .sram_dump = {
+ .start = 0x01400000,
+ .end = 0x0177ffff,
+ },
+
+ .tcl_ring_retry = true,
+ .tx_ring_size = DP_TCL_DATA_RING_SIZE,
+ .smp2p_wow_exit = false,
+ .support_fw_mac_sequence = true,
+ },
+ {
+ .name = "wcn6750 hw1.0",
+ .hw_rev = ATH11K_HW_WCN6750_HW10,
+ .fw = {
+ .dir = "WCN6750/hw1.0",
+ .board_size = 256 * 1024,
+ .cal_offset = 128 * 1024,
+ },
+ .max_radios = 1,
+ .bdf_addr = 0x4B0C0000,
+ .hw_ops = &wcn6750_ops,
+ .ring_mask = &ath11k_hw_ring_mask_wcn6750,
+ .internal_sleep_clock = false,
+ .regs = &wcn6750_regs,
+ .qmi_service_ins_id = ATH11K_QMI_WLFW_SERVICE_INS_ID_V01_WCN6750,
+ .host_ce_config = ath11k_host_ce_config_qca6390,
+ .ce_count = 9,
+ .target_ce_config = ath11k_target_ce_config_wlan_qca6390,
+ .target_ce_count = 9,
+ .svc_to_ce_map = ath11k_target_service_to_ce_map_wlan_qca6390,
+ .svc_to_ce_map_len = 14,
+ .ce_ie_addr = &ath11k_ce_ie_addr_ipq8074,
+ .single_pdev_only = true,
+ .rxdma1_enable = false,
+ .num_rxmda_per_pdev = 1,
+ .rx_mac_buf_ring = true,
+ .vdev_start_delay = true,
+ .htt_peer_map_v2 = false,
+
+ .spectral = {
+ .fft_sz = 0,
+ .fft_pad_sz = 0,
+ .summary_pad_sz = 0,
+ .fft_hdr_len = 0,
+ .max_fft_bins = 0,
+ .fragment_160mhz = false,
+ },
+
+ .interface_modes = BIT(NL80211_IFTYPE_STATION) |
+ BIT(NL80211_IFTYPE_AP),
+ .supports_monitor = false,
+ .supports_shadow_regs = true,
+ .idle_ps = true,
+ .supports_sta_ps = true,
+ .coldboot_cal_mm = true,
+ .coldboot_cal_ftm = true,
+ .cbcal_restart_fw = false,
+ .fw_mem_mode = 0,
+ .num_vdevs = 16 + 1,
+ .num_peers = 512,
+ .supports_suspend = false,
+ .hal_desc_sz = sizeof(struct hal_rx_desc_qcn9074),
+ .supports_regdb = true,
+ .fix_l1ss = false,
+ .credit_flow = true,
+ .max_tx_ring = DP_TCL_NUM_RING_MAX,
+ .hal_params = &ath11k_hw_hal_params_wcn6750,
+ .supports_dynamic_smps_6ghz = false,
+ .alloc_cacheable_memory = false,
+ .supports_rssi_stats = true,
+ .fw_wmi_diag_event = false,
+ .current_cc_support = true,
+ .dbr_debug_support = false,
+ .global_reset = false,
+ .bios_sar_capa = &ath11k_hw_sar_capa_wcn6855,
+ .m3_fw_support = false,
+ .fixed_bdf_addr = false,
+ .fixed_mem_region = false,
+ .static_window_map = true,
+ .hybrid_bus_type = true,
+ .fixed_fw_mem = true,
+ .support_off_channel_tx = true,
+ .supports_multi_bssid = true,
+
+ .sram_dump = {},
+
+ .tcl_ring_retry = false,
+ .tx_ring_size = DP_TCL_DATA_RING_SIZE_WCN6750,
+ .smp2p_wow_exit = true,
+ .support_fw_mac_sequence = true,
+ },
+ {
+ .hw_rev = ATH11K_HW_IPQ5018_HW10,
+ .name = "ipq5018 hw1.0",
+ .fw = {
+ .dir = "IPQ5018/hw1.0",
+ .board_size = 256 * 1024,
+ .cal_offset = 128 * 1024,
+ },
+ .max_radios = MAX_RADIOS_5018,
+ .bdf_addr = 0x4BA00000,
+ /* hal_desc_sz and hw ops are similar to qcn9074 */
+ .hal_desc_sz = sizeof(struct hal_rx_desc_qcn9074),
+ .qmi_service_ins_id = ATH11K_QMI_WLFW_SERVICE_INS_ID_V01_IPQ8074,
+ .ring_mask = &ath11k_hw_ring_mask_ipq8074,
+ .credit_flow = false,
+ .max_tx_ring = 1,
+ .spectral = {
+ .fft_sz = 2,
+ .fft_pad_sz = 0,
+ .summary_pad_sz = 16,
+ .fft_hdr_len = 24,
+ .max_fft_bins = 1024,
+ },
+ .internal_sleep_clock = false,
+ .regs = &ipq5018_regs,
+ .hw_ops = &ipq5018_ops,
+ .host_ce_config = ath11k_host_ce_config_qcn9074,
+ .ce_count = CE_CNT_5018,
+ .target_ce_config = ath11k_target_ce_config_wlan_ipq5018,
+ .target_ce_count = TARGET_CE_CNT_5018,
+ .svc_to_ce_map = ath11k_target_service_to_ce_map_wlan_ipq5018,
+ .svc_to_ce_map_len = SVC_CE_MAP_LEN_5018,
+ .ce_ie_addr = &ath11k_ce_ie_addr_ipq5018,
+ .ce_remap = &ath11k_ce_remap_ipq5018,
+ .rxdma1_enable = true,
+ .num_rxmda_per_pdev = RXDMA_PER_PDEV_5018,
+ .rx_mac_buf_ring = false,
+ .vdev_start_delay = false,
+ .htt_peer_map_v2 = true,
+ .interface_modes = BIT(NL80211_IFTYPE_STATION) |
+ BIT(NL80211_IFTYPE_AP) |
+ BIT(NL80211_IFTYPE_MESH_POINT),
+ .supports_monitor = false,
+ .supports_sta_ps = false,
+ .supports_shadow_regs = false,
+ .fw_mem_mode = 0,
+ .num_vdevs = 16 + 1,
+ .num_peers = 512,
+ .supports_regdb = false,
+ .idle_ps = false,
+ .supports_suspend = false,
+ .hal_params = &ath11k_hw_hal_params_ipq8074,
+ .single_pdev_only = false,
+ .coldboot_cal_mm = true,
+ .coldboot_cal_ftm = true,
+ .cbcal_restart_fw = true,
+ .fix_l1ss = true,
+ .supports_dynamic_smps_6ghz = false,
+ .alloc_cacheable_memory = true,
+ .supports_rssi_stats = false,
+ .fw_wmi_diag_event = false,
+ .current_cc_support = false,
+ .dbr_debug_support = true,
+ .global_reset = false,
+ .bios_sar_capa = NULL,
+ .m3_fw_support = false,
+ .fixed_bdf_addr = true,
+ .fixed_mem_region = true,
+ .static_window_map = false,
+ .hybrid_bus_type = false,
+ .fixed_fw_mem = false,
+ .support_off_channel_tx = false,
+ .supports_multi_bssid = false,
+
+ .sram_dump = {},
+
+ .tcl_ring_retry = true,
+ .tx_ring_size = DP_TCL_DATA_RING_SIZE,
+ .smp2p_wow_exit = false,
+ .support_fw_mac_sequence = false,
+ },
+};
+
+static inline struct ath11k_pdev *ath11k_core_get_single_pdev(struct ath11k_base *ab)
+{
+ WARN_ON(!ab->hw_params.single_pdev_only);
+
+ return &ab->pdevs[0];
+}
+
+void ath11k_fw_stats_pdevs_free(struct list_head *head)
+{
+ struct ath11k_fw_stats_pdev *i, *tmp;
+
+ list_for_each_entry_safe(i, tmp, head, list) {
+ list_del(&i->list);
+ kfree(i);
+ }
+}
+
+void ath11k_fw_stats_vdevs_free(struct list_head *head)
+{
+ struct ath11k_fw_stats_vdev *i, *tmp;
+
+ list_for_each_entry_safe(i, tmp, head, list) {
+ list_del(&i->list);
+ kfree(i);
+ }
+}
+
+void ath11k_fw_stats_bcn_free(struct list_head *head)
+{
+ struct ath11k_fw_stats_bcn *i, *tmp;
+
+ list_for_each_entry_safe(i, tmp, head, list) {
+ list_del(&i->list);
+ kfree(i);
+ }
+}
+
+void ath11k_fw_stats_init(struct ath11k *ar)
+{
+ INIT_LIST_HEAD(&ar->fw_stats.pdevs);
+ INIT_LIST_HEAD(&ar->fw_stats.vdevs);
+ INIT_LIST_HEAD(&ar->fw_stats.bcn);
+
+ init_completion(&ar->fw_stats_complete);
+}
+
+void ath11k_fw_stats_free(struct ath11k_fw_stats *stats)
+{
+ ath11k_fw_stats_pdevs_free(&stats->pdevs);
+ ath11k_fw_stats_vdevs_free(&stats->vdevs);
+ ath11k_fw_stats_bcn_free(&stats->bcn);
+}
+
+bool ath11k_core_coldboot_cal_support(struct ath11k_base *ab)
+{
+ if (!ath11k_cold_boot_cal)
+ return false;
+
+	if (ath11k_ftm_mode)
+		return ab->hw_params.coldboot_cal_ftm;
+
+	return ab->hw_params.coldboot_cal_mm;
+}
+
+int ath11k_core_suspend(struct ath11k_base *ab)
+{
+ int ret;
+ struct ath11k_pdev *pdev;
+ struct ath11k *ar;
+
+ if (!ab->hw_params.supports_suspend)
+ return -EOPNOTSUPP;
+
+ /* so far single_pdev_only chips have supports_suspend as true
+ * and only the first pdev is valid.
+ */
+ pdev = ath11k_core_get_single_pdev(ab);
+ ar = pdev->ar;
+ if (!ar || ar->state != ATH11K_STATE_OFF)
+ return 0;
+
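+	/* the first pktlog stop also cancels the reap timer; a second,
+	 * timer-less stop follows below once WoW has been enabled
+	 */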
+ ret = ath11k_dp_rx_pktlog_stop(ab, true);
+ if (ret) {
+ ath11k_warn(ab, "failed to stop dp rx (and timer) pktlog during suspend: %d\n",
+ ret);
+ return ret;
+ }
+
+ ret = ath11k_mac_wait_tx_complete(ar);
+ if (ret) {
+ ath11k_warn(ab, "failed to wait tx complete: %d\n", ret);
+ return ret;
+ }
+
+ ret = ath11k_wow_enable(ab);
+ if (ret) {
+ ath11k_warn(ab, "failed to enable wow during suspend: %d\n", ret);
+ return ret;
+ }
+
+ ret = ath11k_dp_rx_pktlog_stop(ab, false);
+ if (ret) {
+ ath11k_warn(ab, "failed to stop dp rx pktlog during suspend: %d\n",
+ ret);
+ return ret;
+ }
+
+ ath11k_ce_stop_shadow_timers(ab);
+ ath11k_dp_stop_shadow_timers(ab);
+
+ ath11k_hif_irq_disable(ab);
+ ath11k_hif_ce_irq_disable(ab);
+
+ ret = ath11k_hif_suspend(ab);
+ if (ret) {
+ ath11k_warn(ab, "failed to suspend hif: %d\n", ret);
+ return ret;
+ }
+
+ return 0;
+}
+EXPORT_SYMBOL(ath11k_core_suspend);
+
+int ath11k_core_resume(struct ath11k_base *ab)
+{
+ int ret;
+ struct ath11k_pdev *pdev;
+ struct ath11k *ar;
+
+ if (!ab->hw_params.supports_suspend)
+ return -EOPNOTSUPP;
+
+	/* so far single_pdev_only chips have supports_suspend as true
+	 * and only the first pdev is valid.
+	 */
+ pdev = ath11k_core_get_single_pdev(ab);
+ ar = pdev->ar;
+ if (!ar || ar->state != ATH11K_STATE_OFF)
+ return 0;
+
+ ret = ath11k_hif_resume(ab);
+ if (ret) {
+ ath11k_warn(ab, "failed to resume hif during resume: %d\n", ret);
+ return ret;
+ }
+
+ ath11k_hif_ce_irq_enable(ab);
+ ath11k_hif_irq_enable(ab);
+
+ ret = ath11k_dp_rx_pktlog_start(ab);
+ if (ret) {
+ ath11k_warn(ab, "failed to start rx pktlog during resume: %d\n",
+ ret);
+ return ret;
+ }
+
+ ret = ath11k_wow_wakeup(ab);
+ if (ret) {
+ ath11k_warn(ab, "failed to wakeup wow during resume: %d\n", ret);
+ return ret;
+ }
+
+ return 0;
+}
+EXPORT_SYMBOL(ath11k_core_resume);
+
+static void ath11k_core_check_cc_code_bdfext(const struct dmi_header *hdr, void *data)
+{
+ struct ath11k_base *ab = data;
+ const char *magic = ATH11K_SMBIOS_BDF_EXT_MAGIC;
+ struct ath11k_smbios_bdf *smbios = (struct ath11k_smbios_bdf *)hdr;
+ ssize_t copied;
+ size_t len;
+ int i;
+
+ if (ab->qmi.target.bdf_ext[0] != '\0')
+ return;
+
+ if (hdr->type != ATH11K_SMBIOS_BDF_EXT_TYPE)
+ return;
+
+ if (hdr->length != ATH11K_SMBIOS_BDF_EXT_LENGTH) {
+ ath11k_dbg(ab, ATH11K_DBG_BOOT,
+ "wrong smbios bdf ext type length (%d).\n",
+ hdr->length);
+ return;
+ }
+
+ spin_lock_bh(&ab->base_lock);
+
+ switch (smbios->country_code_flag) {
+ case ATH11K_SMBIOS_CC_ISO:
+ ab->new_alpha2[0] = (smbios->cc_code >> 8) & 0xff;
+ ab->new_alpha2[1] = smbios->cc_code & 0xff;
+ ath11k_dbg(ab, ATH11K_DBG_BOOT, "smbios cc_code %c%c\n",
+ ab->new_alpha2[0], ab->new_alpha2[1]);
+ break;
+ case ATH11K_SMBIOS_CC_WW:
+ ab->new_alpha2[0] = '0';
+ ab->new_alpha2[1] = '0';
+ ath11k_dbg(ab, ATH11K_DBG_BOOT, "smbios worldwide regdomain\n");
+ break;
+ default:
+ ath11k_dbg(ab, ATH11K_DBG_BOOT, "ignore smbios country code setting %d\n",
+ smbios->country_code_flag);
+ break;
+ }
+
+ spin_unlock_bh(&ab->base_lock);
+
+ if (!smbios->bdf_enabled) {
+ ath11k_dbg(ab, ATH11K_DBG_BOOT, "bdf variant name not found.\n");
+ return;
+ }
+
+ /* Only one string exists (per spec) */
+ if (memcmp(smbios->bdf_ext, magic, strlen(magic)) != 0) {
+ ath11k_dbg(ab, ATH11K_DBG_BOOT,
+ "bdf variant magic does not match.\n");
+ return;
+ }
+
+ len = min_t(size_t,
+ strlen(smbios->bdf_ext), sizeof(ab->qmi.target.bdf_ext));
+ for (i = 0; i < len; i++) {
+ if (!isascii(smbios->bdf_ext[i]) || !isprint(smbios->bdf_ext[i])) {
+ ath11k_dbg(ab, ATH11K_DBG_BOOT,
+ "bdf variant name contains non ascii chars.\n");
+ return;
+ }
+ }
+
+ /* Copy extension name without magic prefix */
+ copied = strscpy(ab->qmi.target.bdf_ext, smbios->bdf_ext + strlen(magic),
+ sizeof(ab->qmi.target.bdf_ext));
+ if (copied < 0) {
+ ath11k_dbg(ab, ATH11K_DBG_BOOT,
+ "bdf variant string is longer than the buffer can accommodate\n");
+ return;
+ }
+
+ ath11k_dbg(ab, ATH11K_DBG_BOOT,
+ "found and validated bdf variant smbios_type 0x%x bdf %s\n",
+ ATH11K_SMBIOS_BDF_EXT_TYPE, ab->qmi.target.bdf_ext);
+}
+
+int ath11k_core_check_smbios(struct ath11k_base *ab)
+{
+ ab->qmi.target.bdf_ext[0] = '\0';
+ dmi_walk(ath11k_core_check_cc_code_bdfext, ab);
+
+ if (ab->qmi.target.bdf_ext[0] == '\0')
+ return -ENODATA;
+
+ return 0;
+}
+
+int ath11k_core_check_dt(struct ath11k_base *ab)
+{
+ size_t max_len = sizeof(ab->qmi.target.bdf_ext);
+ const char *variant = NULL;
+ struct device_node *node;
+
+ node = ab->dev->of_node;
+ if (!node)
+ return -ENOENT;
+
+ of_property_read_string(node, "qcom,ath11k-calibration-variant",
+ &variant);
+ if (!variant)
+ return -ENODATA;
+
+ if (strscpy(ab->qmi.target.bdf_ext, variant, max_len) < 0)
+ ath11k_dbg(ab, ATH11K_DBG_BOOT,
+ "bdf variant string is longer than the buffer can accommodate (variant: %s)\n",
+ variant);
+
+ return 0;
+}
+
+static int __ath11k_core_create_board_name(struct ath11k_base *ab, char *name,
+ size_t name_len, bool with_variant,
+ bool bus_type_mode)
+{
+ /* strlen(',variant=') + strlen(ab->qmi.target.bdf_ext) */
+ char variant[9 + ATH11K_QMI_BDF_EXT_STR_LENGTH] = { 0 };
+
+ if (with_variant && ab->qmi.target.bdf_ext[0] != '\0')
+ scnprintf(variant, sizeof(variant), ",variant=%s",
+ ab->qmi.target.bdf_ext);
+
+ switch (ab->id.bdf_search) {
+ case ATH11K_BDF_SEARCH_BUS_AND_BOARD:
+ if (bus_type_mode)
+ scnprintf(name, name_len,
+ "bus=%s",
+ ath11k_bus_str(ab->hif.bus));
+ else
+ scnprintf(name, name_len,
+ "bus=%s,vendor=%04x,device=%04x,subsystem-vendor=%04x,subsystem-device=%04x,qmi-chip-id=%d,qmi-board-id=%d%s",
+ ath11k_bus_str(ab->hif.bus),
+ ab->id.vendor, ab->id.device,
+ ab->id.subsystem_vendor,
+ ab->id.subsystem_device,
+ ab->qmi.target.chip_id,
+ ab->qmi.target.board_id,
+ variant);
+ break;
+ default:
+ scnprintf(name, name_len,
+ "bus=%s,qmi-chip-id=%d,qmi-board-id=%d%s",
+ ath11k_bus_str(ab->hif.bus),
+ ab->qmi.target.chip_id,
+ ab->qmi.target.board_id, variant);
+ break;
+ }
+
+ ath11k_dbg(ab, ATH11K_DBG_BOOT, "using board name '%s'\n", name);
+
+ return 0;
+}
+
+static int ath11k_core_create_board_name(struct ath11k_base *ab, char *name,
+ size_t name_len)
+{
+ return __ath11k_core_create_board_name(ab, name, name_len, true, false);
+}
+
+static int ath11k_core_create_fallback_board_name(struct ath11k_base *ab, char *name,
+ size_t name_len)
+{
+ return __ath11k_core_create_board_name(ab, name, name_len, false, false);
+}
+
+static int ath11k_core_create_bus_type_board_name(struct ath11k_base *ab, char *name,
+ size_t name_len)
+{
+ return __ath11k_core_create_board_name(ab, name, name_len, false, true);
+}
+
+const struct firmware *ath11k_core_firmware_request(struct ath11k_base *ab,
+ const char *file)
+{
+ const struct firmware *fw;
+ char path[100];
+ int ret;
+
+	if (!file)
+ return ERR_PTR(-ENOENT);
+
+ ath11k_core_create_firmware_path(ab, file, path, sizeof(path));
+
+ ret = firmware_request_nowarn(&fw, path, ab->dev);
+ if (ret)
+ return ERR_PTR(ret);
+
+ ath11k_dbg(ab, ATH11K_DBG_BOOT, "firmware request %s size %zu\n",
+ path, fw->size);
+
+ return fw;
+}
+
+void ath11k_core_free_bdf(struct ath11k_base *ab, struct ath11k_board_data *bd)
+{
+ if (!IS_ERR(bd->fw))
+ release_firmware(bd->fw);
+
+ memset(bd, 0, sizeof(*bd));
+}
+
+static int ath11k_core_parse_bd_ie_board(struct ath11k_base *ab,
+ struct ath11k_board_data *bd,
+ const void *buf, size_t buf_len,
+ const char *boardname,
+ int ie_id,
+ int name_id,
+ int data_id)
+{
+ const struct ath11k_fw_ie *hdr;
+ bool name_match_found;
+ int ret, board_ie_id;
+ size_t board_ie_len;
+ const void *board_ie_data;
+
+ name_match_found = false;
+
+ /* go through ATH11K_BD_IE_BOARD_/ATH11K_BD_IE_REGDB_ elements */
+ while (buf_len > sizeof(struct ath11k_fw_ie)) {
+ hdr = buf;
+ board_ie_id = le32_to_cpu(hdr->id);
+ board_ie_len = le32_to_cpu(hdr->len);
+ board_ie_data = hdr->data;
+
+ buf_len -= sizeof(*hdr);
+ buf += sizeof(*hdr);
+
+ if (buf_len < ALIGN(board_ie_len, 4)) {
+ ath11k_err(ab, "invalid %s length: %zu < %zu\n",
+ ath11k_bd_ie_type_str(ie_id),
+ buf_len, ALIGN(board_ie_len, 4));
+ ret = -EINVAL;
+ goto out;
+ }
+
+ if (board_ie_id == name_id) {
+ ath11k_dbg_dump(ab, ATH11K_DBG_BOOT, "board name", "",
+ board_ie_data, board_ie_len);
+
+ if (board_ie_len != strlen(boardname))
+ goto next;
+
+ ret = memcmp(board_ie_data, boardname, strlen(boardname));
+ if (ret)
+ goto next;
+
+ name_match_found = true;
+ ath11k_dbg(ab, ATH11K_DBG_BOOT,
+ "found match %s for name '%s'",
+ ath11k_bd_ie_type_str(ie_id),
+ boardname);
+ } else if (board_ie_id == data_id) {
+ if (!name_match_found)
+ /* no match found */
+ goto next;
+
+ ath11k_dbg(ab, ATH11K_DBG_BOOT,
+ "found %s for '%s'",
+ ath11k_bd_ie_type_str(ie_id),
+ boardname);
+
+ bd->data = board_ie_data;
+ bd->len = board_ie_len;
+
+ ret = 0;
+ goto out;
+ } else {
+ ath11k_warn(ab, "unknown %s id found: %d\n",
+ ath11k_bd_ie_type_str(ie_id),
+ board_ie_id);
+ }
+next:
+ /* jump over the padding */
+ board_ie_len = ALIGN(board_ie_len, 4);
+
+ buf_len -= board_ie_len;
+ buf += board_ie_len;
+ }
+
+ /* no match found */
+ ret = -ENOENT;
+
+out:
+ return ret;
+}
+
+static int ath11k_core_fetch_board_data_api_n(struct ath11k_base *ab,
+ struct ath11k_board_data *bd,
+ const char *boardname,
+ int ie_id_match,
+ int name_id,
+ int data_id)
+{
+ size_t len, magic_len;
+ const u8 *data;
+ char *filename, filepath[100];
+ size_t ie_len;
+ struct ath11k_fw_ie *hdr;
+ int ret, ie_id;
+
+ filename = ATH11K_BOARD_API2_FILE;
+
+ if (!bd->fw)
+ bd->fw = ath11k_core_firmware_request(ab, filename);
+
+ if (IS_ERR(bd->fw))
+ return PTR_ERR(bd->fw);
+
+ data = bd->fw->data;
+ len = bd->fw->size;
+
+ ath11k_core_create_firmware_path(ab, filename,
+ filepath, sizeof(filepath));
+
+	/* the magic string is stored with its terminating NUL byte */
+ magic_len = strlen(ATH11K_BOARD_MAGIC) + 1;
+ if (len < magic_len) {
+ ath11k_err(ab, "failed to find magic value in %s, file too short: %zu\n",
+ filepath, len);
+ ret = -EINVAL;
+ goto err;
+ }
+
+ if (memcmp(data, ATH11K_BOARD_MAGIC, magic_len)) {
+ ath11k_err(ab, "found invalid board magic\n");
+ ret = -EINVAL;
+ goto err;
+ }
+
+ /* magic is padded to 4 bytes */
+ magic_len = ALIGN(magic_len, 4);
+ if (len < magic_len) {
+ ath11k_err(ab, "failed: %s too small to contain board data, len: %zu\n",
+ filepath, len);
+ ret = -EINVAL;
+ goto err;
+ }
+
+ data += magic_len;
+ len -= magic_len;
+
+ while (len > sizeof(struct ath11k_fw_ie)) {
+ hdr = (struct ath11k_fw_ie *)data;
+ ie_id = le32_to_cpu(hdr->id);
+ ie_len = le32_to_cpu(hdr->len);
+
+ len -= sizeof(*hdr);
+ data = hdr->data;
+
+ if (len < ALIGN(ie_len, 4)) {
+ ath11k_err(ab, "invalid length for board ie_id %d ie_len %zu len %zu\n",
+ ie_id, ie_len, len);
+ ret = -EINVAL;
+ goto err;
+ }
+
+ if (ie_id == ie_id_match) {
+ ret = ath11k_core_parse_bd_ie_board(ab, bd, data,
+ ie_len,
+ boardname,
+ ie_id_match,
+ name_id,
+ data_id);
+ if (ret == -ENOENT)
+ /* no match found, continue */
+ goto next;
+ else if (ret)
+ /* there was an error, bail out */
+ goto err;
+ /* either found or error, so stop searching */
+ goto out;
+ }
+next:
+ /* jump over the padding */
+ ie_len = ALIGN(ie_len, 4);
+
+ len -= ie_len;
+ data += ie_len;
+ }
+
+out:
+ if (!bd->data || !bd->len) {
+ ath11k_dbg(ab, ATH11K_DBG_BOOT,
+ "failed to fetch %s for %s from %s\n",
+ ath11k_bd_ie_type_str(ie_id_match),
+ boardname, filepath);
+ ret = -ENODATA;
+ goto err;
+ }
+
+ return 0;
+
+err:
+ ath11k_core_free_bdf(ab, bd);
+ return ret;
+}
+
+int ath11k_core_fetch_board_data_api_1(struct ath11k_base *ab,
+ struct ath11k_board_data *bd,
+ const char *name)
+{
+ bd->fw = ath11k_core_firmware_request(ab, name);
+
+ if (IS_ERR(bd->fw))
+ return PTR_ERR(bd->fw);
+
+ bd->data = bd->fw->data;
+ bd->len = bd->fw->size;
+
+ return 0;
+}
+
+#define BOARD_NAME_SIZE 200
+int ath11k_core_fetch_bdf(struct ath11k_base *ab, struct ath11k_board_data *bd)
+{
+ char boardname[BOARD_NAME_SIZE], fallback_boardname[BOARD_NAME_SIZE];
+ char *filename, filepath[100];
+ int ret;
+
+ filename = ATH11K_BOARD_API2_FILE;
+
+ ret = ath11k_core_create_board_name(ab, boardname, sizeof(boardname));
+ if (ret) {
+ ath11k_err(ab, "failed to create board name: %d", ret);
+ return ret;
+ }
+
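+	/* try the most specific board file first: API 2 with the full board
+	 * name, then the fallback name without the variant, and finally the
+	 * legacy API 1 board.bin
+	 */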
+ ab->bd_api = 2;
+ ret = ath11k_core_fetch_board_data_api_n(ab, bd, boardname,
+ ATH11K_BD_IE_BOARD,
+ ATH11K_BD_IE_BOARD_NAME,
+ ATH11K_BD_IE_BOARD_DATA);
+ if (!ret)
+ goto success;
+
+ ret = ath11k_core_create_fallback_board_name(ab, fallback_boardname,
+ sizeof(fallback_boardname));
+ if (ret) {
+ ath11k_err(ab, "failed to create fallback board name: %d", ret);
+ return ret;
+ }
+
+ ret = ath11k_core_fetch_board_data_api_n(ab, bd, fallback_boardname,
+ ATH11K_BD_IE_BOARD,
+ ATH11K_BD_IE_BOARD_NAME,
+ ATH11K_BD_IE_BOARD_DATA);
+ if (!ret)
+ goto success;
+
+ ab->bd_api = 1;
+ ret = ath11k_core_fetch_board_data_api_1(ab, bd, ATH11K_DEFAULT_BOARD_FILE);
+ if (ret) {
+ ath11k_core_create_firmware_path(ab, filename,
+ filepath, sizeof(filepath));
+ ath11k_err(ab, "failed to fetch board data for %s from %s\n",
+ boardname, filepath);
+ if (memcmp(boardname, fallback_boardname, strlen(boardname)))
+ ath11k_err(ab, "failed to fetch board data for %s from %s\n",
+ fallback_boardname, filepath);
+
+ ath11k_err(ab, "failed to fetch board.bin from %s\n",
+ ab->hw_params.fw.dir);
+ return ret;
+ }
+
+success:
+ ath11k_dbg(ab, ATH11K_DBG_BOOT, "using board api %d\n", ab->bd_api);
+ return 0;
+}
+
+int ath11k_core_fetch_regdb(struct ath11k_base *ab, struct ath11k_board_data *bd)
+{
+ char boardname[BOARD_NAME_SIZE], default_boardname[BOARD_NAME_SIZE];
+ int ret;
+
+ ret = ath11k_core_create_board_name(ab, boardname, BOARD_NAME_SIZE);
+ if (ret) {
+ ath11k_dbg(ab, ATH11K_DBG_BOOT,
+ "failed to create board name for regdb: %d", ret);
+ goto exit;
+ }
+
+ ret = ath11k_core_fetch_board_data_api_n(ab, bd, boardname,
+ ATH11K_BD_IE_REGDB,
+ ATH11K_BD_IE_REGDB_NAME,
+ ATH11K_BD_IE_REGDB_DATA);
+ if (!ret)
+ goto exit;
+
+ ret = ath11k_core_create_bus_type_board_name(ab, default_boardname,
+ BOARD_NAME_SIZE);
+ if (ret) {
+ ath11k_dbg(ab, ATH11K_DBG_BOOT,
+ "failed to create default board name for regdb: %d", ret);
+ goto exit;
+ }
+
+ ret = ath11k_core_fetch_board_data_api_n(ab, bd, default_boardname,
+ ATH11K_BD_IE_REGDB,
+ ATH11K_BD_IE_REGDB_NAME,
+ ATH11K_BD_IE_REGDB_DATA);
+ if (!ret)
+ goto exit;
+
+ ret = ath11k_core_fetch_board_data_api_1(ab, bd, ATH11K_REGDB_FILE_NAME);
+ if (ret)
+ ath11k_dbg(ab, ATH11K_DBG_BOOT, "failed to fetch %s from %s\n",
+ ATH11K_REGDB_FILE_NAME, ab->hw_params.fw.dir);
+
+exit:
+ if (!ret)
+ ath11k_dbg(ab, ATH11K_DBG_BOOT, "fetched regdb\n");
+
+ return ret;
+}
+
+static void ath11k_core_stop(struct ath11k_base *ab)
+{
+ if (!test_bit(ATH11K_FLAG_CRASH_FLUSH, &ab->dev_flags))
+ ath11k_qmi_firmware_stop(ab);
+
+ ath11k_hif_stop(ab);
+ ath11k_wmi_detach(ab);
+ ath11k_dp_pdev_reo_cleanup(ab);
+
+ /* De-Init of components as needed */
+}
+
+static int ath11k_core_soc_create(struct ath11k_base *ab)
+{
+ int ret;
+
+ if (ath11k_ftm_mode) {
+ ab->fw_mode = ATH11K_FIRMWARE_MODE_FTM;
+ ath11k_info(ab, "Booting in factory test mode\n");
+ }
+
+ ret = ath11k_qmi_init_service(ab);
+ if (ret) {
+		ath11k_err(ab, "failed to initialize qmi: %d\n", ret);
+ return ret;
+ }
+
+ ret = ath11k_debugfs_soc_create(ab);
+ if (ret) {
+ ath11k_err(ab, "failed to create ath11k debugfs\n");
+ goto err_qmi_deinit;
+ }
+
+ ret = ath11k_hif_power_up(ab);
+ if (ret) {
+		ath11k_err(ab, "failed to power up: %d\n", ret);
+ goto err_debugfs_reg;
+ }
+
+ return 0;
+
+err_debugfs_reg:
+ ath11k_debugfs_soc_destroy(ab);
+err_qmi_deinit:
+ ath11k_qmi_deinit_service(ab);
+ return ret;
+}
+
+static void ath11k_core_soc_destroy(struct ath11k_base *ab)
+{
+ ath11k_debugfs_soc_destroy(ab);
+ ath11k_dp_free(ab);
+ ath11k_reg_free(ab);
+ ath11k_qmi_deinit_service(ab);
+}
+
+static int ath11k_core_pdev_create(struct ath11k_base *ab)
+{
+ int ret;
+
+ ret = ath11k_debugfs_pdev_create(ab);
+ if (ret) {
+ ath11k_err(ab, "failed to create core pdev debugfs: %d\n", ret);
+ return ret;
+ }
+
+ ret = ath11k_dp_pdev_alloc(ab);
+ if (ret) {
+ ath11k_err(ab, "failed to attach DP pdev: %d\n", ret);
+ goto err_pdev_debug;
+ }
+
+ ret = ath11k_mac_register(ab);
+ if (ret) {
+		ath11k_err(ab, "failed to register the radio with mac80211: %d\n", ret);
+ goto err_dp_pdev_free;
+ }
+
+ ret = ath11k_thermal_register(ab);
+ if (ret) {
+ ath11k_err(ab, "could not register thermal device: %d\n",
+ ret);
+ goto err_mac_unregister;
+ }
+
+ ret = ath11k_spectral_init(ab);
+ if (ret) {
+ ath11k_err(ab, "failed to init spectral %d\n", ret);
+ goto err_thermal_unregister;
+ }
+
+ return 0;
+
+err_thermal_unregister:
+ ath11k_thermal_unregister(ab);
+err_mac_unregister:
+ ath11k_mac_unregister(ab);
+err_dp_pdev_free:
+ ath11k_dp_pdev_free(ab);
+err_pdev_debug:
+ ath11k_debugfs_pdev_destroy(ab);
+
+ return ret;
+}
+
+static void ath11k_core_pdev_destroy(struct ath11k_base *ab)
+{
+ ath11k_spectral_deinit(ab);
+ ath11k_thermal_unregister(ab);
+ ath11k_mac_unregister(ab);
+ ath11k_hif_irq_disable(ab);
+ ath11k_dp_pdev_free(ab);
+ ath11k_debugfs_pdev_destroy(ab);
+}
+
+static int ath11k_core_start(struct ath11k_base *ab)
+{
+ int ret;
+
+ ret = ath11k_wmi_attach(ab);
+ if (ret) {
+ ath11k_err(ab, "failed to attach wmi: %d\n", ret);
+ return ret;
+ }
+
+ ret = ath11k_htc_init(ab);
+ if (ret) {
+ ath11k_err(ab, "failed to init htc: %d\n", ret);
+ goto err_wmi_detach;
+ }
+
+ ret = ath11k_hif_start(ab);
+ if (ret) {
+ ath11k_err(ab, "failed to start HIF: %d\n", ret);
+ goto err_wmi_detach;
+ }
+
+ ret = ath11k_htc_wait_target(&ab->htc);
+ if (ret) {
+ ath11k_err(ab, "failed to connect to HTC: %d\n", ret);
+ goto err_hif_stop;
+ }
+
+ ret = ath11k_dp_htt_connect(&ab->dp);
+ if (ret) {
+ ath11k_err(ab, "failed to connect to HTT: %d\n", ret);
+ goto err_hif_stop;
+ }
+
+ ret = ath11k_wmi_connect(ab);
+ if (ret) {
+ ath11k_err(ab, "failed to connect wmi: %d\n", ret);
+ goto err_hif_stop;
+ }
+
+ ret = ath11k_htc_start(&ab->htc);
+ if (ret) {
+ ath11k_err(ab, "failed to start HTC: %d\n", ret);
+ goto err_hif_stop;
+ }
+
+ ret = ath11k_wmi_wait_for_service_ready(ab);
+ if (ret) {
+ ath11k_err(ab, "failed to receive wmi service ready event: %d\n",
+ ret);
+ goto err_hif_stop;
+ }
+
+ ret = ath11k_mac_allocate(ab);
+ if (ret) {
+		ath11k_err(ab, "failed to create new hw device with mac80211: %d\n",
+			   ret);
+ goto err_hif_stop;
+ }
+
+ ath11k_dp_pdev_pre_alloc(ab);
+
+ ret = ath11k_dp_pdev_reo_setup(ab);
+ if (ret) {
+ ath11k_err(ab, "failed to initialize reo destination rings: %d\n", ret);
+ goto err_mac_destroy;
+ }
+
+ ret = ath11k_wmi_cmd_init(ab);
+ if (ret) {
+ ath11k_err(ab, "failed to send wmi init cmd: %d\n", ret);
+ goto err_reo_cleanup;
+ }
+
+ ret = ath11k_wmi_wait_for_unified_ready(ab);
+ if (ret) {
+ ath11k_err(ab, "failed to receive wmi unified ready event: %d\n",
+ ret);
+ goto err_reo_cleanup;
+ }
+
+ /* Put the hardware into DBS (dual band simultaneous) mode */
+ if (ab->hw_params.single_pdev_only && ab->hw_params.num_rxmda_per_pdev > 1) {
+ ret = ath11k_wmi_set_hw_mode(ab, WMI_HOST_HW_MODE_DBS);
+ if (ret) {
+ ath11k_err(ab, "failed to send dbs mode: %d\n", ret);
+ goto err_reo_cleanup;
+ }
+ }
+
+ ret = ath11k_dp_tx_htt_h2t_ver_req_msg(ab);
+ if (ret) {
+ ath11k_err(ab, "failed to send htt version request message: %d\n",
+ ret);
+ goto err_reo_cleanup;
+ }
+
+ return 0;
+
+err_reo_cleanup:
+ ath11k_dp_pdev_reo_cleanup(ab);
+err_mac_destroy:
+ ath11k_mac_destroy(ab);
+err_hif_stop:
+ ath11k_hif_stop(ab);
+err_wmi_detach:
+ ath11k_wmi_detach(ab);
+
+ return ret;
+}
+
+static int ath11k_core_start_firmware(struct ath11k_base *ab,
+ enum ath11k_firmware_mode mode)
+{
+ int ret;
+
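+ /* Refresh the copy engine shadow register configuration that is
+ * handed to the firmware over QMI before each firmware start.
+ */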
+ ath11k_ce_get_shadow_config(ab, &ab->qmi.ce_cfg.shadow_reg_v2,
+ &ab->qmi.ce_cfg.shadow_reg_v2_len);
+
+ ret = ath11k_qmi_firmware_start(ab, mode);
+ if (ret) {
+ ath11k_err(ab, "failed to send firmware start: %d\n", ret);
+ return ret;
+ }
+
+ return ret;
+}
+
+int ath11k_core_qmi_firmware_ready(struct ath11k_base *ab)
+{
+ int ret;
+
+ ret = ath11k_core_start_firmware(ab, ab->fw_mode);
+ if (ret) {
+ ath11k_err(ab, "failed to start firmware: %d\n", ret);
+ return ret;
+ }
+
+ ret = ath11k_ce_init_pipes(ab);
+ if (ret) {
+ ath11k_err(ab, "failed to initialize CE: %d\n", ret);
+ goto err_firmware_stop;
+ }
+
+ ret = ath11k_dp_alloc(ab);
+ if (ret) {
+ ath11k_err(ab, "failed to init DP: %d\n", ret);
+ goto err_firmware_stop;
+ }
+
+ switch (ath11k_crypto_mode) {
+ case ATH11K_CRYPT_MODE_SW:
+ set_bit(ATH11K_FLAG_HW_CRYPTO_DISABLED, &ab->dev_flags);
+ set_bit(ATH11K_FLAG_RAW_MODE, &ab->dev_flags);
+ break;
+ case ATH11K_CRYPT_MODE_HW:
+ clear_bit(ATH11K_FLAG_HW_CRYPTO_DISABLED, &ab->dev_flags);
+ clear_bit(ATH11K_FLAG_RAW_MODE, &ab->dev_flags);
+ break;
+ default:
+ ath11k_info(ab, "invalid crypto_mode: %d\n", ath11k_crypto_mode);
+ return -EINVAL;
+ }
+
+ if (ath11k_frame_mode == ATH11K_HW_TXRX_RAW)
+ set_bit(ATH11K_FLAG_RAW_MODE, &ab->dev_flags);
+
+ mutex_lock(&ab->core_lock);
+ ret = ath11k_core_start(ab);
+ if (ret) {
+ ath11k_err(ab, "failed to start core: %d\n", ret);
+ goto err_dp_free;
+ }
+
+ ret = ath11k_core_pdev_create(ab);
+ if (ret) {
+ ath11k_err(ab, "failed to create pdev core: %d\n", ret);
+ goto err_core_stop;
+ }
+ ath11k_hif_irq_enable(ab);
+ mutex_unlock(&ab->core_lock);
+
+ return 0;
+
+err_core_stop:
+ ath11k_core_stop(ab);
+ ath11k_mac_destroy(ab);
+err_dp_free:
+ ath11k_dp_free(ab);
+ mutex_unlock(&ab->core_lock);
+err_firmware_stop:
+ ath11k_qmi_firmware_stop(ab);
+
+ return ret;
+}
+
+static int ath11k_core_reconfigure_on_crash(struct ath11k_base *ab)
+{
+ int ret;
+
+ mutex_lock(&ab->core_lock);
+ ath11k_thermal_unregister(ab);
+ ath11k_hif_irq_disable(ab);
+ ath11k_dp_pdev_free(ab);
+ ath11k_spectral_deinit(ab);
+ ath11k_hif_stop(ab);
+ ath11k_wmi_detach(ab);
+ ath11k_dp_pdev_reo_cleanup(ab);
+ mutex_unlock(&ab->core_lock);
+
+ ath11k_dp_free(ab);
+ ath11k_hal_srng_deinit(ab);
+
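+ /* All vdevs are gone after the teardown above; mark every vdev id
+ * free again, one bit per vdev across all radios.
+ */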
+ ab->free_vdev_map = (1LL << (ab->num_radios * TARGET_NUM_VDEVS(ab))) - 1;
+
+ ret = ath11k_hal_srng_init(ab);
+ if (ret)
+ return ret;
+
+ clear_bit(ATH11K_FLAG_CRASH_FLUSH, &ab->dev_flags);
+
+ ret = ath11k_core_qmi_firmware_ready(ab);
+ if (ret)
+ goto err_hal_srng_deinit;
+
+ clear_bit(ATH11K_FLAG_RECOVERY, &ab->dev_flags);
+
+ return 0;
+
+err_hal_srng_deinit:
+ ath11k_hal_srng_deinit(ab);
+ return ret;
+}
+
+void ath11k_core_halt(struct ath11k *ar)
+{
+ struct ath11k_base *ab = ar->ab;
+
+ lockdep_assert_held(&ar->conf_mutex);
+
+ ar->num_created_vdevs = 0;
+ ar->allocated_vdev_map = 0;
+
+ ath11k_mac_scan_finish(ar);
+ ath11k_mac_peer_cleanup_all(ar);
+ cancel_delayed_work_sync(&ar->scan.timeout);
+ cancel_work_sync(&ar->regd_update_work);
+ cancel_work_sync(&ab->update_11d_work);
+
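+ /* Unpublish this pdev from RCU readers and wait for in-flight
+ * readers to finish before its per-radio state is reinitialized.
+ */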
+ rcu_assign_pointer(ab->pdevs_active[ar->pdev_idx], NULL);
+ synchronize_rcu();
+ INIT_LIST_HEAD(&ar->arvifs);
+ idr_init(&ar->txmgmt_idr);
+}
+
+static void ath11k_update_11d(struct work_struct *work)
+{
+ struct ath11k_base *ab = container_of(work, struct ath11k_base, update_11d_work);
+ struct ath11k *ar;
+ struct ath11k_pdev *pdev;
+ struct wmi_set_current_country_params set_current_param = {};
+ int ret, i;
+
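+ /* Snapshot the pending alpha2 under base_lock; the WMI commands
+ * below may sleep and must run without the spinlock held.
+ */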
+ spin_lock_bh(&ab->base_lock);
+ memcpy(&set_current_param.alpha2, &ab->new_alpha2, 2);
+ spin_unlock_bh(&ab->base_lock);
+
+ ath11k_dbg(ab, ATH11K_DBG_WMI, "update 11d new cc %c%c\n",
+ set_current_param.alpha2[0],
+ set_current_param.alpha2[1]);
+
+ for (i = 0; i < ab->num_radios; i++) {
+ pdev = &ab->pdevs[i];
+ ar = pdev->ar;
+
+ memcpy(&ar->alpha2, &set_current_param.alpha2, 2);
+ ret = ath11k_wmi_send_set_current_country_cmd(ar, &set_current_param);
+ if (ret)
+ ath11k_warn(ar->ab,
+ "pdev id %d failed set current country code: %d\n",
+ i, ret);
+ }
+}
+
+void ath11k_core_pre_reconfigure_recovery(struct ath11k_base *ab)
+{
+ struct ath11k *ar;
+ struct ath11k_pdev *pdev;
+ int i;
+
+ spin_lock_bh(&ab->base_lock);
+ ab->stats.fw_crash_counter++;
+ spin_unlock_bh(&ab->base_lock);
+
+ for (i = 0; i < ab->num_radios; i++) {
+ pdev = &ab->pdevs[i];
+ ar = pdev->ar;
+ if (!ar || ar->state == ATH11K_STATE_OFF ||
+ ar->state == ATH11K_STATE_FTM)
+ continue;
+
+ ieee80211_stop_queues(ar->hw);
+ ath11k_mac_drain_tx(ar);
+ ar->state_11d = ATH11K_11D_IDLE;
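+ /* Complete every outstanding wait so callers blocked on firmware
+ * responses can bail out instead of stalling recovery.
+ */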
+ complete(&ar->completed_11d_scan);
+ complete(&ar->scan.started);
+ complete_all(&ar->scan.completed);
+ complete(&ar->scan.on_channel);
+ complete(&ar->peer_assoc_done);
+ complete(&ar->peer_delete_done);
+ complete(&ar->install_key_done);
+ complete(&ar->vdev_setup_done);
+ complete(&ar->vdev_delete_done);
+ complete(&ar->bss_survey_done);
+ complete(&ar->thermal.wmi_sync);
+
+ wake_up(&ar->dp.tx_empty_waitq);
+ idr_for_each(&ar->txmgmt_idr,
+ ath11k_mac_tx_mgmt_pending_free, ar);
+ idr_destroy(&ar->txmgmt_idr);
+ wake_up(&ar->txmgmt_empty_waitq);
+
+ ar->monitor_vdev_id = -1;
+ clear_bit(ATH11K_FLAG_MONITOR_STARTED, &ar->monitor_flags);
+ clear_bit(ATH11K_FLAG_MONITOR_VDEV_CREATED, &ar->monitor_flags);
+ }
+
+ wake_up(&ab->wmi_ab.tx_credits_wq);
+ wake_up(&ab->peer_mapping_wq);
+
+ reinit_completion(&ab->driver_recovery);
+}
+
+static void ath11k_core_post_reconfigure_recovery(struct ath11k_base *ab)
+{
+ struct ath11k *ar;
+ struct ath11k_pdev *pdev;
+ int i;
+
+ for (i = 0; i < ab->num_radios; i++) {
+ pdev = &ab->pdevs[i];
+ ar = pdev->ar;
+ if (!ar || ar->state == ATH11K_STATE_OFF)
+ continue;
+
+ mutex_lock(&ar->conf_mutex);
+
+ switch (ar->state) {
+ case ATH11K_STATE_ON:
+ ar->state = ATH11K_STATE_RESTARTING;
+ ath11k_core_halt(ar);
+ ieee80211_restart_hw(ar->hw);
+ break;
+ case ATH11K_STATE_OFF:
+ ath11k_warn(ab,
+ "cannot restart radio %d that hasn't been started\n",
+ i);
+ break;
+ case ATH11K_STATE_RESTARTING:
+ break;
+ case ATH11K_STATE_RESTARTED:
+ ar->state = ATH11K_STATE_WEDGED;
+ fallthrough;
+ case ATH11K_STATE_WEDGED:
+ ath11k_warn(ab,
+ "device is wedged, will not restart radio %d\n", i);
+ break;
+ case ATH11K_STATE_FTM:
+ ath11k_dbg(ab, ATH11K_DBG_TESTMODE,
+ "fw mode reset done radio %d\n", i);
+ break;
+ }
+
+ mutex_unlock(&ar->conf_mutex);
+ }
+ complete(&ab->driver_recovery);
+}
+
+static void ath11k_core_restart(struct work_struct *work)
+{
+ struct ath11k_base *ab = container_of(work, struct ath11k_base, restart_work);
+ int ret;
+
+ ret = ath11k_core_reconfigure_on_crash(ab);
+ if (ret) {
+ ath11k_err(ab, "failed to reconfigure driver on crash recovery\n");
+ return;
+ }
+
+ if (ab->is_reset)
+ complete_all(&ab->reconfigure_complete);
+
+ if (!ab->is_reset)
+ ath11k_core_post_reconfigure_recovery(ab);
+}
+
+static void ath11k_core_reset(struct work_struct *work)
+{
+ struct ath11k_base *ab = container_of(work, struct ath11k_base, reset_work);
+ int reset_count, fail_cont_count;
+ long time_left;
+
+ if (!(test_bit(ATH11K_FLAG_REGISTERED, &ab->dev_flags))) {
+ ath11k_warn(ab, "ignore reset dev flags 0x%lx\n", ab->dev_flags);
+ return;
+ }
+
+ /* Sometimes a recovery attempt fails and every following attempt
+ * fails as well, so bail out here to avoid retrying forever once
+ * recovery can no longer succeed.
+ */
+ fail_cont_count = atomic_read(&ab->fail_cont_count);
+
+ if (fail_cont_count >= ATH11K_RESET_MAX_FAIL_COUNT_FINAL)
+ return;
+
+ if (fail_cont_count >= ATH11K_RESET_MAX_FAIL_COUNT_FIRST &&
+ time_before(jiffies, ab->reset_fail_timeout))
+ return;
+
+ reset_count = atomic_inc_return(&ab->reset_count);
+
+ if (reset_count > 1) {
+ /* Occasionally another reset worker is queued before the previous
+ * one has completed; running both concurrently would let the second
+ * destroy the first's state, so wait for the previous reset here.
+ */
+ ath11k_warn(ab, "already resetting count %d\n", reset_count);
+
+ reinit_completion(&ab->reset_complete);
+ time_left = wait_for_completion_timeout(&ab->reset_complete,
+ ATH11K_RESET_TIMEOUT_HZ);
+
+ if (time_left) {
+ ath11k_dbg(ab, ATH11K_DBG_BOOT, "to skip reset\n");
+ atomic_dec(&ab->reset_count);
+ return;
+ }
+
+ ab->reset_fail_timeout = jiffies + ATH11K_RESET_FAIL_TIMEOUT_HZ;
+ /* Record the continuous recovery failure count when recovery fails */
+ atomic_inc(&ab->fail_cont_count);
+ }
+
+ ath11k_dbg(ab, ATH11K_DBG_BOOT, "reset starting\n");
+
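+ /* Mark a full reset in progress and clear the recovery bookkeeping
+ * before running the pre/post reconfigure steps below.
+ */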
+ ab->is_reset = true;
+ atomic_set(&ab->recovery_count, 0);
+ reinit_completion(&ab->recovery_start);
+ atomic_set(&ab->recovery_start_count, 0);
+
+ ath11k_core_pre_reconfigure_recovery(ab);
+
+ reinit_completion(&ab->reconfigure_complete);
+ ath11k_core_post_reconfigure_recovery(ab);
+
+ ath11k_dbg(ab, ATH11K_DBG_BOOT, "waiting recovery start...\n");
+
+ time_left = wait_for_completion_timeout(&ab->recovery_start,
+ ATH11K_RECOVER_START_TIMEOUT_HZ);
+
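+ /* Power cycle the device; the firmware boots again and recovery
+ * proceeds through the usual QMI firmware-ready path.
+ */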
+ ath11k_hif_power_down(ab);
+ ath11k_hif_power_up(ab);
+
+ ath11k_dbg(ab, ATH11K_DBG_BOOT, "reset started\n");
+}
+
+static int ath11k_init_hw_params(struct ath11k_base *ab)
+{
+ const struct ath11k_hw_params *hw_params = NULL;
+ int i;
+
+ for (i = 0; i < ARRAY_SIZE(ath11k_hw_params); i++) {
+ hw_params = &ath11k_hw_params[i];
+
+ if (hw_params->hw_rev == ab->hw_rev)
+ break;
+ }
+
+ if (i == ARRAY_SIZE(ath11k_hw_params)) {
+ ath11k_err(ab, "Unsupported hardware version: 0x%x\n", ab->hw_rev);
+ return -EINVAL;
+ }
+
+ ab->hw_params = *hw_params;
+
+ ath11k_info(ab, "%s\n", ab->hw_params.name);
+
+ return 0;
+}
+
+int ath11k_core_pre_init(struct ath11k_base *ab)
+{
+ int ret;
+
+ ret = ath11k_init_hw_params(ab);
+ if (ret) {
+ ath11k_err(ab, "failed to get hw params: %d\n", ret);
+ return ret;
+ }
+
+ return 0;
+}
+EXPORT_SYMBOL(ath11k_core_pre_init);
+
+int ath11k_core_init(struct ath11k_base *ab)
+{
+ int ret;
+
+ ret = ath11k_core_soc_create(ab);
+ if (ret) {
+ ath11k_err(ab, "failed to create soc core: %d\n", ret);
+ return ret;
+ }
+
+ return 0;
+}
+EXPORT_SYMBOL(ath11k_core_init);
+
+void ath11k_core_deinit(struct ath11k_base *ab)
+{
+ mutex_lock(&ab->core_lock);
+
+ ath11k_core_pdev_destroy(ab);
+ ath11k_core_stop(ab);
+
+ mutex_unlock(&ab->core_lock);
+
+ ath11k_hif_power_down(ab);
+ ath11k_mac_destroy(ab);
+ ath11k_core_soc_destroy(ab);
+}
+EXPORT_SYMBOL(ath11k_core_deinit);
+
+void ath11k_core_free(struct ath11k_base *ab)
+{
+ destroy_workqueue(ab->workqueue_aux);
+ destroy_workqueue(ab->workqueue);
+
+ kfree(ab);
+}
+EXPORT_SYMBOL(ath11k_core_free);
+
+struct ath11k_base *ath11k_core_alloc(struct device *dev, size_t priv_size,
+ enum ath11k_bus bus)
+{
+ struct ath11k_base *ab;
+
+ ab = kzalloc(sizeof(*ab) + priv_size, GFP_KERNEL);
+ if (!ab)
+ return NULL;
+
+ init_completion(&ab->driver_recovery);
+
+ ab->workqueue = create_singlethread_workqueue("ath11k_wq");
+ if (!ab->workqueue)
+ goto err_sc_free;
+
+ ab->workqueue_aux = create_singlethread_workqueue("ath11k_aux_wq");
+ if (!ab->workqueue_aux)
+ goto err_free_wq;
+
+ mutex_init(&ab->core_lock);
+ mutex_init(&ab->tbl_mtx_lock);
+ spin_lock_init(&ab->base_lock);
+ mutex_init(&ab->vdev_id_11d_lock);
+ init_completion(&ab->reset_complete);
+ init_completion(&ab->reconfigure_complete);
+ init_completion(&ab->recovery_start);
+
+ INIT_LIST_HEAD(&ab->peers);
+ init_waitqueue_head(&ab->peer_mapping_wq);
+ init_waitqueue_head(&ab->wmi_ab.tx_credits_wq);
+ init_waitqueue_head(&ab->qmi.cold_boot_waitq);
+ INIT_WORK(&ab->restart_work, ath11k_core_restart);
+ INIT_WORK(&ab->update_11d_work, ath11k_update_11d);
+ INIT_WORK(&ab->reset_work, ath11k_core_reset);
+ timer_setup(&ab->rx_replenish_retry, ath11k_ce_rx_replenish_retry, 0);
+ init_completion(&ab->htc_suspend);
+ init_completion(&ab->wow.wakeup_completed);
+
+ ab->dev = dev;
+ ab->hif.bus = bus;
+
+ return ab;
+
+err_free_wq:
+ destroy_workqueue(ab->workqueue);
+err_sc_free:
+ kfree(ab);
+ return NULL;
+}
+EXPORT_SYMBOL(ath11k_core_alloc);
+
+MODULE_DESCRIPTION("Core module for Qualcomm Atheros 802.11ax wireless LAN cards.");
+MODULE_LICENSE("Dual BSD/GPL");
diff --git a/drivers/net/wireless/ath/ath11k/core.h b/drivers/net/wireless/ath/ath11k/core.h
new file mode 100644
index 0000000000..b044477624
--- /dev/null
+++ b/drivers/net/wireless/ath/ath11k/core.h
@@ -0,0 +1,1254 @@
+/* SPDX-License-Identifier: BSD-3-Clause-Clear */
+/*
+ * Copyright (c) 2018-2019 The Linux Foundation. All rights reserved.
+ * Copyright (c) 2021-2023 Qualcomm Innovation Center, Inc. All rights reserved.
+ */
+
+#ifndef ATH11K_CORE_H
+#define ATH11K_CORE_H
+
+#include <linux/types.h>
+#include <linux/interrupt.h>
+#include <linux/irq.h>
+#include <linux/bitfield.h>
+#include <linux/dmi.h>
+#include <linux/ctype.h>
+#include <linux/rhashtable.h>
+#include <linux/average.h>
+#include "qmi.h"
+#include "htc.h"
+#include "wmi.h"
+#include "hal.h"
+#include "dp.h"
+#include "ce.h"
+#include "mac.h"
+#include "hw.h"
+#include "hal_rx.h"
+#include "reg.h"
+#include "thermal.h"
+#include "dbring.h"
+#include "spectral.h"
+#include "wow.h"
+
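+/* Shift a value into a register field; relies on _f##_LSB and _f##_MASK
+ * being defined for the field.
+ */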
+#define SM(_v, _f) (((_v) << _f##_LSB) & _f##_MASK)
+
+#define ATH11K_TX_MGMT_NUM_PENDING_MAX 512
+
+#define ATH11K_TX_MGMT_TARGET_MAX_SUPPORT_WMI 64
+
+/* Pending management packets threshold for dropping probe responses */
+#define ATH11K_PRB_RSP_DROP_THRESHOLD ((ATH11K_TX_MGMT_TARGET_MAX_SUPPORT_WMI * 3) / 4)
+
+#define ATH11K_INVALID_HW_MAC_ID 0xFF
+#define ATH11K_CONNECTION_LOSS_HZ (3 * HZ)
+
+/* SMBIOS type containing Board Data File Name Extension */
+#define ATH11K_SMBIOS_BDF_EXT_TYPE 0xF8
+
+/* SMBIOS type structure length (excluding strings-set) */
+#define ATH11K_SMBIOS_BDF_EXT_LENGTH 0x9
+
+/* The magic used by QCA spec */
+#define ATH11K_SMBIOS_BDF_EXT_MAGIC "BDF_"
+
+extern unsigned int ath11k_frame_mode;
+extern bool ath11k_ftm_mode;
+
+#define ATH11K_SCAN_TIMEOUT_HZ (20 * HZ)
+
+#define ATH11K_MON_TIMER_INTERVAL 10
+#define ATH11K_RESET_TIMEOUT_HZ (20 * HZ)
+#define ATH11K_RESET_MAX_FAIL_COUNT_FIRST 3
+#define ATH11K_RESET_MAX_FAIL_COUNT_FINAL 5
+#define ATH11K_RESET_FAIL_TIMEOUT_HZ (20 * HZ)
+#define ATH11K_RECONFIGURE_TIMEOUT_HZ (10 * HZ)
+#define ATH11K_RECOVER_START_TIMEOUT_HZ (20 * HZ)
+
+enum ath11k_supported_bw {
+ ATH11K_BW_20 = 0,
+ ATH11K_BW_40 = 1,
+ ATH11K_BW_80 = 2,
+ ATH11K_BW_160 = 3,
+};
+
+enum ath11k_bdf_search {
+ ATH11K_BDF_SEARCH_DEFAULT,
+ ATH11K_BDF_SEARCH_BUS_AND_BOARD,
+};
+
+enum wme_ac {
+ WME_AC_BE,
+ WME_AC_BK,
+ WME_AC_VI,
+ WME_AC_VO,
+ WME_NUM_AC
+};
+
+#define ATH11K_HT_MCS_MAX 7
+#define ATH11K_VHT_MCS_MAX 9
+#define ATH11K_HE_MCS_MAX 11
+
+enum ath11k_crypt_mode {
+ /* Only use hardware crypto engine */
+ ATH11K_CRYPT_MODE_HW,
+ /* Only use software crypto */
+ ATH11K_CRYPT_MODE_SW,
+};
+
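+/* Map an 802.11 TID to its WME access category: TIDs 0/3 map to best
+ * effort, 1/2 to background, 4/5 to video and the remainder to voice.
+ */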
+static inline enum wme_ac ath11k_tid_to_ac(u32 tid)
+{
+ return (((tid == 0) || (tid == 3)) ? WME_AC_BE :
+ ((tid == 1) || (tid == 2)) ? WME_AC_BK :
+ ((tid == 4) || (tid == 5)) ? WME_AC_VI :
+ WME_AC_VO);
+}
+
+enum ath11k_skb_flags {
+ ATH11K_SKB_HW_80211_ENCAP = BIT(0),
+ ATH11K_SKB_CIPHER_SET = BIT(1),
+};
+
+struct ath11k_skb_cb {
+ dma_addr_t paddr;
+ u8 eid;
+ u8 flags;
+ u32 cipher;
+ struct ath11k *ar;
+ struct ieee80211_vif *vif;
+} __packed;
+
+struct ath11k_skb_rxcb {
+ dma_addr_t paddr;
+ bool is_first_msdu;
+ bool is_last_msdu;
+ bool is_continuation;
+ bool is_mcbc;
+ bool is_eapol;
+ struct hal_rx_desc *rx_desc;
+ u8 err_rel_src;
+ u8 err_code;
+ u8 mac_id;
+ u8 unmapped;
+ u8 is_frag;
+ u8 tid;
+ u16 peer_id;
+ u16 seq_no;
+};
+
+enum ath11k_hw_rev {
+ ATH11K_HW_IPQ8074,
+ ATH11K_HW_QCA6390_HW20,
+ ATH11K_HW_IPQ6018_HW10,
+ ATH11K_HW_QCN9074_HW10,
+ ATH11K_HW_WCN6855_HW20,
+ ATH11K_HW_WCN6855_HW21,
+ ATH11K_HW_WCN6750_HW10,
+ ATH11K_HW_IPQ5018_HW10,
+};
+
+enum ath11k_firmware_mode {
+ /* the default mode, standard 802.11 functionality */
+ ATH11K_FIRMWARE_MODE_NORMAL,
+
+ /* factory tests etc */
+ ATH11K_FIRMWARE_MODE_FTM,
+
+ /* Cold boot calibration */
+ ATH11K_FIRMWARE_MODE_COLD_BOOT = 7,
+};
+
+extern bool ath11k_cold_boot_cal;
+
+#define ATH11K_IRQ_NUM_MAX 52
+#define ATH11K_EXT_IRQ_NUM_MAX 16
+
+struct ath11k_ext_irq_grp {
+ struct ath11k_base *ab;
+ u32 irqs[ATH11K_EXT_IRQ_NUM_MAX];
+ u32 num_irq;
+ u32 grp_id;
+ u64 timestamp;
+ bool napi_enabled;
+ struct napi_struct napi;
+ struct net_device napi_ndev;
+};
+
+enum ath11k_smbios_cc_type {
+ /* disable country code setting from SMBIOS */
+ ATH11K_SMBIOS_CC_DISABLE = 0,
+
+ /* set country code by ANSI country name, based on ISO3166-1 alpha2 */
+ ATH11K_SMBIOS_CC_ISO = 1,
+
+ /* worldwide regdomain */
+ ATH11K_SMBIOS_CC_WW = 2,
+};
+
+struct ath11k_smbios_bdf {
+ struct dmi_header hdr;
+
+ u8 features_disabled;
+
+ /* enum ath11k_smbios_cc_type */
+ u8 country_code_flag;
+
+ /* To set a specific country, first set country_code_flag to
+ * ATH11K_SMBIOS_CC_ISO. For the United States the country code
+ * value is 0x5553 ("US", 'U' = 0x55, 'S' = 0x53); for Indonesia
+ * ("ID") it is 0x4944 ('I' = 0x49, 'D' = 0x44). If country_code_flag
+ * is ATH11K_SMBIOS_CC_WW, the worldwide regulatory setting is used.
+ */
+ u16 cc_code;
+
+ u8 bdf_enabled;
+ u8 bdf_ext[];
+} __packed;
+
+#define HEHANDLE_CAP_PHYINFO_SIZE 3
+#define HECAP_PHYINFO_SIZE 9
+#define HECAP_MACINFO_SIZE 5
+#define HECAP_TXRX_MCS_NSS_SIZE 2
+#define HECAP_PPET16_PPET8_MAX_SIZE 25
+
+#define HE_PPET16_PPET8_SIZE 8
+
+/* 802.11ax PPE (PPDU packet Extension) threshold */
+struct he_ppe_threshold {
+ u32 numss_m1;
+ u32 ru_mask;
+ u32 ppet16_ppet8_ru3_ru0[HE_PPET16_PPET8_SIZE];
+};
+
+struct ath11k_he {
+ u8 hecap_macinfo[HECAP_MACINFO_SIZE];
+ u32 hecap_rxmcsnssmap;
+ u32 hecap_txmcsnssmap;
+ u32 hecap_phyinfo[HEHANDLE_CAP_PHYINFO_SIZE];
+ struct he_ppe_threshold hecap_ppet;
+ u32 heop_param;
+};
+
+#define MAX_RADIOS 3
+
+/* ipq5018 hw param macros */
+#define MAX_RADIOS_5018 1
+#define CE_CNT_5018 6
+#define TARGET_CE_CNT_5018 9
+#define SVC_CE_MAP_LEN_5018 17
+#define RXDMA_PER_PDEV_5018 1
+
+enum {
+ WMI_HOST_TP_SCALE_MAX = 0,
+ WMI_HOST_TP_SCALE_50 = 1,
+ WMI_HOST_TP_SCALE_25 = 2,
+ WMI_HOST_TP_SCALE_12 = 3,
+ WMI_HOST_TP_SCALE_MIN = 4,
+ WMI_HOST_TP_SCALE_SIZE = 5,
+};
+
+enum ath11k_scan_state {
+ ATH11K_SCAN_IDLE,
+ ATH11K_SCAN_STARTING,
+ ATH11K_SCAN_RUNNING,
+ ATH11K_SCAN_ABORTING,
+};
+
+enum ath11k_11d_state {
+ ATH11K_11D_IDLE,
+ ATH11K_11D_PREPARING,
+ ATH11K_11D_RUNNING,
+};
+
+enum ath11k_dev_flags {
+ ATH11K_CAC_RUNNING,
+ ATH11K_FLAG_CORE_REGISTERED,
+ ATH11K_FLAG_CRASH_FLUSH,
+ ATH11K_FLAG_RAW_MODE,
+ ATH11K_FLAG_HW_CRYPTO_DISABLED,
+ ATH11K_FLAG_BTCOEX,
+ ATH11K_FLAG_RECOVERY,
+ ATH11K_FLAG_UNREGISTERING,
+ ATH11K_FLAG_REGISTERED,
+ ATH11K_FLAG_QMI_FAIL,
+ ATH11K_FLAG_HTC_SUSPEND_COMPLETE,
+ ATH11K_FLAG_CE_IRQ_ENABLED,
+ ATH11K_FLAG_EXT_IRQ_ENABLED,
+ ATH11K_FLAG_FIXED_MEM_RGN,
+ ATH11K_FLAG_DEVICE_INIT_DONE,
+ ATH11K_FLAG_MULTI_MSI_VECTORS,
+ ATH11K_FLAG_FTM_SEGMENTED,
+};
+
+enum ath11k_monitor_flags {
+ ATH11K_FLAG_MONITOR_CONF_ENABLED,
+ ATH11K_FLAG_MONITOR_STARTED,
+ ATH11K_FLAG_MONITOR_VDEV_CREATED,
+};
+
+#define ATH11K_IPV6_UC_TYPE 0
+#define ATH11K_IPV6_AC_TYPE 1
+
+#define ATH11K_IPV6_MAX_COUNT 16
+#define ATH11K_IPV4_MAX_COUNT 2
+
+struct ath11k_arp_ns_offload {
+ u8 ipv4_addr[ATH11K_IPV4_MAX_COUNT][4];
+ u32 ipv4_count;
+ u32 ipv6_count;
+ u8 ipv6_addr[ATH11K_IPV6_MAX_COUNT][16];
+ u8 self_ipv6_addr[ATH11K_IPV6_MAX_COUNT][16];
+ u8 ipv6_type[ATH11K_IPV6_MAX_COUNT];
+ bool ipv6_valid[ATH11K_IPV6_MAX_COUNT];
+ u8 mac_addr[ETH_ALEN];
+};
+
+struct ath11k_rekey_data {
+ u8 kck[NL80211_KCK_LEN];
+ u8 kek[NL80211_KEK_LEN];
+ u64 replay_ctr;
+ bool enable_offload;
+};
+
+struct ath11k_vif {
+ u32 vdev_id;
+ enum wmi_vdev_type vdev_type;
+ enum wmi_vdev_subtype vdev_subtype;
+ u32 beacon_interval;
+ u32 dtim_period;
+ u16 ast_hash;
+ u16 ast_idx;
+ u16 tcl_metadata;
+ u8 hal_addr_search_flags;
+ u8 search_type;
+
+ struct ath11k *ar;
+ struct ieee80211_vif *vif;
+
+ u16 tx_seq_no;
+ struct wmi_wmm_params_all_arg wmm_params;
+ struct list_head list;
+ union {
+ struct {
+ u32 uapsd;
+ } sta;
+ struct {
+ /* 127 stations; wmi limit */
+ u8 tim_bitmap[16];
+ u8 tim_len;
+ u32 ssid_len;
+ u8 ssid[IEEE80211_MAX_SSID_LEN];
+ bool hidden_ssid;
+ /* P2P_IE with NoA attribute for P2P_GO case */
+ u32 noa_len;
+ u8 *noa_data;
+ } ap;
+ } u;
+
+ bool is_started;
+ bool is_up;
+ bool ftm_responder;
+ bool spectral_enabled;
+ bool ps;
+ u32 aid;
+ u8 bssid[ETH_ALEN];
+ struct cfg80211_bitrate_mask bitrate_mask;
+ struct delayed_work connection_loss_work;
+ int num_legacy_stations;
+ int rtscts_prot_mode;
+ int txpower;
+ bool rsnie_present;
+ bool wpaie_present;
+ bool bcca_zero_sent;
+ bool do_not_send_tmpl;
+ struct ieee80211_chanctx_conf chanctx;
+ struct ath11k_arp_ns_offload arp_ns_offload;
+ struct ath11k_rekey_data rekey_data;
+
+#ifdef CONFIG_ATH11K_DEBUGFS
+ struct dentry *debugfs_twt;
+#endif /* CONFIG_ATH11K_DEBUGFS */
+};
+
+struct ath11k_vif_iter {
+ u32 vdev_id;
+ struct ath11k_vif *arvif;
+};
+
+struct ath11k_rx_peer_stats {
+ u64 num_msdu;
+ u64 num_mpdu_fcs_ok;
+ u64 num_mpdu_fcs_err;
+ u64 tcp_msdu_count;
+ u64 udp_msdu_count;
+ u64 other_msdu_count;
+ u64 ampdu_msdu_count;
+ u64 non_ampdu_msdu_count;
+ u64 stbc_count;
+ u64 beamformed_count;
+ u64 mcs_count[HAL_RX_MAX_MCS + 1];
+ u64 nss_count[HAL_RX_MAX_NSS];
+ u64 bw_count[HAL_RX_BW_MAX];
+ u64 gi_count[HAL_RX_GI_MAX];
+ u64 coding_count[HAL_RX_SU_MU_CODING_MAX];
+ u64 tid_count[IEEE80211_NUM_TIDS + 1];
+ u64 pream_cnt[HAL_RX_PREAMBLE_MAX];
+ u64 reception_type[HAL_RX_RECEPTION_TYPE_MAX];
+ u64 rx_duration;
+ u64 dcm_count;
+ u64 ru_alloc_cnt[HAL_RX_RU_ALLOC_TYPE_MAX];
+};
+
+#define ATH11K_HE_MCS_NUM 12
+#define ATH11K_VHT_MCS_NUM 10
+#define ATH11K_BW_NUM 4
+#define ATH11K_NSS_NUM 4
+#define ATH11K_LEGACY_NUM 12
+#define ATH11K_GI_NUM 4
+#define ATH11K_HT_MCS_NUM 32
+
+enum ath11k_pkt_rx_err {
+ ATH11K_PKT_RX_ERR_FCS,
+ ATH11K_PKT_RX_ERR_TKIP,
+ ATH11K_PKT_RX_ERR_CRYPT,
+ ATH11K_PKT_RX_ERR_PEER_IDX_INVAL,
+ ATH11K_PKT_RX_ERR_MAX,
+};
+
+enum ath11k_ampdu_subfrm_num {
+ ATH11K_AMPDU_SUBFRM_NUM_10,
+ ATH11K_AMPDU_SUBFRM_NUM_20,
+ ATH11K_AMPDU_SUBFRM_NUM_30,
+ ATH11K_AMPDU_SUBFRM_NUM_40,
+ ATH11K_AMPDU_SUBFRM_NUM_50,
+ ATH11K_AMPDU_SUBFRM_NUM_60,
+ ATH11K_AMPDU_SUBFRM_NUM_MORE,
+ ATH11K_AMPDU_SUBFRM_NUM_MAX,
+};
+
+enum ath11k_amsdu_subfrm_num {
+ ATH11K_AMSDU_SUBFRM_NUM_1,
+ ATH11K_AMSDU_SUBFRM_NUM_2,
+ ATH11K_AMSDU_SUBFRM_NUM_3,
+ ATH11K_AMSDU_SUBFRM_NUM_4,
+ ATH11K_AMSDU_SUBFRM_NUM_MORE,
+ ATH11K_AMSDU_SUBFRM_NUM_MAX,
+};
+
+enum ath11k_counter_type {
+ ATH11K_COUNTER_TYPE_BYTES,
+ ATH11K_COUNTER_TYPE_PKTS,
+ ATH11K_COUNTER_TYPE_MAX,
+};
+
+enum ath11k_stats_type {
+ ATH11K_STATS_TYPE_SUCC,
+ ATH11K_STATS_TYPE_FAIL,
+ ATH11K_STATS_TYPE_RETRY,
+ ATH11K_STATS_TYPE_AMPDU,
+ ATH11K_STATS_TYPE_MAX,
+};
+
+struct ath11k_htt_data_stats {
+ u64 legacy[ATH11K_COUNTER_TYPE_MAX][ATH11K_LEGACY_NUM];
+ u64 ht[ATH11K_COUNTER_TYPE_MAX][ATH11K_HT_MCS_NUM];
+ u64 vht[ATH11K_COUNTER_TYPE_MAX][ATH11K_VHT_MCS_NUM];
+ u64 he[ATH11K_COUNTER_TYPE_MAX][ATH11K_HE_MCS_NUM];
+ u64 bw[ATH11K_COUNTER_TYPE_MAX][ATH11K_BW_NUM];
+ u64 nss[ATH11K_COUNTER_TYPE_MAX][ATH11K_NSS_NUM];
+ u64 gi[ATH11K_COUNTER_TYPE_MAX][ATH11K_GI_NUM];
+};
+
+struct ath11k_htt_tx_stats {
+ struct ath11k_htt_data_stats stats[ATH11K_STATS_TYPE_MAX];
+ u64 tx_duration;
+ u64 ba_fails;
+ u64 ack_fails;
+};
+
+struct ath11k_per_ppdu_tx_stats {
+ u16 succ_pkts;
+ u16 failed_pkts;
+ u16 retry_pkts;
+ u32 succ_bytes;
+ u32 failed_bytes;
+ u32 retry_bytes;
+};
+
+DECLARE_EWMA(avg_rssi, 10, 8)
+
+struct ath11k_sta {
+ struct ath11k_vif *arvif;
+
+ /* the following are protected by ar->data_lock */
+ u32 changed; /* IEEE80211_RC_* */
+ u32 bw;
+ u32 nss;
+ u32 smps;
+ enum hal_pn_type pn_type;
+
+ struct work_struct update_wk;
+ struct work_struct set_4addr_wk;
+ struct rate_info txrate;
+ u32 peer_nss;
+ struct rate_info last_txrate;
+ u64 rx_duration;
+ u64 tx_duration;
+ u8 rssi_comb;
+ struct ewma_avg_rssi avg_rssi;
+ s8 rssi_beacon;
+ s8 chain_signal[IEEE80211_MAX_CHAINS];
+ struct ath11k_htt_tx_stats *tx_stats;
+ struct ath11k_rx_peer_stats *rx_stats;
+
+#ifdef CONFIG_MAC80211_DEBUGFS
+ /* protected by conf_mutex */
+ bool aggr_mode;
+#endif
+
+ bool use_4addr_set;
+ u16 tcl_metadata;
+
+ /* Protected with ar->data_lock */
+ enum ath11k_wmi_peer_ps_state peer_ps_state;
+ u64 ps_start_time;
+ u64 ps_start_jiffies;
+ u64 ps_total_duration;
+ bool peer_current_ps_valid;
+
+ u32 bw_prev;
+};
+
+#define ATH11K_MIN_5G_FREQ 4150
+#define ATH11K_MIN_6G_FREQ 5925
+#define ATH11K_MAX_6G_FREQ 7115
+#define ATH11K_NUM_CHANS 102
+#define ATH11K_MAX_5G_CHAN 177
+
+enum ath11k_state {
+ ATH11K_STATE_OFF,
+ ATH11K_STATE_ON,
+ ATH11K_STATE_RESTARTING,
+ ATH11K_STATE_RESTARTED,
+ ATH11K_STATE_WEDGED,
+ ATH11K_STATE_FTM,
+ /* Add other states as required */
+};
+
+/* Antenna noise floor */
+#define ATH11K_DEFAULT_NOISE_FLOOR -95
+
+#define ATH11K_INVALID_RSSI_FULL -1
+
+#define ATH11K_INVALID_RSSI_EMPTY -128
+
+struct ath11k_fw_stats {
+ struct dentry *debugfs_fwstats;
+ u32 pdev_id;
+ u32 stats_id;
+ struct list_head pdevs;
+ struct list_head vdevs;
+ struct list_head bcn;
+};
+
+struct ath11k_dbg_htt_stats {
+ u8 type;
+ u8 reset;
+ struct debug_htt_stats_req *stats_req;
+ /* protects shared stats req buffer */
+ spinlock_t lock;
+};
+
+#define MAX_MODULE_ID_BITMAP_WORDS 16
+
+struct ath11k_debug {
+ struct dentry *debugfs_pdev;
+ struct ath11k_dbg_htt_stats htt_stats;
+ u32 extd_tx_stats;
+ u32 extd_rx_stats;
+ u32 pktlog_filter;
+ u32 pktlog_mode;
+ u32 pktlog_peer_valid;
+ u8 pktlog_peer_addr[ETH_ALEN];
+ u32 rx_filter;
+ u32 mem_offset;
+ u32 module_id_bitmap[MAX_MODULE_ID_BITMAP_WORDS];
+ struct ath11k_debug_dbr *dbr_debug[WMI_DIRECT_BUF_MAX];
+};
+
+struct ath11k_per_peer_tx_stats {
+ u32 succ_bytes;
+ u32 retry_bytes;
+ u32 failed_bytes;
+ u16 succ_pkts;
+ u16 retry_pkts;
+ u16 failed_pkts;
+ u32 duration;
+ u8 ba_fails;
+ bool is_ampdu;
+};
+
+#define ATH11K_FLUSH_TIMEOUT (5 * HZ)
+#define ATH11K_VDEV_DELETE_TIMEOUT_HZ (5 * HZ)
+
+struct ath11k {
+ struct ath11k_base *ab;
+ struct ath11k_pdev *pdev;
+ struct ieee80211_hw *hw;
+ struct ieee80211_ops *ops;
+ struct ath11k_pdev_wmi *wmi;
+ struct ath11k_pdev_dp dp;
+ u8 mac_addr[ETH_ALEN];
+ struct ath11k_he ar_he;
+ enum ath11k_state state;
+ bool supports_6ghz;
+ struct {
+ struct completion started;
+ struct completion completed;
+ struct completion on_channel;
+ struct delayed_work timeout;
+ enum ath11k_scan_state state;
+ bool is_roc;
+ int vdev_id;
+ int roc_freq;
+ bool roc_notify;
+ } scan;
+
+ struct {
+ struct ieee80211_supported_band sbands[NUM_NL80211_BANDS];
+ struct ieee80211_sband_iftype_data
+ iftype[NUM_NL80211_BANDS][NUM_NL80211_IFTYPES];
+ } mac;
+
+ unsigned long dev_flags;
+ unsigned int filter_flags;
+ unsigned long monitor_flags;
+ u32 min_tx_power;
+ u32 max_tx_power;
+ u32 txpower_limit_2g;
+ u32 txpower_limit_5g;
+ u32 txpower_scale;
+ u32 power_scale;
+ u32 chan_tx_pwr;
+ u32 num_stations;
+ u32 max_num_stations;
+ /* To synchronize concurrent synchronous mac80211 callback operations,
+ * concurrent debugfs configuration and concurrent FW statistics events.
+ */
+ struct mutex conf_mutex;
+ /* protects the radio specific data like debug stats, ppdu_stats_info stats,
+ * vdev_stop_status info, scan data, ath11k_sta info, ath11k_vif info,
+ * channel context data, survey info, test mode data.
+ */
+ spinlock_t data_lock;
+
+ struct list_head arvifs;
+ /* should never be NULL; needed for regular htt rx */
+ struct ieee80211_channel *rx_channel;
+
+ /* valid during scan; needed for mgmt rx during scan */
+ struct ieee80211_channel *scan_channel;
+
+ u8 cfg_tx_chainmask;
+ u8 cfg_rx_chainmask;
+ u8 num_rx_chains;
+ u8 num_tx_chains;
+ /* pdev_idx starts from 0 whereas pdev->pdev_id starts with 1 */
+ u8 pdev_idx;
+ u8 lmac_id;
+
+ struct completion peer_assoc_done;
+ struct completion peer_delete_done;
+
+ int install_key_status;
+ struct completion install_key_done;
+
+ int last_wmi_vdev_start_status;
+ struct completion vdev_setup_done;
+ struct completion vdev_delete_done;
+
+ int num_peers;
+ int max_num_peers;
+ u32 num_started_vdevs;
+ u32 num_created_vdevs;
+ unsigned long long allocated_vdev_map;
+
+ struct idr txmgmt_idr;
+ /* protects txmgmt_idr data */
+ spinlock_t txmgmt_idr_lock;
+ atomic_t num_pending_mgmt_tx;
+ wait_queue_head_t txmgmt_empty_waitq;
+
+ /* cycle count is reported twice for each visited channel during scan.
+ * access protected by data_lock
+ */
+ u32 survey_last_rx_clear_count;
+ u32 survey_last_cycle_count;
+
+ /* Channel info events are expected to come in pairs without and with
+ * COMPLETE flag set respectively for each channel visit during scan.
+ *
+ * However there are deviations from this rule. This flag is used to
+ * avoid reporting garbage data.
+ */
+ bool ch_info_can_report_survey;
+ struct survey_info survey[ATH11K_NUM_CHANS];
+ struct completion bss_survey_done;
+
+ struct work_struct regd_update_work;
+
+ struct work_struct wmi_mgmt_tx_work;
+ struct sk_buff_head wmi_mgmt_tx_queue;
+
+ struct ath11k_wow wow;
+ struct completion target_suspend;
+ bool target_suspend_ack;
+ struct ath11k_per_peer_tx_stats peer_tx_stats;
+ struct list_head ppdu_stats_info;
+ u32 ppdu_stat_list_depth;
+
+ struct ath11k_per_peer_tx_stats cached_stats;
+ u32 last_ppdu_id;
+ u32 cached_ppdu_id;
+ int monitor_vdev_id;
+ struct completion fw_mode_reset;
+ u8 ftm_msgref;
+#ifdef CONFIG_ATH11K_DEBUGFS
+ struct ath11k_debug debug;
+#endif
+#ifdef CONFIG_ATH11K_SPECTRAL
+ struct ath11k_spectral spectral;
+#endif
+ bool dfs_block_radar_events;
+ struct ath11k_thermal thermal;
+ u32 vdev_id_11d_scan;
+ struct completion completed_11d_scan;
+ enum ath11k_11d_state state_11d;
+ bool regdom_set_by_user;
+ int hw_rate_code;
+ u8 twt_enabled;
+ bool nlo_enabled;
+ u8 alpha2[REG_ALPHA2_LEN + 1];
+ struct ath11k_fw_stats fw_stats;
+ struct completion fw_stats_complete;
+ bool fw_stats_done;
+
+ /* protected by conf_mutex */
+ bool ps_state_enable;
+ bool ps_timekeeper_enable;
+};
+
+struct ath11k_band_cap {
+ u32 phy_id;
+ u32 max_bw_supported;
+ u32 ht_cap_info;
+ u32 he_cap_info[2];
+ u32 he_mcs;
+ u32 he_cap_phy_info[PSOC_HOST_MAX_PHY_SIZE];
+ struct ath11k_ppe_threshold he_ppet;
+ u16 he_6ghz_capa;
+};
+
+struct ath11k_pdev_cap {
+ u32 supported_bands;
+ u32 ampdu_density;
+ u32 vht_cap;
+ u32 vht_mcs;
+ u32 he_mcs;
+ u32 tx_chain_mask;
+ u32 rx_chain_mask;
+ u32 tx_chain_mask_shift;
+ u32 rx_chain_mask_shift;
+ struct ath11k_band_cap band[NUM_NL80211_BANDS];
+ bool nss_ratio_enabled;
+ u8 nss_ratio_info;
+};
+
+struct ath11k_pdev {
+ struct ath11k *ar;
+ u32 pdev_id;
+ struct ath11k_pdev_cap cap;
+ u8 mac_addr[ETH_ALEN];
+};
+
+struct ath11k_board_data {
+ const struct firmware *fw;
+ const void *data;
+ size_t len;
+};
+
+struct ath11k_pci_ops {
+ int (*wakeup)(struct ath11k_base *ab);
+ void (*release)(struct ath11k_base *ab);
+ int (*get_msi_irq)(struct ath11k_base *ab, unsigned int vector);
+ void (*window_write32)(struct ath11k_base *ab, u32 offset, u32 value);
+ u32 (*window_read32)(struct ath11k_base *ab, u32 offset);
+};
+
+/* IPQ8074 HW channel counters frequency value in hertz */
+#define IPQ8074_CC_FREQ_HERTZ 320000
+
+struct ath11k_bp_stats {
+ /* Head Pointer reported by the last HTT Backpressure event for the ring */
+ u16 hp;
+
+ /* Tail Pointer reported by the last HTT Backpressure event for the ring */
+ u16 tp;
+
+ /* Number of Backpressure events received for the ring */
+ u32 count;
+
+ /* Last recorded event timestamp */
+ unsigned long jiffies;
+};
+
+struct ath11k_dp_ring_bp_stats {
+ struct ath11k_bp_stats umac_ring_bp_stats[HTT_SW_UMAC_RING_IDX_MAX];
+ struct ath11k_bp_stats lmac_ring_bp_stats[HTT_SW_LMAC_RING_IDX_MAX][MAX_RADIOS];
+};
+
+struct ath11k_soc_dp_tx_err_stats {
+ /* TCL Ring Descriptor unavailable */
+ u32 desc_na[DP_TCL_NUM_RING_MAX];
+ /* Other failures during dp_tx due to mem allocation failure
+ * idr unavailable etc.
+ */
+ atomic_t misc_fail;
+};
+
+struct ath11k_soc_dp_stats {
+ u32 err_ring_pkts;
+ u32 invalid_rbm;
+ u32 rxdma_error[HAL_REO_ENTR_RING_RXDMA_ECODE_MAX];
+ u32 reo_error[HAL_REO_DEST_RING_ERROR_CODE_MAX];
+ u32 hal_reo_error[DP_REO_DST_RING_MAX];
+ struct ath11k_soc_dp_tx_err_stats tx_err;
+ struct ath11k_dp_ring_bp_stats bp_stats;
+};
+
+struct ath11k_msi_user {
+ char *name;
+ int num_vectors;
+ u32 base_vector;
+};
+
+struct ath11k_msi_config {
+ int total_vectors;
+ int total_users;
+ struct ath11k_msi_user *users;
+ u16 hw_rev;
+};
+
+/* Master structure to hold the hw data which may be used in core module */
+struct ath11k_base {
+ enum ath11k_hw_rev hw_rev;
+ enum ath11k_firmware_mode fw_mode;
+ struct platform_device *pdev;
+ struct device *dev;
+ struct ath11k_qmi qmi;
+ struct ath11k_wmi_base wmi_ab;
+ struct completion fw_ready;
+ int num_radios;
+ /* HW channel counters frequency value in hertz common to all MACs */
+ u32 cc_freq_hz;
+
+ struct ath11k_htc htc;
+
+ struct ath11k_dp dp;
+
+ void __iomem *mem;
+ void __iomem *mem_ce;
+ unsigned long mem_len;
+
+ struct {
+ enum ath11k_bus bus;
+ const struct ath11k_hif_ops *ops;
+ } hif;
+
+ struct {
+ struct completion wakeup_completed;
+ } wow;
+
+ struct ath11k_ce ce;
+ struct timer_list rx_replenish_retry;
+ struct ath11k_hal hal;
+ /* To synchronize core_start/core_stop */
+ struct mutex core_lock;
+ /* Protects data like peers */
+ spinlock_t base_lock;
+ struct ath11k_pdev pdevs[MAX_RADIOS];
+ struct {
+ enum WMI_HOST_WLAN_BAND supported_bands;
+ u32 pdev_id;
+ } target_pdev_ids[MAX_RADIOS];
+ u8 target_pdev_count;
+ struct ath11k_pdev __rcu *pdevs_active[MAX_RADIOS];
+ struct ath11k_hal_reg_capabilities_ext hal_reg_cap[MAX_RADIOS];
+ unsigned long long free_vdev_map;
+
+ /* To synchronize rhash tbl write operation */
+ struct mutex tbl_mtx_lock;
+
+ /* The rhashtable containing struct ath11k_peer keyed by mac addr */
+ struct rhashtable *rhead_peer_addr;
+ struct rhashtable_params rhash_peer_addr_param;
+
+ /* The rhashtable containing struct ath11k_peer keyed by id */
+ struct rhashtable *rhead_peer_id;
+ struct rhashtable_params rhash_peer_id_param;
+
+ struct list_head peers;
+ wait_queue_head_t peer_mapping_wq;
+ u8 mac_addr[ETH_ALEN];
+ bool wmi_ready;
+ u32 wlan_init_status;
+ int irq_num[ATH11K_IRQ_NUM_MAX];
+ struct ath11k_ext_irq_grp ext_irq_grp[ATH11K_EXT_IRQ_GRP_NUM_MAX];
+ struct ath11k_targ_cap target_caps;
+ u32 ext_service_bitmap[WMI_SERVICE_EXT_BM_SIZE];
+ bool pdevs_macaddr_valid;
+ int bd_api;
+
+ struct ath11k_hw_params hw_params;
+
+ const struct firmware *cal_file;
+
+ /* Below regd's are protected by ab->data_lock */
+ /* This is the regd set for every radio
+ * by the firmware during initialization
+ */
+ struct ieee80211_regdomain *default_regd[MAX_RADIOS];
+ /* This regd is set during dynamic country setting
+ * This may or may not be used during the runtime
+ */
+ struct ieee80211_regdomain *new_regd[MAX_RADIOS];
+
+ /* Current DFS Regulatory */
+ enum ath11k_dfs_region dfs_region;
+#ifdef CONFIG_ATH11K_DEBUGFS
+ struct dentry *debugfs_soc;
+#endif
+ struct ath11k_soc_dp_stats soc_stats;
+
+ unsigned long dev_flags;
+ struct completion driver_recovery;
+ struct workqueue_struct *workqueue;
+ struct work_struct restart_work;
+ struct work_struct update_11d_work;
+ u8 new_alpha2[3];
+ struct workqueue_struct *workqueue_aux;
+ struct work_struct reset_work;
+ atomic_t reset_count;
+ atomic_t recovery_count;
+ atomic_t recovery_start_count;
+ bool is_reset;
+ struct completion reset_complete;
+ struct completion reconfigure_complete;
+ struct completion recovery_start;
+ /* continuous recovery fail count */
+ atomic_t fail_cont_count;
+ unsigned long reset_fail_timeout;
+ struct {
+ /* protected by data_lock */
+ u32 fw_crash_counter;
+ } stats;
+ u32 pktlog_defs_checksum;
+
+ struct ath11k_dbring_cap *db_caps;
+ u32 num_db_cap;
+
+ /* To synchronize 11d scan vdev id */
+ struct mutex vdev_id_11d_lock;
+ struct timer_list mon_reap_timer;
+
+ struct completion htc_suspend;
+
+ struct {
+ enum ath11k_bdf_search bdf_search;
+ u32 vendor;
+ u32 device;
+ u32 subsystem_vendor;
+ u32 subsystem_device;
+ } id;
+
+ struct {
+ struct {
+ const struct ath11k_msi_config *config;
+ u32 ep_base_data;
+ u32 irqs[32];
+ u32 addr_lo;
+ u32 addr_hi;
+ } msi;
+
+ const struct ath11k_pci_ops *ops;
+ } pci;
+
+#ifdef CONFIG_NL80211_TESTMODE
+ struct {
+ u32 data_pos;
+ u32 expected_seq;
+ u8 *eventdata;
+ } testmode;
+#endif
+
+ /* must be last */
+ u8 drv_priv[] __aligned(sizeof(void *));
+};
+
+struct ath11k_fw_stats_pdev {
+ struct list_head list;
+
+ /* PDEV stats */
+ s32 ch_noise_floor;
+ /* Cycles spent transmitting frames */
+ u32 tx_frame_count;
+ /* Cycles spent receiving frames */
+ u32 rx_frame_count;
+ /* Total channel busy time, evidently */
+ u32 rx_clear_count;
+ /* Total on-channel time */
+ u32 cycle_count;
+ u32 phy_err_count;
+ u32 chan_tx_power;
+ u32 ack_rx_bad;
+ u32 rts_bad;
+ u32 rts_good;
+ u32 fcs_bad;
+ u32 no_beacons;
+ u32 mib_int_count;
+
+ /* PDEV TX stats */
+ /* Num HTT cookies queued to dispatch list */
+ s32 comp_queued;
+ /* Num HTT cookies dispatched */
+ s32 comp_delivered;
+ /* Num MSDUs queued to WAL */
+ s32 msdu_enqued;
+ /* Num MPDUs queued to WAL */
+ s32 mpdu_enqued;
+ /* Num MSDUs dropped by WMM limit */
+ s32 wmm_drop;
+ /* Num Local frames queued */
+ s32 local_enqued;
+ /* Num Local frames done */
+ s32 local_freed;
+ /* Num queued to HW */
+ s32 hw_queued;
+ /* Num PPDU reaped from HW */
+ s32 hw_reaped;
+ /* Num underruns */
+ s32 underrun;
+ /* Num hw paused */
+ u32 hw_paused;
+ /* Num PPDUs cleaned up in TX abort */
+ s32 tx_abort;
+ /* Num MPDUs requeued by SW */
+ s32 mpdus_requeued;
+ /* excessive retries */
+ u32 tx_ko;
+ u32 tx_xretry;
+ /* data hw rate code */
+ u32 data_rc;
+ /* Scheduler self triggers */
+ u32 self_triggers;
+ /* frames dropped due to excessive sw retries */
+ u32 sw_retry_failure;
+ /* illegal rate phy errors */
+ u32 illgl_rate_phy_err;
+ /* wal pdev continuous xretry */
+ u32 pdev_cont_xretry;
+ /* wal pdev tx timeouts */
+ u32 pdev_tx_timeout;
+ /* wal pdev resets */
+ u32 pdev_resets;
+ /* frames dropped due to non-availability of stateless TIDs */
+ u32 stateless_tid_alloc_failure;
+ /* PHY/BB underrun */
+ u32 phy_underrun;
+ /* MPDU is more than txop limit */
+ u32 txop_ovf;
+ /* Num sequences posted */
+ u32 seq_posted;
+ /* Num sequences failed in queueing */
+ u32 seq_failed_queueing;
+ /* Num sequences completed */
+ u32 seq_completed;
+ /* Num sequences restarted */
+ u32 seq_restarted;
+ /* Num of MU sequences posted */
+ u32 mu_seq_posted;
+ /* Num MPDUs flushed by SW, HWPAUSED, SW TXABORT
+ * (Reset,channel change)
+ */
+ s32 mpdus_sw_flush;
+ /* Num MPDUs filtered by HW, all filter condition (TTL expired) */
+ s32 mpdus_hw_filter;
+ /* Num MPDUs truncated by PDG (TXOP, TBTT,
+ * PPDU_duration based on rate, dyn_bw)
+ */
+ s32 mpdus_truncated;
+ /* Num MPDUs that were tried but didn't receive an ACK or BA */
+ s32 mpdus_ack_failed;
+ /* Num MPDUs dropped due to expiry */
+ s32 mpdus_expired;
+
+ /* PDEV RX stats */
+ /* Counts any change in ring routing mid-PPDU */
+ s32 mid_ppdu_route_change;
+ /* Total number of statuses processed */
+ s32 status_rcvd;
+ /* Extra frags on rings 0-3 */
+ s32 r0_frags;
+ s32 r1_frags;
+ s32 r2_frags;
+ s32 r3_frags;
+ /* MSDUs / MPDUs delivered to HTT */
+ s32 htt_msdus;
+ s32 htt_mpdus;
+ /* MSDUs / MPDUs delivered to local stack */
+ s32 loc_msdus;
+ s32 loc_mpdus;
+ /* AMSDUs that have more MSDUs than the status ring size */
+ s32 oversize_amsdu;
+ /* Number of PHY errors */
+ s32 phy_errs;
+ /* Number of PHY errors drops */
+ s32 phy_err_drop;
+ /* Number of mpdu errors - FCS, MIC, ENC etc. */
+ s32 mpdu_errs;
+ /* Num overflow errors */
+ s32 rx_ovfl_errs;
+};
+
+struct ath11k_fw_stats_vdev {
+ struct list_head list;
+
+ u32 vdev_id;
+ u32 beacon_snr;
+ u32 data_snr;
+ u32 num_tx_frames[WLAN_MAX_AC];
+ u32 num_rx_frames;
+ u32 num_tx_frames_retries[WLAN_MAX_AC];
+ u32 num_tx_frames_failures[WLAN_MAX_AC];
+ u32 num_rts_fail;
+ u32 num_rts_success;
+ u32 num_rx_err;
+ u32 num_rx_discard;
+ u32 num_tx_not_acked;
+ u32 tx_rate_history[MAX_TX_RATE_VALUES];
+ u32 beacon_rssi_history[MAX_TX_RATE_VALUES];
+};
+
+struct ath11k_fw_stats_bcn {
+ struct list_head list;
+
+ u32 vdev_id;
+ u32 tx_bcn_succ_cnt;
+ u32 tx_bcn_outage_cnt;
+};
+
+void ath11k_fw_stats_init(struct ath11k *ar);
+void ath11k_fw_stats_pdevs_free(struct list_head *head);
+void ath11k_fw_stats_vdevs_free(struct list_head *head);
+void ath11k_fw_stats_bcn_free(struct list_head *head);
+void ath11k_fw_stats_free(struct ath11k_fw_stats *stats);
+
+extern const struct ce_pipe_config ath11k_target_ce_config_wlan_ipq8074[];
+extern const struct service_to_pipe ath11k_target_service_to_ce_map_wlan_ipq8074[];
+extern const struct service_to_pipe ath11k_target_service_to_ce_map_wlan_ipq6018[];
+
+extern const struct ce_pipe_config ath11k_target_ce_config_wlan_qca6390[];
+extern const struct service_to_pipe ath11k_target_service_to_ce_map_wlan_qca6390[];
+
+extern const struct ce_pipe_config ath11k_target_ce_config_wlan_ipq5018[];
+extern const struct service_to_pipe ath11k_target_service_to_ce_map_wlan_ipq5018[];
+
+extern const struct ce_pipe_config ath11k_target_ce_config_wlan_qcn9074[];
+extern const struct service_to_pipe ath11k_target_service_to_ce_map_wlan_qcn9074[];
+int ath11k_core_qmi_firmware_ready(struct ath11k_base *ab);
+int ath11k_core_pre_init(struct ath11k_base *ab);
+int ath11k_core_init(struct ath11k_base *ath11k);
+void ath11k_core_deinit(struct ath11k_base *ath11k);
+struct ath11k_base *ath11k_core_alloc(struct device *dev, size_t priv_size,
+ enum ath11k_bus bus);
+void ath11k_core_free(struct ath11k_base *ath11k);
+int ath11k_core_fetch_bdf(struct ath11k_base *ath11k,
+ struct ath11k_board_data *bd);
+int ath11k_core_fetch_regdb(struct ath11k_base *ab, struct ath11k_board_data *bd);
+int ath11k_core_fetch_board_data_api_1(struct ath11k_base *ab,
+ struct ath11k_board_data *bd,
+ const char *name);
+void ath11k_core_free_bdf(struct ath11k_base *ab, struct ath11k_board_data *bd);
+int ath11k_core_check_dt(struct ath11k_base *ath11k);
+int ath11k_core_check_smbios(struct ath11k_base *ab);
+void ath11k_core_halt(struct ath11k *ar);
+int ath11k_core_resume(struct ath11k_base *ab);
+int ath11k_core_suspend(struct ath11k_base *ab);
+void ath11k_core_pre_reconfigure_recovery(struct ath11k_base *ab);
+bool ath11k_core_coldboot_cal_support(struct ath11k_base *ab);
+
+const struct firmware *ath11k_core_firmware_request(struct ath11k_base *ab,
+ const char *filename);
+
+static inline const char *ath11k_scan_state_str(enum ath11k_scan_state state)
+{
+ switch (state) {
+ case ATH11K_SCAN_IDLE:
+ return "idle";
+ case ATH11K_SCAN_STARTING:
+ return "starting";
+ case ATH11K_SCAN_RUNNING:
+ return "running";
+ case ATH11K_SCAN_ABORTING:
+ return "aborting";
+ }
+
+ return "unknown";
+}
+
+static inline struct ath11k_skb_cb *ATH11K_SKB_CB(struct sk_buff *skb)
+{
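+ /* The driver's skb cb must fit inside mac80211's driver_data area */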
+ BUILD_BUG_ON(sizeof(struct ath11k_skb_cb) >
+ IEEE80211_TX_INFO_DRIVER_DATA_SIZE);
+ return (struct ath11k_skb_cb *)&IEEE80211_SKB_CB(skb)->driver_data;
+}
+
+static inline struct ath11k_skb_rxcb *ATH11K_SKB_RXCB(struct sk_buff *skb)
+{
+ BUILD_BUG_ON(sizeof(struct ath11k_skb_rxcb) > sizeof(skb->cb));
+ return (struct ath11k_skb_rxcb *)skb->cb;
+}
+
+static inline struct ath11k_vif *ath11k_vif_to_arvif(struct ieee80211_vif *vif)
+{
+ return (struct ath11k_vif *)vif->drv_priv;
+}
+
+static inline struct ath11k *ath11k_ab_to_ar(struct ath11k_base *ab,
+ int mac_id)
+{
+ return ab->pdevs[ath11k_hw_mac_id_to_pdev_id(&ab->hw_params, mac_id)].ar;
+}
+
+static inline void ath11k_core_create_firmware_path(struct ath11k_base *ab,
+ const char *filename,
+ void *buf, size_t buf_len)
+{
+ snprintf(buf, buf_len, "%s/%s/%s", ATH11K_FW_DIR,
+ ab->hw_params.fw.dir, filename);
+}
+
+static inline const char *ath11k_bus_str(enum ath11k_bus bus)
+{
+ switch (bus) {
+ case ATH11K_BUS_PCI:
+ return "pci";
+ case ATH11K_BUS_AHB:
+ return "ahb";
+ }
+
+ return "unknown";
+}
+
+#endif /* ATH11K_CORE_H */
diff --git a/drivers/net/wireless/ath/ath11k/dbring.c b/drivers/net/wireless/ath/ath11k/dbring.c
new file mode 100644
index 0000000000..5536e86423
--- /dev/null
+++ b/drivers/net/wireless/ath/ath11k/dbring.c
@@ -0,0 +1,399 @@
+// SPDX-License-Identifier: BSD-3-Clause-Clear
+/*
+ * Copyright (c) 2019-2020 The Linux Foundation. All rights reserved.
+ */
+
+#include "core.h"
+#include "debug.h"
+
+#define ATH11K_DB_MAGIC_VALUE 0xdeadbeaf
+
+int ath11k_dbring_validate_buffer(struct ath11k *ar, void *buffer, u32 size)
+{
+ u32 *temp;
+ int idx;
+
+ size = size >> 2;
+
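+ /* If any 32-bit word still holds the magic pattern, the device never
+ * overwrote that part of the buffer, so reject it as invalid.
+ */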
+ for (idx = 0, temp = buffer; idx < size; idx++, temp++) {
+ if (*temp == ATH11K_DB_MAGIC_VALUE)
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+static void ath11k_dbring_fill_magic_value(struct ath11k *ar,
+ void *buffer, u32 size)
+{
+ /* memset32() fills the buffer payload with ATH11K_DB_MAGIC_VALUE;
+ * its size argument is the number of u32 values to store, not the
+ * number of bytes.
+ */
+ size = size / sizeof(u32);
+
+ memset32(buffer, ATH11K_DB_MAGIC_VALUE, size);
+}
+
+static int ath11k_dbring_bufs_replenish(struct ath11k *ar,
+ struct ath11k_dbring *ring,
+ struct ath11k_dbring_element *buff,
+ enum wmi_direct_buffer_module id)
+{
+ struct ath11k_base *ab = ar->ab;
+ struct hal_srng *srng;
+ dma_addr_t paddr;
+ void *ptr_aligned, *ptr_unaligned, *desc;
+ int ret;
+ int buf_id;
+ u32 cookie;
+
+ srng = &ab->hal.srng_list[ring->refill_srng.ring_id];
+
+ lockdep_assert_held(&srng->lock);
+
+ ath11k_hal_srng_access_begin(ab, srng);
+
+ ptr_unaligned = buff->payload;
+ ptr_aligned = PTR_ALIGN(ptr_unaligned, ring->buf_align);
+ ath11k_dbring_fill_magic_value(ar, ptr_aligned, ring->buf_sz);
+ paddr = dma_map_single(ab->dev, ptr_aligned, ring->buf_sz,
+ DMA_FROM_DEVICE);
+
+ ret = dma_mapping_error(ab->dev, paddr);
+ if (ret)
+ goto err;
+
+ spin_lock_bh(&ring->idr_lock);
+ buf_id = idr_alloc(&ring->bufs_idr, buff, 0, ring->bufs_max, GFP_ATOMIC);
+ spin_unlock_bh(&ring->idr_lock);
+ if (buf_id < 0) {
+ ret = -ENOBUFS;
+ goto err_dma_unmap;
+ }
+
+ desc = ath11k_hal_srng_src_get_next_entry(ab, srng);
+ if (!desc) {
+ ret = -ENOENT;
+ goto err_idr_remove;
+ }
+
+ buff->paddr = paddr;
+
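+ /* Encode the pdev index and IDR buffer id into the cookie so the
+ * buffer release event can map the descriptor back to this buffer.
+ */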
+ cookie = FIELD_PREP(DP_RXDMA_BUF_COOKIE_PDEV_ID, ar->pdev_idx) |
+ FIELD_PREP(DP_RXDMA_BUF_COOKIE_BUF_ID, buf_id);
+
+ ath11k_hal_rx_buf_addr_info_set(desc, paddr, cookie, 0);
+
+ ath11k_debugfs_add_dbring_entry(ar, id, ATH11K_DBG_DBR_EVENT_REPLENISH, srng);
+ ath11k_hal_srng_access_end(ab, srng);
+
+ return 0;
+
+err_idr_remove:
+ spin_lock_bh(&ring->idr_lock);
+ idr_remove(&ring->bufs_idr, buf_id);
+ spin_unlock_bh(&ring->idr_lock);
+err_dma_unmap:
+ dma_unmap_single(ab->dev, paddr, ring->buf_sz,
+ DMA_FROM_DEVICE);
+err:
+ ath11k_hal_srng_access_end(ab, srng);
+ return ret;
+}
+
+static int ath11k_dbring_fill_bufs(struct ath11k *ar,
+ struct ath11k_dbring *ring,
+ enum wmi_direct_buffer_module id)
+{
+ struct ath11k_dbring_element *buff;
+ struct hal_srng *srng;
+ int num_remain, req_entries, num_free;
+ u32 align;
+ int size, ret;
+
+ srng = &ar->ab->hal.srng_list[ring->refill_srng.ring_id];
+
+ spin_lock_bh(&srng->lock);
+
+ num_free = ath11k_hal_srng_src_num_free(ar->ab, srng, true);
+ req_entries = min(num_free, ring->bufs_max);
+ num_remain = req_entries;
+ align = ring->buf_align;
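+ /* Over-allocate by (align - 1) bytes so the payload can later be
+ * aligned with PTR_ALIGN() without running past the allocation.
+ */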
+ size = ring->buf_sz + align - 1;
+
+ while (num_remain > 0) {
+ buff = kzalloc(sizeof(*buff), GFP_ATOMIC);
+ if (!buff)
+ break;
+
+ buff->payload = kzalloc(size, GFP_ATOMIC);
+ if (!buff->payload) {
+ kfree(buff);
+ break;
+ }
+ ret = ath11k_dbring_bufs_replenish(ar, ring, buff, id);
+ if (ret) {
+ ath11k_warn(ar->ab, "failed to replenish db ring num_remain %d req_ent %d\n",
+ num_remain, req_entries);
+ kfree(buff->payload);
+ kfree(buff);
+ break;
+ }
+ num_remain--;
+ }
+
+ spin_unlock_bh(&srng->lock);
+
+ return num_remain;
+}
+
+int ath11k_dbring_wmi_cfg_setup(struct ath11k *ar,
+ struct ath11k_dbring *ring,
+ enum wmi_direct_buffer_module id)
+{
+ struct ath11k_wmi_pdev_dma_ring_cfg_req_cmd param = {0};
+ int ret;
+
+ if (id >= WMI_DIRECT_BUF_MAX)
+ return -EINVAL;
+
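+ /* Describe the refill ring (base address, head/tail index addresses
+ * and sizing) to the firmware via WMI.
+ */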
+ param.pdev_id = DP_SW2HW_MACID(ring->pdev_id);
+ param.module_id = id;
+ param.base_paddr_lo = lower_32_bits(ring->refill_srng.paddr);
+ param.base_paddr_hi = upper_32_bits(ring->refill_srng.paddr);
+ param.head_idx_paddr_lo = lower_32_bits(ring->hp_addr);
+ param.head_idx_paddr_hi = upper_32_bits(ring->hp_addr);
+ param.tail_idx_paddr_lo = lower_32_bits(ring->tp_addr);
+ param.tail_idx_paddr_hi = upper_32_bits(ring->tp_addr);
+ param.num_elems = ring->bufs_max;
+ param.buf_size = ring->buf_sz;
+ param.num_resp_per_event = ring->num_resp_per_event;
+ param.event_timeout_ms = ring->event_timeout_ms;
+
+ ret = ath11k_wmi_pdev_dma_ring_cfg(ar, &param);
+ if (ret) {
+ ath11k_warn(ar->ab, "failed to setup db ring cfg\n");
+ return ret;
+ }
+
+ return 0;
+}
+
+int ath11k_dbring_set_cfg(struct ath11k *ar, struct ath11k_dbring *ring,
+ u32 num_resp_per_event, u32 event_timeout_ms,
+ int (*handler)(struct ath11k *,
+ struct ath11k_dbring_data *))
+{
+ if (WARN_ON(!ring))
+ return -EINVAL;
+
+ ring->num_resp_per_event = num_resp_per_event;
+ ring->event_timeout_ms = event_timeout_ms;
+ ring->handler = handler;
+
+ return 0;
+}
+
+int ath11k_dbring_buf_setup(struct ath11k *ar,
+ struct ath11k_dbring *ring,
+ struct ath11k_dbring_cap *db_cap)
+{
+ struct ath11k_base *ab = ar->ab;
+ struct hal_srng *srng;
+ int ret;
+
+ srng = &ab->hal.srng_list[ring->refill_srng.ring_id];
+ ring->bufs_max = ring->refill_srng.size /
+ ath11k_hal_srng_get_entrysize(ab, HAL_RXDMA_DIR_BUF);
+
+ ring->buf_sz = db_cap->min_buf_sz;
+ ring->buf_align = db_cap->min_buf_align;
+ ring->pdev_id = db_cap->pdev_id;
+ ring->hp_addr = ath11k_hal_srng_get_hp_addr(ar->ab, srng);
+ ring->tp_addr = ath11k_hal_srng_get_tp_addr(ar->ab, srng);
+
+ ret = ath11k_dbring_fill_bufs(ar, ring, db_cap->id);
+
+ return ret;
+}
+
+int ath11k_dbring_srng_setup(struct ath11k *ar, struct ath11k_dbring *ring,
+ int ring_num, int num_entries)
+{
+ int ret;
+
+ ret = ath11k_dp_srng_setup(ar->ab, &ring->refill_srng, HAL_RXDMA_DIR_BUF,
+ ring_num, ar->pdev_idx, num_entries);
+ if (ret < 0) {
+ ath11k_warn(ar->ab, "failed to setup srng: %d ring_id %d\n",
+ ret, ring_num);
+ goto err;
+ }
+
+ return 0;
+err:
+ ath11k_dp_srng_cleanup(ar->ab, &ring->refill_srng);
+ return ret;
+}
+
+int ath11k_dbring_get_cap(struct ath11k_base *ab,
+ u8 pdev_idx,
+ enum wmi_direct_buffer_module id,
+ struct ath11k_dbring_cap *db_cap)
+{
+ int i;
+
+ if (!ab->num_db_cap || !ab->db_caps)
+ return -ENOENT;
+
+ if (id >= WMI_DIRECT_BUF_MAX)
+ return -EINVAL;
+
+ for (i = 0; i < ab->num_db_cap; i++) {
+ if (pdev_idx == ab->db_caps[i].pdev_id &&
+ id == ab->db_caps[i].id) {
+ *db_cap = ab->db_caps[i];
+
+ return 0;
+ }
+ }
+
+ return -ENOENT;
+}
+
+int ath11k_dbring_buffer_release_event(struct ath11k_base *ab,
+ struct ath11k_dbring_buf_release_event *ev)
+{
+ struct ath11k_dbring *ring;
+ struct hal_srng *srng;
+ struct ath11k *ar;
+ struct ath11k_dbring_element *buff;
+ struct ath11k_dbring_data handler_data;
+ struct ath11k_buffer_addr desc;
+ u8 *vaddr_unalign;
+ u32 num_entry, num_buff_reaped;
+ u8 pdev_idx, rbm, module_id;
+ u32 cookie;
+ int buf_id;
+ int size;
+ dma_addr_t paddr;
+ int ret = 0;
+
+ pdev_idx = ev->fixed.pdev_id;
+ module_id = ev->fixed.module_id;
+
+ if (pdev_idx >= ab->num_radios) {
+ ath11k_warn(ab, "Invalid pdev id %d\n", pdev_idx);
+ return -EINVAL;
+ }
+
+ if (ev->fixed.num_buf_release_entry !=
+ ev->fixed.num_meta_data_entry) {
+ ath11k_warn(ab, "Buffer entry %d mismatch meta entry %d\n",
+ ev->fixed.num_buf_release_entry,
+ ev->fixed.num_meta_data_entry);
+ return -EINVAL;
+ }
+
+ ar = ab->pdevs[pdev_idx].ar;
+
+ rcu_read_lock();
+ if (!rcu_dereference(ab->pdevs_active[pdev_idx])) {
+ ret = -EINVAL;
+ goto rcu_unlock;
+ }
+
+ switch (ev->fixed.module_id) {
+ case WMI_DIRECT_BUF_SPECTRAL:
+ ring = ath11k_spectral_get_dbring(ar);
+ break;
+ default:
+ ring = NULL;
+ ath11k_warn(ab, "Recv dma buffer release ev on unsupp module %d\n",
+ ev->fixed.module_id);
+ break;
+ }
+
+ if (!ring) {
+ ret = -EINVAL;
+ goto rcu_unlock;
+ }
+
+ srng = &ab->hal.srng_list[ring->refill_srng.ring_id];
+ num_entry = ev->fixed.num_buf_release_entry;
+ size = ring->buf_sz + ring->buf_align - 1;
+ num_buff_reaped = 0;
+
+ spin_lock_bh(&srng->lock);
+
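+ /* Reap each released entry: look up the buffer by cookie, unmap it,
+ * pass the payload to the module handler, then replenish the ring
+ * with the same element.
+ */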
+ while (num_buff_reaped < num_entry) {
+ desc.info0 = ev->buf_entry[num_buff_reaped].paddr_lo;
+ desc.info1 = ev->buf_entry[num_buff_reaped].paddr_hi;
+ handler_data.meta = ev->meta_data[num_buff_reaped];
+
+ num_buff_reaped++;
+
+ ath11k_hal_rx_buf_addr_info_get(&desc, &paddr, &cookie, &rbm);
+
+ buf_id = FIELD_GET(DP_RXDMA_BUF_COOKIE_BUF_ID, cookie);
+
+ spin_lock_bh(&ring->idr_lock);
+ buff = idr_find(&ring->bufs_idr, buf_id);
+ if (!buff) {
+ spin_unlock_bh(&ring->idr_lock);
+ continue;
+ }
+ idr_remove(&ring->bufs_idr, buf_id);
+ spin_unlock_bh(&ring->idr_lock);
+
+ dma_unmap_single(ab->dev, buff->paddr, ring->buf_sz,
+ DMA_FROM_DEVICE);
+
+ ath11k_debugfs_add_dbring_entry(ar, module_id,
+ ATH11K_DBG_DBR_EVENT_RX, srng);
+
+ if (ring->handler) {
+ vaddr_unalign = buff->payload;
+ handler_data.data = PTR_ALIGN(vaddr_unalign,
+ ring->buf_align);
+ handler_data.data_sz = ring->buf_sz;
+
+ ring->handler(ar, &handler_data);
+ }
+
+ buff->paddr = 0;
+ memset(buff->payload, 0, size);
+ ath11k_dbring_bufs_replenish(ar, ring, buff, module_id);
+ }
+
+ spin_unlock_bh(&srng->lock);
+
+rcu_unlock:
+ rcu_read_unlock();
+
+ return ret;
+}
+
+void ath11k_dbring_srng_cleanup(struct ath11k *ar, struct ath11k_dbring *ring)
+{
+ ath11k_dp_srng_cleanup(ar->ab, &ring->refill_srng);
+}
+
+void ath11k_dbring_buf_cleanup(struct ath11k *ar, struct ath11k_dbring *ring)
+{
+ struct ath11k_dbring_element *buff;
+ int buf_id;
+
+ spin_lock_bh(&ring->idr_lock);
+ idr_for_each_entry(&ring->bufs_idr, buff, buf_id) {
+ idr_remove(&ring->bufs_idr, buf_id);
+ dma_unmap_single(ar->ab->dev, buff->paddr,
+ ring->buf_sz, DMA_FROM_DEVICE);
+ kfree(buff->payload);
+ kfree(buff);
+ }
+
+ idr_destroy(&ring->bufs_idr);
+ spin_unlock_bh(&ring->idr_lock);
+}
diff --git a/drivers/net/wireless/ath/ath11k/dbring.h b/drivers/net/wireless/ath/ath11k/dbring.h
new file mode 100644
index 0000000000..ef906c687b
--- /dev/null
+++ b/drivers/net/wireless/ath/ath11k/dbring.h
@@ -0,0 +1,81 @@
+/* SPDX-License-Identifier: BSD-3-Clause-Clear */
+/*
+ * Copyright (c) 2019-2020 The Linux Foundation. All rights reserved.
+ */
+
+#ifndef ATH11K_DBRING_H
+#define ATH11K_DBRING_H
+
+#include <linux/types.h>
+#include <linux/idr.h>
+#include <linux/spinlock.h>
+#include "dp.h"
+
+struct ath11k_dbring_element {
+ dma_addr_t paddr;
+ u8 *payload;
+};
+
+struct ath11k_dbring_data {
+ void *data;
+ u32 data_sz;
+ struct wmi_dma_buf_release_meta_data meta;
+};
+
+struct ath11k_dbring_buf_release_event {
+ struct ath11k_wmi_dma_buf_release_fixed_param fixed;
+ struct wmi_dma_buf_release_entry *buf_entry;
+ struct wmi_dma_buf_release_meta_data *meta_data;
+ u32 num_buf_entry;
+ u32 num_meta;
+};
+
+struct ath11k_dbring_cap {
+ u32 pdev_id;
+ enum wmi_direct_buffer_module id;
+ u32 min_elem;
+ u32 min_buf_sz;
+ u32 min_buf_align;
+};
+
+struct ath11k_dbring {
+ struct dp_srng refill_srng;
+ struct idr bufs_idr;
+ /* Protects bufs_idr */
+ spinlock_t idr_lock;
+ dma_addr_t tp_addr;
+ dma_addr_t hp_addr;
+ int bufs_max;
+ u32 pdev_id;
+ u32 buf_sz;
+ u32 buf_align;
+ u32 num_resp_per_event;
+ u32 event_timeout_ms;
+ int (*handler)(struct ath11k *, struct ath11k_dbring_data *);
+};
+
+int ath11k_dbring_set_cfg(struct ath11k *ar,
+ struct ath11k_dbring *ring,
+ u32 num_resp_per_event,
+ u32 event_timeout_ms,
+ int (*handler)(struct ath11k *,
+ struct ath11k_dbring_data *));
+int ath11k_dbring_wmi_cfg_setup(struct ath11k *ar,
+ struct ath11k_dbring *ring,
+ enum wmi_direct_buffer_module id);
+int ath11k_dbring_buf_setup(struct ath11k *ar,
+ struct ath11k_dbring *ring,
+ struct ath11k_dbring_cap *db_cap);
+int ath11k_dbring_srng_setup(struct ath11k *ar, struct ath11k_dbring *ring,
+ int ring_num, int num_entries);
+int ath11k_dbring_buffer_release_event(struct ath11k_base *ab,
+ struct ath11k_dbring_buf_release_event *ev);
+int ath11k_dbring_get_cap(struct ath11k_base *ab,
+ u8 pdev_idx,
+ enum wmi_direct_buffer_module id,
+ struct ath11k_dbring_cap *db_cap);
+void ath11k_dbring_srng_cleanup(struct ath11k *ar, struct ath11k_dbring *ring);
+void ath11k_dbring_buf_cleanup(struct ath11k *ar, struct ath11k_dbring *ring);
+int ath11k_dbring_validate_buffer(struct ath11k *ar, void *data, u32 size);
+
+#endif /* ATH11K_DBRING_H */
diff --git a/drivers/net/wireless/ath/ath11k/debug.c b/drivers/net/wireless/ath/ath11k/debug.c
new file mode 100644
index 0000000000..f5c8a34c88
--- /dev/null
+++ b/drivers/net/wireless/ath/ath11k/debug.c
@@ -0,0 +1,110 @@
+// SPDX-License-Identifier: BSD-3-Clause-Clear
+/*
+ * Copyright (c) 2018-2019 The Linux Foundation. All rights reserved.
+ */
+
+#include <linux/vmalloc.h>
+#include "core.h"
+#include "debug.h"
+
+void ath11k_info(struct ath11k_base *ab, const char *fmt, ...)
+{
+ struct va_format vaf = {
+ .fmt = fmt,
+ };
+ va_list args;
+
+ va_start(args, fmt);
+ vaf.va = &args;
+ dev_info(ab->dev, "%pV", &vaf);
+ trace_ath11k_log_info(ab, &vaf);
+ va_end(args);
+}
+EXPORT_SYMBOL(ath11k_info);
+
+void ath11k_err(struct ath11k_base *ab, const char *fmt, ...)
+{
+ struct va_format vaf = {
+ .fmt = fmt,
+ };
+ va_list args;
+
+ va_start(args, fmt);
+ vaf.va = &args;
+ dev_err(ab->dev, "%pV", &vaf);
+ trace_ath11k_log_err(ab, &vaf);
+ va_end(args);
+}
+EXPORT_SYMBOL(ath11k_err);
+
+void ath11k_warn(struct ath11k_base *ab, const char *fmt, ...)
+{
+ struct va_format vaf = {
+ .fmt = fmt,
+ };
+ va_list args;
+
+ va_start(args, fmt);
+ vaf.va = &args;
+ dev_warn_ratelimited(ab->dev, "%pV", &vaf);
+ trace_ath11k_log_warn(ab, &vaf);
+ va_end(args);
+}
+EXPORT_SYMBOL(ath11k_warn);
+
+#ifdef CONFIG_ATH11K_DEBUG
+
+void __ath11k_dbg(struct ath11k_base *ab, enum ath11k_debug_mask mask,
+ const char *fmt, ...)
+{
+ struct va_format vaf;
+ va_list args;
+
+ va_start(args, fmt);
+
+ vaf.fmt = fmt;
+ vaf.va = &args;
+
+ if (ath11k_debug_mask & mask)
+ dev_printk(KERN_DEBUG, ab->dev, "%s %pV", ath11k_dbg_str(mask), &vaf);
+
+ trace_ath11k_log_dbg(ab, mask, &vaf);
+
+ va_end(args);
+}
+EXPORT_SYMBOL(__ath11k_dbg);
+
+void ath11k_dbg_dump(struct ath11k_base *ab,
+ enum ath11k_debug_mask mask,
+ const char *msg, const char *prefix,
+ const void *buf, size_t len)
+{
+ char linebuf[256];
+ size_t linebuflen;
+ const void *ptr;
+
+ if (ath11k_debug_mask & mask) {
+ if (msg)
+ __ath11k_dbg(ab, mask, "%s\n", msg);
+
+ for (ptr = buf; (ptr - buf) < len; ptr += 16) {
+ linebuflen = 0;
+ linebuflen += scnprintf(linebuf + linebuflen,
+ sizeof(linebuf) - linebuflen,
+ "%s%08x: ",
+ (prefix ? prefix : ""),
+ (unsigned int)(ptr - buf));
+ hex_dump_to_buffer(ptr, len - (ptr - buf), 16, 1,
+ linebuf + linebuflen,
+ sizeof(linebuf) - linebuflen, true);
+ dev_printk(KERN_DEBUG, ab->dev, "%s\n", linebuf);
+ }
+ }
+
+ /* tracing code doesn't like null strings */
+ trace_ath11k_log_dbg_dump(ab, msg ? msg : "", prefix ? prefix : "",
+ buf, len);
+}
+EXPORT_SYMBOL(ath11k_dbg_dump);
+
+#endif /* CONFIG_ATH11K_DEBUG */
diff --git a/drivers/net/wireless/ath/ath11k/debug.h b/drivers/net/wireless/ath/ath11k/debug.h
new file mode 100644
index 0000000000..9c52804ef8
--- /dev/null
+++ b/drivers/net/wireless/ath/ath11k/debug.h
@@ -0,0 +1,113 @@
+/* SPDX-License-Identifier: BSD-3-Clause-Clear */
+/*
+ * Copyright (c) 2018-2019 The Linux Foundation. All rights reserved.
+ * Copyright (c) 2023 Qualcomm Innovation Center, Inc. All rights reserved.
+ */
+
+#ifndef _ATH11K_DEBUG_H_
+#define _ATH11K_DEBUG_H_
+
+#include "trace.h"
+#include "debugfs.h"
+
+enum ath11k_debug_mask {
+ ATH11K_DBG_AHB = 0x00000001,
+ ATH11K_DBG_WMI = 0x00000002,
+ ATH11K_DBG_HTC = 0x00000004,
+ ATH11K_DBG_DP_HTT = 0x00000008,
+ ATH11K_DBG_MAC = 0x00000010,
+ ATH11K_DBG_BOOT = 0x00000020,
+ ATH11K_DBG_QMI = 0x00000040,
+ ATH11K_DBG_DATA = 0x00000080,
+ ATH11K_DBG_MGMT = 0x00000100,
+ ATH11K_DBG_REG = 0x00000200,
+ ATH11K_DBG_TESTMODE = 0x00000400,
+ ATH11K_DBG_HAL = 0x00000800,
+ ATH11K_DBG_PCI = 0x00001000,
+ ATH11K_DBG_DP_TX = 0x00002000,
+ ATH11K_DBG_DP_RX = 0x00004000,
+ ATH11K_DBG_CE = 0x00008000,
+};
+
+static inline const char *ath11k_dbg_str(enum ath11k_debug_mask mask)
+{
+ switch (mask) {
+ case ATH11K_DBG_AHB:
+ return "ahb";
+ case ATH11K_DBG_WMI:
+ return "wmi";
+ case ATH11K_DBG_HTC:
+ return "htc";
+ case ATH11K_DBG_DP_HTT:
+ return "dp_htt";
+ case ATH11K_DBG_MAC:
+ return "mac";
+ case ATH11K_DBG_BOOT:
+ return "boot";
+ case ATH11K_DBG_QMI:
+ return "qmi";
+ case ATH11K_DBG_DATA:
+ return "data";
+ case ATH11K_DBG_MGMT:
+ return "mgmt";
+ case ATH11K_DBG_REG:
+ return "reg";
+ case ATH11K_DBG_TESTMODE:
+ return "testmode";
+ case ATH11K_DBG_HAL:
+ return "hal";
+ case ATH11K_DBG_PCI:
+ return "pci";
+ case ATH11K_DBG_DP_TX:
+ return "dp_tx";
+ case ATH11K_DBG_DP_RX:
+ return "dp_rx";
+ case ATH11K_DBG_CE:
+ return "ce";
+
+ /* no default handler to allow compiler to check that the
+ * enum is fully handled
+ */
+ }
+
+ return "<?>";
+}
+
+__printf(2, 3) void ath11k_info(struct ath11k_base *ab, const char *fmt, ...);
+__printf(2, 3) void ath11k_err(struct ath11k_base *ab, const char *fmt, ...);
+__printf(2, 3) void ath11k_warn(struct ath11k_base *ab, const char *fmt, ...);
+
+extern unsigned int ath11k_debug_mask;
+
+#ifdef CONFIG_ATH11K_DEBUG
+__printf(3, 4) void __ath11k_dbg(struct ath11k_base *ab,
+ enum ath11k_debug_mask mask,
+ const char *fmt, ...);
+void ath11k_dbg_dump(struct ath11k_base *ab,
+ enum ath11k_debug_mask mask,
+ const char *msg, const char *prefix,
+ const void *buf, size_t len);
+#else /* CONFIG_ATH11K_DEBUG */
+static inline int __ath11k_dbg(struct ath11k_base *ab,
+ enum ath11k_debug_mask dbg_mask,
+ const char *fmt, ...)
+{
+ return 0;
+}
+
+static inline void ath11k_dbg_dump(struct ath11k_base *ab,
+ enum ath11k_debug_mask mask,
+ const char *msg, const char *prefix,
+ const void *buf, size_t len)
+{
+}
+#endif /* CONFIG_ATH11K_DEBUG */
+
+#define ath11k_dbg(ar, dbg_mask, fmt, ...) \
+do { \
+ if ((ath11k_debug_mask & dbg_mask) || \
+ trace_ath11k_log_dbg_enabled()) \
+ __ath11k_dbg(ar, dbg_mask, fmt, ##__VA_ARGS__); \
+} while (0)
+
+#endif /* _ATH11K_DEBUG_H_ */
diff --git a/drivers/net/wireless/ath/ath11k/debugfs.c b/drivers/net/wireless/ath/ath11k/debugfs.c
new file mode 100644
index 0000000000..5bb6fd17fd
--- /dev/null
+++ b/drivers/net/wireless/ath/ath11k/debugfs.c
@@ -0,0 +1,1927 @@
+// SPDX-License-Identifier: BSD-3-Clause-Clear
+/*
+ * Copyright (c) 2018-2020 The Linux Foundation. All rights reserved.
+ */
+
+#include <linux/vmalloc.h>
+
+#include "debugfs.h"
+
+#include "core.h"
+#include "debug.h"
+#include "wmi.h"
+#include "hal_rx.h"
+#include "dp_tx.h"
+#include "debugfs_htt_stats.h"
+#include "peer.h"
+#include "hif.h"
+
+static const char *htt_bp_umac_ring[HTT_SW_UMAC_RING_IDX_MAX] = {
+ "REO2SW1_RING",
+ "REO2SW2_RING",
+ "REO2SW3_RING",
+ "REO2SW4_RING",
+ "WBM2REO_LINK_RING",
+ "REO2TCL_RING",
+ "REO2FW_RING",
+ "RELEASE_RING",
+ "PPE_RELEASE_RING",
+ "TCL2TQM_RING",
+ "TQM_RELEASE_RING",
+ "REO_RELEASE_RING",
+ "WBM2SW0_RELEASE_RING",
+ "WBM2SW1_RELEASE_RING",
+ "WBM2SW2_RELEASE_RING",
+ "WBM2SW3_RELEASE_RING",
+ "REO_CMD_RING",
+ "REO_STATUS_RING",
+};
+
+static const char *htt_bp_lmac_ring[HTT_SW_LMAC_RING_IDX_MAX] = {
+ "FW2RXDMA_BUF_RING",
+ "FW2RXDMA_STATUS_RING",
+ "FW2RXDMA_LINK_RING",
+ "SW2RXDMA_BUF_RING",
+ "WBM2RXDMA_LINK_RING",
+ "RXDMA2FW_RING",
+ "RXDMA2SW_RING",
+ "RXDMA2RELEASE_RING",
+ "RXDMA2REO_RING",
+ "MONITOR_STATUS_RING",
+ "MONITOR_BUF_RING",
+ "MONITOR_DESC_RING",
+ "MONITOR_DEST_RING",
+};
+
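+/* Record a head/tail pointer snapshot for a DB ring event in the module's
+ * circular debug buffer, which can be dumped via the dump_dbr_debug file.
+ */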
+void ath11k_debugfs_add_dbring_entry(struct ath11k *ar,
+ enum wmi_direct_buffer_module id,
+ enum ath11k_dbg_dbr_event event,
+ struct hal_srng *srng)
+{
+ struct ath11k_debug_dbr *dbr_debug;
+ struct ath11k_dbg_dbr_data *dbr_data;
+ struct ath11k_dbg_dbr_entry *entry;
+
+ if (id >= WMI_DIRECT_BUF_MAX || event >= ATH11K_DBG_DBR_EVENT_MAX)
+ return;
+
+ dbr_debug = ar->debug.dbr_debug[id];
+ if (!dbr_debug)
+ return;
+
+ if (!dbr_debug->dbr_debug_enabled)
+ return;
+
+ dbr_data = &dbr_debug->dbr_dbg_data;
+
+ spin_lock_bh(&dbr_data->lock);
+
+ if (dbr_data->entries) {
+ entry = &dbr_data->entries[dbr_data->dbr_debug_idx];
+ entry->hp = srng->u.src_ring.hp;
+ entry->tp = *srng->u.src_ring.tp_addr;
+ entry->timestamp = jiffies;
+ entry->event = event;
+
+ dbr_data->dbr_debug_idx++;
+ if (dbr_data->dbr_debug_idx ==
+ dbr_data->num_ring_debug_entries)
+ dbr_data->dbr_debug_idx = 0;
+ }
+
+ spin_unlock_bh(&dbr_data->lock);
+}
+
+static void ath11k_debugfs_fw_stats_reset(struct ath11k *ar)
+{
+ spin_lock_bh(&ar->data_lock);
+ ar->fw_stats_done = false;
+ ath11k_fw_stats_pdevs_free(&ar->fw_stats.pdevs);
+ ath11k_fw_stats_vdevs_free(&ar->fw_stats.vdevs);
+ spin_unlock_bh(&ar->data_lock);
+}
+
+void ath11k_debugfs_fw_stats_process(struct ath11k *ar, struct ath11k_fw_stats *stats)
+{
+ struct ath11k_base *ab = ar->ab;
+ struct ath11k_pdev *pdev;
+ bool is_end;
+ static unsigned int num_vdev, num_bcn;
+ size_t total_vdevs_started = 0;
+ int i;
+
+	/* WMI_REQUEST_PDEV_STAT request has already been processed */
+
+ if (stats->stats_id == WMI_REQUEST_RSSI_PER_CHAIN_STAT) {
+ ar->fw_stats_done = true;
+ return;
+ }
+
+ if (stats->stats_id == WMI_REQUEST_VDEV_STAT) {
+ if (list_empty(&stats->vdevs)) {
+ ath11k_warn(ab, "empty vdev stats");
+ return;
+ }
+		/* FW sends stats for all active VDEVs irrespective of PDEV,
+		 * hence wait until stats for all started VDEVs are received
+		 */
+		for (i = 0; i < ab->num_radios; i++) {
+			pdev = rcu_dereference(ab->pdevs_active[i]);
+			if (pdev && pdev->ar)
+				total_vdevs_started += pdev->ar->num_started_vdevs;
+		}
+
+ is_end = ((++num_vdev) == total_vdevs_started);
+
+ list_splice_tail_init(&stats->vdevs,
+ &ar->fw_stats.vdevs);
+
+ if (is_end) {
+ ar->fw_stats_done = true;
+ num_vdev = 0;
+ }
+ return;
+ }
+
+ if (stats->stats_id == WMI_REQUEST_BCN_STAT) {
+ if (list_empty(&stats->bcn)) {
+ ath11k_warn(ab, "empty bcn stats");
+ return;
+ }
+		/* Mark the end once stats for all started VDEVs within the
+		 * PDEV have been received
+		 */
+ is_end = ((++num_bcn) == ar->num_started_vdevs);
+
+ list_splice_tail_init(&stats->bcn,
+ &ar->fw_stats.bcn);
+
+ if (is_end) {
+ ar->fw_stats_done = true;
+ num_bcn = 0;
+ }
+ }
+}
+
+static int ath11k_debugfs_fw_stats_request(struct ath11k *ar,
+ struct stats_request_params *req_param)
+{
+ struct ath11k_base *ab = ar->ab;
+ unsigned long timeout, time_left;
+ int ret;
+
+ lockdep_assert_held(&ar->conf_mutex);
+
+	/* FW stats can get split when exceeding the stats data buffer limit.
+	 * In that case, since there is no end marking for the back-to-back
+	 * received 'update stats' events, we keep a 3 second timeout in case
+	 * fw_stats_done is not marked yet
+	 */
+ timeout = jiffies + msecs_to_jiffies(3 * 1000);
+
+ ath11k_debugfs_fw_stats_reset(ar);
+
+ reinit_completion(&ar->fw_stats_complete);
+
+	ret = ath11k_wmi_send_stats_request_cmd(ar, req_param);
+	if (ret) {
+		ath11k_warn(ab, "could not request fw stats (%d)\n", ret);
+		return ret;
+	}
+
+ time_left = wait_for_completion_timeout(&ar->fw_stats_complete, 1 * HZ);
+
+ if (!time_left)
+ return -ETIMEDOUT;
+
+ for (;;) {
+ if (time_after(jiffies, timeout))
+ break;
+
+ spin_lock_bh(&ar->data_lock);
+ if (ar->fw_stats_done) {
+ spin_unlock_bh(&ar->data_lock);
+ break;
+ }
+ spin_unlock_bh(&ar->data_lock);
+ }
+ return 0;
+}
+
+int ath11k_debugfs_get_fw_stats(struct ath11k *ar, u32 pdev_id,
+ u32 vdev_id, u32 stats_id)
+{
+ struct ath11k_base *ab = ar->ab;
+ struct stats_request_params req_param;
+ int ret;
+
+ mutex_lock(&ar->conf_mutex);
+
+ if (ar->state != ATH11K_STATE_ON) {
+ ret = -ENETDOWN;
+ goto err_unlock;
+ }
+
+ req_param.pdev_id = pdev_id;
+ req_param.vdev_id = vdev_id;
+ req_param.stats_id = stats_id;
+
+ ret = ath11k_debugfs_fw_stats_request(ar, &req_param);
+ if (ret)
+ ath11k_warn(ab, "failed to request fw stats: %d\n", ret);
+
+ ath11k_dbg(ab, ATH11K_DBG_WMI,
+ "debug get fw stat pdev id %d vdev id %d stats id 0x%x\n",
+ pdev_id, vdev_id, stats_id);
+
+err_unlock:
+ mutex_unlock(&ar->conf_mutex);
+
+ return ret;
+}
+
+static int ath11k_open_pdev_stats(struct inode *inode, struct file *file)
+{
+ struct ath11k *ar = inode->i_private;
+ struct ath11k_base *ab = ar->ab;
+ struct stats_request_params req_param;
+ void *buf = NULL;
+ int ret;
+
+ mutex_lock(&ar->conf_mutex);
+
+ if (ar->state != ATH11K_STATE_ON) {
+ ret = -ENETDOWN;
+ goto err_unlock;
+ }
+
+ buf = vmalloc(ATH11K_FW_STATS_BUF_SIZE);
+ if (!buf) {
+ ret = -ENOMEM;
+ goto err_unlock;
+ }
+
+ req_param.pdev_id = ar->pdev->pdev_id;
+ req_param.vdev_id = 0;
+ req_param.stats_id = WMI_REQUEST_PDEV_STAT;
+
+ ret = ath11k_debugfs_fw_stats_request(ar, &req_param);
+ if (ret) {
+ ath11k_warn(ab, "failed to request fw pdev stats: %d\n", ret);
+ goto err_free;
+ }
+
+ ath11k_wmi_fw_stats_fill(ar, &ar->fw_stats, req_param.stats_id, buf);
+
+ file->private_data = buf;
+
+ mutex_unlock(&ar->conf_mutex);
+ return 0;
+
+err_free:
+ vfree(buf);
+
+err_unlock:
+ mutex_unlock(&ar->conf_mutex);
+ return ret;
+}
+
+static int ath11k_release_pdev_stats(struct inode *inode, struct file *file)
+{
+ vfree(file->private_data);
+
+ return 0;
+}
+
+static ssize_t ath11k_read_pdev_stats(struct file *file,
+ char __user *user_buf,
+ size_t count, loff_t *ppos)
+{
+ const char *buf = file->private_data;
+ size_t len = strlen(buf);
+
+ return simple_read_from_buffer(user_buf, count, ppos, buf, len);
+}
+
+static const struct file_operations fops_pdev_stats = {
+ .open = ath11k_open_pdev_stats,
+ .release = ath11k_release_pdev_stats,
+ .read = ath11k_read_pdev_stats,
+ .owner = THIS_MODULE,
+ .llseek = default_llseek,
+};
+
+static int ath11k_open_vdev_stats(struct inode *inode, struct file *file)
+{
+ struct ath11k *ar = inode->i_private;
+ struct stats_request_params req_param;
+ void *buf = NULL;
+ int ret;
+
+ mutex_lock(&ar->conf_mutex);
+
+ if (ar->state != ATH11K_STATE_ON) {
+ ret = -ENETDOWN;
+ goto err_unlock;
+ }
+
+ buf = vmalloc(ATH11K_FW_STATS_BUF_SIZE);
+ if (!buf) {
+ ret = -ENOMEM;
+ goto err_unlock;
+ }
+
+ req_param.pdev_id = ar->pdev->pdev_id;
+	/* VDEV stats are always sent for all active VDEVs by the FW */
+ req_param.vdev_id = 0;
+ req_param.stats_id = WMI_REQUEST_VDEV_STAT;
+
+ ret = ath11k_debugfs_fw_stats_request(ar, &req_param);
+ if (ret) {
+ ath11k_warn(ar->ab, "failed to request fw vdev stats: %d\n", ret);
+ goto err_free;
+ }
+
+ ath11k_wmi_fw_stats_fill(ar, &ar->fw_stats, req_param.stats_id, buf);
+
+ file->private_data = buf;
+
+ mutex_unlock(&ar->conf_mutex);
+ return 0;
+
+err_free:
+ vfree(buf);
+
+err_unlock:
+ mutex_unlock(&ar->conf_mutex);
+ return ret;
+}
+
+static int ath11k_release_vdev_stats(struct inode *inode, struct file *file)
+{
+ vfree(file->private_data);
+
+ return 0;
+}
+
+static ssize_t ath11k_read_vdev_stats(struct file *file,
+ char __user *user_buf,
+ size_t count, loff_t *ppos)
+{
+ const char *buf = file->private_data;
+ size_t len = strlen(buf);
+
+ return simple_read_from_buffer(user_buf, count, ppos, buf, len);
+}
+
+static const struct file_operations fops_vdev_stats = {
+ .open = ath11k_open_vdev_stats,
+ .release = ath11k_release_vdev_stats,
+ .read = ath11k_read_vdev_stats,
+ .owner = THIS_MODULE,
+ .llseek = default_llseek,
+};
+
+static int ath11k_open_bcn_stats(struct inode *inode, struct file *file)
+{
+ struct ath11k *ar = inode->i_private;
+ struct ath11k_vif *arvif;
+ struct stats_request_params req_param;
+ void *buf = NULL;
+ int ret;
+
+ mutex_lock(&ar->conf_mutex);
+
+ if (ar->state != ATH11K_STATE_ON) {
+ ret = -ENETDOWN;
+ goto err_unlock;
+ }
+
+ buf = vmalloc(ATH11K_FW_STATS_BUF_SIZE);
+ if (!buf) {
+ ret = -ENOMEM;
+ goto err_unlock;
+ }
+
+ req_param.stats_id = WMI_REQUEST_BCN_STAT;
+ req_param.pdev_id = ar->pdev->pdev_id;
+
+ /* loop all active VDEVs for bcn stats */
+ list_for_each_entry(arvif, &ar->arvifs, list) {
+ if (!arvif->is_up)
+ continue;
+
+ req_param.vdev_id = arvif->vdev_id;
+ ret = ath11k_debugfs_fw_stats_request(ar, &req_param);
+ if (ret) {
+ ath11k_warn(ar->ab, "failed to request fw bcn stats: %d\n", ret);
+ goto err_free;
+ }
+ }
+
+ ath11k_wmi_fw_stats_fill(ar, &ar->fw_stats, req_param.stats_id, buf);
+
+	/* since the beacon stats request is looped over all active VDEVs, the
+	 * saved fw stats are not freed per request but only once requests for
+	 * all active VDEVs are done
+	 */
+ spin_lock_bh(&ar->data_lock);
+ ath11k_fw_stats_bcn_free(&ar->fw_stats.bcn);
+ spin_unlock_bh(&ar->data_lock);
+
+ file->private_data = buf;
+
+ mutex_unlock(&ar->conf_mutex);
+ return 0;
+
+err_free:
+ vfree(buf);
+
+err_unlock:
+ mutex_unlock(&ar->conf_mutex);
+ return ret;
+}
+
+static int ath11k_release_bcn_stats(struct inode *inode, struct file *file)
+{
+ vfree(file->private_data);
+
+ return 0;
+}
+
+static ssize_t ath11k_read_bcn_stats(struct file *file,
+ char __user *user_buf,
+ size_t count, loff_t *ppos)
+{
+ const char *buf = file->private_data;
+ size_t len = strlen(buf);
+
+ return simple_read_from_buffer(user_buf, count, ppos, buf, len);
+}
+
+static const struct file_operations fops_bcn_stats = {
+ .open = ath11k_open_bcn_stats,
+ .release = ath11k_release_bcn_stats,
+ .read = ath11k_read_bcn_stats,
+ .owner = THIS_MODULE,
+ .llseek = default_llseek,
+};
+
+static ssize_t ath11k_read_simulate_fw_crash(struct file *file,
+ char __user *user_buf,
+ size_t count, loff_t *ppos)
+{
+ const char buf[] =
+ "To simulate firmware crash write one of the keywords to this file:\n"
+ "`assert` - this will send WMI_FORCE_FW_HANG_CMDID to firmware to cause assert.\n"
+ "`hw-restart` - this will simply queue hw restart without fw/hw actually crashing.\n";
+
+ return simple_read_from_buffer(user_buf, count, ppos, buf, strlen(buf));
+}
+
+/* Simulate firmware crash:
+ * 'assert': send WMI_FORCE_FW_HANG_CMDID to the firmware to cause an
+ * assert; this hang is recoverable by a firmware reset.
+ * 'hw-restart': simply queue a hw restart without the fw/hw actually
+ * crashing.
+ */
+static ssize_t ath11k_write_simulate_fw_crash(struct file *file,
+ const char __user *user_buf,
+ size_t count, loff_t *ppos)
+{
+ struct ath11k_base *ab = file->private_data;
+ struct ath11k_pdev *pdev;
+ struct ath11k *ar = ab->pdevs[0].ar;
+ char buf[32] = {0};
+ ssize_t rc;
+ int i, ret, radioup = 0;
+
+ for (i = 0; i < ab->num_radios; i++) {
+ pdev = &ab->pdevs[i];
+ ar = pdev->ar;
+ if (ar && ar->state == ATH11K_STATE_ON) {
+ radioup = 1;
+ break;
+ }
+ }
+ /* filter partial writes and invalid commands */
+ if (*ppos != 0 || count >= sizeof(buf) || count == 0)
+ return -EINVAL;
+
+ rc = simple_write_to_buffer(buf, sizeof(buf) - 1, ppos, user_buf, count);
+ if (rc < 0)
+ return rc;
+
+ /* drop the possible '\n' from the end */
+ if (buf[*ppos - 1] == '\n')
+ buf[*ppos - 1] = '\0';
+
+ if (radioup == 0) {
+ ret = -ENETDOWN;
+ goto exit;
+ }
+
+ if (!strcmp(buf, "assert")) {
+ ath11k_info(ab, "simulating firmware assert crash\n");
+ ret = ath11k_wmi_force_fw_hang_cmd(ar,
+ ATH11K_WMI_FW_HANG_ASSERT_TYPE,
+ ATH11K_WMI_FW_HANG_DELAY);
+ } else if (!strcmp(buf, "hw-restart")) {
+ ath11k_info(ab, "user requested hw restart\n");
+ queue_work(ab->workqueue_aux, &ab->reset_work);
+ ret = 0;
+ } else {
+ ret = -EINVAL;
+ goto exit;
+ }
+
+ if (ret) {
+ ath11k_warn(ab, "failed to simulate firmware crash: %d\n", ret);
+ goto exit;
+ }
+
+ ret = count;
+
+exit:
+ return ret;
+}
+
+static const struct file_operations fops_simulate_fw_crash = {
+ .read = ath11k_read_simulate_fw_crash,
+ .write = ath11k_write_simulate_fw_crash,
+ .open = simple_open,
+ .owner = THIS_MODULE,
+ .llseek = default_llseek,
+};
+
+static ssize_t ath11k_write_enable_extd_tx_stats(struct file *file,
+ const char __user *ubuf,
+ size_t count, loff_t *ppos)
+{
+ struct ath11k *ar = file->private_data;
+ u32 filter;
+ int ret;
+
+ if (kstrtouint_from_user(ubuf, count, 0, &filter))
+ return -EINVAL;
+
+ mutex_lock(&ar->conf_mutex);
+
+ if (ar->state != ATH11K_STATE_ON) {
+ ret = -ENETDOWN;
+ goto out;
+ }
+
+ if (filter == ar->debug.extd_tx_stats) {
+ ret = count;
+ goto out;
+ }
+
+ ar->debug.extd_tx_stats = filter;
+ ret = count;
+
+out:
+ mutex_unlock(&ar->conf_mutex);
+ return ret;
+}
+
+static ssize_t ath11k_read_enable_extd_tx_stats(struct file *file,
+ char __user *ubuf,
+ size_t count, loff_t *ppos)
+{
+ char buf[32] = {0};
+ struct ath11k *ar = file->private_data;
+ int len = 0;
+
+ mutex_lock(&ar->conf_mutex);
+	len = scnprintf(buf, sizeof(buf), "%08x\n",
+			ar->debug.extd_tx_stats);
+ mutex_unlock(&ar->conf_mutex);
+
+ return simple_read_from_buffer(ubuf, count, ppos, buf, len);
+}
+
+static const struct file_operations fops_extd_tx_stats = {
+ .read = ath11k_read_enable_extd_tx_stats,
+ .write = ath11k_write_enable_extd_tx_stats,
+ .open = simple_open
+};
+
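+/* Write 1 to enable or 0 to disable extended rx stats; enabling programs
+ * the monitor status ring filter to deliver the PPDU start/end TLVs these
+ * stats are built from.
+ */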
+static ssize_t ath11k_write_extd_rx_stats(struct file *file,
+ const char __user *ubuf,
+ size_t count, loff_t *ppos)
+{
+ struct ath11k *ar = file->private_data;
+ struct ath11k_base *ab = ar->ab;
+ struct htt_rx_ring_tlv_filter tlv_filter = {0};
+ u32 enable, rx_filter = 0, ring_id;
+ int i;
+ int ret;
+
+ if (kstrtouint_from_user(ubuf, count, 0, &enable))
+ return -EINVAL;
+
+ mutex_lock(&ar->conf_mutex);
+
+ if (ar->state != ATH11K_STATE_ON) {
+ ret = -ENETDOWN;
+ goto exit;
+ }
+
+ if (enable > 1) {
+ ret = -EINVAL;
+ goto exit;
+ }
+
+ if (enable == ar->debug.extd_rx_stats) {
+ ret = count;
+ goto exit;
+ }
+
+ if (test_bit(ATH11K_FLAG_MONITOR_STARTED, &ar->monitor_flags)) {
+ ar->debug.extd_rx_stats = enable;
+ ret = count;
+ goto exit;
+ }
+
+ if (enable) {
+ rx_filter = HTT_RX_FILTER_TLV_FLAGS_MPDU_START;
+ rx_filter |= HTT_RX_FILTER_TLV_FLAGS_PPDU_START;
+ rx_filter |= HTT_RX_FILTER_TLV_FLAGS_PPDU_END;
+ rx_filter |= HTT_RX_FILTER_TLV_FLAGS_PPDU_END_USER_STATS;
+ rx_filter |= HTT_RX_FILTER_TLV_FLAGS_PPDU_END_USER_STATS_EXT;
+ rx_filter |= HTT_RX_FILTER_TLV_FLAGS_PPDU_END_STATUS_DONE;
+
+ tlv_filter.rx_filter = rx_filter;
+ tlv_filter.pkt_filter_flags0 = HTT_RX_FP_MGMT_FILTER_FLAGS0;
+ tlv_filter.pkt_filter_flags1 = HTT_RX_FP_MGMT_FILTER_FLAGS1;
+ tlv_filter.pkt_filter_flags2 = HTT_RX_FP_CTRL_FILTER_FLASG2;
+ tlv_filter.pkt_filter_flags3 = HTT_RX_FP_CTRL_FILTER_FLASG3 |
+ HTT_RX_FP_DATA_FILTER_FLASG3;
+ } else {
+ tlv_filter = ath11k_mac_mon_status_filter_default;
+ }
+
+ ar->debug.rx_filter = tlv_filter.rx_filter;
+
+ for (i = 0; i < ab->hw_params.num_rxmda_per_pdev; i++) {
+ ring_id = ar->dp.rx_mon_status_refill_ring[i].refill_buf_ring.ring_id;
+ ret = ath11k_dp_tx_htt_rx_filter_setup(ar->ab, ring_id, ar->dp.mac_id,
+ HAL_RXDMA_MONITOR_STATUS,
+ DP_RX_BUFFER_SIZE, &tlv_filter);
+
+ if (ret) {
+ ath11k_warn(ar->ab, "failed to set rx filter for monitor status ring\n");
+ goto exit;
+ }
+ }
+
+ ar->debug.extd_rx_stats = enable;
+ ret = count;
+exit:
+ mutex_unlock(&ar->conf_mutex);
+ return ret;
+}
+
+static ssize_t ath11k_read_extd_rx_stats(struct file *file,
+ char __user *ubuf,
+ size_t count, loff_t *ppos)
+{
+ struct ath11k *ar = file->private_data;
+ char buf[32];
+ int len = 0;
+
+ mutex_lock(&ar->conf_mutex);
+	len = scnprintf(buf, sizeof(buf), "%d\n",
+			ar->debug.extd_rx_stats);
+ mutex_unlock(&ar->conf_mutex);
+
+ return simple_read_from_buffer(ubuf, count, ppos, buf, len);
+}
+
+static const struct file_operations fops_extd_rx_stats = {
+ .read = ath11k_read_extd_rx_stats,
+ .write = ath11k_write_extd_rx_stats,
+ .open = simple_open,
+};
+
+static int ath11k_fill_bp_stats(struct ath11k_base *ab,
+ struct ath11k_bp_stats *bp_stats,
+ char *buf, int len, int size)
+{
+ lockdep_assert_held(&ab->base_lock);
+
+ len += scnprintf(buf + len, size - len, "count: %u\n",
+ bp_stats->count);
+ len += scnprintf(buf + len, size - len, "hp: %u\n",
+ bp_stats->hp);
+ len += scnprintf(buf + len, size - len, "tp: %u\n",
+ bp_stats->tp);
+ len += scnprintf(buf + len, size - len, "seen before: %ums\n\n",
+ jiffies_to_msecs(jiffies - bp_stats->jiffies));
+ return len;
+}
+
+static ssize_t ath11k_debugfs_dump_soc_ring_bp_stats(struct ath11k_base *ab,
+ char *buf, int size)
+{
+ struct ath11k_bp_stats *bp_stats;
+ bool stats_rxd = false;
+ u8 i, pdev_idx;
+ int len = 0;
+
+ len += scnprintf(buf + len, size - len, "\nBackpressure Stats\n");
+ len += scnprintf(buf + len, size - len, "==================\n");
+
+ spin_lock_bh(&ab->base_lock);
+ for (i = 0; i < HTT_SW_UMAC_RING_IDX_MAX; i++) {
+ bp_stats = &ab->soc_stats.bp_stats.umac_ring_bp_stats[i];
+
+ if (!bp_stats->count)
+ continue;
+
+ len += scnprintf(buf + len, size - len, "Ring: %s\n",
+ htt_bp_umac_ring[i]);
+ len = ath11k_fill_bp_stats(ab, bp_stats, buf, len, size);
+ stats_rxd = true;
+ }
+
+ for (i = 0; i < HTT_SW_LMAC_RING_IDX_MAX; i++) {
+ for (pdev_idx = 0; pdev_idx < MAX_RADIOS; pdev_idx++) {
+ bp_stats =
+ &ab->soc_stats.bp_stats.lmac_ring_bp_stats[i][pdev_idx];
+
+ if (!bp_stats->count)
+ continue;
+
+ len += scnprintf(buf + len, size - len, "Ring: %s\n",
+ htt_bp_lmac_ring[i]);
+ len += scnprintf(buf + len, size - len, "pdev: %d\n",
+ pdev_idx);
+ len = ath11k_fill_bp_stats(ab, bp_stats, buf, len, size);
+ stats_rxd = true;
+ }
+ }
+ spin_unlock_bh(&ab->base_lock);
+
+ if (!stats_rxd)
+ len += scnprintf(buf + len, size - len,
+ "No Ring Backpressure stats received\n\n");
+
+ return len;
+}
+
+static ssize_t ath11k_debugfs_dump_soc_dp_stats(struct file *file,
+ char __user *user_buf,
+ size_t count, loff_t *ppos)
+{
+ struct ath11k_base *ab = file->private_data;
+ struct ath11k_soc_dp_stats *soc_stats = &ab->soc_stats;
+ int len = 0, i, retval;
+ const int size = 4096;
+ static const char *rxdma_err[HAL_REO_ENTR_RING_RXDMA_ECODE_MAX] = {
+ "Overflow", "MPDU len", "FCS", "Decrypt", "TKIP MIC",
+ "Unencrypt", "MSDU len", "MSDU limit", "WiFi parse",
+ "AMSDU parse", "SA timeout", "DA timeout",
+ "Flow timeout", "Flush req"};
+ static const char *reo_err[HAL_REO_DEST_RING_ERROR_CODE_MAX] = {
+ "Desc addr zero", "Desc inval", "AMPDU in non BA",
+ "Non BA dup", "BA dup", "Frame 2k jump", "BAR 2k jump",
+ "Frame OOR", "BAR OOR", "No BA session",
+ "Frame SN equal SSN", "PN check fail", "2k err",
+ "PN err", "Desc blocked"};
+
+ char *buf;
+
+ buf = kzalloc(size, GFP_KERNEL);
+ if (!buf)
+ return -ENOMEM;
+
+ len += scnprintf(buf + len, size - len, "SOC RX STATS:\n\n");
+ len += scnprintf(buf + len, size - len, "err ring pkts: %u\n",
+ soc_stats->err_ring_pkts);
+ len += scnprintf(buf + len, size - len, "Invalid RBM: %u\n\n",
+ soc_stats->invalid_rbm);
+ len += scnprintf(buf + len, size - len, "RXDMA errors:\n");
+ for (i = 0; i < HAL_REO_ENTR_RING_RXDMA_ECODE_MAX; i++)
+ len += scnprintf(buf + len, size - len, "%s: %u\n",
+ rxdma_err[i], soc_stats->rxdma_error[i]);
+
+ len += scnprintf(buf + len, size - len, "\nREO errors:\n");
+ for (i = 0; i < HAL_REO_DEST_RING_ERROR_CODE_MAX; i++)
+ len += scnprintf(buf + len, size - len, "%s: %u\n",
+ reo_err[i], soc_stats->reo_error[i]);
+
+ len += scnprintf(buf + len, size - len, "\nHAL REO errors:\n");
+ len += scnprintf(buf + len, size - len,
+ "ring0: %u\nring1: %u\nring2: %u\nring3: %u\n",
+ soc_stats->hal_reo_error[0],
+ soc_stats->hal_reo_error[1],
+ soc_stats->hal_reo_error[2],
+ soc_stats->hal_reo_error[3]);
+
+ len += scnprintf(buf + len, size - len, "\nSOC TX STATS:\n");
+ len += scnprintf(buf + len, size - len, "\nTCL Ring Full Failures:\n");
+
+ for (i = 0; i < ab->hw_params.max_tx_ring; i++)
+ len += scnprintf(buf + len, size - len, "ring%d: %u\n",
+ i, soc_stats->tx_err.desc_na[i]);
+
+ len += scnprintf(buf + len, size - len,
+ "\nMisc Transmit Failures: %d\n",
+ atomic_read(&soc_stats->tx_err.misc_fail));
+
+ len += ath11k_debugfs_dump_soc_ring_bp_stats(ab, buf + len, size - len);
+
+ if (len > size)
+ len = size;
+ retval = simple_read_from_buffer(user_buf, count, ppos, buf, len);
+ kfree(buf);
+
+ return retval;
+}
+
+static const struct file_operations fops_soc_dp_stats = {
+ .read = ath11k_debugfs_dump_soc_dp_stats,
+ .open = simple_open,
+ .owner = THIS_MODULE,
+ .llseek = default_llseek,
+};
+
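+/* Write format: <param> <value> [<module_id_index> <is_end>], with value in
+ * hex. For the module enable bitmap params the bitmap word is taken from the
+ * upper 32 bits of value and the last two fields are mandatory; for all
+ * other params only the first two fields are parsed.
+ */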
+static ssize_t ath11k_write_fw_dbglog(struct file *file,
+ const char __user *user_buf,
+ size_t count, loff_t *ppos)
+{
+ struct ath11k *ar = file->private_data;
+ char buf[128] = {0};
+ struct ath11k_fw_dbglog dbglog;
+ unsigned int param, mod_id_index, is_end;
+ u64 value;
+ int ret, num;
+
+ ret = simple_write_to_buffer(buf, sizeof(buf) - 1, ppos,
+ user_buf, count);
+ if (ret <= 0)
+ return ret;
+
+ num = sscanf(buf, "%u %llx %u %u", &param, &value, &mod_id_index, &is_end);
+
+ if (num < 2)
+ return -EINVAL;
+
+ mutex_lock(&ar->conf_mutex);
+ if (param == WMI_DEBUG_LOG_PARAM_MOD_ENABLE_BITMAP ||
+ param == WMI_DEBUG_LOG_PARAM_WOW_MOD_ENABLE_BITMAP) {
+ if (num != 4 || mod_id_index > (MAX_MODULE_ID_BITMAP_WORDS - 1)) {
+ ret = -EINVAL;
+ goto out;
+ }
+ ar->debug.module_id_bitmap[mod_id_index] = upper_32_bits(value);
+ if (!is_end) {
+ ret = count;
+ goto out;
+ }
+ } else {
+ if (num != 2) {
+ ret = -EINVAL;
+ goto out;
+ }
+ }
+
+ dbglog.param = param;
+ dbglog.value = lower_32_bits(value);
+ ret = ath11k_wmi_fw_dbglog_cfg(ar, ar->debug.module_id_bitmap, &dbglog);
+ if (ret) {
+ ath11k_warn(ar->ab, "fw dbglog config failed from debugfs: %d\n",
+ ret);
+ goto out;
+ }
+
+ ret = count;
+
+out:
+ mutex_unlock(&ar->conf_mutex);
+ return ret;
+}
+
+static const struct file_operations fops_fw_dbglog = {
+ .write = ath11k_write_fw_dbglog,
+ .open = simple_open,
+ .owner = THIS_MODULE,
+ .llseek = default_llseek,
+};
+
+static int ath11k_open_sram_dump(struct inode *inode, struct file *file)
+{
+ struct ath11k_base *ab = inode->i_private;
+ u8 *buf;
+ u32 start, end;
+ int ret;
+
+ start = ab->hw_params.sram_dump.start;
+ end = ab->hw_params.sram_dump.end;
+
+ buf = vmalloc(end - start + 1);
+ if (!buf)
+ return -ENOMEM;
+
+ ret = ath11k_hif_read(ab, buf, start, end);
+ if (ret) {
+ ath11k_warn(ab, "failed to dump sram: %d\n", ret);
+ vfree(buf);
+ return ret;
+ }
+
+ file->private_data = buf;
+ return 0;
+}
+
+static ssize_t ath11k_read_sram_dump(struct file *file,
+ char __user *user_buf,
+ size_t count, loff_t *ppos)
+{
+ struct ath11k_base *ab = file->f_inode->i_private;
+ const char *buf = file->private_data;
+ int len;
+ u32 start, end;
+
+ start = ab->hw_params.sram_dump.start;
+ end = ab->hw_params.sram_dump.end;
+ len = end - start + 1;
+
+ return simple_read_from_buffer(user_buf, count, ppos, buf, len);
+}
+
+static int ath11k_release_sram_dump(struct inode *inode, struct file *file)
+{
+ vfree(file->private_data);
+ file->private_data = NULL;
+
+ return 0;
+}
+
+static const struct file_operations fops_sram_dump = {
+ .open = ath11k_open_sram_dump,
+ .read = ath11k_read_sram_dump,
+ .release = ath11k_release_sram_dump,
+ .owner = THIS_MODULE,
+ .llseek = default_llseek,
+};
+
+int ath11k_debugfs_pdev_create(struct ath11k_base *ab)
+{
+ if (test_bit(ATH11K_FLAG_REGISTERED, &ab->dev_flags))
+ return 0;
+
+ debugfs_create_file("simulate_fw_crash", 0600, ab->debugfs_soc, ab,
+ &fops_simulate_fw_crash);
+
+ debugfs_create_file("soc_dp_stats", 0600, ab->debugfs_soc, ab,
+ &fops_soc_dp_stats);
+
+ if (ab->hw_params.sram_dump.start != 0)
+ debugfs_create_file("sram", 0400, ab->debugfs_soc, ab,
+ &fops_sram_dump);
+
+ return 0;
+}
+
+void ath11k_debugfs_pdev_destroy(struct ath11k_base *ab)
+{
+ debugfs_remove_recursive(ab->debugfs_soc);
+ ab->debugfs_soc = NULL;
+}
+
+int ath11k_debugfs_soc_create(struct ath11k_base *ab)
+{
+ struct dentry *root;
+ bool dput_needed;
+ char name[64];
+ int ret;
+
+ root = debugfs_lookup("ath11k", NULL);
+ if (!root) {
+ root = debugfs_create_dir("ath11k", NULL);
+ if (IS_ERR_OR_NULL(root))
+ return PTR_ERR(root);
+
+ dput_needed = false;
+ } else {
+		/* a dentry from lookup() needs a dput() once we are done with it */
+ dput_needed = true;
+ }
+
+ scnprintf(name, sizeof(name), "%s-%s", ath11k_bus_str(ab->hif.bus),
+ dev_name(ab->dev));
+
+ ab->debugfs_soc = debugfs_create_dir(name, root);
+ if (IS_ERR_OR_NULL(ab->debugfs_soc)) {
+ ret = PTR_ERR(ab->debugfs_soc);
+ goto out;
+ }
+
+ ret = 0;
+
+out:
+ if (dput_needed)
+ dput(root);
+
+ return ret;
+}
+
+void ath11k_debugfs_soc_destroy(struct ath11k_base *ab)
+{
+ debugfs_remove_recursive(ab->debugfs_soc);
+ ab->debugfs_soc = NULL;
+
+	/* We are not removing the ath11k directory on purpose, even if it
+	 * would be empty. This simplifies the directory handling and it's
+	 * a minor cosmetic issue to leave an empty ath11k directory in
+	 * debugfs.
+	 */
+}
+EXPORT_SYMBOL(ath11k_debugfs_soc_destroy);
+
+void ath11k_debugfs_fw_stats_init(struct ath11k *ar)
+{
+ struct dentry *fwstats_dir = debugfs_create_dir("fw_stats",
+ ar->debug.debugfs_pdev);
+
+ ar->fw_stats.debugfs_fwstats = fwstats_dir;
+
+	/* all stats debugfs files are created under the per-PDEV "fw_stats"
+	 * directory
+	 */
+ debugfs_create_file("pdev_stats", 0600, fwstats_dir, ar,
+ &fops_pdev_stats);
+ debugfs_create_file("vdev_stats", 0600, fwstats_dir, ar,
+ &fops_vdev_stats);
+ debugfs_create_file("beacon_stats", 0600, fwstats_dir, ar,
+ &fops_bcn_stats);
+}
+
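+/* Write format: 0x<filter> <mode>, where mode is 1 (lite) or 2 (full),
+ * e.g. "0x6f 1" to enable ATH11K_PKTLOG_ANY in lite mode.
+ */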
+static ssize_t ath11k_write_pktlog_filter(struct file *file,
+ const char __user *ubuf,
+ size_t count, loff_t *ppos)
+{
+ struct ath11k *ar = file->private_data;
+ struct ath11k_base *ab = ar->ab;
+ struct htt_rx_ring_tlv_filter tlv_filter = {0};
+ u32 rx_filter = 0, ring_id, filter, mode;
+ u8 buf[128] = {0};
+ int i, ret, rx_buf_sz = 0;
+ ssize_t rc;
+
+ mutex_lock(&ar->conf_mutex);
+ if (ar->state != ATH11K_STATE_ON) {
+ ret = -ENETDOWN;
+ goto out;
+ }
+
+ rc = simple_write_to_buffer(buf, sizeof(buf) - 1, ppos, ubuf, count);
+ if (rc < 0) {
+ ret = rc;
+ goto out;
+ }
+ buf[rc] = '\0';
+
+ ret = sscanf(buf, "0x%x %u", &filter, &mode);
+ if (ret != 2) {
+ ret = -EINVAL;
+ goto out;
+ }
+
+ if (filter) {
+ ret = ath11k_wmi_pdev_pktlog_enable(ar, filter);
+ if (ret) {
+ ath11k_warn(ar->ab,
+ "failed to enable pktlog filter %x: %d\n",
+ ar->debug.pktlog_filter, ret);
+ goto out;
+ }
+ } else {
+ ret = ath11k_wmi_pdev_pktlog_disable(ar);
+ if (ret) {
+ ath11k_warn(ar->ab, "failed to disable pktlog: %d\n", ret);
+ goto out;
+ }
+ }
+
+ /* Clear rx filter set for monitor mode and rx status */
+ for (i = 0; i < ab->hw_params.num_rxmda_per_pdev; i++) {
+ ring_id = ar->dp.rx_mon_status_refill_ring[i].refill_buf_ring.ring_id;
+ ret = ath11k_dp_tx_htt_rx_filter_setup(ar->ab, ring_id, ar->dp.mac_id,
+ HAL_RXDMA_MONITOR_STATUS,
+ rx_buf_sz, &tlv_filter);
+ if (ret) {
+ ath11k_warn(ar->ab, "failed to set rx filter for monitor status ring\n");
+ goto out;
+ }
+ }
+#define HTT_RX_FILTER_TLV_LITE_MODE \
+ (HTT_RX_FILTER_TLV_FLAGS_PPDU_START | \
+ HTT_RX_FILTER_TLV_FLAGS_PPDU_END | \
+ HTT_RX_FILTER_TLV_FLAGS_PPDU_END_USER_STATS | \
+ HTT_RX_FILTER_TLV_FLAGS_PPDU_END_USER_STATS_EXT | \
+ HTT_RX_FILTER_TLV_FLAGS_PPDU_END_STATUS_DONE | \
+ HTT_RX_FILTER_TLV_FLAGS_MPDU_START)
+
+ if (mode == ATH11K_PKTLOG_MODE_FULL) {
+ rx_filter = HTT_RX_FILTER_TLV_LITE_MODE |
+ HTT_RX_FILTER_TLV_FLAGS_MSDU_START |
+ HTT_RX_FILTER_TLV_FLAGS_MSDU_END |
+ HTT_RX_FILTER_TLV_FLAGS_MPDU_END |
+ HTT_RX_FILTER_TLV_FLAGS_PACKET_HEADER |
+ HTT_RX_FILTER_TLV_FLAGS_ATTENTION;
+ rx_buf_sz = DP_RX_BUFFER_SIZE;
+ } else if (mode == ATH11K_PKTLOG_MODE_LITE) {
+ ret = ath11k_dp_tx_htt_h2t_ppdu_stats_req(ar,
+ HTT_PPDU_STATS_TAG_PKTLOG);
+ if (ret) {
+ ath11k_err(ar->ab, "failed to enable pktlog lite: %d\n", ret);
+ goto out;
+ }
+
+ rx_filter = HTT_RX_FILTER_TLV_LITE_MODE;
+ rx_buf_sz = DP_RX_BUFFER_SIZE_LITE;
+ } else {
+ rx_buf_sz = DP_RX_BUFFER_SIZE;
+ tlv_filter = ath11k_mac_mon_status_filter_default;
+ rx_filter = tlv_filter.rx_filter;
+
+ ret = ath11k_dp_tx_htt_h2t_ppdu_stats_req(ar,
+ HTT_PPDU_STATS_TAG_DEFAULT);
+ if (ret) {
+ ath11k_err(ar->ab, "failed to send htt ppdu stats req: %d\n",
+ ret);
+ goto out;
+ }
+ }
+
+ tlv_filter.rx_filter = rx_filter;
+ if (rx_filter) {
+ tlv_filter.pkt_filter_flags0 = HTT_RX_FP_MGMT_FILTER_FLAGS0;
+ tlv_filter.pkt_filter_flags1 = HTT_RX_FP_MGMT_FILTER_FLAGS1;
+ tlv_filter.pkt_filter_flags2 = HTT_RX_FP_CTRL_FILTER_FLASG2;
+ tlv_filter.pkt_filter_flags3 = HTT_RX_FP_CTRL_FILTER_FLASG3 |
+ HTT_RX_FP_DATA_FILTER_FLASG3;
+ }
+
+ for (i = 0; i < ab->hw_params.num_rxmda_per_pdev; i++) {
+ ring_id = ar->dp.rx_mon_status_refill_ring[i].refill_buf_ring.ring_id;
+ ret = ath11k_dp_tx_htt_rx_filter_setup(ab, ring_id,
+ ar->dp.mac_id + i,
+ HAL_RXDMA_MONITOR_STATUS,
+ rx_buf_sz, &tlv_filter);
+
+ if (ret) {
+ ath11k_warn(ab, "failed to set rx filter for monitor status ring\n");
+ goto out;
+ }
+ }
+
+ ath11k_info(ab, "pktlog mode %s\n",
+ ((mode == ATH11K_PKTLOG_MODE_FULL) ? "full" : "lite"));
+
+ ar->debug.pktlog_filter = filter;
+ ar->debug.pktlog_mode = mode;
+ ret = count;
+
+out:
+ mutex_unlock(&ar->conf_mutex);
+ return ret;
+}
+
+static ssize_t ath11k_read_pktlog_filter(struct file *file,
+ char __user *ubuf,
+ size_t count, loff_t *ppos)
+{
+ char buf[32] = {0};
+ struct ath11k *ar = file->private_data;
+ int len = 0;
+
+ mutex_lock(&ar->conf_mutex);
+	len = scnprintf(buf, sizeof(buf), "%08x %08x\n",
+			ar->debug.pktlog_filter,
+			ar->debug.pktlog_mode);
+ mutex_unlock(&ar->conf_mutex);
+
+ return simple_read_from_buffer(ubuf, count, ppos, buf, len);
+}
+
+static const struct file_operations fops_pktlog_filter = {
+ .read = ath11k_read_pktlog_filter,
+ .write = ath11k_write_pktlog_filter,
+ .open = simple_open
+};
+
+static ssize_t ath11k_write_simulate_radar(struct file *file,
+ const char __user *user_buf,
+ size_t count, loff_t *ppos)
+{
+ struct ath11k *ar = file->private_data;
+ int ret;
+
+ ret = ath11k_wmi_simulate_radar(ar);
+ if (ret)
+ return ret;
+
+ return count;
+}
+
+static const struct file_operations fops_simulate_radar = {
+ .write = ath11k_write_simulate_radar,
+ .open = simple_open
+};
+
+static ssize_t ath11k_debug_dump_dbr_entries(struct file *file,
+ char __user *user_buf,
+ size_t count, loff_t *ppos)
+{
+ struct ath11k_dbg_dbr_data *dbr_dbg_data = file->private_data;
+ static const char * const event_id_to_string[] = {"empty", "Rx", "Replenish"};
+ int size = ATH11K_DEBUG_DBR_ENTRIES_MAX * 100;
+ char *buf;
+ int i, ret;
+ int len = 0;
+
+ buf = kzalloc(size, GFP_KERNEL);
+ if (!buf)
+ return -ENOMEM;
+
+ len += scnprintf(buf + len, size - len,
+ "-----------------------------------------\n");
+ len += scnprintf(buf + len, size - len,
+ "| idx | hp | tp | timestamp | event |\n");
+ len += scnprintf(buf + len, size - len,
+ "-----------------------------------------\n");
+
+ spin_lock_bh(&dbr_dbg_data->lock);
+
+ for (i = 0; i < dbr_dbg_data->num_ring_debug_entries; i++) {
+ len += scnprintf(buf + len, size - len,
+ "|%4u|%8u|%8u|%11llu|%8s|\n", i,
+ dbr_dbg_data->entries[i].hp,
+ dbr_dbg_data->entries[i].tp,
+ dbr_dbg_data->entries[i].timestamp,
+ event_id_to_string[dbr_dbg_data->entries[i].event]);
+ }
+
+ spin_unlock_bh(&dbr_dbg_data->lock);
+
+ ret = simple_read_from_buffer(user_buf, count, ppos, buf, len);
+ kfree(buf);
+
+ return ret;
+}
+
+static const struct file_operations fops_debug_dump_dbr_entries = {
+ .read = ath11k_debug_dump_dbr_entries,
+ .open = simple_open,
+ .owner = THIS_MODULE,
+ .llseek = default_llseek,
+};
+
+static void ath11k_debugfs_dbr_dbg_destroy(struct ath11k *ar, int dbr_id)
+{
+ struct ath11k_debug_dbr *dbr_debug;
+ struct ath11k_dbg_dbr_data *dbr_dbg_data;
+
+ if (!ar->debug.dbr_debug[dbr_id])
+ return;
+
+ dbr_debug = ar->debug.dbr_debug[dbr_id];
+ dbr_dbg_data = &dbr_debug->dbr_dbg_data;
+
+ debugfs_remove_recursive(dbr_debug->dbr_debugfs);
+ kfree(dbr_dbg_data->entries);
+ kfree(dbr_debug);
+ ar->debug.dbr_debug[dbr_id] = NULL;
+}
+
+static int ath11k_debugfs_dbr_dbg_init(struct ath11k *ar, int dbr_id)
+{
+ struct ath11k_debug_dbr *dbr_debug;
+ struct ath11k_dbg_dbr_data *dbr_dbg_data;
+ static const char * const dbr_id_to_str[] = {"spectral", "CFR"};
+
+ if (ar->debug.dbr_debug[dbr_id])
+ return 0;
+
+ ar->debug.dbr_debug[dbr_id] = kzalloc(sizeof(*dbr_debug),
+ GFP_KERNEL);
+
+ if (!ar->debug.dbr_debug[dbr_id])
+ return -ENOMEM;
+
+ dbr_debug = ar->debug.dbr_debug[dbr_id];
+ dbr_dbg_data = &dbr_debug->dbr_dbg_data;
+
+ if (dbr_debug->dbr_debugfs)
+ return 0;
+
+ dbr_debug->dbr_debugfs = debugfs_create_dir(dbr_id_to_str[dbr_id],
+ ar->debug.debugfs_pdev);
+ if (IS_ERR_OR_NULL(dbr_debug->dbr_debugfs)) {
+ if (IS_ERR(dbr_debug->dbr_debugfs))
+ return PTR_ERR(dbr_debug->dbr_debugfs);
+ return -ENOMEM;
+ }
+
+ dbr_debug->dbr_debug_enabled = true;
+ dbr_dbg_data->num_ring_debug_entries = ATH11K_DEBUG_DBR_ENTRIES_MAX;
+ dbr_dbg_data->dbr_debug_idx = 0;
+ dbr_dbg_data->entries = kcalloc(ATH11K_DEBUG_DBR_ENTRIES_MAX,
+ sizeof(struct ath11k_dbg_dbr_entry),
+ GFP_KERNEL);
+ if (!dbr_dbg_data->entries)
+ return -ENOMEM;
+
+ spin_lock_init(&dbr_dbg_data->lock);
+
+ debugfs_create_file("dump_dbr_debug", 0444, dbr_debug->dbr_debugfs,
+ dbr_dbg_data, &fops_debug_dump_dbr_entries);
+
+ return 0;
+}
+
+static ssize_t ath11k_debugfs_write_enable_dbr_dbg(struct file *file,
+ const char __user *ubuf,
+ size_t count, loff_t *ppos)
+{
+ struct ath11k *ar = file->private_data;
+ char buf[32] = {0};
+ u32 dbr_id, enable;
+ int ret;
+
+ mutex_lock(&ar->conf_mutex);
+
+ if (ar->state != ATH11K_STATE_ON) {
+ ret = -ENETDOWN;
+ goto out;
+ }
+
+ ret = simple_write_to_buffer(buf, sizeof(buf) - 1, ppos, ubuf, count);
+ if (ret < 0)
+ goto out;
+
+ buf[ret] = '\0';
+ ret = sscanf(buf, "%u %u", &dbr_id, &enable);
+ if (ret != 2 || dbr_id > 1 || enable > 1) {
+ ret = -EINVAL;
+ ath11k_warn(ar->ab, "usage: echo <dbr_id> <val> dbr_id:0-Spectral 1-CFR val:0-disable 1-enable\n");
+ goto out;
+ }
+
+ if (enable) {
+ ret = ath11k_debugfs_dbr_dbg_init(ar, dbr_id);
+ if (ret) {
+ ath11k_warn(ar->ab, "db ring module debugfs init failed: %d\n",
+ ret);
+ goto out;
+ }
+ } else {
+ ath11k_debugfs_dbr_dbg_destroy(ar, dbr_id);
+ }
+
+ ret = count;
+out:
+ mutex_unlock(&ar->conf_mutex);
+ return ret;
+}
+
+static const struct file_operations fops_dbr_debug = {
+ .write = ath11k_debugfs_write_enable_dbr_dbg,
+ .open = simple_open,
+ .owner = THIS_MODULE,
+ .llseek = default_llseek,
+};
+
+static ssize_t ath11k_write_ps_timekeeper_enable(struct file *file,
+ const char __user *user_buf,
+ size_t count, loff_t *ppos)
+{
+ struct ath11k *ar = file->private_data;
+ ssize_t ret;
+ u8 ps_timekeeper_enable;
+
+ if (kstrtou8_from_user(user_buf, count, 0, &ps_timekeeper_enable))
+ return -EINVAL;
+
+ mutex_lock(&ar->conf_mutex);
+
+ if (ar->state != ATH11K_STATE_ON) {
+ ret = -ENETDOWN;
+ goto exit;
+ }
+
+ if (!ar->ps_state_enable) {
+ ret = -EINVAL;
+ goto exit;
+ }
+
+ ar->ps_timekeeper_enable = !!ps_timekeeper_enable;
+ ret = count;
+exit:
+ mutex_unlock(&ar->conf_mutex);
+
+ return ret;
+}
+
+static ssize_t ath11k_read_ps_timekeeper_enable(struct file *file,
+ char __user *user_buf,
+ size_t count, loff_t *ppos)
+{
+ struct ath11k *ar = file->private_data;
+ char buf[32];
+ int len;
+
+ mutex_lock(&ar->conf_mutex);
+ len = scnprintf(buf, sizeof(buf), "%d\n", ar->ps_timekeeper_enable);
+ mutex_unlock(&ar->conf_mutex);
+
+ return simple_read_from_buffer(user_buf, count, ppos, buf, len);
+}
+
+static const struct file_operations fops_ps_timekeeper_enable = {
+ .read = ath11k_read_ps_timekeeper_enable,
+ .write = ath11k_write_ps_timekeeper_enable,
+ .open = simple_open,
+ .owner = THIS_MODULE,
+ .llseek = default_llseek,
+};
+
+static void ath11k_reset_peer_ps_duration(void *data,
+ struct ieee80211_sta *sta)
+{
+ struct ath11k *ar = data;
+ struct ath11k_sta *arsta = (struct ath11k_sta *)sta->drv_priv;
+
+ spin_lock_bh(&ar->data_lock);
+ arsta->ps_total_duration = 0;
+ spin_unlock_bh(&ar->data_lock);
+}
+
+static ssize_t ath11k_write_reset_ps_duration(struct file *file,
+ const char __user *user_buf,
+ size_t count, loff_t *ppos)
+{
+ struct ath11k *ar = file->private_data;
+ int ret;
+ u8 reset_ps_duration;
+
+ if (kstrtou8_from_user(user_buf, count, 0, &reset_ps_duration))
+ return -EINVAL;
+
+ mutex_lock(&ar->conf_mutex);
+
+ if (ar->state != ATH11K_STATE_ON) {
+ ret = -ENETDOWN;
+ goto exit;
+ }
+
+ if (!ar->ps_state_enable) {
+ ret = -EINVAL;
+ goto exit;
+ }
+
+ ieee80211_iterate_stations_atomic(ar->hw,
+ ath11k_reset_peer_ps_duration,
+ ar);
+
+ ret = count;
+exit:
+ mutex_unlock(&ar->conf_mutex);
+ return ret;
+}
+
+static const struct file_operations fops_reset_ps_duration = {
+ .write = ath11k_write_reset_ps_duration,
+ .open = simple_open,
+ .owner = THIS_MODULE,
+ .llseek = default_llseek,
+};
+
+static void ath11k_peer_ps_state_disable(void *data,
+ struct ieee80211_sta *sta)
+{
+ struct ath11k *ar = data;
+ struct ath11k_sta *arsta = (struct ath11k_sta *)sta->drv_priv;
+
+ spin_lock_bh(&ar->data_lock);
+ arsta->peer_ps_state = WMI_PEER_PS_STATE_DISABLED;
+ arsta->ps_start_time = 0;
+ arsta->ps_total_duration = 0;
+ spin_unlock_bh(&ar->data_lock);
+}
+
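+/* Toggle the WMI_PDEV_PEER_STA_PS_STATECHG_ENABLE pdev parameter; disabling
+ * it also turns off the PS timekeeper and clears the per-station PS state.
+ */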
+static ssize_t ath11k_write_ps_state_enable(struct file *file,
+ const char __user *user_buf,
+ size_t count, loff_t *ppos)
+{
+ struct ath11k *ar = file->private_data;
+ struct ath11k_pdev *pdev = ar->pdev;
+ int ret;
+ u32 param;
+ u8 ps_state_enable;
+
+ if (kstrtou8_from_user(user_buf, count, 0, &ps_state_enable))
+ return -EINVAL;
+
+ mutex_lock(&ar->conf_mutex);
+
+ ps_state_enable = !!ps_state_enable;
+
+ if (ar->ps_state_enable == ps_state_enable) {
+ ret = count;
+ goto exit;
+ }
+
+ param = WMI_PDEV_PEER_STA_PS_STATECHG_ENABLE;
+ ret = ath11k_wmi_pdev_set_param(ar, param, ps_state_enable, pdev->pdev_id);
+ if (ret) {
+ ath11k_warn(ar->ab, "failed to enable ps_state_enable: %d\n",
+ ret);
+ goto exit;
+ }
+ ar->ps_state_enable = ps_state_enable;
+
+ if (!ar->ps_state_enable) {
+ ar->ps_timekeeper_enable = false;
+ ieee80211_iterate_stations_atomic(ar->hw,
+ ath11k_peer_ps_state_disable,
+ ar);
+ }
+
+ ret = count;
+
+exit:
+ mutex_unlock(&ar->conf_mutex);
+
+ return ret;
+}
+
+static ssize_t ath11k_read_ps_state_enable(struct file *file,
+ char __user *user_buf,
+ size_t count, loff_t *ppos)
+{
+ struct ath11k *ar = file->private_data;
+ char buf[32];
+ int len;
+
+ mutex_lock(&ar->conf_mutex);
+ len = scnprintf(buf, sizeof(buf), "%d\n", ar->ps_state_enable);
+ mutex_unlock(&ar->conf_mutex);
+
+ return simple_read_from_buffer(user_buf, count, ppos, buf, len);
+}
+
+static const struct file_operations fops_ps_state_enable = {
+ .read = ath11k_read_ps_state_enable,
+ .write = ath11k_write_ps_state_enable,
+ .open = simple_open,
+ .owner = THIS_MODULE,
+ .llseek = default_llseek,
+};
+
+int ath11k_debugfs_register(struct ath11k *ar)
+{
+ struct ath11k_base *ab = ar->ab;
+ char pdev_name[5];
+ char buf[100] = {0};
+
+ snprintf(pdev_name, sizeof(pdev_name), "%s%d", "mac", ar->pdev_idx);
+
+ ar->debug.debugfs_pdev = debugfs_create_dir(pdev_name, ab->debugfs_soc);
+ if (IS_ERR(ar->debug.debugfs_pdev))
+ return PTR_ERR(ar->debug.debugfs_pdev);
+
+ /* Create a symlink under ieee80211/phy* */
+	snprintf(buf, sizeof(buf), "../../ath11k/%pd2", ar->debug.debugfs_pdev);
+ debugfs_create_symlink("ath11k", ar->hw->wiphy->debugfsdir, buf);
+
+ ath11k_debugfs_htt_stats_init(ar);
+
+ ath11k_debugfs_fw_stats_init(ar);
+
+ debugfs_create_file("ext_tx_stats", 0644,
+ ar->debug.debugfs_pdev, ar,
+ &fops_extd_tx_stats);
+ debugfs_create_file("ext_rx_stats", 0644,
+ ar->debug.debugfs_pdev, ar,
+ &fops_extd_rx_stats);
+ debugfs_create_file("pktlog_filter", 0644,
+ ar->debug.debugfs_pdev, ar,
+ &fops_pktlog_filter);
+ debugfs_create_file("fw_dbglog_config", 0600,
+ ar->debug.debugfs_pdev, ar,
+ &fops_fw_dbglog);
+
+ if (ar->hw->wiphy->bands[NL80211_BAND_5GHZ]) {
+ debugfs_create_file("dfs_simulate_radar", 0200,
+ ar->debug.debugfs_pdev, ar,
+ &fops_simulate_radar);
+ debugfs_create_bool("dfs_block_radar_events", 0200,
+ ar->debug.debugfs_pdev,
+ &ar->dfs_block_radar_events);
+ }
+
+ if (ab->hw_params.dbr_debug_support)
+ debugfs_create_file("enable_dbr_debug", 0200, ar->debug.debugfs_pdev,
+ ar, &fops_dbr_debug);
+
+ debugfs_create_file("ps_state_enable", 0600, ar->debug.debugfs_pdev, ar,
+ &fops_ps_state_enable);
+
+ if (test_bit(WMI_TLV_SERVICE_PEER_POWER_SAVE_DURATION_SUPPORT,
+ ar->ab->wmi_ab.svc_map)) {
+ debugfs_create_file("ps_timekeeper_enable", 0600,
+ ar->debug.debugfs_pdev, ar,
+ &fops_ps_timekeeper_enable);
+
+ debugfs_create_file("reset_ps_duration", 0200,
+ ar->debug.debugfs_pdev, ar,
+ &fops_reset_ps_duration);
+ }
+
+ return 0;
+}
+
+void ath11k_debugfs_unregister(struct ath11k *ar)
+{
+ struct ath11k_debug_dbr *dbr_debug;
+ struct ath11k_dbg_dbr_data *dbr_dbg_data;
+ int i;
+
+ for (i = 0; i < WMI_DIRECT_BUF_MAX; i++) {
+ dbr_debug = ar->debug.dbr_debug[i];
+ if (!dbr_debug)
+ continue;
+
+ dbr_dbg_data = &dbr_debug->dbr_dbg_data;
+ kfree(dbr_dbg_data->entries);
+ debugfs_remove_recursive(dbr_debug->dbr_debugfs);
+ kfree(dbr_debug);
+ ar->debug.dbr_debug[i] = NULL;
+ }
+}
+
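+/* Write format: <peer mac> <dialog_id> <wake_intvl_us> <wake_intvl_mantis>
+ * <wake_dura_us> <sp_offset_us> <twt_cmd> <flag_bcast> <flag_trigger>
+ * <flag_flow_type> <flag_protection>, with the mac as colon-separated hex
+ * octets.
+ */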
+static ssize_t ath11k_write_twt_add_dialog(struct file *file,
+ const char __user *ubuf,
+ size_t count, loff_t *ppos)
+{
+ struct ath11k_vif *arvif = file->private_data;
+ struct wmi_twt_add_dialog_params params = { 0 };
+ struct wmi_twt_enable_params twt_params = {0};
+ struct ath11k *ar = arvif->ar;
+ u8 buf[128] = {0};
+ int ret;
+
+ if (ar->twt_enabled == 0) {
+ ath11k_err(ar->ab, "twt support is not enabled\n");
+ return -EOPNOTSUPP;
+ }
+
+ ret = simple_write_to_buffer(buf, sizeof(buf) - 1, ppos, ubuf, count);
+ if (ret < 0)
+ return ret;
+
+ buf[ret] = '\0';
+ ret = sscanf(buf,
+ "%02hhx:%02hhx:%02hhx:%02hhx:%02hhx:%02hhx %u %u %u %u %u %hhu %hhu %hhu %hhu %hhu",
+ &params.peer_macaddr[0],
+ &params.peer_macaddr[1],
+ &params.peer_macaddr[2],
+ &params.peer_macaddr[3],
+ &params.peer_macaddr[4],
+ &params.peer_macaddr[5],
+ &params.dialog_id,
+ &params.wake_intvl_us,
+ &params.wake_intvl_mantis,
+ &params.wake_dura_us,
+ &params.sp_offset_us,
+ &params.twt_cmd,
+ &params.flag_bcast,
+ &params.flag_trigger,
+ &params.flag_flow_type,
+ &params.flag_protection);
+ if (ret != 16)
+ return -EINVAL;
+
+ /* In the case of station vif, TWT is entirely handled by
+ * the firmware based on the input parameters in the TWT enable
+ * WMI command that is sent to the target during assoc.
+ * For manually testing the TWT feature, we need to first disable
+ * TWT and send enable command again with TWT input parameter
+ * sta_cong_timer_ms set to 0.
+ */
+ if (arvif->vif->type == NL80211_IFTYPE_STATION) {
+ ath11k_wmi_send_twt_disable_cmd(ar, ar->pdev->pdev_id);
+
+ ath11k_wmi_fill_default_twt_params(&twt_params);
+ twt_params.sta_cong_timer_ms = 0;
+
+ ath11k_wmi_send_twt_enable_cmd(ar, ar->pdev->pdev_id, &twt_params);
+ }
+
+ params.vdev_id = arvif->vdev_id;
+
+ ret = ath11k_wmi_send_twt_add_dialog_cmd(arvif->ar, &params);
+ if (ret)
+ goto err_twt_add_dialog;
+
+ return count;
+
+err_twt_add_dialog:
+ if (arvif->vif->type == NL80211_IFTYPE_STATION) {
+ ath11k_wmi_send_twt_disable_cmd(ar, ar->pdev->pdev_id);
+ ath11k_wmi_fill_default_twt_params(&twt_params);
+ ath11k_wmi_send_twt_enable_cmd(ar, ar->pdev->pdev_id, &twt_params);
+ }
+
+ return ret;
+}
+
+static ssize_t ath11k_write_twt_del_dialog(struct file *file,
+ const char __user *ubuf,
+ size_t count, loff_t *ppos)
+{
+ struct ath11k_vif *arvif = file->private_data;
+ struct wmi_twt_del_dialog_params params = { 0 };
+ struct wmi_twt_enable_params twt_params = {0};
+ struct ath11k *ar = arvif->ar;
+ u8 buf[64] = {0};
+ int ret;
+
+ if (ar->twt_enabled == 0) {
+ ath11k_err(ar->ab, "twt support is not enabled\n");
+ return -EOPNOTSUPP;
+ }
+
+ ret = simple_write_to_buffer(buf, sizeof(buf) - 1, ppos, ubuf, count);
+ if (ret < 0)
+ return ret;
+
+ buf[ret] = '\0';
+ ret = sscanf(buf, "%02hhx:%02hhx:%02hhx:%02hhx:%02hhx:%02hhx %u",
+ &params.peer_macaddr[0],
+ &params.peer_macaddr[1],
+ &params.peer_macaddr[2],
+ &params.peer_macaddr[3],
+ &params.peer_macaddr[4],
+ &params.peer_macaddr[5],
+ &params.dialog_id);
+ if (ret != 7)
+ return -EINVAL;
+
+ params.vdev_id = arvif->vdev_id;
+
+ ret = ath11k_wmi_send_twt_del_dialog_cmd(arvif->ar, &params);
+ if (ret)
+ return ret;
+
+ if (arvif->vif->type == NL80211_IFTYPE_STATION) {
+ ath11k_wmi_send_twt_disable_cmd(ar, ar->pdev->pdev_id);
+ ath11k_wmi_fill_default_twt_params(&twt_params);
+ ath11k_wmi_send_twt_enable_cmd(ar, ar->pdev->pdev_id, &twt_params);
+ }
+
+ return count;
+}
+
+static ssize_t ath11k_write_twt_pause_dialog(struct file *file,
+ const char __user *ubuf,
+ size_t count, loff_t *ppos)
+{
+ struct ath11k_vif *arvif = file->private_data;
+ struct wmi_twt_pause_dialog_params params = { 0 };
+ u8 buf[64] = {0};
+ int ret;
+
+ if (arvif->ar->twt_enabled == 0) {
+ ath11k_err(arvif->ar->ab, "twt support is not enabled\n");
+ return -EOPNOTSUPP;
+ }
+
+ ret = simple_write_to_buffer(buf, sizeof(buf) - 1, ppos, ubuf, count);
+ if (ret < 0)
+ return ret;
+
+ buf[ret] = '\0';
+ ret = sscanf(buf, "%02hhx:%02hhx:%02hhx:%02hhx:%02hhx:%02hhx %u",
+ &params.peer_macaddr[0],
+ &params.peer_macaddr[1],
+ &params.peer_macaddr[2],
+ &params.peer_macaddr[3],
+ &params.peer_macaddr[4],
+ &params.peer_macaddr[5],
+ &params.dialog_id);
+ if (ret != 7)
+ return -EINVAL;
+
+ params.vdev_id = arvif->vdev_id;
+
+ ret = ath11k_wmi_send_twt_pause_dialog_cmd(arvif->ar, &params);
+ if (ret)
+ return ret;
+
+ return count;
+}
+
+static ssize_t ath11k_write_twt_resume_dialog(struct file *file,
+ const char __user *ubuf,
+ size_t count, loff_t *ppos)
+{
+ struct ath11k_vif *arvif = file->private_data;
+ struct wmi_twt_resume_dialog_params params = { 0 };
+ u8 buf[64] = {0};
+ int ret;
+
+ if (arvif->ar->twt_enabled == 0) {
+ ath11k_err(arvif->ar->ab, "twt support is not enabled\n");
+ return -EOPNOTSUPP;
+ }
+
+ ret = simple_write_to_buffer(buf, sizeof(buf) - 1, ppos, ubuf, count);
+ if (ret < 0)
+ return ret;
+
+ buf[ret] = '\0';
+ ret = sscanf(buf, "%02hhx:%02hhx:%02hhx:%02hhx:%02hhx:%02hhx %u %u %u",
+ &params.peer_macaddr[0],
+ &params.peer_macaddr[1],
+ &params.peer_macaddr[2],
+ &params.peer_macaddr[3],
+ &params.peer_macaddr[4],
+ &params.peer_macaddr[5],
+ &params.dialog_id,
+ &params.sp_offset_us,
+ &params.next_twt_size);
+ if (ret != 9)
+ return -EINVAL;
+
+ params.vdev_id = arvif->vdev_id;
+
+ ret = ath11k_wmi_send_twt_resume_dialog_cmd(arvif->ar, &params);
+ if (ret)
+ return ret;
+
+ return count;
+}
+
+static const struct file_operations ath11k_fops_twt_add_dialog = {
+ .write = ath11k_write_twt_add_dialog,
+ .open = simple_open
+};
+
+static const struct file_operations ath11k_fops_twt_del_dialog = {
+ .write = ath11k_write_twt_del_dialog,
+ .open = simple_open
+};
+
+static const struct file_operations ath11k_fops_twt_pause_dialog = {
+ .write = ath11k_write_twt_pause_dialog,
+ .open = simple_open
+};
+
+static const struct file_operations ath11k_fops_twt_resume_dialog = {
+ .write = ath11k_write_twt_resume_dialog,
+ .open = simple_open
+};
+
+void ath11k_debugfs_add_interface(struct ath11k_vif *arvif)
+{
+ struct ath11k_base *ab = arvif->ar->ab;
+
+ if (arvif->vif->type != NL80211_IFTYPE_AP &&
+ !(arvif->vif->type == NL80211_IFTYPE_STATION &&
+ test_bit(WMI_TLV_SERVICE_STA_TWT, ab->wmi_ab.svc_map)))
+ return;
+
+ arvif->debugfs_twt = debugfs_create_dir("twt",
+ arvif->vif->debugfs_dir);
+ debugfs_create_file("add_dialog", 0200, arvif->debugfs_twt,
+ arvif, &ath11k_fops_twt_add_dialog);
+
+ debugfs_create_file("del_dialog", 0200, arvif->debugfs_twt,
+ arvif, &ath11k_fops_twt_del_dialog);
+
+ debugfs_create_file("pause_dialog", 0200, arvif->debugfs_twt,
+ arvif, &ath11k_fops_twt_pause_dialog);
+
+ debugfs_create_file("resume_dialog", 0200, arvif->debugfs_twt,
+ arvif, &ath11k_fops_twt_resume_dialog);
+}
+
+void ath11k_debugfs_remove_interface(struct ath11k_vif *arvif)
+{
+ if (!arvif->debugfs_twt)
+ return;
+
+ debugfs_remove_recursive(arvif->debugfs_twt);
+ arvif->debugfs_twt = NULL;
+}
diff --git a/drivers/net/wireless/ath/ath11k/debugfs.h b/drivers/net/wireless/ath/ath11k/debugfs.h
new file mode 100644
index 0000000000..3af0169f6c
--- /dev/null
+++ b/drivers/net/wireless/ath/ath11k/debugfs.h
@@ -0,0 +1,406 @@
+/* SPDX-License-Identifier: BSD-3-Clause-Clear */
+/*
+ * Copyright (c) 2018-2019 The Linux Foundation. All rights reserved.
+ */
+
+#ifndef _ATH11K_DEBUGFS_H_
+#define _ATH11K_DEBUGFS_H_
+
+#include "hal_tx.h"
+
+#define ATH11K_TX_POWER_MAX_VAL 70
+#define ATH11K_TX_POWER_MIN_VAL 0
+
+/* htt_dbg_ext_stats_type */
+enum ath11k_dbg_htt_ext_stats_type {
+ ATH11K_DBG_HTT_EXT_STATS_RESET = 0,
+ ATH11K_DBG_HTT_EXT_STATS_PDEV_TX = 1,
+ ATH11K_DBG_HTT_EXT_STATS_PDEV_RX = 2,
+ ATH11K_DBG_HTT_EXT_STATS_PDEV_TX_HWQ = 3,
+ ATH11K_DBG_HTT_EXT_STATS_PDEV_TX_SCHED = 4,
+ ATH11K_DBG_HTT_EXT_STATS_PDEV_ERROR = 5,
+ ATH11K_DBG_HTT_EXT_STATS_PDEV_TQM = 6,
+ ATH11K_DBG_HTT_EXT_STATS_TQM_CMDQ = 7,
+ ATH11K_DBG_HTT_EXT_STATS_TX_DE_INFO = 8,
+ ATH11K_DBG_HTT_EXT_STATS_PDEV_TX_RATE = 9,
+ ATH11K_DBG_HTT_EXT_STATS_PDEV_RX_RATE = 10,
+ ATH11K_DBG_HTT_EXT_STATS_PEER_INFO = 11,
+ ATH11K_DBG_HTT_EXT_STATS_TX_SELFGEN_INFO = 12,
+ ATH11K_DBG_HTT_EXT_STATS_TX_MU_HWQ = 13,
+ ATH11K_DBG_HTT_EXT_STATS_RING_IF_INFO = 14,
+ ATH11K_DBG_HTT_EXT_STATS_SRNG_INFO = 15,
+ ATH11K_DBG_HTT_EXT_STATS_SFM_INFO = 16,
+ ATH11K_DBG_HTT_EXT_STATS_PDEV_TX_MU = 17,
+ ATH11K_DBG_HTT_EXT_STATS_ACTIVE_PEERS_LIST = 18,
+ ATH11K_DBG_HTT_EXT_STATS_PDEV_CCA_STATS = 19,
+ ATH11K_DBG_HTT_EXT_STATS_TWT_SESSIONS = 20,
+ ATH11K_DBG_HTT_EXT_STATS_REO_RESOURCE_STATS = 21,
+ ATH11K_DBG_HTT_EXT_STATS_TX_SOUNDING_INFO = 22,
+ ATH11K_DBG_HTT_EXT_STATS_PDEV_OBSS_PD_STATS = 23,
+ ATH11K_DBG_HTT_EXT_STATS_RING_BACKPRESSURE_STATS = 24,
+ ATH11K_DBG_HTT_EXT_STATS_PEER_CTRL_PATH_TXRX_STATS = 29,
+ ATH11K_DBG_HTT_EXT_STATS_PDEV_TX_RATE_TXBF_STATS = 31,
+ ATH11K_DBG_HTT_EXT_STATS_TXBF_OFDMA = 32,
+ ATH11K_DBG_HTT_EXT_PHY_COUNTERS_AND_PHY_STATS = 37,
+
+ /* keep this last */
+ ATH11K_DBG_HTT_NUM_EXT_STATS,
+};
+
+#define ATH11K_DEBUG_DBR_ENTRIES_MAX 512
+
+enum ath11k_dbg_dbr_event {
+ ATH11K_DBG_DBR_EVENT_INVALID,
+ ATH11K_DBG_DBR_EVENT_RX,
+ ATH11K_DBG_DBR_EVENT_REPLENISH,
+ ATH11K_DBG_DBR_EVENT_MAX,
+};
+
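+/* One snapshot of direct-buffer ring state (head and tail pointers),
+ * recorded per RX or replenish event.
+ */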
+struct ath11k_dbg_dbr_entry {
+ u32 hp;
+ u32 tp;
+ u64 timestamp;
+ enum ath11k_dbg_dbr_event event;
+};
+
+struct ath11k_dbg_dbr_data {
+ /* protects the DBR debug entries and index in this struct */
+ spinlock_t lock;
+ struct ath11k_dbg_dbr_entry *entries;
+ u32 dbr_debug_idx;
+ u32 num_ring_debug_entries;
+};
+
+struct ath11k_debug_dbr {
+ struct ath11k_dbg_dbr_data dbr_dbg_data;
+ struct dentry *dbr_debugfs;
+ bool dbr_debug_enabled;
+};
+
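+/* State for a single in-flight HTT extended-stats request; the response
+ * TLVs are rendered as text into buf, and cmpln signals completion once
+ * done is set.
+ */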
+struct debug_htt_stats_req {
+ bool done;
+ u8 pdev_id;
+ u8 type;
+ u8 peer_addr[ETH_ALEN];
+ struct completion cmpln;
+ u32 buf_len;
+ u8 buf[];
+};
+
+struct ath_pktlog_hdr {
+ u16 flags;
+ u16 missed_cnt;
+ u16 log_type;
+ u16 size;
+ u32 timestamp;
+ u32 type_specific_data;
+ u8 payload[];
+};
+
+#define ATH11K_HTT_PEER_STATS_RESET BIT(16)
+
+#define ATH11K_HTT_STATS_BUF_SIZE (1024 * 512)
+#define ATH11K_FW_STATS_BUF_SIZE (1024 * 1024)
+
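+/* Packet-log event filter bits; ATH11K_PKTLOG_ANY is the OR of all the
+ * individual filters below (0x6f).
+ */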
+enum ath11k_pktlog_filter {
+ ATH11K_PKTLOG_RX = 0x000000001,
+ ATH11K_PKTLOG_TX = 0x000000002,
+ ATH11K_PKTLOG_RCFIND = 0x000000004,
+ ATH11K_PKTLOG_RCUPDATE = 0x000000008,
+ ATH11K_PKTLOG_EVENT_SMART_ANT = 0x000000020,
+ ATH11K_PKTLOG_EVENT_SW = 0x000000040,
+ ATH11K_PKTLOG_ANY = 0x00000006f,
+};
+
+enum ath11k_pktlog_mode {
+ ATH11K_PKTLOG_MODE_LITE = 1,
+ ATH11K_PKTLOG_MODE_FULL = 2,
+};
+
+enum ath11k_pktlog_enum {
+ ATH11K_PKTLOG_TYPE_INVALID = 0,
+ ATH11K_PKTLOG_TYPE_TX_CTRL = 1,
+ ATH11K_PKTLOG_TYPE_TX_STAT = 2,
+ ATH11K_PKTLOG_TYPE_TX_MSDU_ID = 3,
+ ATH11K_PKTLOG_TYPE_RX_STAT = 5,
+ ATH11K_PKTLOG_TYPE_RC_FIND = 6,
+ ATH11K_PKTLOG_TYPE_RC_UPDATE = 7,
+ ATH11K_PKTLOG_TYPE_TX_VIRT_ADDR = 8,
+ ATH11K_PKTLOG_TYPE_RX_CBF = 10,
+ ATH11K_PKTLOG_TYPE_RX_STATBUF = 22,
+ ATH11K_PKTLOG_TYPE_PPDU_STATS = 23,
+ ATH11K_PKTLOG_TYPE_LITE_RX = 24,
+};
+
+enum ath11k_dbg_aggr_mode {
+ ATH11K_DBG_AGGR_MODE_AUTO,
+ ATH11K_DBG_AGGR_MODE_MANUAL,
+ ATH11K_DBG_AGGR_MODE_MAX,
+};
+
+enum fw_dbglog_wlan_module_id {
+ WLAN_MODULE_ID_MIN = 0,
+ WLAN_MODULE_INF = WLAN_MODULE_ID_MIN,
+ WLAN_MODULE_WMI,
+ WLAN_MODULE_STA_PWRSAVE,
+ WLAN_MODULE_WHAL,
+ WLAN_MODULE_COEX,
+ WLAN_MODULE_ROAM,
+ WLAN_MODULE_RESMGR_CHAN_MANAGER,
+ WLAN_MODULE_RESMGR,
+ WLAN_MODULE_VDEV_MGR,
+ WLAN_MODULE_SCAN,
+ WLAN_MODULE_RATECTRL,
+ WLAN_MODULE_AP_PWRSAVE,
+ WLAN_MODULE_BLOCKACK,
+ WLAN_MODULE_MGMT_TXRX,
+ WLAN_MODULE_DATA_TXRX,
+ WLAN_MODULE_HTT,
+ WLAN_MODULE_HOST,
+ WLAN_MODULE_BEACON,
+ WLAN_MODULE_OFFLOAD,
+ WLAN_MODULE_WAL,
+ WLAN_WAL_MODULE_DE,
+ WLAN_MODULE_PCIELP,
+ WLAN_MODULE_RTT,
+ WLAN_MODULE_RESOURCE,
+ WLAN_MODULE_DCS,
+ WLAN_MODULE_CACHEMGR,
+ WLAN_MODULE_ANI,
+ WLAN_MODULE_P2P,
+ WLAN_MODULE_CSA,
+ WLAN_MODULE_NLO,
+ WLAN_MODULE_CHATTER,
+ WLAN_MODULE_WOW,
+ WLAN_MODULE_WAL_VDEV,
+ WLAN_MODULE_WAL_PDEV,
+ WLAN_MODULE_TEST,
+ WLAN_MODULE_STA_SMPS,
+ WLAN_MODULE_SWBMISS,
+ WLAN_MODULE_WMMAC,
+ WLAN_MODULE_TDLS,
+ WLAN_MODULE_HB,
+ WLAN_MODULE_TXBF,
+ WLAN_MODULE_BATCH_SCAN,
+ WLAN_MODULE_THERMAL_MGR,
+ WLAN_MODULE_PHYERR_DFS,
+ WLAN_MODULE_RMC,
+ WLAN_MODULE_STATS,
+ WLAN_MODULE_NAN,
+ WLAN_MODULE_IBSS_PWRSAVE,
+ WLAN_MODULE_HIF_UART,
+ WLAN_MODULE_LPI,
+ WLAN_MODULE_EXTSCAN,
+ WLAN_MODULE_UNIT_TEST,
+ WLAN_MODULE_MLME,
+ WLAN_MODULE_SUPPL,
+ WLAN_MODULE_ERE,
+ WLAN_MODULE_OCB,
+ WLAN_MODULE_RSSI_MONITOR,
+ WLAN_MODULE_WPM,
+ WLAN_MODULE_CSS,
+ WLAN_MODULE_PPS,
+ WLAN_MODULE_SCAN_CH_PREDICT,
+ WLAN_MODULE_MAWC,
+ WLAN_MODULE_CMC_QMIC,
+ WLAN_MODULE_EGAP,
+ WLAN_MODULE_NAN20,
+ WLAN_MODULE_QBOOST,
+ WLAN_MODULE_P2P_LISTEN_OFFLOAD,
+ WLAN_MODULE_HALPHY,
+ WLAN_WAL_MODULE_ENQ,
+ WLAN_MODULE_GNSS,
+ WLAN_MODULE_WAL_MEM,
+ WLAN_MODULE_SCHED_ALGO,
+ WLAN_MODULE_TX,
+ WLAN_MODULE_RX,
+ WLAN_MODULE_WLM,
+ WLAN_MODULE_RU_ALLOCATOR,
+ WLAN_MODULE_11K_OFFLOAD,
+ WLAN_MODULE_STA_TWT,
+ WLAN_MODULE_AP_TWT,
+ WLAN_MODULE_UL_OFDMA,
+ WLAN_MODULE_HPCS_PULSE,
+ WLAN_MODULE_DTF,
+ WLAN_MODULE_QUIET_IE,
+ WLAN_MODULE_SHMEM_MGR,
+ WLAN_MODULE_CFIR,
+ WLAN_MODULE_CODE_COVER,
+ WLAN_MODULE_SHO,
+ WLAN_MODULE_MLO_MGR,
+ WLAN_MODULE_PEER_INIT,
+ WLAN_MODULE_STA_MLO_PS,
+
+ WLAN_MODULE_ID_MAX,
+ WLAN_MODULE_ID_INVALID = WLAN_MODULE_ID_MAX,
+};
+
+enum fw_dbglog_log_level {
+ ATH11K_FW_DBGLOG_ML = 0,
+ ATH11K_FW_DBGLOG_VERBOSE = 0,
+ ATH11K_FW_DBGLOG_INFO,
+ ATH11K_FW_DBGLOG_INFO_LVL_1,
+ ATH11K_FW_DBGLOG_INFO_LVL_2,
+ ATH11K_FW_DBGLOG_WARN,
+ ATH11K_FW_DBGLOG_ERR,
+ ATH11K_FW_DBGLOG_LVL_MAX
+};
+
+struct ath11k_fw_dbglog {
+ enum wmi_debug_log_param param;
+ union {
+ struct {
+ /* log_level values are given in enum fw_dbglog_log_level */
+ u16 log_level;
+ /* module_id values are given in enum fw_dbglog_wlan_module_id */
+ u16 module_id;
+ };
+ /* Depending on param, value holds the packed log_level and
+ * module_id fields above, a vdev_id, a vdev_id bitmap, or a
+ * bare log_level.
+ */
+ u32 value;
+ };
+};
+
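+/* When CONFIG_ATH11K_DEBUGFS is disabled, the declarations below are
+ * replaced by the inline no-op/zero-returning stubs in the #else branch.
+ */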
+#ifdef CONFIG_ATH11K_DEBUGFS
+int ath11k_debugfs_soc_create(struct ath11k_base *ab);
+void ath11k_debugfs_soc_destroy(struct ath11k_base *ab);
+int ath11k_debugfs_pdev_create(struct ath11k_base *ab);
+void ath11k_debugfs_pdev_destroy(struct ath11k_base *ab);
+int ath11k_debugfs_register(struct ath11k *ar);
+void ath11k_debugfs_unregister(struct ath11k *ar);
+void ath11k_debugfs_fw_stats_process(struct ath11k *ar, struct ath11k_fw_stats *stats);
+
+void ath11k_debugfs_fw_stats_init(struct ath11k *ar);
+int ath11k_debugfs_get_fw_stats(struct ath11k *ar, u32 pdev_id,
+ u32 vdev_id, u32 stats_id);
+
+static inline bool ath11k_debugfs_is_pktlog_lite_mode_enabled(struct ath11k *ar)
+{
+ return (ar->debug.pktlog_mode == ATH11K_PKTLOG_MODE_LITE);
+}
+
+static inline bool ath11k_debugfs_is_pktlog_rx_stats_enabled(struct ath11k *ar)
+{
+ return (!ar->debug.pktlog_peer_valid && ar->debug.pktlog_mode);
+}
+
+static inline bool ath11k_debugfs_is_pktlog_peer_valid(struct ath11k *ar, u8 *addr)
+{
+ return (ar->debug.pktlog_peer_valid && ar->debug.pktlog_mode &&
+ ether_addr_equal(addr, ar->debug.pktlog_peer_addr));
+}
+
+static inline int ath11k_debugfs_is_extd_tx_stats_enabled(struct ath11k *ar)
+{
+ return ar->debug.extd_tx_stats;
+}
+
+static inline int ath11k_debugfs_is_extd_rx_stats_enabled(struct ath11k *ar)
+{
+ return ar->debug.extd_rx_stats;
+}
+
+static inline int ath11k_debugfs_rx_filter(struct ath11k *ar)
+{
+ return ar->debug.rx_filter;
+}
+
+void ath11k_debugfs_add_interface(struct ath11k_vif *arvif);
+void ath11k_debugfs_remove_interface(struct ath11k_vif *arvif);
+void ath11k_debugfs_add_dbring_entry(struct ath11k *ar,
+ enum wmi_direct_buffer_module id,
+ enum ath11k_dbg_dbr_event event,
+ struct hal_srng *srng);
+
+#else
+static inline int ath11k_debugfs_soc_create(struct ath11k_base *ab)
+{
+ return 0;
+}
+
+static inline void ath11k_debugfs_soc_destroy(struct ath11k_base *ab)
+{
+}
+
+static inline int ath11k_debugfs_pdev_create(struct ath11k_base *ab)
+{
+ return 0;
+}
+
+static inline void ath11k_debugfs_pdev_destroy(struct ath11k_base *ab)
+{
+}
+
+static inline int ath11k_debugfs_register(struct ath11k *ar)
+{
+ return 0;
+}
+
+static inline void ath11k_debugfs_unregister(struct ath11k *ar)
+{
+}
+
+static inline void ath11k_debugfs_fw_stats_process(struct ath11k *ar,
+ struct ath11k_fw_stats *stats)
+{
+}
+
+static inline void ath11k_debugfs_fw_stats_init(struct ath11k *ar)
+{
+}
+
+static inline int ath11k_debugfs_is_extd_tx_stats_enabled(struct ath11k *ar)
+{
+ return 0;
+}
+
+static inline int ath11k_debugfs_is_extd_rx_stats_enabled(struct ath11k *ar)
+{
+ return 0;
+}
+
+static inline bool ath11k_debugfs_is_pktlog_lite_mode_enabled(struct ath11k *ar)
+{
+ return false;
+}
+
+static inline bool ath11k_debugfs_is_pktlog_rx_stats_enabled(struct ath11k *ar)
+{
+ return false;
+}
+
+static inline bool ath11k_debugfs_is_pktlog_peer_valid(struct ath11k *ar, u8 *addr)
+{
+ return false;
+}
+
+static inline int ath11k_debugfs_rx_filter(struct ath11k *ar)
+{
+ return 0;
+}
+
+static inline int ath11k_debugfs_get_fw_stats(struct ath11k *ar,
+ u32 pdev_id, u32 vdev_id, u32 stats_id)
+{
+ return 0;
+}
+
+static inline void ath11k_debugfs_add_interface(struct ath11k_vif *arvif)
+{
+}
+
+static inline void ath11k_debugfs_remove_interface(struct ath11k_vif *arvif)
+{
+}
+
+static inline void
+ath11k_debugfs_add_dbring_entry(struct ath11k *ar,
+ enum wmi_direct_buffer_module id,
+ enum ath11k_dbg_dbr_event event,
+ struct hal_srng *srng)
+{
+}
+#endif /* CONFIG_ATH11K_DEBUGFS */
+
+#endif /* _ATH11K_DEBUGFS_H_ */
diff --git a/drivers/net/wireless/ath/ath11k/debugfs_htt_stats.c b/drivers/net/wireless/ath/ath11k/debugfs_htt_stats.c
new file mode 100644
index 0000000000..0207fc4910
--- /dev/null
+++ b/drivers/net/wireless/ath/ath11k/debugfs_htt_stats.c
@@ -0,0 +1,4904 @@
+// SPDX-License-Identifier: BSD-3-Clause-Clear
+/*
+ * Copyright (c) 2018-2019 The Linux Foundation. All rights reserved.
+ * Copyright (c) 2022 Qualcomm Innovation Center, Inc. All rights reserved.
+ */
+
+#include <linux/vmalloc.h>
+#include "core.h"
+#include "dp_tx.h"
+#include "dp_rx.h"
+#include "debug.h"
+#include "debugfs_htt_stats.h"
+
+#define HTT_MAX_PRINT_CHAR_PER_ELEM 15
+
+#define HTT_TLV_HDR_LEN 4
+
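+/* Append "str = 0:v0, 1:v1, ..." (index:value per element of arr) at
+ * out + buflen, bounded by ATH11K_HTT_STATS_BUF_SIZE, followed by the
+ * given newline string; buflen is advanced past everything written.
+ */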
+#define PRINT_ARRAY_TO_BUF(out, buflen, arr, str, len, newline) \
+ do { \
+ int index = 0; u8 i; const char *str_val = str; \
+ const char *new_line = newline; \
+ if (str_val) { \
+ index += scnprintf((out + buflen), \
+ (ATH11K_HTT_STATS_BUF_SIZE - buflen), \
+ "%s = ", str_val); \
+ } \
+ for (i = 0; i < len; i++) { \
+ index += scnprintf((out + buflen) + index, \
+ (ATH11K_HTT_STATS_BUF_SIZE - buflen) - index, \
+ " %u:%u,", i, arr[i]); \
+ } \
+ index += scnprintf((out + buflen) + index, \
+ (ATH11K_HTT_STATS_BUF_SIZE - buflen) - index, \
+ "%s", new_line); \
+ buflen += index; \
+ } while (0)
+
+static inline void htt_print_stats_string_tlv(const void *tag_buf,
+ u16 tag_len,
+ struct debug_htt_stats_req *stats_req)
+{
+ const struct htt_stats_string_tlv *htt_stats_buf = tag_buf;
+ u8 *buf = stats_req->buf;
+ u32 len = stats_req->buf_len;
+ u32 buf_len = ATH11K_HTT_STATS_BUF_SIZE;
+ u8 i;
+
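+ /* tag_len is in bytes; the string data is consumed as 32-bit words */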
+ tag_len = tag_len >> 2;
+
+ len += scnprintf(buf + len, buf_len - len, "HTT_STATS_STRING_TLV:\n");
+
+ len += scnprintf(buf + len, buf_len - len,
+ "data = ");
+ for (i = 0; i < tag_len; i++) {
+ len += scnprintf(buf + len,
+ buf_len - len,
+ "%.*s", 4, (char *)&(htt_stats_buf->data[i]));
+ }
+ /* Trailing newlines are added for readability */
+ len += scnprintf(buf + len, buf_len - len, "\n\n");
+
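+ /* NUL-terminate the stats buffer, clamping to its end on truncation */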
+ if (len >= buf_len)
+ buf[buf_len - 1] = 0;
+ else
+ buf[len] = 0;
+
+ stats_req->buf_len = len;
+}
+
+static inline void htt_print_tx_pdev_stats_cmn_tlv(const void *tag_buf,
+ struct debug_htt_stats_req *stats_req)
+{
+ const struct htt_tx_pdev_stats_cmn_tlv *htt_stats_buf = tag_buf;
+ u8 *buf = stats_req->buf;
+ u32 len = stats_req->buf_len;
+ u32 buf_len = ATH11K_HTT_STATS_BUF_SIZE;
+
+ len += scnprintf(buf + len, buf_len - len, "HTT_TX_PDEV_STATS_CMN_TLV:\n");
+ len += scnprintf(buf + len, buf_len - len, "mac_id = %lu\n",
+ FIELD_GET(HTT_STATS_MAC_ID, htt_stats_buf->mac_id__word));
+ len += scnprintf(buf + len, buf_len - len, "hw_queued = %u\n",
+ htt_stats_buf->hw_queued);
+ len += scnprintf(buf + len, buf_len - len, "hw_reaped = %u\n",
+ htt_stats_buf->hw_reaped);
+ len += scnprintf(buf + len, buf_len - len, "underrun = %u\n",
+ htt_stats_buf->underrun);
+ len += scnprintf(buf + len, buf_len - len, "hw_paused = %u\n",
+ htt_stats_buf->hw_paused);
+ len += scnprintf(buf + len, buf_len - len, "hw_flush = %u\n",
+ htt_stats_buf->hw_flush);
+ len += scnprintf(buf + len, buf_len - len, "hw_filt = %u\n",
+ htt_stats_buf->hw_filt);
+ len += scnprintf(buf + len, buf_len - len, "tx_abort = %u\n",
+ htt_stats_buf->tx_abort);
+ len += scnprintf(buf + len, buf_len - len, "mpdu_requeued = %u\n",
+ htt_stats_buf->mpdu_requeued);
+ len += scnprintf(buf + len, buf_len - len, "tx_xretry = %u\n",
+ htt_stats_buf->tx_xretry);
+ len += scnprintf(buf + len, buf_len - len, "data_rc = %u\n",
+ htt_stats_buf->data_rc);
+ len += scnprintf(buf + len, buf_len - len, "mpdu_dropped_xretry = %u\n",
+ htt_stats_buf->mpdu_dropped_xretry);
+ len += scnprintf(buf + len, buf_len - len, "illegal_rate_phy_err = %u\n",
+ htt_stats_buf->illgl_rate_phy_err);
+ len += scnprintf(buf + len, buf_len - len, "cont_xretry = %u\n",
+ htt_stats_buf->cont_xretry);
+ len += scnprintf(buf + len, buf_len - len, "tx_timeout = %u\n",
+ htt_stats_buf->tx_timeout);
+ len += scnprintf(buf + len, buf_len - len, "pdev_resets = %u\n",
+ htt_stats_buf->pdev_resets);
+ len += scnprintf(buf + len, buf_len - len, "phy_underrun = %u\n",
+ htt_stats_buf->phy_underrun);
+ len += scnprintf(buf + len, buf_len - len, "txop_ovf = %u\n",
+ htt_stats_buf->txop_ovf);
+ len += scnprintf(buf + len, buf_len - len, "seq_posted = %u\n",
+ htt_stats_buf->seq_posted);
+ len += scnprintf(buf + len, buf_len - len, "seq_failed_queueing = %u\n",
+ htt_stats_buf->seq_failed_queueing);
+ len += scnprintf(buf + len, buf_len - len, "seq_completed = %u\n",
+ htt_stats_buf->seq_completed);
+ len += scnprintf(buf + len, buf_len - len, "seq_restarted = %u\n",
+ htt_stats_buf->seq_restarted);
+ len += scnprintf(buf + len, buf_len - len, "mu_seq_posted = %u\n",
+ htt_stats_buf->mu_seq_posted);
+ len += scnprintf(buf + len, buf_len - len, "seq_switch_hw_paused = %u\n",
+ htt_stats_buf->seq_switch_hw_paused);
+ len += scnprintf(buf + len, buf_len - len, "next_seq_posted_dsr = %u\n",
+ htt_stats_buf->next_seq_posted_dsr);
+ len += scnprintf(buf + len, buf_len - len, "seq_posted_isr = %u\n",
+ htt_stats_buf->seq_posted_isr);
+ len += scnprintf(buf + len, buf_len - len, "seq_ctrl_cached = %u\n",
+ htt_stats_buf->seq_ctrl_cached);
+ len += scnprintf(buf + len, buf_len - len, "mpdu_count_tqm = %u\n",
+ htt_stats_buf->mpdu_count_tqm);
+ len += scnprintf(buf + len, buf_len - len, "msdu_count_tqm = %u\n",
+ htt_stats_buf->msdu_count_tqm);
+ len += scnprintf(buf + len, buf_len - len, "mpdu_removed_tqm = %u\n",
+ htt_stats_buf->mpdu_removed_tqm);
+ len += scnprintf(buf + len, buf_len - len, "msdu_removed_tqm = %u\n",
+ htt_stats_buf->msdu_removed_tqm);
+ len += scnprintf(buf + len, buf_len - len, "mpdus_sw_flush = %u\n",
+ htt_stats_buf->mpdus_sw_flush);
+ len += scnprintf(buf + len, buf_len - len, "mpdus_hw_filter = %u\n",
+ htt_stats_buf->mpdus_hw_filter);
+ len += scnprintf(buf + len, buf_len - len, "mpdus_truncated = %u\n",
+ htt_stats_buf->mpdus_truncated);
+ len += scnprintf(buf + len, buf_len - len, "mpdus_ack_failed = %u\n",
+ htt_stats_buf->mpdus_ack_failed);
+ len += scnprintf(buf + len, buf_len - len, "mpdus_expired = %u\n",
+ htt_stats_buf->mpdus_expired);
+ len += scnprintf(buf + len, buf_len - len, "mpdus_seq_hw_retry = %u\n",
+ htt_stats_buf->mpdus_seq_hw_retry);
+ len += scnprintf(buf + len, buf_len - len, "ack_tlv_proc = %u\n",
+ htt_stats_buf->ack_tlv_proc);
+ len += scnprintf(buf + len, buf_len - len, "coex_abort_mpdu_cnt_valid = %u\n",
+ htt_stats_buf->coex_abort_mpdu_cnt_valid);
+ len += scnprintf(buf + len, buf_len - len, "coex_abort_mpdu_cnt = %u\n",
+ htt_stats_buf->coex_abort_mpdu_cnt);
+ len += scnprintf(buf + len, buf_len - len, "num_total_ppdus_tried_ota = %u\n",
+ htt_stats_buf->num_total_ppdus_tried_ota);
+ len += scnprintf(buf + len, buf_len - len, "num_data_ppdus_tried_ota = %u\n",
+ htt_stats_buf->num_data_ppdus_tried_ota);
+ len += scnprintf(buf + len, buf_len - len, "local_ctrl_mgmt_enqued = %u\n",
+ htt_stats_buf->local_ctrl_mgmt_enqued);
+ len += scnprintf(buf + len, buf_len - len, "local_ctrl_mgmt_freed = %u\n",
+ htt_stats_buf->local_ctrl_mgmt_freed);
+ len += scnprintf(buf + len, buf_len - len, "local_data_enqued = %u\n",
+ htt_stats_buf->local_data_enqued);
+ len += scnprintf(buf + len, buf_len - len, "local_data_freed = %u\n",
+ htt_stats_buf->local_data_freed);
+ len += scnprintf(buf + len, buf_len - len, "mpdu_tried = %u\n",
+ htt_stats_buf->mpdu_tried);
+ len += scnprintf(buf + len, buf_len - len, "isr_wait_seq_posted = %u\n",
+ htt_stats_buf->isr_wait_seq_posted);
+ len += scnprintf(buf + len, buf_len - len, "tx_active_dur_us_low = %u\n",
+ htt_stats_buf->tx_active_dur_us_low);
+ len += scnprintf(buf + len, buf_len - len, "tx_active_dur_us_high = %u\n\n",
+ htt_stats_buf->tx_active_dur_us_high);
+
+ if (len >= buf_len)
+ buf[buf_len - 1] = 0;
+ else
+ buf[len] = 0;
+
+ stats_req->buf_len = len;
+}
+
+static inline void
+htt_print_tx_pdev_stats_urrn_tlv_v(const void *tag_buf,
+ u16 tag_len,
+ struct debug_htt_stats_req *stats_req)
+{
+ const struct htt_tx_pdev_stats_urrn_tlv_v *htt_stats_buf = tag_buf;
+ u8 *buf = stats_req->buf;
+ u32 len = stats_req->buf_len;
+ u32 buf_len = ATH11K_HTT_STATS_BUF_SIZE;
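+ /* The TLV length is in bytes; clamp the u32 element count to the array bound */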
+ u16 num_elems = min_t(u16, (tag_len >> 2), HTT_TX_PDEV_MAX_URRN_STATS);
+
+ len += scnprintf(buf + len, buf_len - len, "HTT_TX_PDEV_STATS_URRN_TLV_V:\n");
+
+ PRINT_ARRAY_TO_BUF(buf, len, htt_stats_buf->urrn_stats, "urrn_stats",
+ num_elems, "\n\n");
+
+ if (len >= buf_len)
+ buf[buf_len - 1] = 0;
+ else
+ buf[len] = 0;
+
+ stats_req->buf_len = len;
+}
+
+static inline void
+htt_print_tx_pdev_stats_flush_tlv_v(const void *tag_buf,
+ u16 tag_len,
+ struct debug_htt_stats_req *stats_req)
+{
+ const struct htt_tx_pdev_stats_flush_tlv_v *htt_stats_buf = tag_buf;
+ u8 *buf = stats_req->buf;
+ u32 len = stats_req->buf_len;
+ u32 buf_len = ATH11K_HTT_STATS_BUF_SIZE;
+ u16 num_elems = min_t(u16, (tag_len >> 2), HTT_TX_PDEV_MAX_FLUSH_REASON_STATS);
+
+ len += scnprintf(buf + len, buf_len - len, "HTT_TX_PDEV_STATS_FLUSH_TLV_V:\n");
+
+ PRINT_ARRAY_TO_BUF(buf, len, htt_stats_buf->flush_errs, "flush_errs",
+ num_elems, "\n\n");
+
+ if (len >= buf_len)
+ buf[buf_len - 1] = 0;
+ else
+ buf[len] = 0;
+
+ stats_req->buf_len = len;
+}
+
+static inline void
+htt_print_tx_pdev_stats_sifs_tlv_v(const void *tag_buf,
+ u16 tag_len,
+ struct debug_htt_stats_req *stats_req)
+{
+ const struct htt_tx_pdev_stats_sifs_tlv_v *htt_stats_buf = tag_buf;
+ u8 *buf = stats_req->buf;
+ u32 len = stats_req->buf_len;
+ u32 buf_len = ATH11K_HTT_STATS_BUF_SIZE;
+ u16 num_elems = min_t(u16, (tag_len >> 2), HTT_TX_PDEV_MAX_SIFS_BURST_STATS);
+
+ len += scnprintf(buf + len, buf_len - len, "HTT_TX_PDEV_STATS_SIFS_TLV_V:\n");
+
+ PRINT_ARRAY_TO_BUF(buf, len, htt_stats_buf->sifs_status, "sifs_status",
+ num_elems, "\n\n");
+
+ if (len >= buf_len)
+ buf[buf_len - 1] = 0;
+ else
+ buf[len] = 0;
+
+ stats_req->buf_len = len;
+}
+
+static inline void
+htt_print_tx_pdev_stats_phy_err_tlv_v(const void *tag_buf,
+ u16 tag_len,
+ struct debug_htt_stats_req *stats_req)
+{
+ const struct htt_tx_pdev_stats_phy_err_tlv_v *htt_stats_buf = tag_buf;
+ u8 *buf = stats_req->buf;
+ u32 len = stats_req->buf_len;
+ u32 buf_len = ATH11K_HTT_STATS_BUF_SIZE;
+ u16 num_elems = min_t(u16, (tag_len >> 2), HTT_TX_PDEV_MAX_PHY_ERR_STATS);
+
+ len += scnprintf(buf + len, buf_len - len, "HTT_TX_PDEV_STATS_PHY_ERR_TLV_V:\n");
+
+ PRINT_ARRAY_TO_BUF(buf, len, htt_stats_buf->phy_errs, "phy_errs",
+ num_elems, "\n\n");
+
+ if (len >= buf_len)
+ buf[buf_len - 1] = 0;
+ else
+ buf[len] = 0;
+
+ stats_req->buf_len = len;
+}
+
+static inline void
+htt_print_tx_pdev_stats_sifs_hist_tlv_v(const void *tag_buf,
+ u16 tag_len,
+ struct debug_htt_stats_req *stats_req)
+{
+ const struct htt_tx_pdev_stats_sifs_hist_tlv_v *htt_stats_buf = tag_buf;
+ u8 *buf = stats_req->buf;
+ u32 len = stats_req->buf_len;
+ u32 buf_len = ATH11K_HTT_STATS_BUF_SIZE;
+ u16 num_elems = min_t(u16, (tag_len >> 2), HTT_TX_PDEV_MAX_SIFS_BURST_HIST_STATS);
+
+ len += scnprintf(buf + len, buf_len - len,
+ "HTT_TX_PDEV_STATS_SIFS_HIST_TLV_V:\n");
+
+ PRINT_ARRAY_TO_BUF(buf, len, htt_stats_buf->sifs_hist_status,
+ "sifs_hist_status", num_elems, "\n\n");
+
+ if (len >= buf_len)
+ buf[buf_len - 1] = 0;
+ else
+ buf[len] = 0;
+
+ stats_req->buf_len = len;
+}
+
+static inline void
+htt_print_tx_pdev_stats_tx_ppdu_stats_tlv_v(const void *tag_buf,
+ struct debug_htt_stats_req *stats_req)
+{
+ const struct htt_tx_pdev_stats_tx_ppdu_stats_tlv_v *htt_stats_buf = tag_buf;
+ u8 *buf = stats_req->buf;
+ u32 len = stats_req->buf_len;
+ u32 buf_len = ATH11K_HTT_STATS_BUF_SIZE;
+
+ len += scnprintf(buf + len, buf_len - len,
+ "HTT_TX_PDEV_STATS_TX_PPDU_STATS_TLV_V:\n");
+
+ len += scnprintf(buf + len, buf_len - len, "num_data_ppdus_legacy_su = %u\n",
+ htt_stats_buf->num_data_ppdus_legacy_su);
+
+ len += scnprintf(buf + len, buf_len - len, "num_data_ppdus_ac_su = %u\n",
+ htt_stats_buf->num_data_ppdus_ac_su);
+
+ len += scnprintf(buf + len, buf_len - len, "num_data_ppdus_ax_su = %u\n",
+ htt_stats_buf->num_data_ppdus_ax_su);
+
+ len += scnprintf(buf + len, buf_len - len, "num_data_ppdus_ac_su_txbf = %u\n",
+ htt_stats_buf->num_data_ppdus_ac_su_txbf);
+
+ len += scnprintf(buf + len, buf_len - len, "num_data_ppdus_ax_su_txbf = %u\n\n",
+ htt_stats_buf->num_data_ppdus_ax_su_txbf);
+
+ if (len >= buf_len)
+ buf[buf_len - 1] = 0;
+ else
+ buf[len] = 0;
+
+ stats_req->buf_len = len;
+}
+
+static inline void
+htt_print_tx_pdev_stats_tried_mpdu_cnt_hist_tlv_v(const void *tag_buf,
+ u16 tag_len,
+ struct debug_htt_stats_req *stats_req)
+{
+ const struct htt_tx_pdev_stats_tried_mpdu_cnt_hist_tlv_v *htt_stats_buf = tag_buf;
+ u8 *buf = stats_req->buf;
+ u32 len = stats_req->buf_len;
+ u32 buf_len = ATH11K_HTT_STATS_BUF_SIZE;
+ u32 num_elements = ((tag_len - sizeof(htt_stats_buf->hist_bin_size)) >> 2);
+
+ len += scnprintf(buf + len, buf_len - len,
+ "HTT_TX_PDEV_STATS_TRIED_MPDU_CNT_HIST_TLV_V:\n");
+ len += scnprintf(buf + len, buf_len - len, "TRIED_MPDU_CNT_HIST_BIN_SIZE : %u\n",
+ htt_stats_buf->hist_bin_size);
+
+ PRINT_ARRAY_TO_BUF(buf, len, htt_stats_buf->tried_mpdu_cnt_hist,
+ "tried_mpdu_cnt_hist", num_elements, "\n\n");
+
+ if (len >= buf_len)
+ buf[buf_len - 1] = 0;
+ else
+ buf[len] = 0;
+
+ stats_req->buf_len = len;
+}
+
+static inline void htt_print_hw_stats_intr_misc_tlv(const void *tag_buf,
+ struct debug_htt_stats_req *stats_req)
+{
+ const struct htt_hw_stats_intr_misc_tlv *htt_stats_buf = tag_buf;
+ u8 *buf = stats_req->buf;
+ u32 len = stats_req->buf_len;
+ u32 buf_len = ATH11K_HTT_STATS_BUF_SIZE;
+ char hw_intr_name[HTT_STATS_MAX_HW_INTR_NAME_LEN + 1] = {0};
+
+ len += scnprintf(buf + len, buf_len - len, "HTT_HW_STATS_INTR_MISC_TLV:\n");
+ memcpy(hw_intr_name, &(htt_stats_buf->hw_intr_name[0]),
+ HTT_STATS_MAX_HW_INTR_NAME_LEN);
+ len += scnprintf(buf + len, buf_len - len, "hw_intr_name = %s\n", hw_intr_name);
+ len += scnprintf(buf + len, buf_len - len, "mask = %u\n",
+ htt_stats_buf->mask);
+ len += scnprintf(buf + len, buf_len - len, "count = %u\n\n",
+ htt_stats_buf->count);
+
+ if (len >= buf_len)
+ buf[buf_len - 1] = 0;
+ else
+ buf[len] = 0;
+
+ stats_req->buf_len = len;
+}
+
+static inline void
+htt_print_hw_stats_wd_timeout_tlv(const void *tag_buf,
+ struct debug_htt_stats_req *stats_req)
+{
+ const struct htt_hw_stats_wd_timeout_tlv *htt_stats_buf = tag_buf;
+ u8 *buf = stats_req->buf;
+ u32 len = stats_req->buf_len;
+ u32 buf_len = ATH11K_HTT_STATS_BUF_SIZE;
+ char hw_module_name[HTT_STATS_MAX_HW_MODULE_NAME_LEN + 1] = {0};
+
+ len += scnprintf(buf + len, buf_len - len, "HTT_HW_STATS_WD_TIMEOUT_TLV:\n");
+ memcpy(hw_module_name, &(htt_stats_buf->hw_module_name[0]),
+ HTT_STATS_MAX_HW_MODULE_NAME_LEN);
+ len += scnprintf(buf + len, buf_len - len, "hw_module_name = %s\n",
+ hw_module_name);
+ len += scnprintf(buf + len, buf_len - len, "count = %u\n",
+ htt_stats_buf->count);
+
+ if (len >= buf_len)
+ buf[buf_len - 1] = 0;
+ else
+ buf[len] = 0;
+
+ stats_req->buf_len = len;
+}
+
+static inline void htt_print_hw_stats_pdev_errs_tlv(const void *tag_buf,
+ struct debug_htt_stats_req *stats_req)
+{
+ const struct htt_hw_stats_pdev_errs_tlv *htt_stats_buf = tag_buf;
+ u8 *buf = stats_req->buf;
+ u32 len = stats_req->buf_len;
+ u32 buf_len = ATH11K_HTT_STATS_BUF_SIZE;
+
+ len += scnprintf(buf + len, buf_len - len, "HTT_HW_STATS_PDEV_ERRS_TLV:\n");
+ len += scnprintf(buf + len, buf_len - len, "mac_id = %lu\n",
+ FIELD_GET(HTT_STATS_MAC_ID, htt_stats_buf->mac_id__word));
+ len += scnprintf(buf + len, buf_len - len, "tx_abort = %u\n",
+ htt_stats_buf->tx_abort);
+ len += scnprintf(buf + len, buf_len - len, "tx_abort_fail_count = %u\n",
+ htt_stats_buf->tx_abort_fail_count);
+ len += scnprintf(buf + len, buf_len - len, "rx_abort = %u\n",
+ htt_stats_buf->rx_abort);
+ len += scnprintf(buf + len, buf_len - len, "rx_abort_fail_count = %u\n",
+ htt_stats_buf->rx_abort_fail_count);
+ len += scnprintf(buf + len, buf_len - len, "warm_reset = %u\n",
+ htt_stats_buf->warm_reset);
+ len += scnprintf(buf + len, buf_len - len, "cold_reset = %u\n",
+ htt_stats_buf->cold_reset);
+ len += scnprintf(buf + len, buf_len - len, "tx_flush = %u\n",
+ htt_stats_buf->tx_flush);
+ len += scnprintf(buf + len, buf_len - len, "tx_glb_reset = %u\n",
+ htt_stats_buf->tx_glb_reset);
+ len += scnprintf(buf + len, buf_len - len, "tx_txq_reset = %u\n",
+ htt_stats_buf->tx_txq_reset);
+ len += scnprintf(buf + len, buf_len - len, "rx_timeout_reset = %u\n\n",
+ htt_stats_buf->rx_timeout_reset);
+
+ if (len >= buf_len)
+ buf[buf_len - 1] = 0;
+ else
+ buf[len] = 0;
+
+ stats_req->buf_len = len;
+}
+
+static inline void htt_print_msdu_flow_stats_tlv(const void *tag_buf,
+ struct debug_htt_stats_req *stats_req)
+{
+ const struct htt_msdu_flow_stats_tlv *htt_stats_buf = tag_buf;
+ u8 *buf = stats_req->buf;
+ u32 len = stats_req->buf_len;
+ u32 buf_len = ATH11K_HTT_STATS_BUF_SIZE;
+
+ len += scnprintf(buf + len, buf_len - len, "HTT_MSDU_FLOW_STATS_TLV:\n");
+ len += scnprintf(buf + len, buf_len - len, "last_update_timestamp = %u\n",
+ htt_stats_buf->last_update_timestamp);
+ len += scnprintf(buf + len, buf_len - len, "last_add_timestamp = %u\n",
+ htt_stats_buf->last_add_timestamp);
+ len += scnprintf(buf + len, buf_len - len, "last_remove_timestamp = %u\n",
+ htt_stats_buf->last_remove_timestamp);
+ len += scnprintf(buf + len, buf_len - len, "total_processed_msdu_count = %u\n",
+ htt_stats_buf->total_processed_msdu_count);
+ len += scnprintf(buf + len, buf_len - len, "cur_msdu_count_in_flowq = %u\n",
+ htt_stats_buf->cur_msdu_count_in_flowq);
+ len += scnprintf(buf + len, buf_len - len, "sw_peer_id = %u\n",
+ htt_stats_buf->sw_peer_id);
+ len += scnprintf(buf + len, buf_len - len, "tx_flow_no = %lu\n",
+ FIELD_GET(HTT_MSDU_FLOW_STATS_TX_FLOW_NO,
+ htt_stats_buf->tx_flow_no__tid_num__drop_rule));
+ len += scnprintf(buf + len, buf_len - len, "tid_num = %lu\n",
+ FIELD_GET(HTT_MSDU_FLOW_STATS_TID_NUM,
+ htt_stats_buf->tx_flow_no__tid_num__drop_rule));
+ len += scnprintf(buf + len, buf_len - len, "drop_rule = %lu\n",
+ FIELD_GET(HTT_MSDU_FLOW_STATS_DROP_RULE,
+ htt_stats_buf->tx_flow_no__tid_num__drop_rule));
+ len += scnprintf(buf + len, buf_len - len, "last_cycle_enqueue_count = %u\n",
+ htt_stats_buf->last_cycle_enqueue_count);
+ len += scnprintf(buf + len, buf_len - len, "last_cycle_dequeue_count = %u\n",
+ htt_stats_buf->last_cycle_dequeue_count);
+ len += scnprintf(buf + len, buf_len - len, "last_cycle_drop_count = %u\n",
+ htt_stats_buf->last_cycle_drop_count);
+ len += scnprintf(buf + len, buf_len - len, "current_drop_th = %u\n\n",
+ htt_stats_buf->current_drop_th);
+
+ if (len >= buf_len)
+ buf[buf_len - 1] = 0;
+ else
+ buf[len] = 0;
+
+ stats_req->buf_len = len;
+}
+
+static inline void htt_print_tx_tid_stats_tlv(const void *tag_buf,
+ struct debug_htt_stats_req *stats_req)
+{
+ const struct htt_tx_tid_stats_tlv *htt_stats_buf = tag_buf;
+ u8 *buf = stats_req->buf;
+ u32 len = stats_req->buf_len;
+ u32 buf_len = ATH11K_HTT_STATS_BUF_SIZE;
+ char tid_name[MAX_HTT_TID_NAME + 1] = {0};
+
+ len += scnprintf(buf + len, buf_len - len, "HTT_TX_TID_STATS_TLV:\n");
+ memcpy(tid_name, &(htt_stats_buf->tid_name[0]), MAX_HTT_TID_NAME);
+ len += scnprintf(buf + len, buf_len - len, "tid_name = %s\n", tid_name);
+ len += scnprintf(buf + len, buf_len - len, "sw_peer_id = %lu\n",
+ FIELD_GET(HTT_TX_TID_STATS_SW_PEER_ID,
+ htt_stats_buf->sw_peer_id__tid_num));
+ len += scnprintf(buf + len, buf_len - len, "tid_num = %lu\n",
+ FIELD_GET(HTT_TX_TID_STATS_TID_NUM,
+ htt_stats_buf->sw_peer_id__tid_num));
+ len += scnprintf(buf + len, buf_len - len, "num_sched_pending = %lu\n",
+ FIELD_GET(HTT_TX_TID_STATS_NUM_SCHED_PENDING,
+ htt_stats_buf->num_sched_pending__num_ppdu_in_hwq));
+ len += scnprintf(buf + len, buf_len - len, "num_ppdu_in_hwq = %lu\n",
+ FIELD_GET(HTT_TX_TID_STATS_NUM_PPDU_IN_HWQ,
+ htt_stats_buf->num_sched_pending__num_ppdu_in_hwq));
+ len += scnprintf(buf + len, buf_len - len, "tid_flags = 0x%x\n",
+ htt_stats_buf->tid_flags);
+ len += scnprintf(buf + len, buf_len - len, "hw_queued = %u\n",
+ htt_stats_buf->hw_queued);
+ len += scnprintf(buf + len, buf_len - len, "hw_reaped = %u\n",
+ htt_stats_buf->hw_reaped);
+ len += scnprintf(buf + len, buf_len - len, "mpdus_hw_filter = %u\n",
+ htt_stats_buf->mpdus_hw_filter);
+ len += scnprintf(buf + len, buf_len - len, "qdepth_bytes = %u\n",
+ htt_stats_buf->qdepth_bytes);
+ len += scnprintf(buf + len, buf_len - len, "qdepth_num_msdu = %u\n",
+ htt_stats_buf->qdepth_num_msdu);
+ len += scnprintf(buf + len, buf_len - len, "qdepth_num_mpdu = %u\n",
+ htt_stats_buf->qdepth_num_mpdu);
+ len += scnprintf(buf + len, buf_len - len, "last_scheduled_tsmp = %u\n",
+ htt_stats_buf->last_scheduled_tsmp);
+ len += scnprintf(buf + len, buf_len - len, "pause_module_id = %u\n",
+ htt_stats_buf->pause_module_id);
+ len += scnprintf(buf + len, buf_len - len, "block_module_id = %u\n\n",
+ htt_stats_buf->block_module_id);
+
+ if (len >= buf_len)
+ buf[buf_len - 1] = 0;
+ else
+ buf[len] = 0;
+
+ stats_req->buf_len = len;
+}
+
+static inline void htt_print_tx_tid_stats_v1_tlv(const void *tag_buf,
+ struct debug_htt_stats_req *stats_req)
+{
+ const struct htt_tx_tid_stats_v1_tlv *htt_stats_buf = tag_buf;
+ u8 *buf = stats_req->buf;
+ u32 len = stats_req->buf_len;
+ u32 buf_len = ATH11K_HTT_STATS_BUF_SIZE;
+ char tid_name[MAX_HTT_TID_NAME + 1] = {0};
+
+ len += scnprintf(buf + len, buf_len - len, "HTT_TX_TID_STATS_V1_TLV:\n");
+ memcpy(tid_name, &(htt_stats_buf->tid_name[0]), MAX_HTT_TID_NAME);
+ len += scnprintf(buf + len, buf_len - len, "tid_name = %s\n", tid_name);
+ len += scnprintf(buf + len, buf_len - len, "sw_peer_id = %lu\n",
+ FIELD_GET(HTT_TX_TID_STATS_V1_SW_PEER_ID,
+ htt_stats_buf->sw_peer_id__tid_num));
+ len += scnprintf(buf + len, buf_len - len, "tid_num = %lu\n",
+ FIELD_GET(HTT_TX_TID_STATS_V1_TID_NUM,
+ htt_stats_buf->sw_peer_id__tid_num));
+ len += scnprintf(buf + len, buf_len - len, "num_sched_pending = %lu\n",
+ FIELD_GET(HTT_TX_TID_STATS_V1_NUM_SCHED_PENDING,
+ htt_stats_buf->num_sched_pending__num_ppdu_in_hwq));
+ len += scnprintf(buf + len, buf_len - len, "num_ppdu_in_hwq = %lu\n",
+ FIELD_GET(HTT_TX_TID_STATS_V1_NUM_PPDU_IN_HWQ,
+ htt_stats_buf->num_sched_pending__num_ppdu_in_hwq));
+ len += scnprintf(buf + len, buf_len - len, "tid_flags = 0x%x\n",
+ htt_stats_buf->tid_flags);
+ len += scnprintf(buf + len, buf_len - len, "max_qdepth_bytes = %u\n",
+ htt_stats_buf->max_qdepth_bytes);
+ len += scnprintf(buf + len, buf_len - len, "max_qdepth_n_msdus = %u\n",
+ htt_stats_buf->max_qdepth_n_msdus);
+ len += scnprintf(buf + len, buf_len - len, "rsvd = %u\n",
+ htt_stats_buf->rsvd);
+ len += scnprintf(buf + len, buf_len - len, "qdepth_bytes = %u\n",
+ htt_stats_buf->qdepth_bytes);
+ len += scnprintf(buf + len, buf_len - len, "qdepth_num_msdu = %u\n",
+ htt_stats_buf->qdepth_num_msdu);
+ len += scnprintf(buf + len, buf_len - len, "qdepth_num_mpdu = %u\n",
+ htt_stats_buf->qdepth_num_mpdu);
+ len += scnprintf(buf + len, buf_len - len, "last_scheduled_tsmp = %u\n",
+ htt_stats_buf->last_scheduled_tsmp);
+ len += scnprintf(buf + len, buf_len - len, "pause_module_id = %u\n",
+ htt_stats_buf->pause_module_id);
+ len += scnprintf(buf + len, buf_len - len, "block_module_id = %u\n",
+ htt_stats_buf->block_module_id);
+ len += scnprintf(buf + len, buf_len - len, "allow_n_flags = 0x%x\n",
+ htt_stats_buf->allow_n_flags);
+ len += scnprintf(buf + len, buf_len - len, "sendn_frms_allowed = %u\n\n",
+ htt_stats_buf->sendn_frms_allowed);
+
+ if (len >= buf_len)
+ buf[buf_len - 1] = 0;
+ else
+ buf[len] = 0;
+
+ stats_req->buf_len = len;
+}
+
+static inline void htt_print_rx_tid_stats_tlv(const void *tag_buf,
+ struct debug_htt_stats_req *stats_req)
+{
+ const struct htt_rx_tid_stats_tlv *htt_stats_buf = tag_buf;
+ u8 *buf = stats_req->buf;
+ u32 len = stats_req->buf_len;
+ u32 buf_len = ATH11K_HTT_STATS_BUF_SIZE;
+ char tid_name[MAX_HTT_TID_NAME + 1] = {0};
+
+ len += scnprintf(buf + len, buf_len - len, "HTT_RX_TID_STATS_TLV:\n");
+ len += scnprintf(buf + len, buf_len - len, "sw_peer_id = %lu\n",
+ FIELD_GET(HTT_RX_TID_STATS_SW_PEER_ID,
+ htt_stats_buf->sw_peer_id__tid_num));
+ len += scnprintf(buf + len, buf_len - len, "tid_num = %lu\n",
+ FIELD_GET(HTT_RX_TID_STATS_TID_NUM,
+ htt_stats_buf->sw_peer_id__tid_num));
+ memcpy(tid_name, &(htt_stats_buf->tid_name[0]), MAX_HTT_TID_NAME);
+ len += scnprintf(buf + len, buf_len - len, "tid_name = %s\n", tid_name);
+ len += scnprintf(buf + len, buf_len - len, "dup_in_reorder = %u\n",
+ htt_stats_buf->dup_in_reorder);
+ len += scnprintf(buf + len, buf_len - len, "dup_past_outside_window = %u\n",
+ htt_stats_buf->dup_past_outside_window);
+ len += scnprintf(buf + len, buf_len - len, "dup_past_within_window = %u\n",
+ htt_stats_buf->dup_past_within_window);
+ len += scnprintf(buf + len, buf_len - len, "rxdesc_err_decrypt = %u\n\n",
+ htt_stats_buf->rxdesc_err_decrypt);
+
+ if (len >= buf_len)
+ buf[buf_len - 1] = 0;
+ else
+ buf[len] = 0;
+
+ stats_req->buf_len = len;
+}
+
+static inline void htt_print_counter_tlv(const void *tag_buf,
+ struct debug_htt_stats_req *stats_req)
+{
+ const struct htt_counter_tlv *htt_stats_buf = tag_buf;
+ u8 *buf = stats_req->buf;
+ u32 len = stats_req->buf_len;
+ u32 buf_len = ATH11K_HTT_STATS_BUF_SIZE;
+
+ len += scnprintf(buf + len, buf_len - len, "HTT_COUNTER_TLV:\n");
+
+ PRINT_ARRAY_TO_BUF(buf, len, htt_stats_buf->counter_name,
+ "counter_name",
+ HTT_MAX_COUNTER_NAME, "\n");
+ len += scnprintf(buf + len, buf_len - len, "count = %u\n\n",
+ htt_stats_buf->count);
+
+ if (len >= buf_len)
+ buf[buf_len - 1] = 0;
+ else
+ buf[len] = 0;
+
+ stats_req->buf_len = len;
+}
+
+static inline void htt_print_peer_stats_cmn_tlv(const void *tag_buf,
+ struct debug_htt_stats_req *stats_req)
+{
+ const struct htt_peer_stats_cmn_tlv *htt_stats_buf = tag_buf;
+ u8 *buf = stats_req->buf;
+ u32 len = stats_req->buf_len;
+ u32 buf_len = ATH11K_HTT_STATS_BUF_SIZE;
+
+ len += scnprintf(buf + len, buf_len - len, "HTT_PEER_STATS_CMN_TLV:\n");
+ len += scnprintf(buf + len, buf_len - len, "ppdu_cnt = %u\n",
+ htt_stats_buf->ppdu_cnt);
+ len += scnprintf(buf + len, buf_len - len, "mpdu_cnt = %u\n",
+ htt_stats_buf->mpdu_cnt);
+ len += scnprintf(buf + len, buf_len - len, "msdu_cnt = %u\n",
+ htt_stats_buf->msdu_cnt);
+ len += scnprintf(buf + len, buf_len - len, "pause_bitmap = %u\n",
+ htt_stats_buf->pause_bitmap);
+ len += scnprintf(buf + len, buf_len - len, "block_bitmap = %u\n",
+ htt_stats_buf->block_bitmap);
+ len += scnprintf(buf + len, buf_len - len, "last_rssi = %d\n",
+ htt_stats_buf->rssi);
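+ /* 64-bit counters are reported as split low/high 32-bit words */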
+ len += scnprintf(buf + len, buf_len - len, "enqueued_count = %llu\n",
+ htt_stats_buf->peer_enqueued_count_low |
+ ((u64)htt_stats_buf->peer_enqueued_count_high << 32));
+ len += scnprintf(buf + len, buf_len - len, "dequeued_count = %llu\n",
+ htt_stats_buf->peer_dequeued_count_low |
+ ((u64)htt_stats_buf->peer_dequeued_count_high << 32));
+ len += scnprintf(buf + len, buf_len - len, "dropped_count = %llu\n",
+ htt_stats_buf->peer_dropped_count_low |
+ ((u64)htt_stats_buf->peer_dropped_count_high << 32));
+ len += scnprintf(buf + len, buf_len - len, "transmitted_ppdu_bytes = %llu\n",
+ htt_stats_buf->ppdu_transmitted_bytes_low |
+ ((u64)htt_stats_buf->ppdu_transmitted_bytes_high << 32));
+ len += scnprintf(buf + len, buf_len - len, "ttl_removed_count = %u\n",
+ htt_stats_buf->peer_ttl_removed_count);
+ len += scnprintf(buf + len, buf_len - len, "inactive_time = %u\n\n",
+ htt_stats_buf->inactive_time);
+
+ if (len >= buf_len)
+ buf[buf_len - 1] = 0;
+ else
+ buf[len] = 0;
+
+ stats_req->buf_len = len;
+}
+
+static inline void htt_print_peer_details_tlv(const void *tag_buf,
+ struct debug_htt_stats_req *stats_req)
+{
+ const struct htt_peer_details_tlv *htt_stats_buf = tag_buf;
+ u8 *buf = stats_req->buf;
+ u32 len = stats_req->buf_len;
+ u32 buf_len = ATH11K_HTT_STATS_BUF_SIZE;
+
+ len += scnprintf(buf + len, buf_len - len, "HTT_PEER_DETAILS_TLV:\n");
+ len += scnprintf(buf + len, buf_len - len, "peer_type = %u\n",
+ htt_stats_buf->peer_type);
+ len += scnprintf(buf + len, buf_len - len, "sw_peer_id = %u\n",
+ htt_stats_buf->sw_peer_id);
+ len += scnprintf(buf + len, buf_len - len, "vdev_id = %lu\n",
+ FIELD_GET(HTT_PEER_DETAILS_VDEV_ID,
+ htt_stats_buf->vdev_pdev_ast_idx));
+ len += scnprintf(buf + len, buf_len - len, "pdev_id = %lu\n",
+ FIELD_GET(HTT_PEER_DETAILS_PDEV_ID,
+ htt_stats_buf->vdev_pdev_ast_idx));
+ len += scnprintf(buf + len, buf_len - len, "ast_idx = %lu\n",
+ FIELD_GET(HTT_PEER_DETAILS_AST_IDX,
+ htt_stats_buf->vdev_pdev_ast_idx));
+ len += scnprintf(buf + len, buf_len - len,
+ "mac_addr = %02lx:%02lx:%02lx:%02lx:%02lx:%02lx\n",
+ FIELD_GET(HTT_MAC_ADDR_L32_0,
+ htt_stats_buf->mac_addr.mac_addr_l32),
+ FIELD_GET(HTT_MAC_ADDR_L32_1,
+ htt_stats_buf->mac_addr.mac_addr_l32),
+ FIELD_GET(HTT_MAC_ADDR_L32_2,
+ htt_stats_buf->mac_addr.mac_addr_l32),
+ FIELD_GET(HTT_MAC_ADDR_L32_3,
+ htt_stats_buf->mac_addr.mac_addr_l32),
+ FIELD_GET(HTT_MAC_ADDR_H16_0,
+ htt_stats_buf->mac_addr.mac_addr_h16),
+ FIELD_GET(HTT_MAC_ADDR_H16_1,
+ htt_stats_buf->mac_addr.mac_addr_h16));
+ len += scnprintf(buf + len, buf_len - len, "peer_flags = 0x%x\n",
+ htt_stats_buf->peer_flags);
+ len += scnprintf(buf + len, buf_len - len, "qpeer_flags = 0x%x\n\n",
+ htt_stats_buf->qpeer_flags);
+
+ if (len >= buf_len)
+ buf[buf_len - 1] = 0;
+ else
+ buf[len] = 0;
+
+ stats_req->buf_len = len;
+}
+
+static inline void htt_print_tx_peer_rate_stats_tlv(const void *tag_buf,
+ struct debug_htt_stats_req *stats_req)
+{
+ const struct htt_tx_peer_rate_stats_tlv *htt_stats_buf = tag_buf;
+ u8 *buf = stats_req->buf;
+ u32 len = stats_req->buf_len;
+ u32 buf_len = ATH11K_HTT_STATS_BUF_SIZE;
+ u8 j;
+
+ len += scnprintf(buf + len, buf_len - len, "HTT_TX_PEER_RATE_STATS_TLV:\n");
+ len += scnprintf(buf + len, buf_len - len, "tx_ldpc = %u\n",
+ htt_stats_buf->tx_ldpc);
+ len += scnprintf(buf + len, buf_len - len, "rts_cnt = %u\n",
+ htt_stats_buf->rts_cnt);
+ len += scnprintf(buf + len, buf_len - len, "ack_rssi = %u\n",
+ htt_stats_buf->ack_rssi);
+
+ PRINT_ARRAY_TO_BUF(buf, len, htt_stats_buf->tx_mcs, "tx_mcs",
+ HTT_TX_PDEV_STATS_NUM_MCS_COUNTERS, "\n");
+ PRINT_ARRAY_TO_BUF(buf, len, htt_stats_buf->tx_su_mcs, "tx_su_mcs",
+ HTT_TX_PDEV_STATS_NUM_MCS_COUNTERS, "\n");
+ PRINT_ARRAY_TO_BUF(buf, len, htt_stats_buf->tx_mu_mcs, "tx_mu_mcs",
+ HTT_TX_PDEV_STATS_NUM_MCS_COUNTERS, "\n");
+ PRINT_ARRAY_TO_BUF(buf, len, htt_stats_buf->tx_nss, "tx_nss",
+ HTT_TX_PDEV_STATS_NUM_SPATIAL_STREAMS, "\n");
+ PRINT_ARRAY_TO_BUF(buf, len, htt_stats_buf->tx_bw, "tx_bw",
+ HTT_TX_PDEV_STATS_NUM_BW_COUNTERS, "\n");
+ PRINT_ARRAY_TO_BUF(buf, len, htt_stats_buf->tx_stbc, "tx_stbc",
+ HTT_TX_PDEV_STATS_NUM_MCS_COUNTERS, "\n");
+ PRINT_ARRAY_TO_BUF(buf, len, htt_stats_buf->tx_pream, "tx_pream",
+ HTT_TX_PDEV_STATS_NUM_PREAMBLE_TYPES, "\n");
+
+ for (j = 0; j < HTT_TX_PEER_STATS_NUM_GI_COUNTERS; j++) {
+ len += scnprintf(buf + len, buf_len - len,
+ "tx_gi[%u] = ", j);
+ PRINT_ARRAY_TO_BUF(buf, len, htt_stats_buf->tx_gi[j], NULL,
+ HTT_TX_PEER_STATS_NUM_MCS_COUNTERS, "\n");
+ }
+
+ PRINT_ARRAY_TO_BUF(buf, len, htt_stats_buf->tx_dcm, "tx_dcm",
+ HTT_TX_PDEV_STATS_NUM_DCM_COUNTERS, "\n\n");
+
+ if (len >= buf_len)
+ buf[buf_len - 1] = 0;
+ else
+ buf[len] = 0;
+
+ stats_req->buf_len = len;
+}
+
+static inline void htt_print_rx_peer_rate_stats_tlv(const void *tag_buf,
+ struct debug_htt_stats_req *stats_req)
+{
+ const struct htt_rx_peer_rate_stats_tlv *htt_stats_buf = tag_buf;
+ u8 *buf = stats_req->buf;
+ u32 len = stats_req->buf_len;
+ u32 buf_len = ATH11K_HTT_STATS_BUF_SIZE;
+ u8 j;
+
+ len += scnprintf(buf + len, buf_len - len, "HTT_RX_PEER_RATE_STATS_TLV:\n");
+ len += scnprintf(buf + len, buf_len - len, "nsts = %u\n",
+ htt_stats_buf->nsts);
+ len += scnprintf(buf + len, buf_len - len, "rx_ldpc = %u\n",
+ htt_stats_buf->rx_ldpc);
+ len += scnprintf(buf + len, buf_len - len, "rts_cnt = %u\n",
+ htt_stats_buf->rts_cnt);
+ len += scnprintf(buf + len, buf_len - len, "rssi_mgmt = %u\n",
+ htt_stats_buf->rssi_mgmt);
+ len += scnprintf(buf + len, buf_len - len, "rssi_data = %u\n",
+ htt_stats_buf->rssi_data);
+ len += scnprintf(buf + len, buf_len - len, "rssi_comb = %u\n",
+ htt_stats_buf->rssi_comb);
+
+ PRINT_ARRAY_TO_BUF(buf, len, htt_stats_buf->rx_mcs, "rx_mcs",
+ HTT_RX_PDEV_STATS_NUM_MCS_COUNTERS, "\n");
+ PRINT_ARRAY_TO_BUF(buf, len, htt_stats_buf->rx_nss, "rx_nss",
+ HTT_RX_PDEV_STATS_NUM_SPATIAL_STREAMS, "\n");
+ PRINT_ARRAY_TO_BUF(buf, len, htt_stats_buf->rx_dcm, "rx_dcm",
+ HTT_RX_PDEV_STATS_NUM_DCM_COUNTERS, "\n");
+ PRINT_ARRAY_TO_BUF(buf, len, htt_stats_buf->rx_stbc, "rx_stbc",
+ HTT_RX_PDEV_STATS_NUM_MCS_COUNTERS, "\n");
+ PRINT_ARRAY_TO_BUF(buf, len, htt_stats_buf->rx_bw, "rx_bw",
+ HTT_RX_PDEV_STATS_NUM_BW_COUNTERS, "\n");
+
+ for (j = 0; j < HTT_RX_PEER_STATS_NUM_SPATIAL_STREAMS; j++) {
+ len += scnprintf(buf + len, (buf_len - len),
+ "rssi_chain[%u] = ", j);
+ PRINT_ARRAY_TO_BUF(buf, len, htt_stats_buf->rssi_chain[j], NULL,
+ HTT_RX_PEER_STATS_NUM_BW_COUNTERS, "\n");
+ }
+
+ for (j = 0; j < HTT_RX_PEER_STATS_NUM_GI_COUNTERS; j++) {
+ len += scnprintf(buf + len, (buf_len - len),
+ "rx_gi[%u] = ", j);
+ PRINT_ARRAY_TO_BUF(buf, len, htt_stats_buf->rx_gi[j], NULL,
+ HTT_RX_PDEV_STATS_NUM_MCS_COUNTERS, "\n");
+ }
+
+ PRINT_ARRAY_TO_BUF(buf, len, htt_stats_buf->rx_pream, "rx_pream",
+ HTT_RX_PDEV_STATS_NUM_PREAMBLE_TYPES, "\n");
+
+ if (len >= buf_len)
+ buf[buf_len - 1] = 0;
+ else
+ buf[len] = 0;
+
+ stats_req->buf_len = len;
+}
+
+static inline void
+htt_print_tx_hwq_mu_mimo_sch_stats_tlv(const void *tag_buf,
+ struct debug_htt_stats_req *stats_req)
+{
+ const struct htt_tx_hwq_mu_mimo_sch_stats_tlv *htt_stats_buf = tag_buf;
+ u8 *buf = stats_req->buf;
+ u32 len = stats_req->buf_len;
+ u32 buf_len = ATH11K_HTT_STATS_BUF_SIZE;
+
+ len += scnprintf(buf + len, buf_len - len, "HTT_TX_HWQ_MU_MIMO_SCH_STATS_TLV:\n");
+ len += scnprintf(buf + len, buf_len - len, "mu_mimo_sch_posted = %u\n",
+ htt_stats_buf->mu_mimo_sch_posted);
+ len += scnprintf(buf + len, buf_len - len, "mu_mimo_sch_failed = %u\n",
+ htt_stats_buf->mu_mimo_sch_failed);
+ len += scnprintf(buf + len, buf_len - len, "mu_mimo_ppdu_posted = %u\n\n",
+ htt_stats_buf->mu_mimo_ppdu_posted);
+
+ if (len >= buf_len)
+ buf[buf_len - 1] = 0;
+ else
+ buf[len] = 0;
+
+ stats_req->buf_len = len;
+}
+
+static inline void
+htt_print_tx_hwq_mu_mimo_mpdu_stats_tlv(const void *tag_buf,
+ struct debug_htt_stats_req *stats_req)
+{
+ const struct htt_tx_hwq_mu_mimo_mpdu_stats_tlv *htt_stats_buf = tag_buf;
+ u8 *buf = stats_req->buf;
+ u32 len = stats_req->buf_len;
+ u32 buf_len = ATH11K_HTT_STATS_BUF_SIZE;
+
+ len += scnprintf(buf + len, buf_len - len,
+ "HTT_TX_HWQ_MU_MIMO_MPDU_STATS_TLV:\n");
+ len += scnprintf(buf + len, buf_len - len, "mu_mimo_mpdus_queued_usr = %u\n",
+ htt_stats_buf->mu_mimo_mpdus_queued_usr);
+ len += scnprintf(buf + len, buf_len - len, "mu_mimo_mpdus_tried_usr = %u\n",
+ htt_stats_buf->mu_mimo_mpdus_tried_usr);
+ len += scnprintf(buf + len, buf_len - len, "mu_mimo_mpdus_failed_usr = %u\n",
+ htt_stats_buf->mu_mimo_mpdus_failed_usr);
+ len += scnprintf(buf + len, buf_len - len, "mu_mimo_mpdus_requeued_usr = %u\n",
+ htt_stats_buf->mu_mimo_mpdus_requeued_usr);
+ len += scnprintf(buf + len, buf_len - len, "mu_mimo_err_no_ba_usr = %u\n",
+ htt_stats_buf->mu_mimo_err_no_ba_usr);
+ len += scnprintf(buf + len, buf_len - len, "mu_mimo_mpdu_underrun_usr = %u\n",
+ htt_stats_buf->mu_mimo_mpdu_underrun_usr);
+ len += scnprintf(buf + len, buf_len - len, "mu_mimo_ampdu_underrun_usr = %u\n\n",
+ htt_stats_buf->mu_mimo_ampdu_underrun_usr);
+
+ if (len >= buf_len)
+ buf[buf_len - 1] = 0;
+ else
+ buf[len] = 0;
+
+ stats_req->buf_len = len;
+}
+
+static inline void
+htt_print_tx_hwq_mu_mimo_cmn_stats_tlv(const void *tag_buf,
+ struct debug_htt_stats_req *stats_req)
+{
+ const struct htt_tx_hwq_mu_mimo_cmn_stats_tlv *htt_stats_buf = tag_buf;
+ u8 *buf = stats_req->buf;
+ u32 len = stats_req->buf_len;
+ u32 buf_len = ATH11K_HTT_STATS_BUF_SIZE;
+
+ len += scnprintf(buf + len, buf_len - len, "HTT_TX_HWQ_MU_MIMO_CMN_STATS_TLV:\n");
+ len += scnprintf(buf + len, buf_len - len, "mac_id = %lu\n",
+ FIELD_GET(HTT_TX_HWQ_STATS_MAC_ID,
+ htt_stats_buf->mac_id__hwq_id__word));
+ len += scnprintf(buf + len, buf_len - len, "hwq_id = %lu\n\n",
+ FIELD_GET(HTT_TX_HWQ_STATS_HWQ_ID,
+ htt_stats_buf->mac_id__hwq_id__word));
+
+ if (len >= buf_len)
+ buf[buf_len - 1] = 0;
+ else
+ buf[len] = 0;
+
+ stats_req->buf_len = len;
+}
+
+static inline void
+htt_print_tx_hwq_stats_cmn_tlv(const void *tag_buf, struct debug_htt_stats_req *stats_req)
+{
+ const struct htt_tx_hwq_stats_cmn_tlv *htt_stats_buf = tag_buf;
+ u8 *buf = stats_req->buf;
+ u32 len = stats_req->buf_len;
+ u32 buf_len = ATH11K_HTT_STATS_BUF_SIZE;
+
+ /* TODO: HKDBG */
+ len += scnprintf(buf + len, buf_len - len, "HTT_TX_HWQ_STATS_CMN_TLV:\n");
+ len += scnprintf(buf + len, buf_len - len, "mac_id = %lu\n",
+ FIELD_GET(HTT_TX_HWQ_STATS_MAC_ID,
+ htt_stats_buf->mac_id__hwq_id__word));
+ len += scnprintf(buf + len, buf_len - len, "hwq_id = %lu\n",
+ FIELD_GET(HTT_TX_HWQ_STATS_HWQ_ID,
+ htt_stats_buf->mac_id__hwq_id__word));
+ len += scnprintf(buf + len, buf_len - len, "xretry = %u\n",
+ htt_stats_buf->xretry);
+ len += scnprintf(buf + len, buf_len - len, "underrun_cnt = %u\n",
+ htt_stats_buf->underrun_cnt);
+ len += scnprintf(buf + len, buf_len - len, "flush_cnt = %u\n",
+ htt_stats_buf->flush_cnt);
+ len += scnprintf(buf + len, buf_len - len, "filt_cnt = %u\n",
+ htt_stats_buf->filt_cnt);
+ len += scnprintf(buf + len, buf_len - len, "null_mpdu_bmap = %u\n",
+ htt_stats_buf->null_mpdu_bmap);
+ len += scnprintf(buf + len, buf_len - len, "user_ack_failure = %u\n",
+ htt_stats_buf->user_ack_failure);
+ len += scnprintf(buf + len, buf_len - len, "ack_tlv_proc = %u\n",
+ htt_stats_buf->ack_tlv_proc);
+ len += scnprintf(buf + len, buf_len - len, "sched_id_proc = %u\n",
+ htt_stats_buf->sched_id_proc);
+ len += scnprintf(buf + len, buf_len - len, "null_mpdu_tx_count = %u\n",
+ htt_stats_buf->null_mpdu_tx_count);
+ len += scnprintf(buf + len, buf_len - len, "mpdu_bmap_not_recvd = %u\n",
+ htt_stats_buf->mpdu_bmap_not_recvd);
+ len += scnprintf(buf + len, buf_len - len, "num_bar = %u\n",
+ htt_stats_buf->num_bar);
+ len += scnprintf(buf + len, buf_len - len, "rts = %u\n",
+ htt_stats_buf->rts);
+ len += scnprintf(buf + len, buf_len - len, "cts2self = %u\n",
+ htt_stats_buf->cts2self);
+ len += scnprintf(buf + len, buf_len - len, "qos_null = %u\n",
+ htt_stats_buf->qos_null);
+ len += scnprintf(buf + len, buf_len - len, "mpdu_tried_cnt = %u\n",
+ htt_stats_buf->mpdu_tried_cnt);
+ len += scnprintf(buf + len, buf_len - len, "mpdu_queued_cnt = %u\n",
+ htt_stats_buf->mpdu_queued_cnt);
+ len += scnprintf(buf + len, buf_len - len, "mpdu_ack_fail_cnt = %u\n",
+ htt_stats_buf->mpdu_ack_fail_cnt);
+ len += scnprintf(buf + len, buf_len - len, "mpdu_filt_cnt = %u\n",
+ htt_stats_buf->mpdu_filt_cnt);
+ len += scnprintf(buf + len, buf_len - len, "false_mpdu_ack_count = %u\n",
+ htt_stats_buf->false_mpdu_ack_count);
+ len += scnprintf(buf + len, buf_len - len, "txq_timeout = %u\n\n",
+ htt_stats_buf->txq_timeout);
+
+ if (len >= buf_len)
+ buf[buf_len - 1] = 0;
+ else
+ buf[len] = 0;
+
+ stats_req->buf_len = len;
+}
+
+static inline void
+htt_print_tx_hwq_difs_latency_stats_tlv_v(const void *tag_buf,
+ u16 tag_len,
+ struct debug_htt_stats_req *stats_req)
+{
+ const struct htt_tx_hwq_difs_latency_stats_tlv_v *htt_stats_buf = tag_buf;
+ u8 *buf = stats_req->buf;
+ u32 len = stats_req->buf_len;
+ u32 buf_len = ATH11K_HTT_STATS_BUF_SIZE;
+ u16 data_len = min_t(u16, (tag_len >> 2), HTT_TX_HWQ_MAX_DIFS_LATENCY_BINS);
+
+ len += scnprintf(buf + len, buf_len - len,
+ "HTT_TX_HWQ_DIFS_LATENCY_STATS_TLV_V:\n");
+ len += scnprintf(buf + len, buf_len - len, "hist_intvl = %u\n",
+ htt_stats_buf->hist_intvl);
+
+ PRINT_ARRAY_TO_BUF(buf, len, htt_stats_buf->difs_latency_hist,
+ "difs_latency_hist", data_len, "\n\n");
+
+ if (len >= buf_len)
+ buf[buf_len - 1] = 0;
+ else
+ buf[len] = 0;
+
+ stats_req->buf_len = len;
+}
+
+static inline void
+htt_print_tx_hwq_cmd_result_stats_tlv_v(const void *tag_buf,
+ u16 tag_len,
+ struct debug_htt_stats_req *stats_req)
+{
+ const struct htt_tx_hwq_cmd_result_stats_tlv_v *htt_stats_buf = tag_buf;
+ u8 *buf = stats_req->buf;
+ u32 len = stats_req->buf_len;
+ u32 buf_len = ATH11K_HTT_STATS_BUF_SIZE;
+ u16 data_len;
+
+ data_len = min_t(u16, (tag_len >> 2), HTT_TX_HWQ_MAX_CMD_RESULT_STATS);
+
+ len += scnprintf(buf + len, buf_len - len,
+ "HTT_TX_HWQ_CMD_RESULT_STATS_TLV_V:\n");
+
+ PRINT_ARRAY_TO_BUF(buf, len, htt_stats_buf->cmd_result, "cmd_result",
+ data_len, "\n\n");
+
+ if (len >= buf_len)
+ buf[buf_len - 1] = 0;
+ else
+ buf[len] = 0;
+
+ stats_req->buf_len = len;
+}
+
+static inline void
+htt_print_tx_hwq_cmd_stall_stats_tlv_v(const void *tag_buf,
+ u16 tag_len,
+ struct debug_htt_stats_req *stats_req)
+{
+ const struct htt_tx_hwq_cmd_stall_stats_tlv_v *htt_stats_buf = tag_buf;
+ u8 *buf = stats_req->buf;
+ u32 len = stats_req->buf_len;
+ u32 buf_len = ATH11K_HTT_STATS_BUF_SIZE;
+ u16 num_elems;
+
+ num_elems = min_t(u16, (tag_len >> 2), HTT_TX_HWQ_MAX_CMD_STALL_STATS);
+
+ len += scnprintf(buf + len, buf_len - len, "HTT_TX_HWQ_CMD_STALL_STATS_TLV_V:\n");
+
+ PRINT_ARRAY_TO_BUF(buf, len, htt_stats_buf->cmd_stall_status,
+ "cmd_stall_status", num_elems, "\n\n");
+
+ if (len >= buf_len)
+ buf[buf_len - 1] = 0;
+ else
+ buf[len] = 0;
+
+ stats_req->buf_len = len;
+}
+
+static inline void
+htt_print_tx_hwq_fes_result_stats_tlv_v(const void *tag_buf,
+ u16 tag_len,
+ struct debug_htt_stats_req *stats_req)
+{
+ const struct htt_tx_hwq_fes_result_stats_tlv_v *htt_stats_buf = tag_buf;
+ u8 *buf = stats_req->buf;
+ u32 len = stats_req->buf_len;
+ u32 buf_len = ATH11K_HTT_STATS_BUF_SIZE;
+ u16 num_elems;
+
+ num_elems = min_t(u16, (tag_len >> 2), HTT_TX_HWQ_MAX_FES_RESULT_STATS);
+
+ len += scnprintf(buf + len, buf_len - len,
+ "HTT_TX_HWQ_FES_RESULT_STATS_TLV_V:\n");
+
+ PRINT_ARRAY_TO_BUF(buf, len, htt_stats_buf->fes_result, "fes_result",
+ num_elems, "\n\n");
+
+ if (len >= buf_len)
+ buf[buf_len - 1] = 0;
+ else
+ buf[len] = 0;
+
+ stats_req->buf_len = len;
+}
+
+static inline void
+htt_print_tx_hwq_tried_mpdu_cnt_hist_tlv_v(const void *tag_buf,
+ u16 tag_len,
+ struct debug_htt_stats_req *stats_req)
+{
+ const struct htt_tx_hwq_tried_mpdu_cnt_hist_tlv_v *htt_stats_buf = tag_buf;
+ u8 *buf = stats_req->buf;
+ u32 len = stats_req->buf_len;
+ u32 buf_len = ATH11K_HTT_STATS_BUF_SIZE;
+ u32 num_elements = ((tag_len -
+ sizeof(htt_stats_buf->hist_bin_size)) >> 2);
+
+ len += scnprintf(buf + len, buf_len - len,
+ "HTT_TX_HWQ_TRIED_MPDU_CNT_HIST_TLV_V:\n");
+ len += scnprintf(buf + len, buf_len - len, "TRIED_MPDU_CNT_HIST_BIN_SIZE : %u\n",
+ htt_stats_buf->hist_bin_size);
+
+ PRINT_ARRAY_TO_BUF(buf, len, htt_stats_buf->tried_mpdu_cnt_hist,
+ "tried_mpdu_cnt_hist", num_elements, "\n\n");
+
+ if (len >= buf_len)
+ buf[buf_len - 1] = 0;
+ else
+ buf[len] = 0;
+
+ stats_req->buf_len = len;
+}
+
+static inline void
+htt_print_tx_hwq_txop_used_cnt_hist_tlv_v(const void *tag_buf,
+ u16 tag_len,
+ struct debug_htt_stats_req *stats_req)
+{
+ const struct htt_tx_hwq_txop_used_cnt_hist_tlv_v *htt_stats_buf = tag_buf;
+ u8 *buf = stats_req->buf;
+ u32 len = stats_req->buf_len;
+ u32 buf_len = ATH11K_HTT_STATS_BUF_SIZE;
+ u32 num_elements = tag_len >> 2;
+
+ len += scnprintf(buf + len, buf_len - len,
+ "HTT_TX_HWQ_TXOP_USED_CNT_HIST_TLV_V:\n");
+
+ PRINT_ARRAY_TO_BUF(buf, len, htt_stats_buf->txop_used_cnt_hist,
+ "txop_used_cnt_hist", num_elements, "\n\n");
+
+ if (len >= buf_len)
+ buf[buf_len - 1] = 0;
+ else
+ buf[len] = 0;
+
+ stats_req->buf_len = len;
+}
+
+static inline void htt_print_tx_sounding_stats_tlv(const void *tag_buf,
+ struct debug_htt_stats_req *stats_req)
+{
+ s32 i;
+ const struct htt_tx_sounding_stats_tlv *htt_stats_buf = tag_buf;
+ u8 *buf = stats_req->buf;
+ u32 len = stats_req->buf_len;
+ u32 buf_len = ATH11K_HTT_STATS_BUF_SIZE;
+ const u32 *cbf_20 = htt_stats_buf->cbf_20;
+ const u32 *cbf_40 = htt_stats_buf->cbf_40;
+ const u32 *cbf_80 = htt_stats_buf->cbf_80;
+ const u32 *cbf_160 = htt_stats_buf->cbf_160;
+
+ if (htt_stats_buf->tx_sounding_mode == HTT_TX_AC_SOUNDING_MODE) {
+ len += scnprintf(buf + len, buf_len - len,
+ "\nHTT_TX_AC_SOUNDING_STATS_TLV:\n\n");
+ len += scnprintf(buf + len, buf_len - len,
+ "ac_cbf_20 = IBF : %u, SU_SIFS : %u, SU_RBO : %u, MU_SIFS : %u, MU_RBO : %u\n",
+ cbf_20[HTT_IMPLICIT_TXBF_STEER_STATS],
+ cbf_20[HTT_EXPLICIT_TXBF_SU_SIFS_STEER_STATS],
+ cbf_20[HTT_EXPLICIT_TXBF_SU_RBO_STEER_STATS],
+ cbf_20[HTT_EXPLICIT_TXBF_MU_SIFS_STEER_STATS],
+ cbf_20[HTT_EXPLICIT_TXBF_MU_RBO_STEER_STATS]);
+ len += scnprintf(buf + len, buf_len - len,
+ "ac_cbf_40 = IBF : %u, SU_SIFS : %u, SU_RBO : %u, MU_SIFS : %u, MU_RBO : %u\n",
+ cbf_40[HTT_IMPLICIT_TXBF_STEER_STATS],
+ cbf_40[HTT_EXPLICIT_TXBF_SU_SIFS_STEER_STATS],
+ cbf_40[HTT_EXPLICIT_TXBF_SU_RBO_STEER_STATS],
+ cbf_40[HTT_EXPLICIT_TXBF_MU_SIFS_STEER_STATS],
+ cbf_40[HTT_EXPLICIT_TXBF_MU_RBO_STEER_STATS]);
+ len += scnprintf(buf + len, buf_len - len,
+ "ac_cbf_80 = IBF : %u, SU_SIFS : %u, SU_RBO : %u, MU_SIFS : %u, MU_RBO : %u\n",
+ cbf_80[HTT_IMPLICIT_TXBF_STEER_STATS],
+ cbf_80[HTT_EXPLICIT_TXBF_SU_SIFS_STEER_STATS],
+ cbf_80[HTT_EXPLICIT_TXBF_SU_RBO_STEER_STATS],
+ cbf_80[HTT_EXPLICIT_TXBF_MU_SIFS_STEER_STATS],
+ cbf_80[HTT_EXPLICIT_TXBF_MU_RBO_STEER_STATS]);
+ len += scnprintf(buf + len, buf_len - len,
+ "ac_cbf_160 = IBF : %u, SU_SIFS : %u, SU_RBO : %u, MU_SIFS : %u, MU_RBO : %u\n",
+ cbf_160[HTT_IMPLICIT_TXBF_STEER_STATS],
+ cbf_160[HTT_EXPLICIT_TXBF_SU_SIFS_STEER_STATS],
+ cbf_160[HTT_EXPLICIT_TXBF_SU_RBO_STEER_STATS],
+ cbf_160[HTT_EXPLICIT_TXBF_MU_SIFS_STEER_STATS],
+ cbf_160[HTT_EXPLICIT_TXBF_MU_RBO_STEER_STATS]);
+
+ for (i = 0; i < HTT_TX_PDEV_STATS_NUM_AC_MUMIMO_USER_STATS; i++) {
+ len += scnprintf(buf + len, buf_len - len,
+ "Sounding User %u = 20MHz: %u, 40MHz : %u, 80MHz: %u, 160MHz: %u\n",
+ i,
+ htt_stats_buf->sounding[0],
+ htt_stats_buf->sounding[1],
+ htt_stats_buf->sounding[2],
+ htt_stats_buf->sounding[3]);
+ }
+ } else if (htt_stats_buf->tx_sounding_mode == HTT_TX_AX_SOUNDING_MODE) {
+ len += scnprintf(buf + len, buf_len - len,
+ "\nHTT_TX_AX_SOUNDING_STATS_TLV:\n");
+ len += scnprintf(buf + len, buf_len - len,
+ "ax_cbf_20 = IBF : %u, SU_SIFS : %u, SU_RBO : %u, MU_SIFS : %u, MU_RBO : %u\n",
+ cbf_20[HTT_IMPLICIT_TXBF_STEER_STATS],
+ cbf_20[HTT_EXPLICIT_TXBF_SU_SIFS_STEER_STATS],
+ cbf_20[HTT_EXPLICIT_TXBF_SU_RBO_STEER_STATS],
+ cbf_20[HTT_EXPLICIT_TXBF_MU_SIFS_STEER_STATS],
+ cbf_20[HTT_EXPLICIT_TXBF_MU_RBO_STEER_STATS]);
+ len += scnprintf(buf + len, buf_len - len,
+ "ax_cbf_40 = IBF : %u, SU_SIFS : %u, SU_RBO : %u, MU_SIFS : %u, MU_RBO : %u\n",
+ cbf_40[HTT_IMPLICIT_TXBF_STEER_STATS],
+ cbf_40[HTT_EXPLICIT_TXBF_SU_SIFS_STEER_STATS],
+ cbf_40[HTT_EXPLICIT_TXBF_SU_RBO_STEER_STATS],
+ cbf_40[HTT_EXPLICIT_TXBF_MU_SIFS_STEER_STATS],
+ cbf_40[HTT_EXPLICIT_TXBF_MU_RBO_STEER_STATS]);
+ len += scnprintf(buf + len, buf_len - len,
+ "ax_cbf_80 = IBF : %u, SU_SIFS : %u, SU_RBO : %u, MU_SIFS : %u, MU_RBO : %u\n",
+ cbf_80[HTT_IMPLICIT_TXBF_STEER_STATS],
+ cbf_80[HTT_EXPLICIT_TXBF_SU_SIFS_STEER_STATS],
+ cbf_80[HTT_EXPLICIT_TXBF_SU_RBO_STEER_STATS],
+ cbf_80[HTT_EXPLICIT_TXBF_MU_SIFS_STEER_STATS],
+ cbf_80[HTT_EXPLICIT_TXBF_MU_RBO_STEER_STATS]);
+ len += scnprintf(buf + len, buf_len - len,
+ "ax_cbf_160 = IBF : %u, SU_SIFS : %u, SU_RBO : %u, MU_SIFS : %u, MU_RBO : %u\n",
+ cbf_160[HTT_IMPLICIT_TXBF_STEER_STATS],
+ cbf_160[HTT_EXPLICIT_TXBF_SU_SIFS_STEER_STATS],
+ cbf_160[HTT_EXPLICIT_TXBF_SU_RBO_STEER_STATS],
+ cbf_160[HTT_EXPLICIT_TXBF_MU_SIFS_STEER_STATS],
+ cbf_160[HTT_EXPLICIT_TXBF_MU_RBO_STEER_STATS]);
+
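+		/* same assumed per-user layout of sounding[] as in the AC case */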
+		for (i = 0; i < HTT_TX_PDEV_STATS_NUM_AX_MUMIMO_USER_STATS; i++) {
+			const u32 *snd =
+				&htt_stats_buf->sounding[i * HTT_TX_PDEV_STATS_NUM_BW_COUNTERS];
+
+			len += scnprintf(buf + len, buf_len - len,
+					 "Sounding User %u = 20MHz: %u, 40MHz: %u, 80MHz: %u, 160MHz: %u\n",
+					 i, snd[0], snd[1], snd[2], snd[3]);
+		}
+ }
+
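+	/* keep the stats string NUL-terminated even if output was truncated */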
+ if (len >= buf_len)
+ buf[buf_len - 1] = 0;
+ else
+ buf[len] = 0;
+
+ stats_req->buf_len = len;
+}
+
+static inline void
+htt_print_tx_selfgen_cmn_stats_tlv(const void *tag_buf,
+ struct debug_htt_stats_req *stats_req)
+{
+ const struct htt_tx_selfgen_cmn_stats_tlv *htt_stats_buf = tag_buf;
+ u8 *buf = stats_req->buf;
+ u32 len = stats_req->buf_len;
+ u32 buf_len = ATH11K_HTT_STATS_BUF_SIZE;
+
+ len += scnprintf(buf + len, buf_len - len, "HTT_TX_SELFGEN_CMN_STATS_TLV:\n");
+ len += scnprintf(buf + len, buf_len - len, "mac_id = %lu\n",
+ FIELD_GET(HTT_STATS_MAC_ID, htt_stats_buf->mac_id__word));
+ len += scnprintf(buf + len, buf_len - len, "su_bar = %u\n",
+ htt_stats_buf->su_bar);
+ len += scnprintf(buf + len, buf_len - len, "rts = %u\n",
+ htt_stats_buf->rts);
+ len += scnprintf(buf + len, buf_len - len, "cts2self = %u\n",
+ htt_stats_buf->cts2self);
+ len += scnprintf(buf + len, buf_len - len, "qos_null = %u\n",
+ htt_stats_buf->qos_null);
+ len += scnprintf(buf + len, buf_len - len, "delayed_bar_1 = %u\n",
+ htt_stats_buf->delayed_bar_1);
+ len += scnprintf(buf + len, buf_len - len, "delayed_bar_2 = %u\n",
+ htt_stats_buf->delayed_bar_2);
+ len += scnprintf(buf + len, buf_len - len, "delayed_bar_3 = %u\n",
+ htt_stats_buf->delayed_bar_3);
+ len += scnprintf(buf + len, buf_len - len, "delayed_bar_4 = %u\n",
+ htt_stats_buf->delayed_bar_4);
+ len += scnprintf(buf + len, buf_len - len, "delayed_bar_5 = %u\n",
+ htt_stats_buf->delayed_bar_5);
+ len += scnprintf(buf + len, buf_len - len, "delayed_bar_6 = %u\n",
+ htt_stats_buf->delayed_bar_6);
+ len += scnprintf(buf + len, buf_len - len, "delayed_bar_7 = %u\n\n",
+ htt_stats_buf->delayed_bar_7);
+
+ if (len >= buf_len)
+ buf[buf_len - 1] = 0;
+ else
+ buf[len] = 0;
+
+ stats_req->buf_len = len;
+}
+
+static inline void
+htt_print_tx_selfgen_ac_stats_tlv(const void *tag_buf,
+ struct debug_htt_stats_req *stats_req)
+{
+ const struct htt_tx_selfgen_ac_stats_tlv *htt_stats_buf = tag_buf;
+ u8 *buf = stats_req->buf;
+ u32 len = stats_req->buf_len;
+ u32 buf_len = ATH11K_HTT_STATS_BUF_SIZE;
+
+ len += scnprintf(buf + len, buf_len - len, "HTT_TX_SELFGEN_AC_STATS_TLV:\n");
+ len += scnprintf(buf + len, buf_len - len, "ac_su_ndpa = %u\n",
+ htt_stats_buf->ac_su_ndpa);
+ len += scnprintf(buf + len, buf_len - len, "ac_su_ndp = %u\n",
+ htt_stats_buf->ac_su_ndp);
+ len += scnprintf(buf + len, buf_len - len, "ac_mu_mimo_ndpa = %u\n",
+ htt_stats_buf->ac_mu_mimo_ndpa);
+ len += scnprintf(buf + len, buf_len - len, "ac_mu_mimo_ndp = %u\n",
+ htt_stats_buf->ac_mu_mimo_ndp);
+ len += scnprintf(buf + len, buf_len - len, "ac_mu_mimo_brpoll_1 = %u\n",
+ htt_stats_buf->ac_mu_mimo_brpoll_1);
+ len += scnprintf(buf + len, buf_len - len, "ac_mu_mimo_brpoll_2 = %u\n",
+ htt_stats_buf->ac_mu_mimo_brpoll_2);
+ len += scnprintf(buf + len, buf_len - len, "ac_mu_mimo_brpoll_3 = %u\n\n",
+ htt_stats_buf->ac_mu_mimo_brpoll_3);
+
+ if (len >= buf_len)
+ buf[buf_len - 1] = 0;
+ else
+ buf[len] = 0;
+
+ stats_req->buf_len = len;
+}
+
+static inline void
+htt_print_tx_selfgen_ax_stats_tlv(const void *tag_buf,
+ struct debug_htt_stats_req *stats_req)
+{
+ const struct htt_tx_selfgen_ax_stats_tlv *htt_stats_buf = tag_buf;
+ u8 *buf = stats_req->buf;
+ u32 len = stats_req->buf_len;
+ u32 buf_len = ATH11K_HTT_STATS_BUF_SIZE;
+
+ len += scnprintf(buf + len, buf_len - len, "HTT_TX_SELFGEN_AX_STATS_TLV:\n");
+ len += scnprintf(buf + len, buf_len - len, "ax_su_ndpa = %u\n",
+ htt_stats_buf->ax_su_ndpa);
+ len += scnprintf(buf + len, buf_len - len, "ax_su_ndp = %u\n",
+ htt_stats_buf->ax_su_ndp);
+ len += scnprintf(buf + len, buf_len - len, "ax_mu_mimo_ndpa = %u\n",
+ htt_stats_buf->ax_mu_mimo_ndpa);
+ len += scnprintf(buf + len, buf_len - len, "ax_mu_mimo_ndp = %u\n",
+ htt_stats_buf->ax_mu_mimo_ndp);
+ len += scnprintf(buf + len, buf_len - len, "ax_mu_mimo_brpoll_1 = %u\n",
+ htt_stats_buf->ax_mu_mimo_brpoll_1);
+ len += scnprintf(buf + len, buf_len - len, "ax_mu_mimo_brpoll_2 = %u\n",
+ htt_stats_buf->ax_mu_mimo_brpoll_2);
+ len += scnprintf(buf + len, buf_len - len, "ax_mu_mimo_brpoll_3 = %u\n",
+ htt_stats_buf->ax_mu_mimo_brpoll_3);
+ len += scnprintf(buf + len, buf_len - len, "ax_mu_mimo_brpoll_4 = %u\n",
+ htt_stats_buf->ax_mu_mimo_brpoll_4);
+ len += scnprintf(buf + len, buf_len - len, "ax_mu_mimo_brpoll_5 = %u\n",
+ htt_stats_buf->ax_mu_mimo_brpoll_5);
+ len += scnprintf(buf + len, buf_len - len, "ax_mu_mimo_brpoll_6 = %u\n",
+ htt_stats_buf->ax_mu_mimo_brpoll_6);
+ len += scnprintf(buf + len, buf_len - len, "ax_mu_mimo_brpoll_7 = %u\n",
+ htt_stats_buf->ax_mu_mimo_brpoll_7);
+ len += scnprintf(buf + len, buf_len - len, "ax_basic_trigger = %u\n",
+ htt_stats_buf->ax_basic_trigger);
+ len += scnprintf(buf + len, buf_len - len, "ax_ulmumimo_trigger = %u\n",
+ htt_stats_buf->ax_ulmumimo_trigger);
+ len += scnprintf(buf + len, buf_len - len, "ax_bsr_trigger = %u\n",
+ htt_stats_buf->ax_bsr_trigger);
+ len += scnprintf(buf + len, buf_len - len, "ax_mu_bar_trigger = %u\n",
+ htt_stats_buf->ax_mu_bar_trigger);
+ len += scnprintf(buf + len, buf_len - len, "ax_mu_rts_trigger = %u\n\n",
+ htt_stats_buf->ax_mu_rts_trigger);
+
+ if (len >= buf_len)
+ buf[buf_len - 1] = 0;
+ else
+ buf[len] = 0;
+
+ stats_req->buf_len = len;
+}
+
+static inline void
+htt_print_tx_selfgen_ac_err_stats_tlv(const void *tag_buf,
+ struct debug_htt_stats_req *stats_req)
+{
+ const struct htt_tx_selfgen_ac_err_stats_tlv *htt_stats_buf = tag_buf;
+ u8 *buf = stats_req->buf;
+ u32 len = stats_req->buf_len;
+ u32 buf_len = ATH11K_HTT_STATS_BUF_SIZE;
+
+ len += scnprintf(buf + len, buf_len - len, "HTT_TX_SELFGEN_AC_ERR_STATS_TLV:\n");
+ len += scnprintf(buf + len, buf_len - len, "ac_su_ndp_err = %u\n",
+ htt_stats_buf->ac_su_ndp_err);
+ len += scnprintf(buf + len, buf_len - len, "ac_su_ndpa_err = %u\n",
+ htt_stats_buf->ac_su_ndpa_err);
+ len += scnprintf(buf + len, buf_len - len, "ac_mu_mimo_ndpa_err = %u\n",
+ htt_stats_buf->ac_mu_mimo_ndpa_err);
+ len += scnprintf(buf + len, buf_len - len, "ac_mu_mimo_ndp_err = %u\n",
+ htt_stats_buf->ac_mu_mimo_ndp_err);
+ len += scnprintf(buf + len, buf_len - len, "ac_mu_mimo_brp1_err = %u\n",
+ htt_stats_buf->ac_mu_mimo_brp1_err);
+ len += scnprintf(buf + len, buf_len - len, "ac_mu_mimo_brp2_err = %u\n",
+ htt_stats_buf->ac_mu_mimo_brp2_err);
+ len += scnprintf(buf + len, buf_len - len, "ac_mu_mimo_brp3_err = %u\n\n",
+ htt_stats_buf->ac_mu_mimo_brp3_err);
+
+ if (len >= buf_len)
+ buf[buf_len - 1] = 0;
+ else
+ buf[len] = 0;
+
+ stats_req->buf_len = len;
+}
+
+static inline void
+htt_print_tx_selfgen_ax_err_stats_tlv(const void *tag_buf,
+ struct debug_htt_stats_req *stats_req)
+{
+ const struct htt_tx_selfgen_ax_err_stats_tlv *htt_stats_buf = tag_buf;
+ u8 *buf = stats_req->buf;
+ u32 len = stats_req->buf_len;
+ u32 buf_len = ATH11K_HTT_STATS_BUF_SIZE;
+
+ len += scnprintf(buf + len, buf_len - len, "HTT_TX_SELFGEN_AX_ERR_STATS_TLV:\n");
+ len += scnprintf(buf + len, buf_len - len, "ax_su_ndp_err = %u\n",
+ htt_stats_buf->ax_su_ndp_err);
+ len += scnprintf(buf + len, buf_len - len, "ax_su_ndpa_err = %u\n",
+ htt_stats_buf->ax_su_ndpa_err);
+ len += scnprintf(buf + len, buf_len - len, "ax_mu_mimo_ndpa_err = %u\n",
+ htt_stats_buf->ax_mu_mimo_ndpa_err);
+ len += scnprintf(buf + len, buf_len - len, "ax_mu_mimo_ndp_err = %u\n",
+ htt_stats_buf->ax_mu_mimo_ndp_err);
+ len += scnprintf(buf + len, buf_len - len, "ax_mu_mimo_brp1_err = %u\n",
+ htt_stats_buf->ax_mu_mimo_brp1_err);
+ len += scnprintf(buf + len, buf_len - len, "ax_mu_mimo_brp2_err = %u\n",
+ htt_stats_buf->ax_mu_mimo_brp2_err);
+ len += scnprintf(buf + len, buf_len - len, "ax_mu_mimo_brp3_err = %u\n",
+ htt_stats_buf->ax_mu_mimo_brp3_err);
+ len += scnprintf(buf + len, buf_len - len, "ax_mu_mimo_brp4_err = %u\n",
+ htt_stats_buf->ax_mu_mimo_brp4_err);
+ len += scnprintf(buf + len, buf_len - len, "ax_mu_mimo_brp5_err = %u\n",
+ htt_stats_buf->ax_mu_mimo_brp5_err);
+ len += scnprintf(buf + len, buf_len - len, "ax_mu_mimo_brp6_err = %u\n",
+ htt_stats_buf->ax_mu_mimo_brp6_err);
+ len += scnprintf(buf + len, buf_len - len, "ax_mu_mimo_brp7_err = %u\n",
+ htt_stats_buf->ax_mu_mimo_brp7_err);
+ len += scnprintf(buf + len, buf_len - len, "ax_basic_trigger_err = %u\n",
+ htt_stats_buf->ax_basic_trigger_err);
+ len += scnprintf(buf + len, buf_len - len, "ax_ulmumimo_trigger_err = %u\n",
+ htt_stats_buf->ax_ulmumimo_trigger_err);
+ len += scnprintf(buf + len, buf_len - len, "ax_bsr_trigger_err = %u\n",
+ htt_stats_buf->ax_bsr_trigger_err);
+ len += scnprintf(buf + len, buf_len - len, "ax_mu_bar_trigger_err = %u\n",
+ htt_stats_buf->ax_mu_bar_trigger_err);
+ len += scnprintf(buf + len, buf_len - len, "ax_mu_rts_trigger_err = %u\n\n",
+ htt_stats_buf->ax_mu_rts_trigger_err);
+
+ if (len >= buf_len)
+ buf[buf_len - 1] = 0;
+ else
+ buf[len] = 0;
+
+ stats_req->buf_len = len;
+}
+
+static inline void
+htt_print_tx_pdev_mu_mimo_sch_stats_tlv(const void *tag_buf,
+ struct debug_htt_stats_req *stats_req)
+{
+ const struct htt_tx_pdev_mu_mimo_sch_stats_tlv *htt_stats_buf = tag_buf;
+ u8 *buf = stats_req->buf;
+ u32 len = stats_req->buf_len;
+ u32 buf_len = ATH11K_HTT_STATS_BUF_SIZE;
+ u8 i;
+
+ len += scnprintf(buf + len, buf_len - len,
+ "HTT_TX_PDEV_MU_MIMO_SCH_STATS_TLV:\n");
+ len += scnprintf(buf + len, buf_len - len, "mu_mimo_sch_posted = %u\n",
+ htt_stats_buf->mu_mimo_sch_posted);
+ len += scnprintf(buf + len, buf_len - len, "mu_mimo_sch_failed = %u\n",
+ htt_stats_buf->mu_mimo_sch_failed);
+ len += scnprintf(buf + len, buf_len - len, "mu_mimo_ppdu_posted = %u\n\n",
+ htt_stats_buf->mu_mimo_ppdu_posted);
+
+ for (i = 0; i < HTT_TX_PDEV_STATS_NUM_AC_MUMIMO_USER_STATS; i++)
+ len += scnprintf(buf + len, buf_len - len,
+ "ac_mu_mimo_sch_posted_per_group_index %u = %u\n",
+ i, htt_stats_buf->ac_mu_mimo_sch_posted_per_grp_sz[i]);
+
+ for (i = 0; i < HTT_TX_PDEV_STATS_NUM_AX_MUMIMO_USER_STATS; i++)
+ len += scnprintf(buf + len, buf_len - len,
+ "ax_mu_mimo_sch_posted_per_group_index %u = %u\n",
+ i, htt_stats_buf->ax_mu_mimo_sch_posted_per_grp_sz[i]);
+
+ len += scnprintf(buf + len, buf_len - len, "11ac MU_MIMO SCH STATS:\n");
+
+ for (i = 0; i < HTT_TX_PDEV_STATS_NUM_AC_MUMIMO_USER_STATS; i++)
+ len += scnprintf(buf + len, buf_len - len,
+ "ac_mu_mimo_sch_nusers_%u = %u\n",
+ i, htt_stats_buf->ac_mu_mimo_sch_nusers[i]);
+
+ len += scnprintf(buf + len, buf_len - len, "\n11ax MU_MIMO SCH STATS:\n");
+
+ for (i = 0; i < HTT_TX_PDEV_STATS_NUM_AX_MUMIMO_USER_STATS; i++)
+ len += scnprintf(buf + len, buf_len - len,
+ "ax_mu_mimo_sch_nusers_%u = %u\n",
+ i, htt_stats_buf->ax_mu_mimo_sch_nusers[i]);
+
+ len += scnprintf(buf + len, buf_len - len, "\n11ax OFDMA SCH STATS:\n");
+
+ for (i = 0; i < HTT_TX_PDEV_STATS_NUM_OFDMA_USER_STATS; i++) {
+ len += scnprintf(buf + len, buf_len - len,
+ "ax_ofdma_sch_nusers_%u = %u\n",
+ i, htt_stats_buf->ax_ofdma_sch_nusers[i]);
+ len += scnprintf(buf + len, buf_len - len,
+ "ax_ul_ofdma_basic_sch_nusers_%u = %u\n",
+ i, htt_stats_buf->ax_ul_ofdma_basic_sch_nusers[i]);
+ len += scnprintf(buf + len, buf_len - len,
+ "ax_ul_ofdma_bsr_sch_nusers_%u = %u\n",
+ i, htt_stats_buf->ax_ul_ofdma_bsr_sch_nusers[i]);
+		len += scnprintf(buf + len, buf_len - len,
+				 "ax_ul_ofdma_bar_sch_nusers_%u = %u\n",
+				 i, htt_stats_buf->ax_ul_ofdma_bar_sch_nusers[i]);
+ len += scnprintf(buf + len, buf_len - len,
+ "ax_ul_ofdma_brp_sch_nusers_%u = %u\n",
+ i, htt_stats_buf->ax_ul_ofdma_brp_sch_nusers[i]);
+ }
+
+	len += scnprintf(buf + len, buf_len - len, "\n11ax UL MUMIMO SCH STATS:\n");
+
+ for (i = 0; i < HTT_TX_PDEV_STATS_NUM_UL_MUMIMO_USER_STATS; i++) {
+ len += scnprintf(buf + len, buf_len - len,
+ "ax_ul_mumimo_basic_sch_nusers_%u = %u\n",
+ i, htt_stats_buf->ax_ul_mumimo_basic_sch_nusers[i]);
+ len += scnprintf(buf + len, buf_len - len,
+ "ax_ul_mumimo_brp_sch_nusers_%u = %u\n",
+ i, htt_stats_buf->ax_ul_mumimo_brp_sch_nusers[i]);
+ }
+
+ if (len >= buf_len)
+ buf[buf_len - 1] = 0;
+ else
+ buf[len] = 0;
+
+ stats_req->buf_len = len;
+}
+
+static inline void
+htt_print_tx_pdev_mu_mimo_mpdu_stats_tlv(const void *tag_buf,
+ struct debug_htt_stats_req *stats_req)
+{
+ const struct htt_tx_pdev_mpdu_stats_tlv *htt_stats_buf = tag_buf;
+ u8 *buf = stats_req->buf;
+ u32 len = stats_req->buf_len;
+ u32 buf_len = ATH11K_HTT_STATS_BUF_SIZE;
+
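+	/*
+	 * One MPDU-stats TLV arrives per MU user; the section header is
+	 * printed only when the first user (user_index 0) is seen.
+	 */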
+ if (htt_stats_buf->tx_sched_mode == HTT_STATS_TX_SCHED_MODE_MU_MIMO_AC) {
+ if (!htt_stats_buf->user_index)
+ len += scnprintf(buf + len, buf_len - len,
+ "HTT_TX_PDEV_MU_MIMO_AC_MPDU_STATS:\n");
+
+ if (htt_stats_buf->user_index <
+ HTT_TX_PDEV_STATS_NUM_AC_MUMIMO_USER_STATS) {
+ len += scnprintf(buf + len, buf_len - len,
+ "ac_mu_mimo_mpdus_queued_usr_%u = %u\n",
+ htt_stats_buf->user_index,
+ htt_stats_buf->mpdus_queued_usr);
+ len += scnprintf(buf + len, buf_len - len,
+ "ac_mu_mimo_mpdus_tried_usr_%u = %u\n",
+ htt_stats_buf->user_index,
+ htt_stats_buf->mpdus_tried_usr);
+ len += scnprintf(buf + len, buf_len - len,
+ "ac_mu_mimo_mpdus_failed_usr_%u = %u\n",
+ htt_stats_buf->user_index,
+ htt_stats_buf->mpdus_failed_usr);
+ len += scnprintf(buf + len, buf_len - len,
+ "ac_mu_mimo_mpdus_requeued_usr_%u = %u\n",
+ htt_stats_buf->user_index,
+ htt_stats_buf->mpdus_requeued_usr);
+ len += scnprintf(buf + len, buf_len - len,
+ "ac_mu_mimo_err_no_ba_usr_%u = %u\n",
+ htt_stats_buf->user_index,
+ htt_stats_buf->err_no_ba_usr);
+ len += scnprintf(buf + len, buf_len - len,
+ "ac_mu_mimo_mpdu_underrun_usr_%u = %u\n",
+ htt_stats_buf->user_index,
+ htt_stats_buf->mpdu_underrun_usr);
+ len += scnprintf(buf + len, buf_len - len,
+ "ac_mu_mimo_ampdu_underrun_usr_%u = %u\n\n",
+ htt_stats_buf->user_index,
+ htt_stats_buf->ampdu_underrun_usr);
+ }
+ }
+
+ if (htt_stats_buf->tx_sched_mode == HTT_STATS_TX_SCHED_MODE_MU_MIMO_AX) {
+ if (!htt_stats_buf->user_index)
+ len += scnprintf(buf + len, buf_len - len,
+ "HTT_TX_PDEV_MU_MIMO_AX_MPDU_STATS:\n");
+
+ if (htt_stats_buf->user_index <
+ HTT_TX_PDEV_STATS_NUM_AX_MUMIMO_USER_STATS) {
+ len += scnprintf(buf + len, buf_len - len,
+ "ax_mu_mimo_mpdus_queued_usr_%u = %u\n",
+ htt_stats_buf->user_index,
+ htt_stats_buf->mpdus_queued_usr);
+ len += scnprintf(buf + len, buf_len - len,
+ "ax_mu_mimo_mpdus_tried_usr_%u = %u\n",
+ htt_stats_buf->user_index,
+ htt_stats_buf->mpdus_tried_usr);
+ len += scnprintf(buf + len, buf_len - len,
+ "ax_mu_mimo_mpdus_failed_usr_%u = %u\n",
+ htt_stats_buf->user_index,
+ htt_stats_buf->mpdus_failed_usr);
+ len += scnprintf(buf + len, buf_len - len,
+ "ax_mu_mimo_mpdus_requeued_usr_%u = %u\n",
+ htt_stats_buf->user_index,
+ htt_stats_buf->mpdus_requeued_usr);
+ len += scnprintf(buf + len, buf_len - len,
+ "ax_mu_mimo_err_no_ba_usr_%u = %u\n",
+ htt_stats_buf->user_index,
+ htt_stats_buf->err_no_ba_usr);
+ len += scnprintf(buf + len, buf_len - len,
+ "ax_mu_mimo_mpdu_underrun_usr_%u = %u\n",
+ htt_stats_buf->user_index,
+ htt_stats_buf->mpdu_underrun_usr);
+ len += scnprintf(buf + len, buf_len - len,
+ "ax_mu_mimo_ampdu_underrun_usr_%u = %u\n\n",
+ htt_stats_buf->user_index,
+ htt_stats_buf->ampdu_underrun_usr);
+ }
+ }
+
+ if (htt_stats_buf->tx_sched_mode == HTT_STATS_TX_SCHED_MODE_MU_OFDMA_AX) {
+ if (!htt_stats_buf->user_index)
+ len += scnprintf(buf + len, buf_len - len,
+ "HTT_TX_PDEV_AX_MU_OFDMA_MPDU_STATS:\n");
+
+ if (htt_stats_buf->user_index < HTT_TX_PDEV_STATS_NUM_OFDMA_USER_STATS) {
+ len += scnprintf(buf + len, buf_len - len,
+ "ax_mu_ofdma_mpdus_queued_usr_%u = %u\n",
+ htt_stats_buf->user_index,
+ htt_stats_buf->mpdus_queued_usr);
+ len += scnprintf(buf + len, buf_len - len,
+ "ax_mu_ofdma_mpdus_tried_usr_%u = %u\n",
+ htt_stats_buf->user_index,
+ htt_stats_buf->mpdus_tried_usr);
+ len += scnprintf(buf + len, buf_len - len,
+ "ax_mu_ofdma_mpdus_failed_usr_%u = %u\n",
+ htt_stats_buf->user_index,
+ htt_stats_buf->mpdus_failed_usr);
+ len += scnprintf(buf + len, buf_len - len,
+ "ax_mu_ofdma_mpdus_requeued_usr_%u = %u\n",
+ htt_stats_buf->user_index,
+ htt_stats_buf->mpdus_requeued_usr);
+ len += scnprintf(buf + len, buf_len - len,
+ "ax_mu_ofdma_err_no_ba_usr_%u = %u\n",
+ htt_stats_buf->user_index,
+ htt_stats_buf->err_no_ba_usr);
+ len += scnprintf(buf + len, buf_len - len,
+ "ax_mu_ofdma_mpdu_underrun_usr_%u = %u\n",
+ htt_stats_buf->user_index,
+ htt_stats_buf->mpdu_underrun_usr);
+ len += scnprintf(buf + len, buf_len - len,
+ "ax_mu_ofdma_ampdu_underrun_usr_%u = %u\n\n",
+ htt_stats_buf->user_index,
+ htt_stats_buf->ampdu_underrun_usr);
+ }
+ }
+
+ if (len >= buf_len)
+ buf[buf_len - 1] = 0;
+ else
+ buf[len] = 0;
+
+ stats_req->buf_len = len;
+}
+
+static inline void
+htt_print_sched_txq_cmd_posted_tlv_v(const void *tag_buf,
+ u16 tag_len,
+ struct debug_htt_stats_req *stats_req)
+{
+ const struct htt_sched_txq_cmd_posted_tlv_v *htt_stats_buf = tag_buf;
+ u8 *buf = stats_req->buf;
+ u32 len = stats_req->buf_len;
+ u32 buf_len = ATH11K_HTT_STATS_BUF_SIZE;
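+	/* each entry is u32, i.e. 4 bytes */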
+ u16 num_elements = min_t(u16, (tag_len >> 2), HTT_TX_PDEV_SCHED_TX_MODE_MAX);
+
+ len += scnprintf(buf + len, buf_len - len, "HTT_SCHED_TXQ_CMD_POSTED_TLV_V:\n");
+
+ PRINT_ARRAY_TO_BUF(buf, len, htt_stats_buf->sched_cmd_posted,
+ "sched_cmd_posted", num_elements, "\n\n");
+
+ if (len >= buf_len)
+ buf[buf_len - 1] = 0;
+ else
+ buf[len] = 0;
+
+ stats_req->buf_len = len;
+}
+
+static inline void
+htt_print_sched_txq_cmd_reaped_tlv_v(const void *tag_buf,
+ u16 tag_len,
+ struct debug_htt_stats_req *stats_req)
+{
+ const struct htt_sched_txq_cmd_reaped_tlv_v *htt_stats_buf = tag_buf;
+ u8 *buf = stats_req->buf;
+ u32 len = stats_req->buf_len;
+ u32 buf_len = ATH11K_HTT_STATS_BUF_SIZE;
+ u16 num_elements = min_t(u16, (tag_len >> 2), HTT_TX_PDEV_SCHED_TX_MODE_MAX);
+
+ len += scnprintf(buf + len, buf_len - len, "HTT_SCHED_TXQ_CMD_REAPED_TLV_V:\n");
+
+ PRINT_ARRAY_TO_BUF(buf, len, htt_stats_buf->sched_cmd_reaped,
+ "sched_cmd_reaped", num_elements, "\n\n");
+
+ if (len >= buf_len)
+ buf[buf_len - 1] = 0;
+ else
+ buf[len] = 0;
+
+ stats_req->buf_len = len;
+}
+
+static inline void
+htt_print_sched_txq_sched_order_su_tlv_v(const void *tag_buf,
+ u16 tag_len,
+ struct debug_htt_stats_req *stats_req)
+{
+ const struct htt_sched_txq_sched_order_su_tlv_v *htt_stats_buf = tag_buf;
+ u8 *buf = stats_req->buf;
+ u32 len = stats_req->buf_len;
+ u32 buf_len = ATH11K_HTT_STATS_BUF_SIZE;
+ /* each entry is u32, i.e. 4 bytes */
+ u32 sched_order_su_num_entries =
+ min_t(u32, (tag_len >> 2), HTT_TX_PDEV_NUM_SCHED_ORDER_LOG);
+
+ len += scnprintf(buf + len, buf_len - len,
+ "HTT_SCHED_TXQ_SCHED_ORDER_SU_TLV_V:\n");
+
+ PRINT_ARRAY_TO_BUF(buf, len, htt_stats_buf->sched_order_su, "sched_order_su",
+ sched_order_su_num_entries, "\n\n");
+
+ if (len >= buf_len)
+ buf[buf_len - 1] = 0;
+ else
+ buf[len] = 0;
+
+ stats_req->buf_len = len;
+}
+
+static inline void
+htt_print_sched_txq_sched_ineligibility_tlv_v(const void *tag_buf,
+ u16 tag_len,
+ struct debug_htt_stats_req *stats_req)
+{
+ const struct htt_sched_txq_sched_ineligibility_tlv_v *htt_stats_buf = tag_buf;
+ u8 *buf = stats_req->buf;
+ u32 len = stats_req->buf_len;
+ u32 buf_len = ATH11K_HTT_STATS_BUF_SIZE;
+ /* each entry is u32, i.e. 4 bytes */
+ u32 sched_ineligibility_num_entries = tag_len >> 2;
+
+ len += scnprintf(buf + len, buf_len - len,
+			 "HTT_SCHED_TXQ_SCHED_INELIGIBILITY_TLV_V:\n");
+
+ PRINT_ARRAY_TO_BUF(buf, len, htt_stats_buf->sched_ineligibility,
+ "sched_ineligibility", sched_ineligibility_num_entries,
+ "\n\n");
+
+ if (len >= buf_len)
+ buf[buf_len - 1] = 0;
+ else
+ buf[len] = 0;
+
+ stats_req->buf_len = len;
+}
+
+static inline void
+htt_print_tx_pdev_stats_sched_per_txq_tlv(const void *tag_buf,
+ struct debug_htt_stats_req *stats_req)
+{
+ const struct htt_tx_pdev_stats_sched_per_txq_tlv *htt_stats_buf = tag_buf;
+ u8 *buf = stats_req->buf;
+ u32 len = stats_req->buf_len;
+ u32 buf_len = ATH11K_HTT_STATS_BUF_SIZE;
+
+ len += scnprintf(buf + len, buf_len - len,
+ "HTT_TX_PDEV_STATS_SCHED_PER_TXQ_TLV:\n");
+ len += scnprintf(buf + len, buf_len - len, "mac_id = %lu\n",
+ FIELD_GET(HTT_TX_PDEV_STATS_SCHED_PER_TXQ_MAC_ID,
+ htt_stats_buf->mac_id__txq_id__word));
+ len += scnprintf(buf + len, buf_len - len, "txq_id = %lu\n",
+ FIELD_GET(HTT_TX_PDEV_STATS_SCHED_PER_TXQ_ID,
+ htt_stats_buf->mac_id__txq_id__word));
+ len += scnprintf(buf + len, buf_len - len, "sched_policy = %u\n",
+ htt_stats_buf->sched_policy);
+ len += scnprintf(buf + len, buf_len - len,
+ "last_sched_cmd_posted_timestamp = %u\n",
+ htt_stats_buf->last_sched_cmd_posted_timestamp);
+ len += scnprintf(buf + len, buf_len - len,
+ "last_sched_cmd_compl_timestamp = %u\n",
+ htt_stats_buf->last_sched_cmd_compl_timestamp);
+ len += scnprintf(buf + len, buf_len - len, "sched_2_tac_lwm_count = %u\n",
+ htt_stats_buf->sched_2_tac_lwm_count);
+ len += scnprintf(buf + len, buf_len - len, "sched_2_tac_ring_full = %u\n",
+ htt_stats_buf->sched_2_tac_ring_full);
+ len += scnprintf(buf + len, buf_len - len, "sched_cmd_post_failure = %u\n",
+ htt_stats_buf->sched_cmd_post_failure);
+ len += scnprintf(buf + len, buf_len - len, "num_active_tids = %u\n",
+ htt_stats_buf->num_active_tids);
+ len += scnprintf(buf + len, buf_len - len, "num_ps_schedules = %u\n",
+ htt_stats_buf->num_ps_schedules);
+ len += scnprintf(buf + len, buf_len - len, "sched_cmds_pending = %u\n",
+ htt_stats_buf->sched_cmds_pending);
+ len += scnprintf(buf + len, buf_len - len, "num_tid_register = %u\n",
+ htt_stats_buf->num_tid_register);
+ len += scnprintf(buf + len, buf_len - len, "num_tid_unregister = %u\n",
+ htt_stats_buf->num_tid_unregister);
+ len += scnprintf(buf + len, buf_len - len, "num_qstats_queried = %u\n",
+ htt_stats_buf->num_qstats_queried);
+ len += scnprintf(buf + len, buf_len - len, "qstats_update_pending = %u\n",
+ htt_stats_buf->qstats_update_pending);
+ len += scnprintf(buf + len, buf_len - len, "last_qstats_query_timestamp = %u\n",
+ htt_stats_buf->last_qstats_query_timestamp);
+ len += scnprintf(buf + len, buf_len - len, "num_tqm_cmdq_full = %u\n",
+ htt_stats_buf->num_tqm_cmdq_full);
+ len += scnprintf(buf + len, buf_len - len, "num_de_sched_algo_trigger = %u\n",
+ htt_stats_buf->num_de_sched_algo_trigger);
+ len += scnprintf(buf + len, buf_len - len, "num_rt_sched_algo_trigger = %u\n",
+ htt_stats_buf->num_rt_sched_algo_trigger);
+ len += scnprintf(buf + len, buf_len - len, "num_tqm_sched_algo_trigger = %u\n",
+ htt_stats_buf->num_tqm_sched_algo_trigger);
+	len += scnprintf(buf + len, buf_len - len, "notify_sched = %u\n",
+ htt_stats_buf->notify_sched);
+ len += scnprintf(buf + len, buf_len - len, "dur_based_sendn_term = %u\n\n",
+ htt_stats_buf->dur_based_sendn_term);
+
+ if (len >= buf_len)
+ buf[buf_len - 1] = 0;
+ else
+ buf[len] = 0;
+
+ stats_req->buf_len = len;
+}
+
+static inline void htt_print_stats_tx_sched_cmn_tlv(const void *tag_buf,
+ struct debug_htt_stats_req *stats_req)
+{
+ const struct htt_stats_tx_sched_cmn_tlv *htt_stats_buf = tag_buf;
+ u8 *buf = stats_req->buf;
+ u32 len = stats_req->buf_len;
+ u32 buf_len = ATH11K_HTT_STATS_BUF_SIZE;
+
+ len += scnprintf(buf + len, buf_len - len, "HTT_STATS_TX_SCHED_CMN_TLV:\n");
+ len += scnprintf(buf + len, buf_len - len, "mac_id = %lu\n",
+ FIELD_GET(HTT_STATS_MAC_ID, htt_stats_buf->mac_id__word));
+ len += scnprintf(buf + len, buf_len - len, "current_timestamp = %u\n\n",
+ htt_stats_buf->current_timestamp);
+
+ if (len >= buf_len)
+ buf[buf_len - 1] = 0;
+ else
+ buf[len] = 0;
+
+ stats_req->buf_len = len;
+}
+
+static inline void
+htt_print_tx_tqm_gen_mpdu_stats_tlv_v(const void *tag_buf,
+ u16 tag_len,
+ struct debug_htt_stats_req *stats_req)
+{
+ const struct htt_tx_tqm_gen_mpdu_stats_tlv_v *htt_stats_buf = tag_buf;
+ u8 *buf = stats_req->buf;
+ u32 len = stats_req->buf_len;
+ u32 buf_len = ATH11K_HTT_STATS_BUF_SIZE;
+ u16 num_elements = min_t(u16, (tag_len >> 2),
+ HTT_TX_TQM_MAX_LIST_MPDU_END_REASON);
+
+ len += scnprintf(buf + len, buf_len - len, "HTT_TX_TQM_GEN_MPDU_STATS_TLV_V:\n");
+
+ PRINT_ARRAY_TO_BUF(buf, len, htt_stats_buf->gen_mpdu_end_reason,
+ "gen_mpdu_end_reason", num_elements, "\n\n");
+
+ if (len >= buf_len)
+ buf[buf_len - 1] = 0;
+ else
+ buf[len] = 0;
+
+ stats_req->buf_len = len;
+}
+
+static inline void
+htt_print_tx_tqm_list_mpdu_stats_tlv_v(const void *tag_buf,
+ u16 tag_len,
+ struct debug_htt_stats_req *stats_req)
+{
+ const struct htt_tx_tqm_list_mpdu_stats_tlv_v *htt_stats_buf = tag_buf;
+ u8 *buf = stats_req->buf;
+ u32 len = stats_req->buf_len;
+ u32 buf_len = ATH11K_HTT_STATS_BUF_SIZE;
+ u16 num_elems = min_t(u16, (tag_len >> 2), HTT_TX_TQM_MAX_LIST_MPDU_END_REASON);
+
+ len += scnprintf(buf + len, buf_len - len,
+ "HTT_TX_TQM_LIST_MPDU_STATS_TLV_V:\n");
+
+ PRINT_ARRAY_TO_BUF(buf, len, htt_stats_buf->list_mpdu_end_reason,
+ "list_mpdu_end_reason", num_elems, "\n\n");
+
+ if (len >= buf_len)
+ buf[buf_len - 1] = 0;
+ else
+ buf[len] = 0;
+
+ stats_req->buf_len = len;
+}
+
+static inline void
+htt_print_tx_tqm_list_mpdu_cnt_tlv_v(const void *tag_buf,
+ u16 tag_len,
+ struct debug_htt_stats_req *stats_req)
+{
+ const struct htt_tx_tqm_list_mpdu_cnt_tlv_v *htt_stats_buf = tag_buf;
+ u8 *buf = stats_req->buf;
+ u32 len = stats_req->buf_len;
+ u32 buf_len = ATH11K_HTT_STATS_BUF_SIZE;
+ u16 num_elems = min_t(u16, (tag_len >> 2),
+ HTT_TX_TQM_MAX_LIST_MPDU_CNT_HISTOGRAM_BINS);
+
+ len += scnprintf(buf + len, buf_len - len, "HTT_TX_TQM_LIST_MPDU_CNT_TLV_V:\n");
+
+ PRINT_ARRAY_TO_BUF(buf, len, htt_stats_buf->list_mpdu_cnt_hist,
+ "list_mpdu_cnt_hist", num_elems, "\n\n");
+
+ if (len >= buf_len)
+ buf[buf_len - 1] = 0;
+ else
+ buf[len] = 0;
+
+ stats_req->buf_len = len;
+}
+
+static inline void
+htt_print_tx_tqm_pdev_stats_tlv_v(const void *tag_buf,
+ struct debug_htt_stats_req *stats_req)
+{
+ const struct htt_tx_tqm_pdev_stats_tlv_v *htt_stats_buf = tag_buf;
+ u8 *buf = stats_req->buf;
+ u32 len = stats_req->buf_len;
+ u32 buf_len = ATH11K_HTT_STATS_BUF_SIZE;
+
+ len += scnprintf(buf + len, buf_len - len, "HTT_TX_TQM_PDEV_STATS_TLV_V:\n");
+ len += scnprintf(buf + len, buf_len - len, "msdu_count = %u\n",
+ htt_stats_buf->msdu_count);
+ len += scnprintf(buf + len, buf_len - len, "mpdu_count = %u\n",
+ htt_stats_buf->mpdu_count);
+ len += scnprintf(buf + len, buf_len - len, "remove_msdu = %u\n",
+ htt_stats_buf->remove_msdu);
+ len += scnprintf(buf + len, buf_len - len, "remove_mpdu = %u\n",
+ htt_stats_buf->remove_mpdu);
+ len += scnprintf(buf + len, buf_len - len, "remove_msdu_ttl = %u\n",
+ htt_stats_buf->remove_msdu_ttl);
+ len += scnprintf(buf + len, buf_len - len, "send_bar = %u\n",
+ htt_stats_buf->send_bar);
+ len += scnprintf(buf + len, buf_len - len, "bar_sync = %u\n",
+ htt_stats_buf->bar_sync);
+ len += scnprintf(buf + len, buf_len - len, "notify_mpdu = %u\n",
+ htt_stats_buf->notify_mpdu);
+ len += scnprintf(buf + len, buf_len - len, "sync_cmd = %u\n",
+ htt_stats_buf->sync_cmd);
+ len += scnprintf(buf + len, buf_len - len, "write_cmd = %u\n",
+ htt_stats_buf->write_cmd);
+ len += scnprintf(buf + len, buf_len - len, "hwsch_trigger = %u\n",
+ htt_stats_buf->hwsch_trigger);
+ len += scnprintf(buf + len, buf_len - len, "ack_tlv_proc = %u\n",
+ htt_stats_buf->ack_tlv_proc);
+ len += scnprintf(buf + len, buf_len - len, "gen_mpdu_cmd = %u\n",
+ htt_stats_buf->gen_mpdu_cmd);
+ len += scnprintf(buf + len, buf_len - len, "gen_list_cmd = %u\n",
+ htt_stats_buf->gen_list_cmd);
+ len += scnprintf(buf + len, buf_len - len, "remove_mpdu_cmd = %u\n",
+ htt_stats_buf->remove_mpdu_cmd);
+ len += scnprintf(buf + len, buf_len - len, "remove_mpdu_tried_cmd = %u\n",
+ htt_stats_buf->remove_mpdu_tried_cmd);
+ len += scnprintf(buf + len, buf_len - len, "mpdu_queue_stats_cmd = %u\n",
+ htt_stats_buf->mpdu_queue_stats_cmd);
+ len += scnprintf(buf + len, buf_len - len, "mpdu_head_info_cmd = %u\n",
+ htt_stats_buf->mpdu_head_info_cmd);
+ len += scnprintf(buf + len, buf_len - len, "msdu_flow_stats_cmd = %u\n",
+ htt_stats_buf->msdu_flow_stats_cmd);
+ len += scnprintf(buf + len, buf_len - len, "remove_msdu_cmd = %u\n",
+ htt_stats_buf->remove_msdu_cmd);
+ len += scnprintf(buf + len, buf_len - len, "remove_msdu_ttl_cmd = %u\n",
+ htt_stats_buf->remove_msdu_ttl_cmd);
+ len += scnprintf(buf + len, buf_len - len, "flush_cache_cmd = %u\n",
+ htt_stats_buf->flush_cache_cmd);
+ len += scnprintf(buf + len, buf_len - len, "update_mpduq_cmd = %u\n",
+ htt_stats_buf->update_mpduq_cmd);
+ len += scnprintf(buf + len, buf_len - len, "enqueue = %u\n",
+ htt_stats_buf->enqueue);
+ len += scnprintf(buf + len, buf_len - len, "enqueue_notify = %u\n",
+ htt_stats_buf->enqueue_notify);
+ len += scnprintf(buf + len, buf_len - len, "notify_mpdu_at_head = %u\n",
+ htt_stats_buf->notify_mpdu_at_head);
+ len += scnprintf(buf + len, buf_len - len, "notify_mpdu_state_valid = %u\n",
+ htt_stats_buf->notify_mpdu_state_valid);
+ len += scnprintf(buf + len, buf_len - len, "sched_udp_notify1 = %u\n",
+ htt_stats_buf->sched_udp_notify1);
+ len += scnprintf(buf + len, buf_len - len, "sched_udp_notify2 = %u\n",
+ htt_stats_buf->sched_udp_notify2);
+ len += scnprintf(buf + len, buf_len - len, "sched_nonudp_notify1 = %u\n",
+ htt_stats_buf->sched_nonudp_notify1);
+ len += scnprintf(buf + len, buf_len - len, "sched_nonudp_notify2 = %u\n\n",
+ htt_stats_buf->sched_nonudp_notify2);
+
+ if (len >= buf_len)
+ buf[buf_len - 1] = 0;
+ else
+ buf[len] = 0;
+
+ stats_req->buf_len = len;
+}
+
+static inline void htt_print_tx_tqm_cmn_stats_tlv(const void *tag_buf,
+ struct debug_htt_stats_req *stats_req)
+{
+ const struct htt_tx_tqm_cmn_stats_tlv *htt_stats_buf = tag_buf;
+ u8 *buf = stats_req->buf;
+ u32 len = stats_req->buf_len;
+ u32 buf_len = ATH11K_HTT_STATS_BUF_SIZE;
+
+ len += scnprintf(buf + len, buf_len - len, "HTT_TX_TQM_CMN_STATS_TLV:\n");
+ len += scnprintf(buf + len, buf_len - len, "mac_id = %lu\n",
+ FIELD_GET(HTT_STATS_MAC_ID, htt_stats_buf->mac_id__word));
+ len += scnprintf(buf + len, buf_len - len, "max_cmdq_id = %u\n",
+ htt_stats_buf->max_cmdq_id);
+ len += scnprintf(buf + len, buf_len - len, "list_mpdu_cnt_hist_intvl = %u\n",
+ htt_stats_buf->list_mpdu_cnt_hist_intvl);
+ len += scnprintf(buf + len, buf_len - len, "add_msdu = %u\n",
+ htt_stats_buf->add_msdu);
+ len += scnprintf(buf + len, buf_len - len, "q_empty = %u\n",
+ htt_stats_buf->q_empty);
+ len += scnprintf(buf + len, buf_len - len, "q_not_empty = %u\n",
+ htt_stats_buf->q_not_empty);
+ len += scnprintf(buf + len, buf_len - len, "drop_notification = %u\n",
+ htt_stats_buf->drop_notification);
+ len += scnprintf(buf + len, buf_len - len, "desc_threshold = %u\n\n",
+ htt_stats_buf->desc_threshold);
+
+ if (len >= buf_len)
+ buf[buf_len - 1] = 0;
+ else
+ buf[len] = 0;
+
+ stats_req->buf_len = len;
+}
+
+static inline void htt_print_tx_tqm_error_stats_tlv(const void *tag_buf,
+ struct debug_htt_stats_req *stats_req)
+{
+ const struct htt_tx_tqm_error_stats_tlv *htt_stats_buf = tag_buf;
+ u8 *buf = stats_req->buf;
+ u32 len = stats_req->buf_len;
+ u32 buf_len = ATH11K_HTT_STATS_BUF_SIZE;
+
+ len += scnprintf(buf + len, buf_len - len, "HTT_TX_TQM_ERROR_STATS_TLV:\n");
+ len += scnprintf(buf + len, buf_len - len, "q_empty_failure = %u\n",
+ htt_stats_buf->q_empty_failure);
+ len += scnprintf(buf + len, buf_len - len, "q_not_empty_failure = %u\n",
+ htt_stats_buf->q_not_empty_failure);
+ len += scnprintf(buf + len, buf_len - len, "add_msdu_failure = %u\n\n",
+ htt_stats_buf->add_msdu_failure);
+
+ if (len >= buf_len)
+ buf[buf_len - 1] = 0;
+ else
+ buf[len] = 0;
+
+ stats_req->buf_len = len;
+}
+
+static inline void htt_print_tx_tqm_cmdq_status_tlv(const void *tag_buf,
+ struct debug_htt_stats_req *stats_req)
+{
+ const struct htt_tx_tqm_cmdq_status_tlv *htt_stats_buf = tag_buf;
+ u8 *buf = stats_req->buf;
+ u32 len = stats_req->buf_len;
+ u32 buf_len = ATH11K_HTT_STATS_BUF_SIZE;
+
+ len += scnprintf(buf + len, buf_len - len, "HTT_TX_TQM_CMDQ_STATUS_TLV:\n");
+ len += scnprintf(buf + len, buf_len - len, "mac_id = %lu\n",
+ FIELD_GET(HTT_TX_TQM_CMDQ_STATUS_MAC_ID,
+ htt_stats_buf->mac_id__cmdq_id__word));
+	len += scnprintf(buf + len, buf_len - len, "cmdq_id = %lu\n",
+ FIELD_GET(HTT_TX_TQM_CMDQ_STATUS_CMDQ_ID,
+ htt_stats_buf->mac_id__cmdq_id__word));
+ len += scnprintf(buf + len, buf_len - len, "sync_cmd = %u\n",
+ htt_stats_buf->sync_cmd);
+ len += scnprintf(buf + len, buf_len - len, "write_cmd = %u\n",
+ htt_stats_buf->write_cmd);
+ len += scnprintf(buf + len, buf_len - len, "gen_mpdu_cmd = %u\n",
+ htt_stats_buf->gen_mpdu_cmd);
+ len += scnprintf(buf + len, buf_len - len, "mpdu_queue_stats_cmd = %u\n",
+ htt_stats_buf->mpdu_queue_stats_cmd);
+ len += scnprintf(buf + len, buf_len - len, "mpdu_head_info_cmd = %u\n",
+ htt_stats_buf->mpdu_head_info_cmd);
+ len += scnprintf(buf + len, buf_len - len, "msdu_flow_stats_cmd = %u\n",
+ htt_stats_buf->msdu_flow_stats_cmd);
+ len += scnprintf(buf + len, buf_len - len, "remove_mpdu_cmd = %u\n",
+ htt_stats_buf->remove_mpdu_cmd);
+ len += scnprintf(buf + len, buf_len - len, "remove_msdu_cmd = %u\n",
+ htt_stats_buf->remove_msdu_cmd);
+ len += scnprintf(buf + len, buf_len - len, "flush_cache_cmd = %u\n",
+ htt_stats_buf->flush_cache_cmd);
+ len += scnprintf(buf + len, buf_len - len, "update_mpduq_cmd = %u\n",
+ htt_stats_buf->update_mpduq_cmd);
+ len += scnprintf(buf + len, buf_len - len, "update_msduq_cmd = %u\n\n",
+ htt_stats_buf->update_msduq_cmd);
+
+ if (len >= buf_len)
+ buf[buf_len - 1] = 0;
+ else
+ buf[len] = 0;
+
+ stats_req->buf_len = len;
+}
+
+static inline void
+htt_print_tx_de_eapol_packets_stats_tlv(const void *tag_buf,
+ struct debug_htt_stats_req *stats_req)
+{
+ const struct htt_tx_de_eapol_packets_stats_tlv *htt_stats_buf = tag_buf;
+ u8 *buf = stats_req->buf;
+ u32 len = stats_req->buf_len;
+ u32 buf_len = ATH11K_HTT_STATS_BUF_SIZE;
+
+ len += scnprintf(buf + len, buf_len - len,
+ "HTT_TX_DE_EAPOL_PACKETS_STATS_TLV:\n");
+ len += scnprintf(buf + len, buf_len - len, "m1_packets = %u\n",
+ htt_stats_buf->m1_packets);
+ len += scnprintf(buf + len, buf_len - len, "m2_packets = %u\n",
+ htt_stats_buf->m2_packets);
+ len += scnprintf(buf + len, buf_len - len, "m3_packets = %u\n",
+ htt_stats_buf->m3_packets);
+ len += scnprintf(buf + len, buf_len - len, "m4_packets = %u\n",
+ htt_stats_buf->m4_packets);
+ len += scnprintf(buf + len, buf_len - len, "g1_packets = %u\n",
+ htt_stats_buf->g1_packets);
+ len += scnprintf(buf + len, buf_len - len, "g2_packets = %u\n\n",
+ htt_stats_buf->g2_packets);
+
+ if (len >= buf_len)
+ buf[buf_len - 1] = 0;
+ else
+ buf[len] = 0;
+
+ stats_req->buf_len = len;
+}
+
+static inline void
+htt_print_tx_de_classify_failed_stats_tlv(const void *tag_buf,
+ struct debug_htt_stats_req *stats_req)
+{
+ const struct htt_tx_de_classify_failed_stats_tlv *htt_stats_buf = tag_buf;
+ u8 *buf = stats_req->buf;
+ u32 len = stats_req->buf_len;
+ u32 buf_len = ATH11K_HTT_STATS_BUF_SIZE;
+
+ len += scnprintf(buf + len, buf_len - len,
+ "HTT_TX_DE_CLASSIFY_FAILED_STATS_TLV:\n");
+ len += scnprintf(buf + len, buf_len - len, "ap_bss_peer_not_found = %u\n",
+ htt_stats_buf->ap_bss_peer_not_found);
+ len += scnprintf(buf + len, buf_len - len, "ap_bcast_mcast_no_peer = %u\n",
+ htt_stats_buf->ap_bcast_mcast_no_peer);
+ len += scnprintf(buf + len, buf_len - len, "sta_delete_in_progress = %u\n",
+ htt_stats_buf->sta_delete_in_progress);
+ len += scnprintf(buf + len, buf_len - len, "ibss_no_bss_peer = %u\n",
+ htt_stats_buf->ibss_no_bss_peer);
+ len += scnprintf(buf + len, buf_len - len, "invalid_vdev_type = %u\n",
+ htt_stats_buf->invalid_vdev_type);
+ len += scnprintf(buf + len, buf_len - len, "invalid_ast_peer_entry = %u\n",
+ htt_stats_buf->invalid_ast_peer_entry);
+ len += scnprintf(buf + len, buf_len - len, "peer_entry_invalid = %u\n",
+ htt_stats_buf->peer_entry_invalid);
+ len += scnprintf(buf + len, buf_len - len, "ethertype_not_ip = %u\n",
+ htt_stats_buf->ethertype_not_ip);
+ len += scnprintf(buf + len, buf_len - len, "eapol_lookup_failed = %u\n",
+ htt_stats_buf->eapol_lookup_failed);
+ len += scnprintf(buf + len, buf_len - len, "qpeer_not_allow_data = %u\n",
+ htt_stats_buf->qpeer_not_allow_data);
+ len += scnprintf(buf + len, buf_len - len, "fse_tid_override = %u\n",
+ htt_stats_buf->fse_tid_override);
+ len += scnprintf(buf + len, buf_len - len, "ipv6_jumbogram_zero_length = %u\n",
+ htt_stats_buf->ipv6_jumbogram_zero_length);
+ len += scnprintf(buf + len, buf_len - len, "qos_to_non_qos_in_prog = %u\n\n",
+ htt_stats_buf->qos_to_non_qos_in_prog);
+
+ if (len >= buf_len)
+ buf[buf_len - 1] = 0;
+ else
+ buf[len] = 0;
+
+ stats_req->buf_len = len;
+}
+
+static inline void
+htt_print_tx_de_classify_stats_tlv(const void *tag_buf,
+ struct debug_htt_stats_req *stats_req)
+{
+ const struct htt_tx_de_classify_stats_tlv *htt_stats_buf = tag_buf;
+ u8 *buf = stats_req->buf;
+ u32 len = stats_req->buf_len;
+ u32 buf_len = ATH11K_HTT_STATS_BUF_SIZE;
+
+ len += scnprintf(buf + len, buf_len - len, "HTT_TX_DE_CLASSIFY_STATS_TLV:\n");
+ len += scnprintf(buf + len, buf_len - len, "arp_packets = %u\n",
+ htt_stats_buf->arp_packets);
+ len += scnprintf(buf + len, buf_len - len, "igmp_packets = %u\n",
+ htt_stats_buf->igmp_packets);
+ len += scnprintf(buf + len, buf_len - len, "dhcp_packets = %u\n",
+ htt_stats_buf->dhcp_packets);
+ len += scnprintf(buf + len, buf_len - len, "host_inspected = %u\n",
+ htt_stats_buf->host_inspected);
+ len += scnprintf(buf + len, buf_len - len, "htt_included = %u\n",
+ htt_stats_buf->htt_included);
+ len += scnprintf(buf + len, buf_len - len, "htt_valid_mcs = %u\n",
+ htt_stats_buf->htt_valid_mcs);
+ len += scnprintf(buf + len, buf_len - len, "htt_valid_nss = %u\n",
+ htt_stats_buf->htt_valid_nss);
+ len += scnprintf(buf + len, buf_len - len, "htt_valid_preamble_type = %u\n",
+ htt_stats_buf->htt_valid_preamble_type);
+ len += scnprintf(buf + len, buf_len - len, "htt_valid_chainmask = %u\n",
+ htt_stats_buf->htt_valid_chainmask);
+ len += scnprintf(buf + len, buf_len - len, "htt_valid_guard_interval = %u\n",
+ htt_stats_buf->htt_valid_guard_interval);
+ len += scnprintf(buf + len, buf_len - len, "htt_valid_retries = %u\n",
+ htt_stats_buf->htt_valid_retries);
+ len += scnprintf(buf + len, buf_len - len, "htt_valid_bw_info = %u\n",
+ htt_stats_buf->htt_valid_bw_info);
+ len += scnprintf(buf + len, buf_len - len, "htt_valid_power = %u\n",
+ htt_stats_buf->htt_valid_power);
+ len += scnprintf(buf + len, buf_len - len, "htt_valid_key_flags = 0x%x\n",
+ htt_stats_buf->htt_valid_key_flags);
+ len += scnprintf(buf + len, buf_len - len, "htt_valid_no_encryption = %u\n",
+ htt_stats_buf->htt_valid_no_encryption);
+ len += scnprintf(buf + len, buf_len - len, "fse_entry_count = %u\n",
+ htt_stats_buf->fse_entry_count);
+ len += scnprintf(buf + len, buf_len - len, "fse_priority_be = %u\n",
+ htt_stats_buf->fse_priority_be);
+ len += scnprintf(buf + len, buf_len - len, "fse_priority_high = %u\n",
+ htt_stats_buf->fse_priority_high);
+ len += scnprintf(buf + len, buf_len - len, "fse_priority_low = %u\n",
+ htt_stats_buf->fse_priority_low);
+ len += scnprintf(buf + len, buf_len - len, "fse_traffic_ptrn_be = %u\n",
+ htt_stats_buf->fse_traffic_ptrn_be);
+ len += scnprintf(buf + len, buf_len - len, "fse_traffic_ptrn_over_sub = %u\n",
+ htt_stats_buf->fse_traffic_ptrn_over_sub);
+ len += scnprintf(buf + len, buf_len - len, "fse_traffic_ptrn_bursty = %u\n",
+ htt_stats_buf->fse_traffic_ptrn_bursty);
+ len += scnprintf(buf + len, buf_len - len, "fse_traffic_ptrn_interactive = %u\n",
+ htt_stats_buf->fse_traffic_ptrn_interactive);
+ len += scnprintf(buf + len, buf_len - len, "fse_traffic_ptrn_periodic = %u\n",
+ htt_stats_buf->fse_traffic_ptrn_periodic);
+ len += scnprintf(buf + len, buf_len - len, "fse_hwqueue_alloc = %u\n",
+ htt_stats_buf->fse_hwqueue_alloc);
+ len += scnprintf(buf + len, buf_len - len, "fse_hwqueue_created = %u\n",
+ htt_stats_buf->fse_hwqueue_created);
+ len += scnprintf(buf + len, buf_len - len, "fse_hwqueue_send_to_host = %u\n",
+ htt_stats_buf->fse_hwqueue_send_to_host);
+ len += scnprintf(buf + len, buf_len - len, "mcast_entry = %u\n",
+ htt_stats_buf->mcast_entry);
+ len += scnprintf(buf + len, buf_len - len, "bcast_entry = %u\n",
+ htt_stats_buf->bcast_entry);
+ len += scnprintf(buf + len, buf_len - len, "htt_update_peer_cache = %u\n",
+ htt_stats_buf->htt_update_peer_cache);
+ len += scnprintf(buf + len, buf_len - len, "htt_learning_frame = %u\n",
+ htt_stats_buf->htt_learning_frame);
+ len += scnprintf(buf + len, buf_len - len, "fse_invalid_peer = %u\n",
+ htt_stats_buf->fse_invalid_peer);
+ len += scnprintf(buf + len, buf_len - len, "mec_notify = %u\n\n",
+ htt_stats_buf->mec_notify);
+
+ if (len >= buf_len)
+ buf[buf_len - 1] = 0;
+ else
+ buf[len] = 0;
+
+ stats_req->buf_len = len;
+}
+
+static inline void
+htt_print_tx_de_classify_status_stats_tlv(const void *tag_buf,
+ struct debug_htt_stats_req *stats_req)
+{
+ const struct htt_tx_de_classify_status_stats_tlv *htt_stats_buf = tag_buf;
+ u8 *buf = stats_req->buf;
+ u32 len = stats_req->buf_len;
+ u32 buf_len = ATH11K_HTT_STATS_BUF_SIZE;
+
+ len += scnprintf(buf + len, buf_len - len,
+ "HTT_TX_DE_CLASSIFY_STATUS_STATS_TLV:\n");
+ len += scnprintf(buf + len, buf_len - len, "eok = %u\n",
+ htt_stats_buf->eok);
+ len += scnprintf(buf + len, buf_len - len, "classify_done = %u\n",
+ htt_stats_buf->classify_done);
+ len += scnprintf(buf + len, buf_len - len, "lookup_failed = %u\n",
+ htt_stats_buf->lookup_failed);
+ len += scnprintf(buf + len, buf_len - len, "send_host_dhcp = %u\n",
+ htt_stats_buf->send_host_dhcp);
+ len += scnprintf(buf + len, buf_len - len, "send_host_mcast = %u\n",
+ htt_stats_buf->send_host_mcast);
+ len += scnprintf(buf + len, buf_len - len, "send_host_unknown_dest = %u\n",
+ htt_stats_buf->send_host_unknown_dest);
+ len += scnprintf(buf + len, buf_len - len, "send_host = %u\n",
+ htt_stats_buf->send_host);
+ len += scnprintf(buf + len, buf_len - len, "status_invalid = %u\n\n",
+ htt_stats_buf->status_invalid);
+
+ if (len >= buf_len)
+ buf[buf_len - 1] = 0;
+ else
+ buf[len] = 0;
+
+ stats_req->buf_len = len;
+}
+
+static inline void
+htt_print_tx_de_enqueue_packets_stats_tlv(const void *tag_buf,
+ struct debug_htt_stats_req *stats_req)
+{
+ const struct htt_tx_de_enqueue_packets_stats_tlv *htt_stats_buf = tag_buf;
+ u8 *buf = stats_req->buf;
+ u32 len = stats_req->buf_len;
+ u32 buf_len = ATH11K_HTT_STATS_BUF_SIZE;
+
+ len += scnprintf(buf + len, buf_len - len,
+ "HTT_TX_DE_ENQUEUE_PACKETS_STATS_TLV:\n");
+ len += scnprintf(buf + len, buf_len - len, "enqueued_pkts = %u\n",
+ htt_stats_buf->enqueued_pkts);
+ len += scnprintf(buf + len, buf_len - len, "to_tqm = %u\n",
+ htt_stats_buf->to_tqm);
+ len += scnprintf(buf + len, buf_len - len, "to_tqm_bypass = %u\n\n",
+ htt_stats_buf->to_tqm_bypass);
+
+ if (len >= buf_len)
+ buf[buf_len - 1] = 0;
+ else
+ buf[len] = 0;
+
+ stats_req->buf_len = len;
+}
+
+static inline void
+htt_print_tx_de_enqueue_discard_stats_tlv(const void *tag_buf,
+ struct debug_htt_stats_req *stats_req)
+{
+ const struct htt_tx_de_enqueue_discard_stats_tlv *htt_stats_buf = tag_buf;
+ u8 *buf = stats_req->buf;
+ u32 len = stats_req->buf_len;
+ u32 buf_len = ATH11K_HTT_STATS_BUF_SIZE;
+
+ len += scnprintf(buf + len, buf_len - len,
+ "HTT_TX_DE_ENQUEUE_DISCARD_STATS_TLV:\n");
+ len += scnprintf(buf + len, buf_len - len, "discarded_pkts = %u\n",
+ htt_stats_buf->discarded_pkts);
+ len += scnprintf(buf + len, buf_len - len, "local_frames = %u\n",
+ htt_stats_buf->local_frames);
+ len += scnprintf(buf + len, buf_len - len, "is_ext_msdu = %u\n\n",
+ htt_stats_buf->is_ext_msdu);
+
+ if (len >= buf_len)
+ buf[buf_len - 1] = 0;
+ else
+ buf[len] = 0;
+
+ stats_req->buf_len = len;
+}
+
+static inline void htt_print_tx_de_compl_stats_tlv(const void *tag_buf,
+ struct debug_htt_stats_req *stats_req)
+{
+ const struct htt_tx_de_compl_stats_tlv *htt_stats_buf = tag_buf;
+ u8 *buf = stats_req->buf;
+ u32 len = stats_req->buf_len;
+ u32 buf_len = ATH11K_HTT_STATS_BUF_SIZE;
+
+ len += scnprintf(buf + len, buf_len - len, "HTT_TX_DE_COMPL_STATS_TLV:\n");
+ len += scnprintf(buf + len, buf_len - len, "tcl_dummy_frame = %u\n",
+ htt_stats_buf->tcl_dummy_frame);
+ len += scnprintf(buf + len, buf_len - len, "tqm_dummy_frame = %u\n",
+ htt_stats_buf->tqm_dummy_frame);
+ len += scnprintf(buf + len, buf_len - len, "tqm_notify_frame = %u\n",
+ htt_stats_buf->tqm_notify_frame);
+ len += scnprintf(buf + len, buf_len - len, "fw2wbm_enq = %u\n",
+ htt_stats_buf->fw2wbm_enq);
+ len += scnprintf(buf + len, buf_len - len, "tqm_bypass_frame = %u\n\n",
+ htt_stats_buf->tqm_bypass_frame);
+
+ if (len >= buf_len)
+ buf[buf_len - 1] = 0;
+ else
+ buf[len] = 0;
+
+ stats_req->buf_len = len;
+}
+
+static inline void
+htt_print_tx_de_fw2wbm_ring_full_hist_tlv(const void *tag_buf,
+ u16 tag_len,
+ struct debug_htt_stats_req *stats_req)
+{
+ const struct htt_tx_de_fw2wbm_ring_full_hist_tlv *htt_stats_buf = tag_buf;
+ u8 *buf = stats_req->buf;
+ u32 len = stats_req->buf_len;
+ u32 buf_len = ATH11K_HTT_STATS_BUF_SIZE;
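+	/* each entry is u32, i.e. 4 bytes */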
+ u16 num_elements = tag_len >> 2;
+
+	len += scnprintf(buf + len, buf_len - len,
+			 "HTT_TX_DE_FW2WBM_RING_FULL_HIST_TLV:\n");
+
+ PRINT_ARRAY_TO_BUF(buf, len, htt_stats_buf->fw2wbm_ring_full_hist,
+ "fw2wbm_ring_full_hist", num_elements, "\n\n");
+
+ if (len >= buf_len)
+ buf[buf_len - 1] = 0;
+ else
+ buf[len] = 0;
+
+ stats_req->buf_len = len;
+}
+
+static inline void
+htt_print_tx_de_cmn_stats_tlv(const void *tag_buf, struct debug_htt_stats_req *stats_req)
+{
+ const struct htt_tx_de_cmn_stats_tlv *htt_stats_buf = tag_buf;
+ u8 *buf = stats_req->buf;
+ u32 len = stats_req->buf_len;
+ u32 buf_len = ATH11K_HTT_STATS_BUF_SIZE;
+
+ len += scnprintf(buf + len, buf_len - len, "HTT_TX_DE_CMN_STATS_TLV:\n");
+ len += scnprintf(buf + len, buf_len - len, "mac_id = %lu\n",
+ FIELD_GET(HTT_STATS_MAC_ID, htt_stats_buf->mac_id__word));
+ len += scnprintf(buf + len, buf_len - len, "tcl2fw_entry_count = %u\n",
+ htt_stats_buf->tcl2fw_entry_count);
+ len += scnprintf(buf + len, buf_len - len, "not_to_fw = %u\n",
+ htt_stats_buf->not_to_fw);
+ len += scnprintf(buf + len, buf_len - len, "invalid_pdev_vdev_peer = %u\n",
+ htt_stats_buf->invalid_pdev_vdev_peer);
+ len += scnprintf(buf + len, buf_len - len, "tcl_res_invalid_addrx = %u\n",
+ htt_stats_buf->tcl_res_invalid_addrx);
+ len += scnprintf(buf + len, buf_len - len, "wbm2fw_entry_count = %u\n",
+ htt_stats_buf->wbm2fw_entry_count);
+ len += scnprintf(buf + len, buf_len - len, "invalid_pdev = %u\n\n",
+ htt_stats_buf->invalid_pdev);
+
+ if (len >= buf_len)
+ buf[buf_len - 1] = 0;
+ else
+ buf[len] = 0;
+
+ stats_req->buf_len = len;
+}
+
+static inline void htt_print_ring_if_stats_tlv(const void *tag_buf,
+ struct debug_htt_stats_req *stats_req)
+{
+ const struct htt_ring_if_stats_tlv *htt_stats_buf = tag_buf;
+ u8 *buf = stats_req->buf;
+ u32 len = stats_req->buf_len;
+ u32 buf_len = ATH11K_HTT_STATS_BUF_SIZE;
+
+ len += scnprintf(buf + len, buf_len - len, "HTT_RING_IF_STATS_TLV:\n");
+ len += scnprintf(buf + len, buf_len - len, "base_addr = %u\n",
+ htt_stats_buf->base_addr);
+ len += scnprintf(buf + len, buf_len - len, "elem_size = %u\n",
+ htt_stats_buf->elem_size);
+ len += scnprintf(buf + len, buf_len - len, "num_elems = %lu\n",
+ FIELD_GET(HTT_RING_IF_STATS_NUM_ELEMS,
+ htt_stats_buf->num_elems__prefetch_tail_idx));
+ len += scnprintf(buf + len, buf_len - len, "prefetch_tail_idx = %lu\n",
+ FIELD_GET(HTT_RING_IF_STATS_PREFETCH_TAIL_INDEX,
+ htt_stats_buf->num_elems__prefetch_tail_idx));
+ len += scnprintf(buf + len, buf_len - len, "head_idx = %lu\n",
+ FIELD_GET(HTT_RING_IF_STATS_HEAD_IDX,
+ htt_stats_buf->head_idx__tail_idx));
+ len += scnprintf(buf + len, buf_len - len, "tail_idx = %lu\n",
+ FIELD_GET(HTT_RING_IF_STATS_TAIL_IDX,
+ htt_stats_buf->head_idx__tail_idx));
+ len += scnprintf(buf + len, buf_len - len, "shadow_head_idx = %lu\n",
+ FIELD_GET(HTT_RING_IF_STATS_SHADOW_HEAD_IDX,
+ htt_stats_buf->shadow_head_idx__shadow_tail_idx));
+ len += scnprintf(buf + len, buf_len - len, "shadow_tail_idx = %lu\n",
+ FIELD_GET(HTT_RING_IF_STATS_SHADOW_TAIL_IDX,
+ htt_stats_buf->shadow_head_idx__shadow_tail_idx));
+ len += scnprintf(buf + len, buf_len - len, "num_tail_incr = %u\n",
+ htt_stats_buf->num_tail_incr);
+ len += scnprintf(buf + len, buf_len - len, "lwm_thresh = %lu\n",
+ FIELD_GET(HTT_RING_IF_STATS_LWM_THRESH,
+ htt_stats_buf->lwm_thresh__hwm_thresh));
+ len += scnprintf(buf + len, buf_len - len, "hwm_thresh = %lu\n",
+ FIELD_GET(HTT_RING_IF_STATS_HWM_THRESH,
+ htt_stats_buf->lwm_thresh__hwm_thresh));
+ len += scnprintf(buf + len, buf_len - len, "overrun_hit_count = %u\n",
+ htt_stats_buf->overrun_hit_count);
+ len += scnprintf(buf + len, buf_len - len, "underrun_hit_count = %u\n",
+ htt_stats_buf->underrun_hit_count);
+ len += scnprintf(buf + len, buf_len - len, "prod_blockwait_count = %u\n",
+ htt_stats_buf->prod_blockwait_count);
+ len += scnprintf(buf + len, buf_len - len, "cons_blockwait_count = %u\n",
+ htt_stats_buf->cons_blockwait_count);
+
+ PRINT_ARRAY_TO_BUF(buf, len, htt_stats_buf->low_wm_hit_count,
+ "low_wm_hit_count", HTT_STATS_LOW_WM_BINS, "\n");
+ PRINT_ARRAY_TO_BUF(buf, len, htt_stats_buf->high_wm_hit_count,
+ "high_wm_hit_count", HTT_STATS_HIGH_WM_BINS, "\n\n");
+
+ if (len >= buf_len)
+ buf[buf_len - 1] = 0;
+ else
+ buf[len] = 0;
+
+ stats_req->buf_len = len;
+}
+
+static inline void htt_print_ring_if_cmn_tlv(const void *tag_buf,
+ struct debug_htt_stats_req *stats_req)
+{
+ const struct htt_ring_if_cmn_tlv *htt_stats_buf = tag_buf;
+ u8 *buf = stats_req->buf;
+ u32 len = stats_req->buf_len;
+ u32 buf_len = ATH11K_HTT_STATS_BUF_SIZE;
+
+ len += scnprintf(buf + len, buf_len - len, "HTT_RING_IF_CMN_TLV:\n");
+ len += scnprintf(buf + len, buf_len - len, "mac_id = %lu\n",
+ FIELD_GET(HTT_STATS_MAC_ID, htt_stats_buf->mac_id__word));
+ len += scnprintf(buf + len, buf_len - len, "num_records = %u\n\n",
+ htt_stats_buf->num_records);
+
+ if (len >= buf_len)
+ buf[buf_len - 1] = 0;
+ else
+ buf[len] = 0;
+
+ stats_req->buf_len = len;
+}
+
+static inline void htt_print_sfm_client_user_tlv_v(const void *tag_buf,
+ u16 tag_len,
+ struct debug_htt_stats_req *stats_req)
+{
+ const struct htt_sfm_client_user_tlv_v *htt_stats_buf = tag_buf;
+ u8 *buf = stats_req->buf;
+ u32 len = stats_req->buf_len;
+ u32 buf_len = ATH11K_HTT_STATS_BUF_SIZE;
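+	/* each entry is u32, i.e. 4 bytes */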
+ u16 num_elems = tag_len >> 2;
+
+ len += scnprintf(buf + len, buf_len - len, "HTT_SFM_CLIENT_USER_TLV_V:\n");
+
+ PRINT_ARRAY_TO_BUF(buf, len, htt_stats_buf->dwords_used_by_user_n,
+ "dwords_used_by_user_n", num_elems, "\n\n");
+
+ if (len >= buf_len)
+ buf[buf_len - 1] = 0;
+ else
+ buf[len] = 0;
+
+ stats_req->buf_len = len;
+}
+
+static inline void htt_print_sfm_client_tlv(const void *tag_buf,
+ struct debug_htt_stats_req *stats_req)
+{
+ const struct htt_sfm_client_tlv *htt_stats_buf = tag_buf;
+ u8 *buf = stats_req->buf;
+ u32 len = stats_req->buf_len;
+ u32 buf_len = ATH11K_HTT_STATS_BUF_SIZE;
+
+ len += scnprintf(buf + len, buf_len - len, "HTT_SFM_CLIENT_TLV:\n");
+ len += scnprintf(buf + len, buf_len - len, "client_id = %u\n",
+ htt_stats_buf->client_id);
+ len += scnprintf(buf + len, buf_len - len, "buf_min = %u\n",
+ htt_stats_buf->buf_min);
+ len += scnprintf(buf + len, buf_len - len, "buf_max = %u\n",
+ htt_stats_buf->buf_max);
+ len += scnprintf(buf + len, buf_len - len, "buf_busy = %u\n",
+ htt_stats_buf->buf_busy);
+ len += scnprintf(buf + len, buf_len - len, "buf_alloc = %u\n",
+ htt_stats_buf->buf_alloc);
+ len += scnprintf(buf + len, buf_len - len, "buf_avail = %u\n",
+ htt_stats_buf->buf_avail);
+ len += scnprintf(buf + len, buf_len - len, "num_users = %u\n\n",
+ htt_stats_buf->num_users);
+
+ if (len >= buf_len)
+ buf[buf_len - 1] = 0;
+ else
+ buf[len] = 0;
+
+ stats_req->buf_len = len;
+}
+
+static inline void htt_print_sfm_cmn_tlv(const void *tag_buf,
+ struct debug_htt_stats_req *stats_req)
+{
+ const struct htt_sfm_cmn_tlv *htt_stats_buf = tag_buf;
+ u8 *buf = stats_req->buf;
+ u32 len = stats_req->buf_len;
+ u32 buf_len = ATH11K_HTT_STATS_BUF_SIZE;
+
+ len += scnprintf(buf + len, buf_len - len, "HTT_SFM_CMN_TLV:\n");
+ len += scnprintf(buf + len, buf_len - len, "mac_id = %lu\n",
+ FIELD_GET(HTT_STATS_MAC_ID, htt_stats_buf->mac_id__word));
+ len += scnprintf(buf + len, buf_len - len, "buf_total = %u\n",
+ htt_stats_buf->buf_total);
+ len += scnprintf(buf + len, buf_len - len, "mem_empty = %u\n",
+ htt_stats_buf->mem_empty);
+ len += scnprintf(buf + len, buf_len - len, "deallocate_bufs = %u\n",
+ htt_stats_buf->deallocate_bufs);
+ len += scnprintf(buf + len, buf_len - len, "num_records = %u\n\n",
+ htt_stats_buf->num_records);
+
+ if (len >= buf_len)
+ buf[buf_len - 1] = 0;
+ else
+ buf[len] = 0;
+
+ stats_req->buf_len = len;
+}
+
+static inline void htt_print_sring_stats_tlv(const void *tag_buf,
+ struct debug_htt_stats_req *stats_req)
+{
+ const struct htt_sring_stats_tlv *htt_stats_buf = tag_buf;
+ u8 *buf = stats_req->buf;
+ u32 len = stats_req->buf_len;
+ u32 buf_len = ATH11K_HTT_STATS_BUF_SIZE;
+
+ len += scnprintf(buf + len, buf_len - len, "HTT_SRING_STATS_TLV:\n");
+ len += scnprintf(buf + len, buf_len - len, "mac_id = %lu\n",
+ FIELD_GET(HTT_SRING_STATS_MAC_ID,
+ htt_stats_buf->mac_id__ring_id__arena__ep));
+ len += scnprintf(buf + len, buf_len - len, "ring_id = %lu\n",
+ FIELD_GET(HTT_SRING_STATS_RING_ID,
+ htt_stats_buf->mac_id__ring_id__arena__ep));
+ len += scnprintf(buf + len, buf_len - len, "arena = %lu\n",
+ FIELD_GET(HTT_SRING_STATS_ARENA,
+ htt_stats_buf->mac_id__ring_id__arena__ep));
+ len += scnprintf(buf + len, buf_len - len, "ep = %lu\n",
+ FIELD_GET(HTT_SRING_STATS_EP,
+ htt_stats_buf->mac_id__ring_id__arena__ep));
+ len += scnprintf(buf + len, buf_len - len, "base_addr_lsb = 0x%x\n",
+ htt_stats_buf->base_addr_lsb);
+ len += scnprintf(buf + len, buf_len - len, "base_addr_msb = 0x%x\n",
+ htt_stats_buf->base_addr_msb);
+ len += scnprintf(buf + len, buf_len - len, "ring_size = %u\n",
+ htt_stats_buf->ring_size);
+ len += scnprintf(buf + len, buf_len - len, "elem_size = %u\n",
+ htt_stats_buf->elem_size);
+ len += scnprintf(buf + len, buf_len - len, "num_avail_words = %lu\n",
+ FIELD_GET(HTT_SRING_STATS_NUM_AVAIL_WORDS,
+ htt_stats_buf->num_avail_words__num_valid_words));
+ len += scnprintf(buf + len, buf_len - len, "num_valid_words = %lu\n",
+ FIELD_GET(HTT_SRING_STATS_NUM_VALID_WORDS,
+ htt_stats_buf->num_avail_words__num_valid_words));
+ len += scnprintf(buf + len, buf_len - len, "head_ptr = %lu\n",
+ FIELD_GET(HTT_SRING_STATS_HEAD_PTR,
+ htt_stats_buf->head_ptr__tail_ptr));
+ len += scnprintf(buf + len, buf_len - len, "tail_ptr = %lu\n",
+ FIELD_GET(HTT_SRING_STATS_TAIL_PTR,
+ htt_stats_buf->head_ptr__tail_ptr));
+ len += scnprintf(buf + len, buf_len - len, "consumer_empty = %lu\n",
+ FIELD_GET(HTT_SRING_STATS_CONSUMER_EMPTY,
+ htt_stats_buf->consumer_empty__producer_full));
+ len += scnprintf(buf + len, buf_len - len, "producer_full = %lu\n",
+ FIELD_GET(HTT_SRING_STATS_PRODUCER_FULL,
+ htt_stats_buf->consumer_empty__producer_full));
+ len += scnprintf(buf + len, buf_len - len, "prefetch_count = %lu\n",
+ FIELD_GET(HTT_SRING_STATS_PREFETCH_COUNT,
+ htt_stats_buf->prefetch_count__internal_tail_ptr));
+ len += scnprintf(buf + len, buf_len - len, "internal_tail_ptr = %lu\n\n",
+ FIELD_GET(HTT_SRING_STATS_INTERNAL_TAIL_PTR,
+ htt_stats_buf->prefetch_count__internal_tail_ptr));
+
+ if (len >= buf_len)
+ buf[buf_len - 1] = 0;
+ else
+ buf[len] = 0;
+
+ stats_req->buf_len = len;
+}
+
+static inline void htt_print_sring_cmn_tlv(const void *tag_buf,
+ struct debug_htt_stats_req *stats_req)
+{
+ const struct htt_sring_cmn_tlv *htt_stats_buf = tag_buf;
+ u8 *buf = stats_req->buf;
+ u32 len = stats_req->buf_len;
+ u32 buf_len = ATH11K_HTT_STATS_BUF_SIZE;
+
+ len += scnprintf(buf + len, buf_len - len, "HTT_SRING_CMN_TLV:\n");
+ len += scnprintf(buf + len, buf_len - len, "num_records = %u\n\n",
+ htt_stats_buf->num_records);
+
+ if (len >= buf_len)
+ buf[buf_len - 1] = 0;
+ else
+ buf[len] = 0;
+
+ stats_req->buf_len = len;
+}
+
+static inline void htt_print_tx_pdev_rate_stats_tlv(const void *tag_buf,
+ struct debug_htt_stats_req *stats_req)
+{
+ const struct htt_tx_pdev_rate_stats_tlv *htt_stats_buf = tag_buf;
+ u8 *buf = stats_req->buf;
+ u32 len = stats_req->buf_len;
+ u32 buf_len = ATH11K_HTT_STATS_BUF_SIZE;
+ u8 j;
+
+ len += scnprintf(buf + len, buf_len - len, "HTT_TX_PDEV_RATE_STATS_TLV:\n");
+ len += scnprintf(buf + len, buf_len - len, "mac_id = %lu\n",
+ FIELD_GET(HTT_STATS_MAC_ID, htt_stats_buf->mac_id__word));
+ len += scnprintf(buf + len, buf_len - len, "tx_ldpc = %u\n",
+ htt_stats_buf->tx_ldpc);
+ len += scnprintf(buf + len, buf_len - len, "ac_mu_mimo_tx_ldpc = %u\n",
+ htt_stats_buf->ac_mu_mimo_tx_ldpc);
+ len += scnprintf(buf + len, buf_len - len, "ax_mu_mimo_tx_ldpc = %u\n",
+ htt_stats_buf->ax_mu_mimo_tx_ldpc);
+ len += scnprintf(buf + len, buf_len - len, "ofdma_tx_ldpc = %u\n",
+ htt_stats_buf->ofdma_tx_ldpc);
+ len += scnprintf(buf + len, buf_len - len, "rts_cnt = %u\n",
+ htt_stats_buf->rts_cnt);
+ len += scnprintf(buf + len, buf_len - len, "rts_success = %u\n",
+ htt_stats_buf->rts_success);
+ len += scnprintf(buf + len, buf_len - len, "ack_rssi = %u\n",
+ htt_stats_buf->ack_rssi);
+
+ len += scnprintf(buf + len, buf_len - len,
+ "Legacy CCK Rates: 1 Mbps: %u, 2 Mbps: %u, 5.5 Mbps: %u, 11 Mbps: %u\n",
+ htt_stats_buf->tx_legacy_cck_rate[0],
+ htt_stats_buf->tx_legacy_cck_rate[1],
+ htt_stats_buf->tx_legacy_cck_rate[2],
+ htt_stats_buf->tx_legacy_cck_rate[3]);
+
+ len += scnprintf(buf + len, buf_len - len,
+ "Legacy OFDM Rates: 6 Mbps: %u, 9 Mbps: %u, 12 Mbps: %u, 18 Mbps: %u\n"
+ " 24 Mbps: %u, 36 Mbps: %u, 48 Mbps: %u, 54 Mbps: %u\n",
+ htt_stats_buf->tx_legacy_ofdm_rate[0],
+ htt_stats_buf->tx_legacy_ofdm_rate[1],
+ htt_stats_buf->tx_legacy_ofdm_rate[2],
+ htt_stats_buf->tx_legacy_ofdm_rate[3],
+ htt_stats_buf->tx_legacy_ofdm_rate[4],
+ htt_stats_buf->tx_legacy_ofdm_rate[5],
+ htt_stats_buf->tx_legacy_ofdm_rate[6],
+ htt_stats_buf->tx_legacy_ofdm_rate[7]);
+
+ PRINT_ARRAY_TO_BUF(buf, len, htt_stats_buf->tx_mcs, "tx_mcs",
+ HTT_TX_PDEV_STATS_NUM_MCS_COUNTERS, "\n");
+ PRINT_ARRAY_TO_BUF(buf, len, htt_stats_buf->ac_mu_mimo_tx_mcs,
+ "ac_mu_mimo_tx_mcs", HTT_TX_PDEV_STATS_NUM_MCS_COUNTERS, "\n");
+ PRINT_ARRAY_TO_BUF(buf, len, htt_stats_buf->ax_mu_mimo_tx_mcs,
+ "ax_mu_mimo_tx_mcs", HTT_TX_PDEV_STATS_NUM_MCS_COUNTERS, "\n");
+ PRINT_ARRAY_TO_BUF(buf, len, htt_stats_buf->ofdma_tx_mcs, "ofdma_tx_mcs",
+ HTT_TX_PDEV_STATS_NUM_MCS_COUNTERS, "\n");
+ PRINT_ARRAY_TO_BUF(buf, len, htt_stats_buf->tx_nss, "tx_nss",
+ HTT_TX_PDEV_STATS_NUM_SPATIAL_STREAMS, "\n");
+ PRINT_ARRAY_TO_BUF(buf, len, htt_stats_buf->ac_mu_mimo_tx_nss,
+ "ac_mu_mimo_tx_nss",
+ HTT_TX_PDEV_STATS_NUM_SPATIAL_STREAMS, "\n");
+ PRINT_ARRAY_TO_BUF(buf, len, htt_stats_buf->ax_mu_mimo_tx_nss,
+ "ax_mu_mimo_tx_nss",
+ HTT_TX_PDEV_STATS_NUM_SPATIAL_STREAMS, "\n");
+ PRINT_ARRAY_TO_BUF(buf, len, htt_stats_buf->ofdma_tx_nss, "ofdma_tx_nss",
+ HTT_TX_PDEV_STATS_NUM_SPATIAL_STREAMS, "\n");
+ PRINT_ARRAY_TO_BUF(buf, len, htt_stats_buf->tx_bw, "tx_bw",
+ HTT_TX_PDEV_STATS_NUM_BW_COUNTERS, "\n");
+ PRINT_ARRAY_TO_BUF(buf, len, htt_stats_buf->ac_mu_mimo_tx_bw,
+ "ac_mu_mimo_tx_bw", HTT_TX_PDEV_STATS_NUM_BW_COUNTERS, "\n");
+ PRINT_ARRAY_TO_BUF(buf, len, htt_stats_buf->ax_mu_mimo_tx_bw,
+ "ax_mu_mimo_tx_bw",
+ HTT_TX_PDEV_STATS_NUM_BW_COUNTERS, "\n");
+ PRINT_ARRAY_TO_BUF(buf, len, htt_stats_buf->ofdma_tx_bw, "ofdma_tx_bw",
+ HTT_TX_PDEV_STATS_NUM_BW_COUNTERS, "\n");
+ PRINT_ARRAY_TO_BUF(buf, len, htt_stats_buf->tx_stbc, "tx_stbc",
+ HTT_TX_PDEV_STATS_NUM_MCS_COUNTERS, "\n");
+ PRINT_ARRAY_TO_BUF(buf, len, htt_stats_buf->tx_pream, "tx_pream",
+ HTT_TX_PDEV_STATS_NUM_PREAMBLE_TYPES, "\n");
+
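+ /* tx_he_ltf[0] is unused; per the format string below, indices 1-3
+  * count 1x, 2x and 4x HE LTF PPDUs respectively.
+  */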
+ len += scnprintf(buf + len, buf_len - len, "HE LTF: 1x: %u, 2x: %u, 4x: %u\n",
+ htt_stats_buf->tx_he_ltf[1],
+ htt_stats_buf->tx_he_ltf[2],
+ htt_stats_buf->tx_he_ltf[3]);
+
+ /* SU GI Stats */
+ for (j = 0; j < HTT_TX_PDEV_STATS_NUM_GI_COUNTERS; j++) {
+ len += scnprintf(buf + len, (buf_len - len),
+ "tx_gi[%u] = ", j);
+ PRINT_ARRAY_TO_BUF(buf, len, htt_stats_buf->tx_gi[j], NULL,
+ HTT_TX_PDEV_STATS_NUM_MCS_COUNTERS, "\n");
+ }
+
+ /* AC MU-MIMO GI Stats */
+ for (j = 0; j < HTT_TX_PDEV_STATS_NUM_GI_COUNTERS; j++) {
+ len += scnprintf(buf + len, (buf_len - len),
+ "ac_mu_mimo_tx_gi[%u] = ", j);
+ PRINT_ARRAY_TO_BUF(buf, len, htt_stats_buf->ac_mu_mimo_tx_gi[j],
+ NULL, HTT_TX_PDEV_STATS_NUM_MCS_COUNTERS, "\n");
+ }
+
+ /* AX MU-MIMO GI Stats */
+ for (j = 0; j < HTT_TX_PDEV_STATS_NUM_GI_COUNTERS; j++) {
+ len += scnprintf(buf + len, (buf_len - len),
+ "ax_mu_mimo_tx_gi[%u] = ", j);
+ PRINT_ARRAY_TO_BUF(buf, len, htt_stats_buf->ax_mu_mimo_tx_gi[j],
+ NULL, HTT_TX_PDEV_STATS_NUM_MCS_COUNTERS, "\n");
+ }
+
+ /* DL OFDMA GI Stats */
+ for (j = 0; j < HTT_TX_PDEV_STATS_NUM_GI_COUNTERS; j++) {
+ len += scnprintf(buf + len, (buf_len - len),
+ "ofdma_tx_gi[%u] = ", j);
+ PRINT_ARRAY_TO_BUF(buf, len, htt_stats_buf->ofdma_tx_gi[j], NULL,
+ HTT_TX_PDEV_STATS_NUM_MCS_COUNTERS, "\n");
+ }
+
+ PRINT_ARRAY_TO_BUF(buf, len, htt_stats_buf->tx_dcm, "tx_dcm",
+ HTT_TX_PDEV_STATS_NUM_DCM_COUNTERS, "\n\n");
+
+ if (len >= buf_len)
+ buf[buf_len - 1] = 0;
+ else
+ buf[len] = 0;
+
+ stats_req->buf_len = len;
+}
+
+static inline void htt_print_rx_pdev_rate_stats_tlv(const void *tag_buf,
+ struct debug_htt_stats_req *stats_req)
+{
+ const struct htt_rx_pdev_rate_stats_tlv *htt_stats_buf = tag_buf;
+ u8 *buf = stats_req->buf;
+ u32 len = stats_req->buf_len;
+ u32 buf_len = ATH11K_HTT_STATS_BUF_SIZE;
+ u8 i, j;
+
+ len += scnprintf(buf + len, buf_len - len, "HTT_RX_PDEV_RATE_STATS_TLV:\n");
+ len += scnprintf(buf + len, buf_len - len, "mac_id = %lu\n",
+ FIELD_GET(HTT_STATS_MAC_ID, htt_stats_buf->mac_id__word));
+ len += scnprintf(buf + len, buf_len - len, "nsts = %u\n",
+ htt_stats_buf->nsts);
+ len += scnprintf(buf + len, buf_len - len, "rx_ldpc = %u\n",
+ htt_stats_buf->rx_ldpc);
+ len += scnprintf(buf + len, buf_len - len, "rts_cnt = %u\n",
+ htt_stats_buf->rts_cnt);
+ len += scnprintf(buf + len, buf_len - len, "rssi_mgmt = %u\n",
+ htt_stats_buf->rssi_mgmt);
+ len += scnprintf(buf + len, buf_len - len, "rssi_data = %u\n",
+ htt_stats_buf->rssi_data);
+ len += scnprintf(buf + len, buf_len - len, "rssi_comb = %u\n",
+ htt_stats_buf->rssi_comb);
+ len += scnprintf(buf + len, buf_len - len, "rssi_in_dbm = %d\n",
+ htt_stats_buf->rssi_in_dbm);
+
+ PRINT_ARRAY_TO_BUF(buf, len, htt_stats_buf->rx_mcs, "rx_mcs",
+ HTT_RX_PDEV_STATS_NUM_MCS_COUNTERS, "\n");
+ PRINT_ARRAY_TO_BUF(buf, len, htt_stats_buf->rx_nss, "rx_nss",
+ HTT_RX_PDEV_STATS_NUM_SPATIAL_STREAMS, "\n");
+ PRINT_ARRAY_TO_BUF(buf, len, htt_stats_buf->rx_dcm, "rx_dcm",
+ HTT_RX_PDEV_STATS_NUM_DCM_COUNTERS, "\n");
+ PRINT_ARRAY_TO_BUF(buf, len, htt_stats_buf->rx_stbc, "rx_stbc",
+ HTT_RX_PDEV_STATS_NUM_MCS_COUNTERS, "\n");
+ PRINT_ARRAY_TO_BUF(buf, len, htt_stats_buf->rx_bw, "rx_bw",
+ HTT_RX_PDEV_STATS_NUM_BW_COUNTERS, "\n");
+
+ len += scnprintf(buf + len, buf_len - len, "rx_evm_nss_count = %u\n",
+ htt_stats_buf->nss_count);
+
+ len += scnprintf(buf + len, buf_len - len, "rx_evm_pilot_count = %u\n",
+ htt_stats_buf->pilot_count);
+
+ for (j = 0; j < HTT_RX_PDEV_STATS_NUM_SPATIAL_STREAMS; j++) {
+ len += scnprintf(buf + len, buf_len - len,
+ "pilot_evm_db[%u] = ", j);
+ for (i = 0; i < HTT_RX_PDEV_STATS_RXEVM_MAX_PILOTS_PER_NSS; i++)
+ len += scnprintf(buf + len,
+ buf_len - len,
+ " %u:%d,",
+ i,
+ htt_stats_buf->rx_pilot_evm_db[j][i]);
+ len += scnprintf(buf + len, buf_len - len, "\n");
+ }
+
+ len += scnprintf(buf + len, buf_len - len,
+ "pilot_evm_db_mean = ");
+ for (i = 0; i < HTT_RX_PDEV_STATS_NUM_SPATIAL_STREAMS; i++)
+ len += scnprintf(buf + len,
+ buf_len - len,
+ " %u:%d,", i,
+ htt_stats_buf->rx_pilot_evm_db_mean[i]);
+ len += scnprintf(buf + len, buf_len - len, "\n");
+
+ for (j = 0; j < HTT_RX_PDEV_STATS_NUM_SPATIAL_STREAMS; j++) {
+ len += scnprintf(buf + len, buf_len - len,
+ "rssi_chain[%u] = ", j);
+ PRINT_ARRAY_TO_BUF(buf, len, htt_stats_buf->rssi_chain[j], NULL,
+ HTT_RX_PDEV_STATS_NUM_BW_COUNTERS, "\n");
+ }
+
+ for (j = 0; j < HTT_RX_PDEV_STATS_NUM_GI_COUNTERS; j++) {
+ len += scnprintf(buf + len, buf_len - len,
+ "rx_gi[%u] = ", j);
+ PRINT_ARRAY_TO_BUF(buf, len, htt_stats_buf->rx_gi[j], NULL,
+ HTT_RX_PDEV_STATS_NUM_MCS_COUNTERS, "\n");
+ }
+
+ PRINT_ARRAY_TO_BUF(buf, len, htt_stats_buf->rx_pream, "rx_pream",
+ HTT_RX_PDEV_STATS_NUM_PREAMBLE_TYPES, "\n");
+
+ len += scnprintf(buf + len, buf_len - len, "rx_11ax_su_ext = %u\n",
+ htt_stats_buf->rx_11ax_su_ext);
+ len += scnprintf(buf + len, buf_len - len, "rx_11ac_mumimo = %u\n",
+ htt_stats_buf->rx_11ac_mumimo);
+ len += scnprintf(buf + len, buf_len - len, "rx_11ax_mumimo = %u\n",
+ htt_stats_buf->rx_11ax_mumimo);
+ len += scnprintf(buf + len, buf_len - len, "rx_11ax_ofdma = %u\n",
+ htt_stats_buf->rx_11ax_ofdma);
+ len += scnprintf(buf + len, buf_len - len, "txbf = %u\n",
+ htt_stats_buf->txbf);
+
+ len += scnprintf(buf + len, buf_len - len, "\nrx_su_ndpa = %u",
+ htt_stats_buf->rx_su_ndpa);
+ PRINT_ARRAY_TO_BUF(buf, len, htt_stats_buf->rx_11ax_su_txbf_mcs,
+ "rx_11ax_su_txbf_mcs", HTT_RX_PDEV_STATS_NUM_MCS_COUNTERS,
+ "\n");
+
+ len += scnprintf(buf + len, buf_len - len, "\nrx_mu_ndpa = %u",
+ htt_stats_buf->rx_mu_ndpa);
+ PRINT_ARRAY_TO_BUF(buf, len, htt_stats_buf->rx_11ax_mu_txbf_mcs,
+ "rx_11ax_mu_txbf_mcs", HTT_RX_PDEV_STATS_NUM_MCS_COUNTERS,
+ "\n");
+
+ len += scnprintf(buf + len, buf_len - len, "\nrx_br_poll = %u",
+ htt_stats_buf->rx_br_poll);
+
+ PRINT_ARRAY_TO_BUF(buf, len, htt_stats_buf->rx_legacy_cck_rate,
+ "rx_legacy_cck_rate",
+ HTT_RX_PDEV_STATS_NUM_LEGACY_CCK_STATS, "\n");
+
+ PRINT_ARRAY_TO_BUF(buf, len, htt_stats_buf->rx_legacy_ofdm_rate,
+ "rx_legacy_ofdm_rate",
+ HTT_RX_PDEV_STATS_NUM_LEGACY_OFDM_STATS, "\n");
+
+ len += scnprintf(buf + len, buf_len - len, "rx_active_dur_us_low = %u\n",
+ htt_stats_buf->rx_active_dur_us_low);
+ len += scnprintf(buf + len, buf_len - len, "rx_active_dur_us_high = %u\n",
+ htt_stats_buf->rx_active_dur_us_high);
+ len += scnprintf(buf + len, buf_len - len, "rx_11ax_ul_ofdma = %u\n",
+ htt_stats_buf->rx_11ax_ul_ofdma);
+
+ PRINT_ARRAY_TO_BUF(buf, len, htt_stats_buf->ul_ofdma_rx_mcs,
+ "ul_ofdma_rx_mcs",
+ HTT_RX_PDEV_STATS_NUM_MCS_COUNTERS, "\n");
+
+ for (j = 0; j < HTT_RX_PDEV_STATS_NUM_GI_COUNTERS; j++) {
+ len += scnprintf(buf + len, buf_len - len,
+ "ul_ofdma_rx_gi[%u] = ", j);
+ PRINT_ARRAY_TO_BUF(buf, len, htt_stats_buf->ul_ofdma_rx_gi[j], NULL,
+ HTT_RX_PDEV_STATS_NUM_MCS_COUNTERS, "\n");
+ }
+
+ PRINT_ARRAY_TO_BUF(buf, len, htt_stats_buf->ul_ofdma_rx_nss,
+ "ul_ofdma_rx_nss",
+ HTT_RX_PDEV_STATS_NUM_SPATIAL_STREAMS, "\n");
+
+ PRINT_ARRAY_TO_BUF(buf, len, htt_stats_buf->ul_ofdma_rx_bw, "ul_ofdma_rx_bw",
+ HTT_RX_PDEV_STATS_NUM_BW_COUNTERS, "\n");
+
+ len += scnprintf(buf + len, buf_len - len, "ul_ofdma_rx_stbc = %u\n",
+ htt_stats_buf->ul_ofdma_rx_stbc);
+ len += scnprintf(buf + len, buf_len - len, "ul_ofdma_rx_ldpc = %u\n",
+ htt_stats_buf->ul_ofdma_rx_ldpc);
+
+ PRINT_ARRAY_TO_BUF(buf, len, htt_stats_buf->rx_ulofdma_non_data_ppdu,
+ "rx_ulofdma_non_data_ppdu",
+ HTT_RX_PDEV_MAX_OFDMA_NUM_USER, "\n");
+
+ PRINT_ARRAY_TO_BUF(buf, len, htt_stats_buf->rx_ulofdma_data_ppdu,
+ "rx_ulofdma_data_ppdu", HTT_RX_PDEV_MAX_OFDMA_NUM_USER, "\n");
+
+ PRINT_ARRAY_TO_BUF(buf, len, htt_stats_buf->rx_ulofdma_mpdu_ok,
+ "rx_ulofdma_mpdu_ok", HTT_RX_PDEV_MAX_OFDMA_NUM_USER, "\n");
+
+ PRINT_ARRAY_TO_BUF(buf, len, htt_stats_buf->rx_ulofdma_mpdu_fail,
+ "rx_ulofdma_mpdu_fail", HTT_RX_PDEV_MAX_OFDMA_NUM_USER, "\n");
+
+ for (j = 0; j < HTT_RX_PDEV_STATS_NUM_SPATIAL_STREAMS; j++) {
+ len += scnprintf(buf + len, buf_len - len,
+ "rx_ul_fd_rssi: nss[%u] = ", j);
+ for (i = 0; i < HTT_RX_PDEV_MAX_OFDMA_NUM_USER; i++)
+ len += scnprintf(buf + len,
+ buf_len - len,
+ " %u:%d,",
+ i, htt_stats_buf->rx_ul_fd_rssi[j][i]);
+ len += scnprintf(buf + len, buf_len - len, "\n");
+ }
+
+ PRINT_ARRAY_TO_BUF(buf, len, htt_stats_buf->rx_ulofdma_non_data_nusers,
+ "rx_ulofdma_non_data_nusers", HTT_RX_PDEV_MAX_OFDMA_NUM_USER,
+ "\n");
+
+ PRINT_ARRAY_TO_BUF(buf, len, htt_stats_buf->rx_ulofdma_data_nusers,
+ "rx_ulofdma_data_nusers", HTT_RX_PDEV_MAX_OFDMA_NUM_USER,
+ "\n");
+
+ PRINT_ARRAY_TO_BUF(buf, len, htt_stats_buf->rx_11ax_dl_ofdma_mcs,
+ "rx_11ax_dl_ofdma_mcs", HTT_RX_PDEV_STATS_NUM_MCS_COUNTERS,
+ "\n");
+
+ PRINT_ARRAY_TO_BUF(buf, len, htt_stats_buf->rx_11ax_dl_ofdma_ru,
+ "rx_11ax_dl_ofdma_ru", HTT_RX_PDEV_STATS_NUM_RU_SIZE_COUNTERS,
+ "\n");
+
+ PRINT_ARRAY_TO_BUF(buf, len, htt_stats_buf->rx_ulmumimo_non_data_ppdu,
+ "rx_ulmumimo_non_data_ppdu", HTT_RX_PDEV_MAX_ULMUMIMO_NUM_USER,
+ "\n");
+
+ PRINT_ARRAY_TO_BUF(buf, len, htt_stats_buf->rx_ulmumimo_data_ppdu,
+ "rx_ulmumimo_data_ppdu", HTT_RX_PDEV_MAX_ULMUMIMO_NUM_USER,
+ "\n");
+
+ PRINT_ARRAY_TO_BUF(buf, len, htt_stats_buf->rx_ulmumimo_mpdu_ok,
+ "rx_ulmumimo_mpdu_ok", HTT_RX_PDEV_MAX_ULMUMIMO_NUM_USER,
+ "\n");
+
+ PRINT_ARRAY_TO_BUF(buf, len, htt_stats_buf->rx_ulmumimo_mpdu_fail,
+ "rx_ulmumimo_mpdu_fail", HTT_RX_PDEV_MAX_ULMUMIMO_NUM_USER,
+ "\n");
+
+ len += scnprintf(buf + len, buf_len - len, "per_chain_rssi_pkt_type = %#x\n",
+ htt_stats_buf->per_chain_rssi_pkt_type);
+
+ for (j = 0; j < HTT_RX_PDEV_STATS_NUM_SPATIAL_STREAMS; j++) {
+ len += scnprintf(buf + len, buf_len - len,
+ "rx_per_chain_rssi_in_dbm[%u] = ", j);
+ for (i = 0; i < HTT_RX_PDEV_STATS_NUM_BW_COUNTERS; i++)
+ len += scnprintf(buf + len,
+ buf_len - len,
+ " %u:%d,",
+ i,
+ htt_stats_buf->rx_per_chain_rssi_in_dbm[j][i]);
+ len += scnprintf(buf + len, buf_len - len, "\n");
+ }
+ len += scnprintf(buf + len, buf_len - len, "\n");
+
+ if (len >= buf_len)
+ buf[buf_len - 1] = 0;
+ else
+ buf[len] = 0;
+
+ stats_req->buf_len = len;
+}
+
+static inline void htt_print_rx_soc_fw_stats_tlv(const void *tag_buf,
+ struct debug_htt_stats_req *stats_req)
+{
+ const struct htt_rx_soc_fw_stats_tlv *htt_stats_buf = tag_buf;
+ u8 *buf = stats_req->buf;
+ u32 len = stats_req->buf_len;
+ u32 buf_len = ATH11K_HTT_STATS_BUF_SIZE;
+
+ len += scnprintf(buf + len, buf_len - len, "HTT_RX_SOC_FW_STATS_TLV:\n");
+ len += scnprintf(buf + len, buf_len - len, "fw_reo_ring_data_msdu = %u\n",
+ htt_stats_buf->fw_reo_ring_data_msdu);
+ len += scnprintf(buf + len, buf_len - len, "fw_to_host_data_msdu_bcmc = %u\n",
+ htt_stats_buf->fw_to_host_data_msdu_bcmc);
+ len += scnprintf(buf + len, buf_len - len, "fw_to_host_data_msdu_uc = %u\n",
+ htt_stats_buf->fw_to_host_data_msdu_uc);
+ len += scnprintf(buf + len, buf_len - len,
+ "ofld_remote_data_buf_recycle_cnt = %u\n",
+ htt_stats_buf->ofld_remote_data_buf_recycle_cnt);
+ len += scnprintf(buf + len, buf_len - len,
+ "ofld_remote_free_buf_indication_cnt = %u\n",
+ htt_stats_buf->ofld_remote_free_buf_indication_cnt);
+ len += scnprintf(buf + len, buf_len - len,
+ "ofld_buf_to_host_data_msdu_uc = %u\n",
+ htt_stats_buf->ofld_buf_to_host_data_msdu_uc);
+ len += scnprintf(buf + len, buf_len - len,
+ "reo_fw_ring_to_host_data_msdu_uc = %u\n",
+ htt_stats_buf->reo_fw_ring_to_host_data_msdu_uc);
+ len += scnprintf(buf + len, buf_len - len, "wbm_sw_ring_reap = %u\n",
+ htt_stats_buf->wbm_sw_ring_reap);
+ len += scnprintf(buf + len, buf_len - len, "wbm_forward_to_host_cnt = %u\n",
+ htt_stats_buf->wbm_forward_to_host_cnt);
+ len += scnprintf(buf + len, buf_len - len, "wbm_target_recycle_cnt = %u\n",
+ htt_stats_buf->wbm_target_recycle_cnt);
+ len += scnprintf(buf + len, buf_len - len,
+ "target_refill_ring_recycle_cnt = %u\n",
+ htt_stats_buf->target_refill_ring_recycle_cnt);
+
+ if (len >= buf_len)
+ buf[buf_len - 1] = 0;
+ else
+ buf[len] = 0;
+
+ stats_req->buf_len = len;
+}
+
+static inline void
+htt_print_rx_soc_fw_refill_ring_empty_tlv_v(const void *tag_buf,
+ u16 tag_len,
+ struct debug_htt_stats_req *stats_req)
+{
+ const struct htt_rx_soc_fw_refill_ring_empty_tlv_v *htt_stats_buf = tag_buf;
+ u8 *buf = stats_req->buf;
+ u32 len = stats_req->buf_len;
+ u32 buf_len = ATH11K_HTT_STATS_BUF_SIZE;
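+ /* tag_len is in bytes and each counter is a u32, so tag_len >> 2
+  * gives the element count; clamp it to the table size to guard
+  * against an oversized TLV from firmware.
+  */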
+ u16 num_elems = min_t(u16, (tag_len >> 2), HTT_RX_STATS_REFILL_MAX_RING);
+
+ len += scnprintf(buf + len, buf_len - len,
+ "HTT_RX_SOC_FW_REFILL_RING_EMPTY_TLV_V:\n");
+
+ PRINT_ARRAY_TO_BUF(buf, len, htt_stats_buf->refill_ring_empty_cnt,
+ "refill_ring_empty_cnt", num_elems, "\n\n");
+
+ if (len >= buf_len)
+ buf[buf_len - 1] = 0;
+ else
+ buf[len] = 0;
+
+ stats_req->buf_len = len;
+}
+
+static inline void
+htt_print_rx_soc_fw_refill_ring_num_rxdma_err_tlv_v(const void *tag_buf,
+ u16 tag_len,
+ struct debug_htt_stats_req *stats_req)
+{
+ const struct htt_rx_soc_fw_refill_ring_num_rxdma_err_tlv_v *htt_stats_buf =
+ tag_buf;
+ u8 *buf = stats_req->buf;
+ u32 len = stats_req->buf_len;
+ u32 buf_len = ATH11K_HTT_STATS_BUF_SIZE;
+ u16 num_elems = min_t(u16, (tag_len >> 2), HTT_RX_RXDMA_MAX_ERR_CODE);
+
+ len += scnprintf(buf + len, buf_len - len,
+ "HTT_RX_SOC_FW_REFILL_RING_NUM_RXDMA_ERR_TLV_V:\n");
+
+ PRINT_ARRAY_TO_BUF(buf, len, htt_stats_buf->rxdma_err, "rxdma_err",
+ num_elems, "\n\n");
+
+ if (len >= buf_len)
+ buf[buf_len - 1] = 0;
+ else
+ buf[len] = 0;
+
+ stats_req->buf_len = len;
+}
+
+static inline void
+htt_print_rx_soc_fw_refill_ring_num_reo_err_tlv_v(const void *tag_buf,
+ u16 tag_len,
+ struct debug_htt_stats_req *stats_req)
+{
+ const struct htt_rx_soc_fw_refill_ring_num_reo_err_tlv_v *htt_stats_buf = tag_buf;
+ u8 *buf = stats_req->buf;
+ u32 len = stats_req->buf_len;
+ u32 buf_len = ATH11K_HTT_STATS_BUF_SIZE;
+ u16 num_elems = min_t(u16, (tag_len >> 2), HTT_RX_REO_MAX_ERR_CODE);
+
+ len += scnprintf(buf + len, buf_len - len,
+ "HTT_RX_SOC_FW_REFILL_RING_NUM_REO_ERR_TLV_V:\n");
+
+ PRINT_ARRAY_TO_BUF(buf, len, htt_stats_buf->reo_err, "reo_err",
+ num_elems, "\n\n");
+
+ if (len >= buf_len)
+ buf[buf_len - 1] = 0;
+ else
+ buf[len] = 0;
+
+ stats_req->buf_len = len;
+}
+
+static inline void
+htt_print_rx_reo_debug_stats_tlv_v(const void *tag_buf,
+ struct debug_htt_stats_req *stats_req)
+{
+ const struct htt_rx_reo_resource_stats_tlv_v *htt_stats_buf = tag_buf;
+ u8 *buf = stats_req->buf;
+ u32 len = stats_req->buf_len;
+ u32 buf_len = ATH11K_HTT_STATS_BUF_SIZE;
+
+ len += scnprintf(buf + len, buf_len - len, "HTT_RX_REO_RESOURCE_STATS_TLV:\n");
+ len += scnprintf(buf + len, buf_len - len, "sample_id = %u\n",
+ htt_stats_buf->sample_id);
+ len += scnprintf(buf + len, buf_len - len, "total_max = %u\n",
+ htt_stats_buf->total_max);
+ len += scnprintf(buf + len, buf_len - len, "total_avg = %u\n",
+ htt_stats_buf->total_avg);
+ len += scnprintf(buf + len, buf_len - len, "total_sample = %u\n",
+ htt_stats_buf->total_sample);
+ len += scnprintf(buf + len, buf_len - len, "non_zeros_avg = %u\n",
+ htt_stats_buf->non_zeros_avg);
+ len += scnprintf(buf + len, buf_len - len, "non_zeros_sample = %u\n",
+ htt_stats_buf->non_zeros_sample);
+ len += scnprintf(buf + len, buf_len - len, "last_non_zeros_max = %u\n",
+ htt_stats_buf->last_non_zeros_max);
+ len += scnprintf(buf + len, buf_len - len, "last_non_zeros_min %u\n",
+ htt_stats_buf->last_non_zeros_min);
+ len += scnprintf(buf + len, buf_len - len, "last_non_zeros_avg %u\n",
+ htt_stats_buf->last_non_zeros_avg);
+ len += scnprintf(buf + len, buf_len - len, "last_non_zeros_sample %u\n\n",
+ htt_stats_buf->last_non_zeros_sample);
+
+ if (len >= buf_len)
+ buf[buf_len - 1] = 0;
+ else
+ buf[len] = 0;
+
+ stats_req->buf_len = len;
+}
+
+static inline void
+htt_print_rx_soc_fw_refill_ring_num_refill_tlv_v(const void *tag_buf,
+ u16 tag_len,
+ struct debug_htt_stats_req *stats_req)
+{
+ const struct htt_rx_soc_fw_refill_ring_num_refill_tlv_v *htt_stats_buf = tag_buf;
+ u8 *buf = stats_req->buf;
+ u32 len = stats_req->buf_len;
+ u32 buf_len = ATH11K_HTT_STATS_BUF_SIZE;
+ u16 num_elems = min_t(u16, (tag_len >> 2), HTT_RX_STATS_REFILL_MAX_RING);
+
+ len += scnprintf(buf + len, buf_len - len,
+ "HTT_RX_SOC_FW_REFILL_RING_NUM_REFILL_TLV_V:\n");
+
+ PRINT_ARRAY_TO_BUF(buf, len, htt_stats_buf->refill_ring_num_refill,
+ "refill_ring_num_refill", num_elems, "\n\n");
+
+ if (len >= buf_len)
+ buf[buf_len - 1] = 0;
+ else
+ buf[len] = 0;
+
+ stats_req->buf_len = len;
+}
+
+static inline void htt_print_rx_pdev_fw_stats_tlv(const void *tag_buf,
+ struct debug_htt_stats_req *stats_req)
+{
+ const struct htt_rx_pdev_fw_stats_tlv *htt_stats_buf = tag_buf;
+ u8 *buf = stats_req->buf;
+ u32 len = stats_req->buf_len;
+ u32 buf_len = ATH11K_HTT_STATS_BUF_SIZE;
+
+ len += scnprintf(buf + len, buf_len - len, "HTT_RX_PDEV_FW_STATS_TLV:\n");
+ len += scnprintf(buf + len, buf_len - len, "mac_id = %lu\n",
+ FIELD_GET(HTT_STATS_MAC_ID, htt_stats_buf->mac_id__word));
+ len += scnprintf(buf + len, buf_len - len, "ppdu_recvd = %u\n",
+ htt_stats_buf->ppdu_recvd);
+ len += scnprintf(buf + len, buf_len - len, "mpdu_cnt_fcs_ok = %u\n",
+ htt_stats_buf->mpdu_cnt_fcs_ok);
+ len += scnprintf(buf + len, buf_len - len, "mpdu_cnt_fcs_err = %u\n",
+ htt_stats_buf->mpdu_cnt_fcs_err);
+ len += scnprintf(buf + len, buf_len - len, "tcp_msdu_cnt = %u\n",
+ htt_stats_buf->tcp_msdu_cnt);
+ len += scnprintf(buf + len, buf_len - len, "tcp_ack_msdu_cnt = %u\n",
+ htt_stats_buf->tcp_ack_msdu_cnt);
+ len += scnprintf(buf + len, buf_len - len, "udp_msdu_cnt = %u\n",
+ htt_stats_buf->udp_msdu_cnt);
+ len += scnprintf(buf + len, buf_len - len, "other_msdu_cnt = %u\n",
+ htt_stats_buf->other_msdu_cnt);
+ len += scnprintf(buf + len, buf_len - len, "fw_ring_mpdu_ind = %u\n",
+ htt_stats_buf->fw_ring_mpdu_ind);
+
+ PRINT_ARRAY_TO_BUF(buf, len, htt_stats_buf->fw_ring_mgmt_subtype,
+ "fw_ring_mgmt_subtype", HTT_STATS_SUBTYPE_MAX, "\n");
+
+ PRINT_ARRAY_TO_BUF(buf, len, htt_stats_buf->fw_ring_ctrl_subtype,
+ "fw_ring_ctrl_subtype", HTT_STATS_SUBTYPE_MAX, "\n");
+
+ len += scnprintf(buf + len, buf_len - len, "fw_ring_mcast_data_msdu = %u\n",
+ htt_stats_buf->fw_ring_mcast_data_msdu);
+ len += scnprintf(buf + len, buf_len - len, "fw_ring_bcast_data_msdu = %u\n",
+ htt_stats_buf->fw_ring_bcast_data_msdu);
+ len += scnprintf(buf + len, buf_len - len, "fw_ring_ucast_data_msdu = %u\n",
+ htt_stats_buf->fw_ring_ucast_data_msdu);
+ len += scnprintf(buf + len, buf_len - len, "fw_ring_null_data_msdu = %u\n",
+ htt_stats_buf->fw_ring_null_data_msdu);
+ len += scnprintf(buf + len, buf_len - len, "fw_ring_mpdu_drop = %u\n",
+ htt_stats_buf->fw_ring_mpdu_drop);
+ len += scnprintf(buf + len, buf_len - len, "ofld_local_data_ind_cnt = %u\n",
+ htt_stats_buf->ofld_local_data_ind_cnt);
+ len += scnprintf(buf + len, buf_len - len,
+ "ofld_local_data_buf_recycle_cnt = %u\n",
+ htt_stats_buf->ofld_local_data_buf_recycle_cnt);
+ len += scnprintf(buf + len, buf_len - len, "drx_local_data_ind_cnt = %u\n",
+ htt_stats_buf->drx_local_data_ind_cnt);
+ len += scnprintf(buf + len, buf_len - len,
+ "drx_local_data_buf_recycle_cnt = %u\n",
+ htt_stats_buf->drx_local_data_buf_recycle_cnt);
+ len += scnprintf(buf + len, buf_len - len, "local_nondata_ind_cnt = %u\n",
+ htt_stats_buf->local_nondata_ind_cnt);
+ len += scnprintf(buf + len, buf_len - len, "local_nondata_buf_recycle_cnt = %u\n",
+ htt_stats_buf->local_nondata_buf_recycle_cnt);
+ len += scnprintf(buf + len, buf_len - len, "fw_status_buf_ring_refill_cnt = %u\n",
+ htt_stats_buf->fw_status_buf_ring_refill_cnt);
+ len += scnprintf(buf + len, buf_len - len, "fw_status_buf_ring_empty_cnt = %u\n",
+ htt_stats_buf->fw_status_buf_ring_empty_cnt);
+ len += scnprintf(buf + len, buf_len - len, "fw_pkt_buf_ring_refill_cnt = %u\n",
+ htt_stats_buf->fw_pkt_buf_ring_refill_cnt);
+ len += scnprintf(buf + len, buf_len - len, "fw_pkt_buf_ring_empty_cnt = %u\n",
+ htt_stats_buf->fw_pkt_buf_ring_empty_cnt);
+ len += scnprintf(buf + len, buf_len - len, "fw_link_buf_ring_refill_cnt = %u\n",
+ htt_stats_buf->fw_link_buf_ring_refill_cnt);
+ len += scnprintf(buf + len, buf_len - len, "fw_link_buf_ring_empty_cnt = %u\n",
+ htt_stats_buf->fw_link_buf_ring_empty_cnt);
+ len += scnprintf(buf + len, buf_len - len, "host_pkt_buf_ring_refill_cnt = %u\n",
+ htt_stats_buf->host_pkt_buf_ring_refill_cnt);
+ len += scnprintf(buf + len, buf_len - len, "host_pkt_buf_ring_empty_cnt = %u\n",
+ htt_stats_buf->host_pkt_buf_ring_empty_cnt);
+ len += scnprintf(buf + len, buf_len - len, "mon_pkt_buf_ring_refill_cnt = %u\n",
+ htt_stats_buf->mon_pkt_buf_ring_refill_cnt);
+ len += scnprintf(buf + len, buf_len - len, "mon_pkt_buf_ring_empty_cnt = %u\n",
+ htt_stats_buf->mon_pkt_buf_ring_empty_cnt);
+ len += scnprintf(buf + len, buf_len - len,
+ "mon_status_buf_ring_refill_cnt = %u\n",
+ htt_stats_buf->mon_status_buf_ring_refill_cnt);
+ len += scnprintf(buf + len, buf_len - len, "mon_status_buf_ring_empty_cnt = %u\n",
+ htt_stats_buf->mon_status_buf_ring_empty_cnt);
+ len += scnprintf(buf + len, buf_len - len, "mon_desc_buf_ring_refill_cnt = %u\n",
+ htt_stats_buf->mon_desc_buf_ring_refill_cnt);
+ len += scnprintf(buf + len, buf_len - len, "mon_desc_buf_ring_empty_cnt = %u\n",
+ htt_stats_buf->mon_desc_buf_ring_empty_cnt);
+ len += scnprintf(buf + len, buf_len - len, "mon_dest_ring_update_cnt = %u\n",
+ htt_stats_buf->mon_dest_ring_update_cnt);
+ len += scnprintf(buf + len, buf_len - len, "mon_dest_ring_full_cnt = %u\n",
+ htt_stats_buf->mon_dest_ring_full_cnt);
+ len += scnprintf(buf + len, buf_len - len, "rx_suspend_cnt = %u\n",
+ htt_stats_buf->rx_suspend_cnt);
+ len += scnprintf(buf + len, buf_len - len, "rx_suspend_fail_cnt = %u\n",
+ htt_stats_buf->rx_suspend_fail_cnt);
+ len += scnprintf(buf + len, buf_len - len, "rx_resume_cnt = %u\n",
+ htt_stats_buf->rx_resume_cnt);
+ len += scnprintf(buf + len, buf_len - len, "rx_resume_fail_cnt = %u\n",
+ htt_stats_buf->rx_resume_fail_cnt);
+ len += scnprintf(buf + len, buf_len - len, "rx_ring_switch_cnt = %u\n",
+ htt_stats_buf->rx_ring_switch_cnt);
+ len += scnprintf(buf + len, buf_len - len, "rx_ring_restore_cnt = %u\n",
+ htt_stats_buf->rx_ring_restore_cnt);
+ len += scnprintf(buf + len, buf_len - len, "rx_flush_cnt = %u\n",
+ htt_stats_buf->rx_flush_cnt);
+ len += scnprintf(buf + len, buf_len - len, "rx_recovery_reset_cnt = %u\n\n",
+ htt_stats_buf->rx_recovery_reset_cnt);
+
+ if (len >= buf_len)
+ buf[buf_len - 1] = 0;
+ else
+ buf[len] = 0;
+
+ stats_req->buf_len = len;
+}
+
+static inline void
+htt_print_rx_pdev_fw_ring_mpdu_err_tlv_v(const void *tag_buf,
+ struct debug_htt_stats_req *stats_req)
+{
+ const struct htt_rx_pdev_fw_ring_mpdu_err_tlv_v *htt_stats_buf = tag_buf;
+ u8 *buf = stats_req->buf;
+ u32 len = stats_req->buf_len;
+ u32 buf_len = ATH11K_HTT_STATS_BUF_SIZE;
+
+ len += scnprintf(buf + len, buf_len - len,
+ "HTT_RX_PDEV_FW_RING_MPDU_ERR_TLV_V:\n");
+
+ PRINT_ARRAY_TO_BUF(buf, len, htt_stats_buf->fw_ring_mpdu_err,
+ "fw_ring_mpdu_err", HTT_RX_STATS_RXDMA_MAX_ERR, "\n");
+
+ if (len >= buf_len)
+ buf[buf_len - 1] = 0;
+ else
+ buf[len] = 0;
+
+ stats_req->buf_len = len;
+}
+
+static inline void
+htt_print_rx_pdev_fw_mpdu_drop_tlv_v(const void *tag_buf,
+ u16 tag_len,
+ struct debug_htt_stats_req *stats_req)
+{
+ const struct htt_rx_pdev_fw_mpdu_drop_tlv_v *htt_stats_buf = tag_buf;
+ u8 *buf = stats_req->buf;
+ u32 len = stats_req->buf_len;
+ u32 buf_len = ATH11K_HTT_STATS_BUF_SIZE;
+ u16 num_elems = min_t(u16, (tag_len >> 2), HTT_RX_STATS_FW_DROP_REASON_MAX);
+
+ len += scnprintf(buf + len, buf_len - len, "HTT_RX_PDEV_FW_MPDU_DROP_TLV_V:\n");
+
+ PRINT_ARRAY_TO_BUF(buf, len, htt_stats_buf->fw_mpdu_drop, "fw_mpdu_drop",
+ num_elems, "\n\n");
+
+ if (len >= buf_len)
+ buf[buf_len - 1] = 0;
+ else
+ buf[len] = 0;
+
+ stats_req->buf_len = len;
+}
+
+static inline void
+htt_print_rx_pdev_fw_stats_phy_err_tlv(const void *tag_buf,
+ struct debug_htt_stats_req *stats_req)
+{
+ const struct htt_rx_pdev_fw_stats_phy_err_tlv *htt_stats_buf = tag_buf;
+ u8 *buf = stats_req->buf;
+ u32 len = stats_req->buf_len;
+ u32 buf_len = ATH11K_HTT_STATS_BUF_SIZE;
+
+ len += scnprintf(buf + len, buf_len - len, "HTT_RX_PDEV_FW_STATS_PHY_ERR_TLV:\n");
+ len += scnprintf(buf + len, buf_len - len, "mac_id__word = %u\n",
+ htt_stats_buf->mac_id__word);
+ len += scnprintf(buf + len, buf_len - len, "total_phy_err_nct = %u\n",
+ htt_stats_buf->total_phy_err_cnt);
+
+ PRINT_ARRAY_TO_BUF(buf, len, htt_stats_buf->phy_err, "phy_errs",
+ HTT_STATS_PHY_ERR_MAX, "\n\n");
+
+ if (len >= buf_len)
+ buf[buf_len - 1] = 0;
+ else
+ buf[len] = 0;
+
+ stats_req->buf_len = len;
+}
+
+static inline void
+htt_print_pdev_cca_stats_hist_tlv(const void *tag_buf,
+ struct debug_htt_stats_req *stats_req)
+{
+ const struct htt_pdev_cca_stats_hist_v1_tlv *htt_stats_buf = tag_buf;
+ u8 *buf = stats_req->buf;
+ u32 len = stats_req->buf_len;
+ u32 buf_len = ATH11K_HTT_STATS_BUF_SIZE;
+
+ len += scnprintf(buf + len, buf_len - len, "\nHTT_PDEV_CCA_STATS_HIST_TLV:\n");
+ len += scnprintf(buf + len, buf_len - len, "chan_num = %u\n",
+ htt_stats_buf->chan_num);
+ len += scnprintf(buf + len, buf_len - len, "num_records = %u\n",
+ htt_stats_buf->num_records);
+ len += scnprintf(buf + len, buf_len - len, "valid_cca_counters_bitmap = 0x%x\n",
+ htt_stats_buf->valid_cca_counters_bitmap);
+ len += scnprintf(buf + len, buf_len - len, "collection_interval = %u\n\n",
+ htt_stats_buf->collection_interval);
+
+ len += scnprintf(buf + len, buf_len - len,
+ "HTT_PDEV_STATS_CCA_COUNTERS_TLV:(in usec)\n");
+ len += scnprintf(buf + len, buf_len - len,
+ "| tx_frame| rx_frame| rx_clear| my_rx_frame| cnt| med_rx_idle| med_tx_idle_global| cca_obss|\n");
+
+ if (len >= buf_len)
+ buf[buf_len - 1] = 0;
+ else
+ buf[len] = 0;
+
+ stats_req->buf_len = len;
+}
+
+static inline void
+htt_print_pdev_stats_cca_counters_tlv(const void *tag_buf,
+ struct debug_htt_stats_req *stats_req)
+{
+ const struct htt_pdev_stats_cca_counters_tlv *htt_stats_buf = tag_buf;
+ u8 *buf = stats_req->buf;
+ u32 len = stats_req->buf_len;
+ u32 buf_len = ATH11K_HTT_STATS_BUF_SIZE;
+
+ len += scnprintf(buf + len, buf_len - len,
+ "|%10u| %10u| %10u| %11u| %10u| %11u| %18u| %10u|\n",
+ htt_stats_buf->tx_frame_usec,
+ htt_stats_buf->rx_frame_usec,
+ htt_stats_buf->rx_clear_usec,
+ htt_stats_buf->my_rx_frame_usec,
+ htt_stats_buf->usec_cnt,
+ htt_stats_buf->med_rx_idle_usec,
+ htt_stats_buf->med_tx_idle_global_usec,
+ htt_stats_buf->cca_obss_usec);
+
+ if (len >= buf_len)
+ buf[buf_len - 1] = 0;
+ else
+ buf[len] = 0;
+
+ stats_req->buf_len = len;
+}
+
+static inline void htt_print_hw_stats_whal_tx_tlv(const void *tag_buf,
+ struct debug_htt_stats_req *stats_req)
+{
+ const struct htt_hw_stats_whal_tx_tlv *htt_stats_buf = tag_buf;
+ u8 *buf = stats_req->buf;
+ u32 len = stats_req->buf_len;
+ u32 buf_len = ATH11K_HTT_STATS_BUF_SIZE;
+
+ len += scnprintf(buf + len, buf_len - len, "HTT_HW_STATS_WHAL_TX_TLV:\n");
+ len += scnprintf(buf + len, buf_len - len, "mac_id = %lu\n",
+ FIELD_GET(HTT_STATS_MAC_ID, htt_stats_buf->mac_id__word));
+ len += scnprintf(buf + len, buf_len - len, "last_unpause_ppdu_id = %u\n",
+ htt_stats_buf->last_unpause_ppdu_id);
+ len += scnprintf(buf + len, buf_len - len, "hwsch_unpause_wait_tqm_write = %u\n",
+ htt_stats_buf->hwsch_unpause_wait_tqm_write);
+ len += scnprintf(buf + len, buf_len - len, "hwsch_dummy_tlv_skipped = %u\n",
+ htt_stats_buf->hwsch_dummy_tlv_skipped);
+ len += scnprintf(buf + len, buf_len - len,
+ "hwsch_misaligned_offset_received = %u\n",
+ htt_stats_buf->hwsch_misaligned_offset_received);
+ len += scnprintf(buf + len, buf_len - len, "hwsch_reset_count = %u\n",
+ htt_stats_buf->hwsch_reset_count);
+ len += scnprintf(buf + len, buf_len - len, "hwsch_dev_reset_war = %u\n",
+ htt_stats_buf->hwsch_dev_reset_war);
+ len += scnprintf(buf + len, buf_len - len, "hwsch_delayed_pause = %u\n",
+ htt_stats_buf->hwsch_delayed_pause);
+ len += scnprintf(buf + len, buf_len - len, "hwsch_long_delayed_pause = %u\n",
+ htt_stats_buf->hwsch_long_delayed_pause);
+ len += scnprintf(buf + len, buf_len - len, "sch_rx_ppdu_no_response = %u\n",
+ htt_stats_buf->sch_rx_ppdu_no_response);
+ len += scnprintf(buf + len, buf_len - len, "sch_selfgen_response = %u\n",
+ htt_stats_buf->sch_selfgen_response);
+ len += scnprintf(buf + len, buf_len - len, "sch_rx_sifs_resp_trigger= %u\n\n",
+ htt_stats_buf->sch_rx_sifs_resp_trigger);
+
+ if (len >= buf_len)
+ buf[buf_len - 1] = 0;
+ else
+ buf[len] = 0;
+
+ stats_req->buf_len = len;
+}
+
+static inline void
+htt_print_pdev_stats_twt_sessions_tlv(const void *tag_buf,
+ struct debug_htt_stats_req *stats_req)
+{
+ const struct htt_pdev_stats_twt_sessions_tlv *htt_stats_buf = tag_buf;
+ u8 *buf = stats_req->buf;
+ u32 len = stats_req->buf_len;
+ u32 buf_len = ATH11K_HTT_STATS_BUF_SIZE;
+
+ len += scnprintf(buf + len, buf_len - len, "HTT_PDEV_STATS_TWT_SESSIONS_TLV:\n");
+ len += scnprintf(buf + len, buf_len - len, "pdev_id = %u\n",
+ htt_stats_buf->pdev_id);
+ len += scnprintf(buf + len, buf_len - len, "num_sessions = %u\n\n",
+ htt_stats_buf->num_sessions);
+
+ if (len >= buf_len)
+ buf[buf_len - 1] = 0;
+ else
+ buf[len] = 0;
+
+ stats_req->buf_len = len;
+}
+
+static inline void
+htt_print_pdev_stats_twt_session_tlv(const void *tag_buf,
+ struct debug_htt_stats_req *stats_req)
+{
+ const struct htt_pdev_stats_twt_session_tlv *htt_stats_buf = tag_buf;
+ u8 *buf = stats_req->buf;
+ u32 len = stats_req->buf_len;
+ u32 buf_len = ATH11K_HTT_STATS_BUF_SIZE;
+
+ len += scnprintf(buf + len, buf_len - len, "HTT_PDEV_STATS_TWT_SESSION_TLV:\n");
+ len += scnprintf(buf + len, buf_len - len, "vdev_id = %u\n",
+ htt_stats_buf->vdev_id);
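+ /* The peer MAC arrives packed into two words (mac_addr_l32 and
+  * mac_addr_h16); unpack it byte by byte with FIELD_GET() to print
+  * the usual colon-separated form.
+  */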
+ len += scnprintf(buf + len, buf_len - len,
+ "peer_mac = %02lx:%02lx:%02lx:%02lx:%02lx:%02lx\n",
+ FIELD_GET(HTT_MAC_ADDR_L32_0,
+ htt_stats_buf->peer_mac.mac_addr_l32),
+ FIELD_GET(HTT_MAC_ADDR_L32_1,
+ htt_stats_buf->peer_mac.mac_addr_l32),
+ FIELD_GET(HTT_MAC_ADDR_L32_2,
+ htt_stats_buf->peer_mac.mac_addr_l32),
+ FIELD_GET(HTT_MAC_ADDR_L32_3,
+ htt_stats_buf->peer_mac.mac_addr_l32),
+ FIELD_GET(HTT_MAC_ADDR_H16_0,
+ htt_stats_buf->peer_mac.mac_addr_h16),
+ FIELD_GET(HTT_MAC_ADDR_H16_1,
+ htt_stats_buf->peer_mac.mac_addr_h16));
+ len += scnprintf(buf + len, buf_len - len, "flow_id_flags = %u\n",
+ htt_stats_buf->flow_id_flags);
+ len += scnprintf(buf + len, buf_len - len, "dialog_id = %u\n",
+ htt_stats_buf->dialog_id);
+ len += scnprintf(buf + len, buf_len - len, "wake_dura_us = %u\n",
+ htt_stats_buf->wake_dura_us);
+ len += scnprintf(buf + len, buf_len - len, "wake_intvl_us = %u\n",
+ htt_stats_buf->wake_intvl_us);
+ len += scnprintf(buf + len, buf_len - len, "sp_offset_us = %u\n\n",
+ htt_stats_buf->sp_offset_us);
+
+ if (len >= buf_len)
+ buf[buf_len - 1] = 0;
+ else
+ buf[len] = 0;
+
+ stats_req->buf_len = len;
+}
+
+static inline void
+htt_print_pdev_obss_pd_stats_tlv_v(const void *tag_buf,
+ struct debug_htt_stats_req *stats_req)
+{
+ const struct htt_pdev_obss_pd_stats_tlv *htt_stats_buf = tag_buf;
+ u8 *buf = stats_req->buf;
+ u32 len = stats_req->buf_len;
+ u32 buf_len = ATH11K_HTT_STATS_BUF_SIZE;
+
+ len += scnprintf(buf + len, buf_len - len, "OBSS Tx success PPDU = %u\n",
+ htt_stats_buf->num_obss_tx_ppdu_success);
+ len += scnprintf(buf + len, buf_len - len, "OBSS Tx failures PPDU = %u\n",
+ htt_stats_buf->num_obss_tx_ppdu_failure);
+ len += scnprintf(buf + len, buf_len - len, "Non-SRG Opportunities = %u\n",
+ htt_stats_buf->num_non_srg_opportunities);
+ len += scnprintf(buf + len, buf_len - len, "Non-SRG tried PPDU = %u\n",
+ htt_stats_buf->num_non_srg_ppdu_tried);
+ len += scnprintf(buf + len, buf_len - len, "Non-SRG success PPDU = %u\n",
+ htt_stats_buf->num_non_srg_ppdu_success);
+ len += scnprintf(buf + len, buf_len - len, "SRG Opportunities = %u\n",
+ htt_stats_buf->num_srg_opportunities);
+ len += scnprintf(buf + len, buf_len - len, "SRG tried PPDU = %u\n",
+ htt_stats_buf->num_srg_ppdu_tried);
+ len += scnprintf(buf + len, buf_len - len, "SRG success PPDU = %u\n\n",
+ htt_stats_buf->num_srg_ppdu_success);
+
+ if (len >= buf_len)
+ buf[buf_len - 1] = 0;
+ else
+ buf[len] = 0;
+
+ stats_req->buf_len = len;
+}
+
+static inline void htt_print_backpressure_stats_tlv_v(const u32 *tag_buf,
+ u8 *data)
+{
+ struct debug_htt_stats_req *stats_req =
+ (struct debug_htt_stats_req *)data;
+ const struct htt_ring_backpressure_stats_tlv *htt_stats_buf =
+ (const struct htt_ring_backpressure_stats_tlv *)tag_buf;
+ int i;
+ u8 *buf = stats_req->buf;
+ u32 len = stats_req->buf_len;
+ u32 buf_len = ATH11K_HTT_STATS_BUF_SIZE;
+
+ len += scnprintf(buf + len, buf_len - len, "pdev_id = %u\n",
+ htt_stats_buf->pdev_id);
+ len += scnprintf(buf + len, buf_len - len, "current_head_idx = %u\n",
+ htt_stats_buf->current_head_idx);
+ len += scnprintf(buf + len, buf_len - len, "current_tail_idx = %u\n",
+ htt_stats_buf->current_tail_idx);
+ len += scnprintf(buf + len, buf_len - len, "num_htt_msgs_sent = %u\n",
+ htt_stats_buf->num_htt_msgs_sent);
+ len += scnprintf(buf + len, buf_len - len,
+ "backpressure_time_ms = %u\n",
+ htt_stats_buf->backpressure_time_ms);
+
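+ /* The TLV carries a fixed five-bucket histogram of backpressure
+  * durations, reported here as backpressure_hist_1..5.
+  */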
+ for (i = 0; i < 5; i++)
+ len += scnprintf(buf + len, buf_len - len,
+ "backpressure_hist_%u = %u\n",
+ i + 1, htt_stats_buf->backpressure_hist[i]);
+
+ len += scnprintf(buf + len, buf_len - len,
+ "============================\n");
+
+ if (len >= buf_len) {
+ buf[buf_len - 1] = 0;
+ stats_req->buf_len = buf_len - 1;
+ } else {
+ buf[len] = 0;
+ stats_req->buf_len = len;
+ }
+}
+
+static inline
+void htt_print_pdev_tx_rate_txbf_stats_tlv(const void *tag_buf,
+ struct debug_htt_stats_req *stats_req)
+{
+ const struct htt_pdev_txrate_txbf_stats_tlv *htt_stats_buf = tag_buf;
+ u8 *buf = stats_req->buf;
+ u32 len = stats_req->buf_len;
+ u32 buf_len = ATH11K_HTT_STATS_BUF_SIZE;
+ int i;
+
+ len += scnprintf(buf + len, buf_len - len,
+ "HTT_STATS_PDEV_TX_RATE_TXBF_STATS:\n");
+
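+ /* Each loop below emits "index:count," pairs; the trailing len--
+  * drops the final comma by letting the next scnprintf() overwrite it.
+  */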
+ len += scnprintf(buf + len, buf_len - len, "tx_ol_mcs = ");
+ for (i = 0; i < HTT_TX_TXBF_RATE_STATS_NUM_MCS_COUNTERS; i++)
+ len += scnprintf(buf + len, buf_len - len,
+ "%d:%u,", i, htt_stats_buf->tx_su_ol_mcs[i]);
+ len--;
+
+ len += scnprintf(buf + len, buf_len - len, "\ntx_ibf_mcs = ");
+ for (i = 0; i < HTT_TX_TXBF_RATE_STATS_NUM_MCS_COUNTERS; i++)
+ len += scnprintf(buf + len, buf_len - len,
+ "%d:%u,", i, htt_stats_buf->tx_su_ibf_mcs[i]);
+ len--;
+
+ len += scnprintf(buf + len, buf_len - len, "\ntx_txbf_mcs =");
+ for (i = 0; i < HTT_TX_TXBF_RATE_STATS_NUM_MCS_COUNTERS; i++)
+ len += scnprintf(buf + len, buf_len - len,
+ "%d:%u,", i, htt_stats_buf->tx_su_txbf_mcs[i]);
+ len--;
+
+ len += scnprintf(buf + len, buf_len - len, "\ntx_ol_nss = ");
+ for (i = 0; i < HTT_TX_PDEV_STATS_NUM_SPATIAL_STREAMS; i++)
+ len += scnprintf(buf + len, buf_len - len,
+ "%d:%u,", i, htt_stats_buf->tx_su_ol_nss[i]);
+ len--;
+
+ len += scnprintf(buf + len, buf_len - len, "\ntx_ibf_nss = ");
+ for (i = 0; i < HTT_TX_PDEV_STATS_NUM_SPATIAL_STREAMS; i++)
+ len += scnprintf(buf + len, buf_len - len,
+ "%d:%u,", i, htt_stats_buf->tx_su_ibf_nss[i]);
+ len--;
+
+ len += scnprintf(buf + len, buf_len - len, "\ntx_txbf_nss = ");
+ for (i = 0; i < HTT_TX_PDEV_STATS_NUM_SPATIAL_STREAMS; i++)
+ len += scnprintf(buf + len, buf_len - len,
+ "%d:%u,", i, htt_stats_buf->tx_su_txbf_nss[i]);
+ len--;
+
+ len += scnprintf(buf + len, buf_len - len, "\ntx_ol_bw = ");
+ for (i = 0; i < HTT_TX_TXBF_RATE_STATS_NUM_BW_COUNTERS; i++)
+ len += scnprintf(buf + len, buf_len - len,
+ "%d:%u,", i, htt_stats_buf->tx_su_ol_bw[i]);
+ len--;
+
+ len += scnprintf(buf + len, buf_len - len, "\ntx_ibf_bw = ");
+ for (i = 0; i < HTT_TX_TXBF_RATE_STATS_NUM_BW_COUNTERS; i++)
+ len += scnprintf(buf + len, buf_len - len,
+ "%d:%u,", i, htt_stats_buf->tx_su_ibf_bw[i]);
+ len--;
+
+ len += scnprintf(buf + len, buf_len - len, "\ntx_txbf_bw = ");
+ for (i = 0; i < HTT_TX_TXBF_RATE_STATS_NUM_BW_COUNTERS; i++)
+ len += scnprintf(buf + len, buf_len - len,
+ "%d:%u,", i, htt_stats_buf->tx_su_txbf_bw[i]);
+ len--;
+
+ len += scnprintf(buf + len, buf_len - len, "\n");
+
+ stats_req->buf_len = len;
+}
+
+static inline
+void htt_print_txbf_ofdma_ndpa_stats_tlv(const void *tag_buf,
+ struct debug_htt_stats_req *stats_req)
+{
+ const struct htt_txbf_ofdma_ndpa_stats_tlv *htt_stats_buf = tag_buf;
+ u8 *buf = stats_req->buf;
+ u32 len = stats_req->buf_len;
+ u32 buf_len = ATH11K_HTT_STATS_BUF_SIZE;
+ int i;
+
+ len += scnprintf(buf + len, buf_len - len,
+ "HTT_TXBF_OFDMA_NDPA_STATS_TLV:\n");
+
+ for (i = 0; i < HTT_TX_PDEV_STATS_NUM_OFDMA_USER_STATS; i++) {
+ len += scnprintf(buf + len, buf_len - len,
+ "ax_ofdma_ndpa_queued_user%d = %u\n",
+ i, htt_stats_buf->ax_ofdma_ndpa_queued[i]);
+ len += scnprintf(buf + len, buf_len - len,
+ "ax_ofdma_ndpa_tried_user%d = %u\n",
+ i, htt_stats_buf->ax_ofdma_ndpa_tried[i]);
+ len += scnprintf(buf + len, buf_len - len,
+ "ax_ofdma_ndpa_flushed_user%d = %u\n",
+ i, htt_stats_buf->ax_ofdma_ndpa_flushed[i]);
+ len += scnprintf(buf + len, buf_len - len,
+ "ax_ofdma_ndpa_err_user%d = %u\n",
+ i, htt_stats_buf->ax_ofdma_ndpa_err[i]);
+ len += scnprintf(buf + len, buf_len - len, "\n");
+ }
+
+ stats_req->buf_len = len;
+}
+
+static inline
+void htt_print_txbf_ofdma_ndp_stats_tlv(const void *tag_buf,
+ struct debug_htt_stats_req *stats_req)
+{
+ const struct htt_txbf_ofdma_ndp_stats_tlv *htt_stats_buf = tag_buf;
+ u8 *buf = stats_req->buf;
+ u32 len = stats_req->buf_len;
+ u32 buf_len = ATH11K_HTT_STATS_BUF_SIZE;
+ int i;
+
+ len += scnprintf(buf + len, buf_len - len,
+ "HTT_TXBF_OFDMA_NDP_STATS_TLV:\n");
+
+ for (i = 0; i < HTT_TX_PDEV_STATS_NUM_OFDMA_USER_STATS; i++) {
+ len += scnprintf(buf + len, buf_len - len,
+ "ax_ofdma_ndp_queued_user%d = %u\n",
+ i, htt_stats_buf->ax_ofdma_ndp_queued[i]);
+ len += scnprintf(buf + len, buf_len - len,
+ "ax_ofdma_ndp_tried_user%d = %u\n",
+ i, htt_stats_buf->ax_ofdma_ndp_tried[i]);
+ len += scnprintf(buf + len, buf_len - len,
+ "ax_ofdma_ndp_flushed_user%d = %u\n",
+ i, htt_stats_buf->ax_ofdma_ndp_flushed[i]);
+ len += scnprintf(buf + len, buf_len - len,
+ "ax_ofdma_ndp_err_user%d = %u\n",
+ i, htt_stats_buf->ax_ofdma_ndp_err[i]);
+ len += scnprintf(buf + len, buf_len - len, "\n");
+ }
+
+ stats_req->buf_len = len;
+}
+
+static inline
+void htt_print_txbf_ofdma_brp_stats_tlv(const void *tag_buf,
+ struct debug_htt_stats_req *stats_req)
+{
+ const struct htt_txbf_ofdma_brp_stats_tlv *htt_stats_buf = tag_buf;
+ u8 *buf = stats_req->buf;
+ u32 len = stats_req->buf_len;
+ u32 buf_len = ATH11K_HTT_STATS_BUF_SIZE;
+ int i;
+
+ len += scnprintf(buf + len, buf_len - len,
+ "HTT_TXBF_OFDMA_BRP_STATS_TLV:\n");
+
+ for (i = 0; i < HTT_TX_PDEV_STATS_NUM_OFDMA_USER_STATS; i++) {
+ len += scnprintf(buf + len, buf_len - len,
+ "ax_ofdma_brpoll_queued_user%d = %u\n",
+ i, htt_stats_buf->ax_ofdma_brpoll_queued[i]);
+ len += scnprintf(buf + len, buf_len - len,
+ "ax_ofdma_brpoll_tried_user%d = %u\n",
+ i, htt_stats_buf->ax_ofdma_brpoll_tried[i]);
+ len += scnprintf(buf + len, buf_len - len,
+ "ax_ofdma_brpoll_flushed_user%d = %u\n",
+ i, htt_stats_buf->ax_ofdma_brpoll_flushed[i]);
+ len += scnprintf(buf + len, buf_len - len,
+ "ax_ofdma_brp_err_user%d = %u\n",
+ i, htt_stats_buf->ax_ofdma_brp_err[i]);
+ len += scnprintf(buf + len, buf_len - len,
+ "ax_ofdma_brp_err_num_cbf_rcvd_user%d = %u\n",
+ i, htt_stats_buf->ax_ofdma_brp_err_num_cbf_rcvd[i]);
+ len += scnprintf(buf + len, buf_len - len, "\n");
+ }
+
+ stats_req->buf_len = len;
+}
+
+static inline
+void htt_print_txbf_ofdma_steer_stats_tlv(const void *tag_buf,
+ struct debug_htt_stats_req *stats_req)
+{
+ const struct htt_txbf_ofdma_steer_stats_tlv *htt_stats_buf = tag_buf;
+ u8 *buf = stats_req->buf;
+ u32 len = stats_req->buf_len;
+ u32 buf_len = ATH11K_HTT_STATS_BUF_SIZE;
+ int i;
+
+ len += scnprintf(buf + len, buf_len - len,
+ "HTT_TXBF_OFDMA_STEER_STATS_TLV:\n");
+
+ for (i = 0; i < HTT_TX_PDEV_STATS_NUM_OFDMA_USER_STATS; i++) {
+ len += scnprintf(buf + len, buf_len - len,
+ "ax_ofdma_num_ppdu_steer_user%d = %u\n",
+ i, htt_stats_buf->ax_ofdma_num_ppdu_steer[i]);
+ len += scnprintf(buf + len, buf_len - len,
+ "ax_ofdma_num_ppdu_ol_user%d = %u\n",
+ i, htt_stats_buf->ax_ofdma_num_ppdu_ol[i]);
+ len += scnprintf(buf + len, buf_len - len,
+ "ax_ofdma_num_usrs_prefetch_user%d = %u\n",
+ i, htt_stats_buf->ax_ofdma_num_usrs_prefetch[i]);
+ len += scnprintf(buf + len, buf_len - len,
+ "ax_ofdma_num_usrs_sound_user%d = %u\n",
+ i, htt_stats_buf->ax_ofdma_num_usrs_sound[i]);
+ len += scnprintf(buf + len, buf_len - len,
+ "ax_ofdma_num_usrs_force_sound_user%d = %u\n",
+ i, htt_stats_buf->ax_ofdma_num_usrs_force_sound[i]);
+ len += scnprintf(buf + len, buf_len - len, "\n");
+ }
+
+ stats_req->buf_len = len;
+}
+
+static inline
+void htt_print_phy_counters_tlv(const void *tag_buf,
+ struct debug_htt_stats_req *stats_req)
+{
+ const struct htt_phy_counters_tlv *htt_stats_buf = tag_buf;
+ u8 *buf = stats_req->buf;
+ u32 len = stats_req->buf_len;
+ u32 buf_len = ATH11K_HTT_STATS_BUF_SIZE;
+ int i;
+
+ len += scnprintf(buf + len, buf_len - len, "HTT_PHY_COUNTERS_TLV:\n");
+ len += scnprintf(buf + len, buf_len - len, "rx_ofdma_timing_err_cnt = %u\n",
+ htt_stats_buf->rx_ofdma_timing_err_cnt);
+ len += scnprintf(buf + len, buf_len - len, "rx_cck_fail_cnt = %u\n",
+ htt_stats_buf->rx_cck_fail_cnt);
+ len += scnprintf(buf + len, buf_len - len, "mactx_abort_cnt = %u\n",
+ htt_stats_buf->mactx_abort_cnt);
+ len += scnprintf(buf + len, buf_len - len, "macrx_abort_cnt = %u\n",
+ htt_stats_buf->macrx_abort_cnt);
+ len += scnprintf(buf + len, buf_len - len, "phytx_abort_cnt = %u\n",
+ htt_stats_buf->phytx_abort_cnt);
+ len += scnprintf(buf + len, buf_len - len, "phyrx_abort_cnt = %u\n",
+ htt_stats_buf->phyrx_abort_cnt);
+ len += scnprintf(buf + len, buf_len - len, "phyrx_defer_abort_cnt = %u\n",
+ htt_stats_buf->phyrx_defer_abort_cnt);
+ len += scnprintf(buf + len, buf_len - len, "rx_gain_adj_lstf_event_cnt = %u\n",
+ htt_stats_buf->rx_gain_adj_lstf_event_cnt);
+ len += scnprintf(buf + len, buf_len - len, "rx_gain_adj_non_legacy_cnt = %u\n",
+ htt_stats_buf->rx_gain_adj_non_legacy_cnt);
+
+ for (i = 0; i < HTT_MAX_RX_PKT_CNT; i++)
+ len += scnprintf(buf + len, buf_len - len, "rx_pkt_cnt[%d] = %u\n",
+ i, htt_stats_buf->rx_pkt_cnt[i]);
+
+ for (i = 0; i < HTT_MAX_RX_PKT_CRC_PASS_CNT; i++)
+ len += scnprintf(buf + len, buf_len - len,
+ "rx_pkt_crc_pass_cnt[%d] = %u\n",
+ i, htt_stats_buf->rx_pkt_crc_pass_cnt[i]);
+
+ for (i = 0; i < HTT_MAX_PER_BLK_ERR_CNT; i++)
+ len += scnprintf(buf + len, buf_len - len,
+ "per_blk_err_cnt[%d] = %u\n",
+ i, htt_stats_buf->per_blk_err_cnt[i]);
+
+ for (i = 0; i < HTT_MAX_RX_OTA_ERR_CNT; i++)
+ len += scnprintf(buf + len, buf_len - len,
+ "rx_ota_err_cnt[%d] = %u\n",
+ i, htt_stats_buf->rx_ota_err_cnt[i]);
+
+ stats_req->buf_len = len;
+}
+
+static inline
+void htt_print_phy_stats_tlv(const void *tag_buf,
+ struct debug_htt_stats_req *stats_req)
+{
+ const struct htt_phy_stats_tlv *htt_stats_buf = tag_buf;
+ u8 *buf = stats_req->buf;
+ u32 len = stats_req->buf_len;
+ u32 buf_len = ATH11K_HTT_STATS_BUF_SIZE;
+ int i;
+
+ len += scnprintf(buf + len, buf_len - len, "HTT_PHY_STATS_TLV:\n");
+
+ for (i = 0; i < HTT_STATS_MAX_CHAINS; i++)
+ len += scnprintf(buf + len, buf_len - len, "nf_chain[%d] = %d\n",
+ i, htt_stats_buf->nf_chain[i]);
+
+ len += scnprintf(buf + len, buf_len - len, "false_radar_cnt = %u\n",
+ htt_stats_buf->false_radar_cnt);
+ len += scnprintf(buf + len, buf_len - len, "radar_cs_cnt = %u\n",
+ htt_stats_buf->radar_cs_cnt);
+ len += scnprintf(buf + len, buf_len - len, "ani_level = %d\n",
+ htt_stats_buf->ani_level);
+ len += scnprintf(buf + len, buf_len - len, "fw_run_time = %u\n",
+ htt_stats_buf->fw_run_time);
+
+ stats_req->buf_len = len;
+}
+
+static inline void
+htt_print_phy_reset_counters_tlv(const void *tag_buf,
+ u16 tag_len,
+ struct debug_htt_stats_req *stats_req)
+{
+ const struct htt_phy_reset_counters_tlv *htt_stats_buf = tag_buf;
+ u8 *buf = stats_req->buf;
+ u32 len = stats_req->buf_len;
+ u32 buf_len = ATH11K_HTT_STATS_BUF_SIZE;
+
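+ /* Reject truncated TLVs before dereferencing fixed-size fields. */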
+ if (tag_len < sizeof(*htt_stats_buf))
+ return;
+
+ len += scnprintf(buf + len, buf_len - len, "HTT_PHY_RESET_COUNTERS_TLV:\n");
+
+ len += scnprintf(buf + len, buf_len - len, "pdev_id = %u\n",
+ htt_stats_buf->pdev_id);
+ len += scnprintf(buf + len, buf_len - len, "cf_active_low_fail_cnt = %u\n",
+ htt_stats_buf->cf_active_low_fail_cnt);
+ len += scnprintf(buf + len, buf_len - len, "cf_active_low_pass_cnt = %u\n",
+ htt_stats_buf->cf_active_low_pass_cnt);
+ len += scnprintf(buf + len, buf_len - len, "phy_off_through_vreg_cnt = %u\n",
+ htt_stats_buf->phy_off_through_vreg_cnt);
+ len += scnprintf(buf + len, buf_len - len, "force_calibration_cnt = %u\n",
+ htt_stats_buf->force_calibration_cnt);
+ len += scnprintf(buf + len, buf_len - len, "rf_mode_switch_phy_off_cnt = %u\n",
+ htt_stats_buf->rf_mode_switch_phy_off_cnt);
+
+ stats_req->buf_len = len;
+}
+
+static inline void
+htt_print_phy_reset_stats_tlv(const void *tag_buf,
+ u16 tag_len,
+ struct debug_htt_stats_req *stats_req)
+{
+ const struct htt_phy_reset_stats_tlv *htt_stats_buf = tag_buf;
+ u8 *buf = stats_req->buf;
+ u32 len = stats_req->buf_len;
+ u32 buf_len = ATH11K_HTT_STATS_BUF_SIZE;
+
+ if (tag_len < sizeof(*htt_stats_buf))
+ return;
+
+ len += scnprintf(buf + len, buf_len - len, "HTT_PHY_RESET_STATS_TLV:\n");
+
+ len += scnprintf(buf + len, buf_len - len, "pdev_id = %u\n",
+ htt_stats_buf->pdev_id);
+ len += scnprintf(buf + len, buf_len - len, "chan_mhz = %u\n",
+ htt_stats_buf->chan_mhz);
+ len += scnprintf(buf + len, buf_len - len, "chan_band_center_freq1 = %u\n",
+ htt_stats_buf->chan_band_center_freq1);
+ len += scnprintf(buf + len, buf_len - len, "chan_band_center_freq2 = %u\n",
+ htt_stats_buf->chan_band_center_freq2);
+ len += scnprintf(buf + len, buf_len - len, "chan_phy_mode = %u\n",
+ htt_stats_buf->chan_phy_mode);
+ len += scnprintf(buf + len, buf_len - len, "chan_flags = 0x%0x\n",
+ htt_stats_buf->chan_flags);
+ len += scnprintf(buf + len, buf_len - len, "chan_num = %u\n",
+ htt_stats_buf->chan_num);
+ len += scnprintf(buf + len, buf_len - len, "reset_cause = 0x%0x\n",
+ htt_stats_buf->reset_cause);
+ len += scnprintf(buf + len, buf_len - len, "prev_reset_cause = 0x%0x\n",
+ htt_stats_buf->prev_reset_cause);
+ len += scnprintf(buf + len, buf_len - len, "phy_warm_reset_src = 0x%0x\n",
+ htt_stats_buf->phy_warm_reset_src);
+ len += scnprintf(buf + len, buf_len - len, "rx_gain_tbl_mode = %d\n",
+ htt_stats_buf->rx_gain_tbl_mode);
+ len += scnprintf(buf + len, buf_len - len, "xbar_val = 0x%0x\n",
+ htt_stats_buf->xbar_val);
+ len += scnprintf(buf + len, buf_len - len, "force_calibration = %u\n",
+ htt_stats_buf->force_calibration);
+ len += scnprintf(buf + len, buf_len - len, "phyrf_mode = %u\n",
+ htt_stats_buf->phyrf_mode);
+ len += scnprintf(buf + len, buf_len - len, "phy_homechan = %u\n",
+ htt_stats_buf->phy_homechan);
+ len += scnprintf(buf + len, buf_len - len, "phy_tx_ch_mask = 0x%0x\n",
+ htt_stats_buf->phy_tx_ch_mask);
+ len += scnprintf(buf + len, buf_len - len, "phy_rx_ch_mask = 0x%0x\n",
+ htt_stats_buf->phy_rx_ch_mask);
+ len += scnprintf(buf + len, buf_len - len, "phybb_ini_mask = 0x%0x\n",
+ htt_stats_buf->phybb_ini_mask);
+ len += scnprintf(buf + len, buf_len - len, "phyrf_ini_mask = 0x%0x\n",
+ htt_stats_buf->phyrf_ini_mask);
+ len += scnprintf(buf + len, buf_len - len, "phy_dfs_en_mask = 0x%0x\n",
+ htt_stats_buf->phy_dfs_en_mask);
+ len += scnprintf(buf + len, buf_len - len, "phy_sscan_en_mask = 0x%0x\n",
+ htt_stats_buf->phy_sscan_en_mask);
+ len += scnprintf(buf + len, buf_len - len, "phy_synth_sel_mask = 0x%0x\n",
+ htt_stats_buf->phy_synth_sel_mask);
+ len += scnprintf(buf + len, buf_len - len, "phy_adfs_freq = %u\n",
+ htt_stats_buf->phy_adfs_freq);
+ len += scnprintf(buf + len, buf_len - len, "cck_fir_settings = 0x%0x\n",
+ htt_stats_buf->cck_fir_settings);
+ len += scnprintf(buf + len, buf_len - len, "phy_dyn_pri_chan = %u\n",
+ htt_stats_buf->phy_dyn_pri_chan);
+ len += scnprintf(buf + len, buf_len - len, "cca_thresh = 0x%0x\n",
+ htt_stats_buf->cca_thresh);
+ len += scnprintf(buf + len, buf_len - len, "dyn_cca_status = %u\n",
+ htt_stats_buf->dyn_cca_status);
+ len += scnprintf(buf + len, buf_len - len, "rxdesense_thresh_hw = 0x%x\n",
+ htt_stats_buf->rxdesense_thresh_hw);
+ len += scnprintf(buf + len, buf_len - len, "rxdesense_thresh_sw = 0x%x\n",
+ htt_stats_buf->rxdesense_thresh_sw);
+
+ stats_req->buf_len = len;
+}
+
+static inline
+void htt_print_peer_ctrl_path_txrx_stats_tlv(const void *tag_buf,
+ struct debug_htt_stats_req *stats_req)
+{
+ const struct htt_peer_ctrl_path_txrx_stats_tlv *htt_stat_buf = tag_buf;
+ u8 *buf = stats_req->buf;
+ u32 len = stats_req->buf_len;
+ u32 buf_len = ATH11K_HTT_STATS_BUF_SIZE;
+ int i;
+ const char *mgmt_frm_type[ATH11K_STATS_MGMT_FRM_TYPE_MAX - 1] = {
+ "assoc_req", "assoc_resp",
+ "reassoc_req", "reassoc_resp",
+ "probe_req", "probe_resp",
+ "timing_advertisement", "reserved",
+ "beacon", "atim", "disassoc",
+ "auth", "deauth", "action", "action_no_ack"};
+
+ len += scnprintf(buf + len, buf_len - len,
+ "HTT_STATS_PEER_CTRL_PATH_TXRX_STATS_TAG:\n");
+ len += scnprintf(buf + len, buf_len - len,
+ "peer_mac_addr = %02x:%02x:%02x:%02x:%02x:%02x\n",
+ htt_stat_buf->peer_mac_addr[0], htt_stat_buf->peer_mac_addr[1],
+ htt_stat_buf->peer_mac_addr[2], htt_stat_buf->peer_mac_addr[3],
+ htt_stat_buf->peer_mac_addr[4], htt_stat_buf->peer_mac_addr[5]);
+
+ len += scnprintf(buf + len, buf_len - len, "peer_tx_mgmt_subtype:\n");
+ for (i = 0; i < ATH11K_STATS_MGMT_FRM_TYPE_MAX - 1; i++)
+ len += scnprintf(buf + len, buf_len - len, "%s:%u\n",
+ mgmt_frm_type[i],
+ htt_stat_buf->peer_tx_mgmt_subtype[i]);
+
+ len += scnprintf(buf + len, buf_len - len, "peer_rx_mgmt_subtype:\n");
+ for (i = 0; i < ATH11K_STATS_MGMT_FRM_TYPE_MAX - 1; i++)
+ len += scnprintf(buf + len, buf_len - len, "%s:%u\n",
+ mgmt_frm_type[i],
+ htt_stat_buf->peer_rx_mgmt_subtype[i]);
+
+ len += scnprintf(buf + len, buf_len - len, "\n");
+
+ stats_req->buf_len = len;
+}
+
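+ /* TLV dispatch callback: invoked once per TLV in the firmware
+  * response; tag selects the decoder and len gives the payload size
+  * in bytes for the variable-length (_tlv_v) handlers.
+  */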
+static int ath11k_dbg_htt_ext_stats_parse(struct ath11k_base *ab,
+ u16 tag, u16 len, const void *tag_buf,
+ void *user_data)
+{
+ struct debug_htt_stats_req *stats_req = user_data;
+
+ switch (tag) {
+ case HTT_STATS_TX_PDEV_CMN_TAG:
+ htt_print_tx_pdev_stats_cmn_tlv(tag_buf, stats_req);
+ break;
+ case HTT_STATS_TX_PDEV_UNDERRUN_TAG:
+ htt_print_tx_pdev_stats_urrn_tlv_v(tag_buf, len, stats_req);
+ break;
+ case HTT_STATS_TX_PDEV_SIFS_TAG:
+ htt_print_tx_pdev_stats_sifs_tlv_v(tag_buf, len, stats_req);
+ break;
+ case HTT_STATS_TX_PDEV_FLUSH_TAG:
+ htt_print_tx_pdev_stats_flush_tlv_v(tag_buf, len, stats_req);
+ break;
+ case HTT_STATS_TX_PDEV_PHY_ERR_TAG:
+ htt_print_tx_pdev_stats_phy_err_tlv_v(tag_buf, len, stats_req);
+ break;
+ case HTT_STATS_TX_PDEV_SIFS_HIST_TAG:
+ htt_print_tx_pdev_stats_sifs_hist_tlv_v(tag_buf, len, stats_req);
+ break;
+
+ case HTT_STATS_TX_PDEV_TX_PPDU_STATS_TAG:
+ htt_print_tx_pdev_stats_tx_ppdu_stats_tlv_v(tag_buf, stats_req);
+ break;
+
+ case HTT_STATS_TX_PDEV_TRIED_MPDU_CNT_HIST_TAG:
+ htt_print_tx_pdev_stats_tried_mpdu_cnt_hist_tlv_v(tag_buf, len,
+ stats_req);
+ break;
+
+ case HTT_STATS_STRING_TAG:
+ htt_print_stats_string_tlv(tag_buf, len, stats_req);
+ break;
+
+ case HTT_STATS_TX_HWQ_CMN_TAG:
+ htt_print_tx_hwq_stats_cmn_tlv(tag_buf, stats_req);
+ break;
+
+ case HTT_STATS_TX_HWQ_DIFS_LATENCY_TAG:
+ htt_print_tx_hwq_difs_latency_stats_tlv_v(tag_buf, len, stats_req);
+ break;
+
+ case HTT_STATS_TX_HWQ_CMD_RESULT_TAG:
+ htt_print_tx_hwq_cmd_result_stats_tlv_v(tag_buf, len, stats_req);
+ break;
+
+ case HTT_STATS_TX_HWQ_CMD_STALL_TAG:
+ htt_print_tx_hwq_cmd_stall_stats_tlv_v(tag_buf, len, stats_req);
+ break;
+
+ case HTT_STATS_TX_HWQ_FES_STATUS_TAG:
+ htt_print_tx_hwq_fes_result_stats_tlv_v(tag_buf, len, stats_req);
+ break;
+
+ case HTT_STATS_TX_HWQ_TRIED_MPDU_CNT_HIST_TAG:
+ htt_print_tx_hwq_tried_mpdu_cnt_hist_tlv_v(tag_buf, len, stats_req);
+ break;
+
+ case HTT_STATS_TX_HWQ_TXOP_USED_CNT_HIST_TAG:
+ htt_print_tx_hwq_txop_used_cnt_hist_tlv_v(tag_buf, len, stats_req);
+ break;
+ case HTT_STATS_TX_TQM_GEN_MPDU_TAG:
+ htt_print_tx_tqm_gen_mpdu_stats_tlv_v(tag_buf, len, stats_req);
+ break;
+
+ case HTT_STATS_TX_TQM_LIST_MPDU_TAG:
+ htt_print_tx_tqm_list_mpdu_stats_tlv_v(tag_buf, len, stats_req);
+ break;
+
+ case HTT_STATS_TX_TQM_LIST_MPDU_CNT_TAG:
+ htt_print_tx_tqm_list_mpdu_cnt_tlv_v(tag_buf, len, stats_req);
+ break;
+
+ case HTT_STATS_TX_TQM_CMN_TAG:
+ htt_print_tx_tqm_cmn_stats_tlv(tag_buf, stats_req);
+ break;
+
+ case HTT_STATS_TX_TQM_PDEV_TAG:
+ htt_print_tx_tqm_pdev_stats_tlv_v(tag_buf, stats_req);
+ break;
+
+ case HTT_STATS_TX_TQM_CMDQ_STATUS_TAG:
+ htt_print_tx_tqm_cmdq_status_tlv(tag_buf, stats_req);
+ break;
+
+ case HTT_STATS_TX_DE_EAPOL_PACKETS_TAG:
+ htt_print_tx_de_eapol_packets_stats_tlv(tag_buf, stats_req);
+ break;
+
+ case HTT_STATS_TX_DE_CLASSIFY_FAILED_TAG:
+ htt_print_tx_de_classify_failed_stats_tlv(tag_buf, stats_req);
+ break;
+
+ case HTT_STATS_TX_DE_CLASSIFY_STATS_TAG:
+ htt_print_tx_de_classify_stats_tlv(tag_buf, stats_req);
+ break;
+
+ case HTT_STATS_TX_DE_CLASSIFY_STATUS_TAG:
+ htt_print_tx_de_classify_status_stats_tlv(tag_buf, stats_req);
+ break;
+
+ case HTT_STATS_TX_DE_ENQUEUE_PACKETS_TAG:
+ htt_print_tx_de_enqueue_packets_stats_tlv(tag_buf, stats_req);
+ break;
+
+ case HTT_STATS_TX_DE_ENQUEUE_DISCARD_TAG:
+ htt_print_tx_de_enqueue_discard_stats_tlv(tag_buf, stats_req);
+ break;
+
+ case HTT_STATS_TX_DE_FW2WBM_RING_FULL_HIST_TAG:
+ htt_print_tx_de_fw2wbm_ring_full_hist_tlv(tag_buf, len, stats_req);
+ break;
+
+ case HTT_STATS_TX_DE_CMN_TAG:
+ htt_print_tx_de_cmn_stats_tlv(tag_buf, stats_req);
+ break;
+
+ case HTT_STATS_RING_IF_TAG:
+ htt_print_ring_if_stats_tlv(tag_buf, stats_req);
+ break;
+
+ case HTT_STATS_TX_PDEV_MU_MIMO_STATS_TAG:
+ htt_print_tx_pdev_mu_mimo_sch_stats_tlv(tag_buf, stats_req);
+ break;
+
+ case HTT_STATS_SFM_CMN_TAG:
+ htt_print_sfm_cmn_tlv(tag_buf, stats_req);
+ break;
+
+ case HTT_STATS_SRING_STATS_TAG:
+ htt_print_sring_stats_tlv(tag_buf, stats_req);
+ break;
+
+ case HTT_STATS_RX_PDEV_FW_STATS_TAG:
+ htt_print_rx_pdev_fw_stats_tlv(tag_buf, stats_req);
+ break;
+
+ case HTT_STATS_RX_PDEV_FW_RING_MPDU_ERR_TAG:
+ htt_print_rx_pdev_fw_ring_mpdu_err_tlv_v(tag_buf, stats_req);
+ break;
+
+ case HTT_STATS_RX_PDEV_FW_MPDU_DROP_TAG:
+ htt_print_rx_pdev_fw_mpdu_drop_tlv_v(tag_buf, len, stats_req);
+ break;
+
+ case HTT_STATS_RX_SOC_FW_STATS_TAG:
+ htt_print_rx_soc_fw_stats_tlv(tag_buf, stats_req);
+ break;
+
+ case HTT_STATS_RX_SOC_FW_REFILL_RING_EMPTY_TAG:
+ htt_print_rx_soc_fw_refill_ring_empty_tlv_v(tag_buf, len, stats_req);
+ break;
+
+ case HTT_STATS_RX_SOC_FW_REFILL_RING_NUM_REFILL_TAG:
+ htt_print_rx_soc_fw_refill_ring_num_refill_tlv_v(
+ tag_buf, len, stats_req);
+ break;
+
+	case HTT_STATS_RX_REFILL_RXDMA_ERR_TAG:
+ htt_print_rx_soc_fw_refill_ring_num_rxdma_err_tlv_v(
+ tag_buf, len, stats_req);
+ break;
+
+ case HTT_STATS_RX_REFILL_REO_ERR_TAG:
+ htt_print_rx_soc_fw_refill_ring_num_reo_err_tlv_v(
+ tag_buf, len, stats_req);
+ break;
+
+ case HTT_STATS_RX_REO_RESOURCE_STATS_TAG:
+		htt_print_rx_reo_debug_stats_tlv_v(tag_buf, stats_req);
+		break;
+
+	case HTT_STATS_RX_PDEV_FW_STATS_PHY_ERR_TAG:
+ htt_print_rx_pdev_fw_stats_phy_err_tlv(tag_buf, stats_req);
+ break;
+
+ case HTT_STATS_TX_PDEV_RATE_STATS_TAG:
+ htt_print_tx_pdev_rate_stats_tlv(tag_buf, stats_req);
+ break;
+
+ case HTT_STATS_RX_PDEV_RATE_STATS_TAG:
+ htt_print_rx_pdev_rate_stats_tlv(tag_buf, stats_req);
+ break;
+
+ case HTT_STATS_TX_PDEV_SCHEDULER_TXQ_STATS_TAG:
+ htt_print_tx_pdev_stats_sched_per_txq_tlv(tag_buf, stats_req);
+ break;
+
+	case HTT_STATS_TX_SCHED_CMN_TAG:
+ htt_print_stats_tx_sched_cmn_tlv(tag_buf, stats_req);
+ break;
+
+ case HTT_STATS_TX_PDEV_MPDU_STATS_TAG:
+ htt_print_tx_pdev_mu_mimo_mpdu_stats_tlv(tag_buf, stats_req);
+ break;
+
+ case HTT_STATS_SCHED_TXQ_CMD_POSTED_TAG:
+ htt_print_sched_txq_cmd_posted_tlv_v(tag_buf, len, stats_req);
+ break;
+
+ case HTT_STATS_RING_IF_CMN_TAG:
+ htt_print_ring_if_cmn_tlv(tag_buf, stats_req);
+ break;
+
+ case HTT_STATS_SFM_CLIENT_USER_TAG:
+ htt_print_sfm_client_user_tlv_v(tag_buf, len, stats_req);
+ break;
+
+ case HTT_STATS_SFM_CLIENT_TAG:
+ htt_print_sfm_client_tlv(tag_buf, stats_req);
+ break;
+
+ case HTT_STATS_TX_TQM_ERROR_STATS_TAG:
+ htt_print_tx_tqm_error_stats_tlv(tag_buf, stats_req);
+ break;
+
+ case HTT_STATS_SCHED_TXQ_CMD_REAPED_TAG:
+ htt_print_sched_txq_cmd_reaped_tlv_v(tag_buf, len, stats_req);
+ break;
+
+ case HTT_STATS_SRING_CMN_TAG:
+ htt_print_sring_cmn_tlv(tag_buf, stats_req);
+ break;
+
+ case HTT_STATS_TX_SOUNDING_STATS_TAG:
+ htt_print_tx_sounding_stats_tlv(tag_buf, stats_req);
+ break;
+
+ case HTT_STATS_TX_SELFGEN_AC_ERR_STATS_TAG:
+ htt_print_tx_selfgen_ac_err_stats_tlv(tag_buf, stats_req);
+ break;
+
+ case HTT_STATS_TX_SELFGEN_CMN_STATS_TAG:
+ htt_print_tx_selfgen_cmn_stats_tlv(tag_buf, stats_req);
+ break;
+
+ case HTT_STATS_TX_SELFGEN_AC_STATS_TAG:
+ htt_print_tx_selfgen_ac_stats_tlv(tag_buf, stats_req);
+ break;
+
+ case HTT_STATS_TX_SELFGEN_AX_STATS_TAG:
+ htt_print_tx_selfgen_ax_stats_tlv(tag_buf, stats_req);
+ break;
+
+ case HTT_STATS_TX_SELFGEN_AX_ERR_STATS_TAG:
+ htt_print_tx_selfgen_ax_err_stats_tlv(tag_buf, stats_req);
+ break;
+
+ case HTT_STATS_TX_HWQ_MUMIMO_SCH_STATS_TAG:
+ htt_print_tx_hwq_mu_mimo_sch_stats_tlv(tag_buf, stats_req);
+ break;
+
+ case HTT_STATS_TX_HWQ_MUMIMO_MPDU_STATS_TAG:
+ htt_print_tx_hwq_mu_mimo_mpdu_stats_tlv(tag_buf, stats_req);
+ break;
+
+ case HTT_STATS_TX_HWQ_MUMIMO_CMN_STATS_TAG:
+ htt_print_tx_hwq_mu_mimo_cmn_stats_tlv(tag_buf, stats_req);
+ break;
+
+ case HTT_STATS_HW_INTR_MISC_TAG:
+ htt_print_hw_stats_intr_misc_tlv(tag_buf, stats_req);
+ break;
+
+ case HTT_STATS_HW_WD_TIMEOUT_TAG:
+ htt_print_hw_stats_wd_timeout_tlv(tag_buf, stats_req);
+ break;
+
+ case HTT_STATS_HW_PDEV_ERRS_TAG:
+ htt_print_hw_stats_pdev_errs_tlv(tag_buf, stats_req);
+ break;
+
+ case HTT_STATS_COUNTER_NAME_TAG:
+ htt_print_counter_tlv(tag_buf, stats_req);
+ break;
+
+ case HTT_STATS_TX_TID_DETAILS_TAG:
+ htt_print_tx_tid_stats_tlv(tag_buf, stats_req);
+ break;
+
+ case HTT_STATS_TX_TID_DETAILS_V1_TAG:
+ htt_print_tx_tid_stats_v1_tlv(tag_buf, stats_req);
+ break;
+
+ case HTT_STATS_RX_TID_DETAILS_TAG:
+ htt_print_rx_tid_stats_tlv(tag_buf, stats_req);
+ break;
+
+ case HTT_STATS_PEER_STATS_CMN_TAG:
+ htt_print_peer_stats_cmn_tlv(tag_buf, stats_req);
+ break;
+
+ case HTT_STATS_PEER_DETAILS_TAG:
+ htt_print_peer_details_tlv(tag_buf, stats_req);
+ break;
+
+ case HTT_STATS_PEER_MSDU_FLOWQ_TAG:
+ htt_print_msdu_flow_stats_tlv(tag_buf, stats_req);
+ break;
+
+ case HTT_STATS_PEER_TX_RATE_STATS_TAG:
+ htt_print_tx_peer_rate_stats_tlv(tag_buf, stats_req);
+ break;
+
+ case HTT_STATS_PEER_RX_RATE_STATS_TAG:
+ htt_print_rx_peer_rate_stats_tlv(tag_buf, stats_req);
+ break;
+
+ case HTT_STATS_TX_DE_COMPL_STATS_TAG:
+ htt_print_tx_de_compl_stats_tlv(tag_buf, stats_req);
+ break;
+
+ case HTT_STATS_PDEV_CCA_1SEC_HIST_TAG:
+ case HTT_STATS_PDEV_CCA_100MSEC_HIST_TAG:
+ case HTT_STATS_PDEV_CCA_STAT_CUMULATIVE_TAG:
+ htt_print_pdev_cca_stats_hist_tlv(tag_buf, stats_req);
+ break;
+
+ case HTT_STATS_PDEV_CCA_COUNTERS_TAG:
+ htt_print_pdev_stats_cca_counters_tlv(tag_buf, stats_req);
+ break;
+
+ case HTT_STATS_WHAL_TX_TAG:
+ htt_print_hw_stats_whal_tx_tlv(tag_buf, stats_req);
+ break;
+
+ case HTT_STATS_PDEV_TWT_SESSIONS_TAG:
+ htt_print_pdev_stats_twt_sessions_tlv(tag_buf, stats_req);
+ break;
+
+ case HTT_STATS_PDEV_TWT_SESSION_TAG:
+ htt_print_pdev_stats_twt_session_tlv(tag_buf, stats_req);
+ break;
+
+ case HTT_STATS_SCHED_TXQ_SCHED_ORDER_SU_TAG:
+ htt_print_sched_txq_sched_order_su_tlv_v(tag_buf, len, stats_req);
+ break;
+
+ case HTT_STATS_SCHED_TXQ_SCHED_INELIGIBILITY_TAG:
+ htt_print_sched_txq_sched_ineligibility_tlv_v(tag_buf, len, stats_req);
+ break;
+
+ case HTT_STATS_PDEV_OBSS_PD_TAG:
+ htt_print_pdev_obss_pd_stats_tlv_v(tag_buf, stats_req);
+ break;
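+
+	/* Unlike the printers above, this one takes the raw iterator
+	 * cookie; user_data is the same stats_req pointer handed to
+	 * ath11k_dp_htt_tlv_iter() by the caller.
+	 */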
+	case HTT_STATS_RING_BACKPRESSURE_STATS_TAG:
+		htt_print_backpressure_stats_tlv_v(tag_buf, user_data);
+		break;
+
+	case HTT_STATS_PDEV_TX_RATE_TXBF_STATS_TAG:
+		htt_print_pdev_tx_rate_txbf_stats_tlv(tag_buf, stats_req);
+		break;
+
+	case HTT_STATS_TXBF_OFDMA_NDPA_STATS_TAG:
+		htt_print_txbf_ofdma_ndpa_stats_tlv(tag_buf, stats_req);
+		break;
+
+	case HTT_STATS_TXBF_OFDMA_NDP_STATS_TAG:
+		htt_print_txbf_ofdma_ndp_stats_tlv(tag_buf, stats_req);
+		break;
+
+	case HTT_STATS_TXBF_OFDMA_BRP_STATS_TAG:
+		htt_print_txbf_ofdma_brp_stats_tlv(tag_buf, stats_req);
+		break;
+
+	case HTT_STATS_TXBF_OFDMA_STEER_STATS_TAG:
+		htt_print_txbf_ofdma_steer_stats_tlv(tag_buf, stats_req);
+		break;
+
+	case HTT_STATS_PHY_COUNTERS_TAG:
+		htt_print_phy_counters_tlv(tag_buf, stats_req);
+		break;
+
+	case HTT_STATS_PHY_STATS_TAG:
+		htt_print_phy_stats_tlv(tag_buf, stats_req);
+		break;
+
+	case HTT_STATS_PHY_RESET_COUNTERS_TAG:
+		htt_print_phy_reset_counters_tlv(tag_buf, len, stats_req);
+		break;
+
+	case HTT_STATS_PHY_RESET_STATS_TAG:
+		htt_print_phy_reset_stats_tlv(tag_buf, len, stats_req);
+		break;
+
+	case HTT_STATS_PEER_CTRL_PATH_TXRX_STATS_TAG:
+		htt_print_peer_ctrl_path_txrx_stats_tlv(tag_buf, stats_req);
+		break;
+ default:
+ break;
+ }
+
+ return 0;
+}
+
+void ath11k_debugfs_htt_ext_stats_handler(struct ath11k_base *ab,
+ struct sk_buff *skb)
+{
+ struct ath11k_htt_extd_stats_msg *msg;
+ struct debug_htt_stats_req *stats_req;
+ struct ath11k *ar;
+ u32 len;
+ u64 cookie;
+ int ret;
+ bool send_completion = false;
+ u8 pdev_id;
+
+ msg = (struct ath11k_htt_extd_stats_msg *)skb->data;
+ cookie = msg->cookie;
+
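+	/* The upper 32 bits of the cookie must carry the fixed magic value
+	 * and the lower 32 bits the pdev id, as composed in
+	 * ath11k_debugfs_htt_stats_req(); anything else is not our request.
+	 */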
+ if (FIELD_GET(HTT_STATS_COOKIE_MSB, cookie) != HTT_STATS_MAGIC_VALUE) {
+ ath11k_warn(ab, "received invalid htt ext stats event\n");
+ return;
+ }
+
+ pdev_id = FIELD_GET(HTT_STATS_COOKIE_LSB, cookie);
+ rcu_read_lock();
+ ar = ath11k_mac_get_ar_by_pdev_id(ab, pdev_id);
+ rcu_read_unlock();
+ if (!ar) {
+ ath11k_warn(ab, "failed to get ar for pdev_id %d\n", pdev_id);
+ return;
+ }
+
+ stats_req = ar->debug.htt_stats.stats_req;
+ if (!stats_req)
+ return;
+
+ spin_lock_bh(&ar->debug.htt_stats.lock);
+
+ stats_req->done = FIELD_GET(HTT_T2H_EXT_STATS_INFO1_DONE, msg->info1);
+ if (stats_req->done)
+ send_completion = true;
+
+ spin_unlock_bh(&ar->debug.htt_stats.lock);
+
+ len = FIELD_GET(HTT_T2H_EXT_STATS_INFO1_LENGTH, msg->info1);
+ ret = ath11k_dp_htt_tlv_iter(ab, msg->data, len,
+ ath11k_dbg_htt_ext_stats_parse,
+ stats_req);
+ if (ret)
+		ath11k_warn(ab, "failed to parse tlv %d\n", ret);
+
+ if (send_completion)
+ complete(&stats_req->cmpln);
+}
+
+static ssize_t ath11k_read_htt_stats_type(struct file *file,
+ char __user *user_buf,
+ size_t count, loff_t *ppos)
+{
+ struct ath11k *ar = file->private_data;
+ char buf[32];
+ size_t len;
+
+ len = scnprintf(buf, sizeof(buf), "%u\n", ar->debug.htt_stats.type);
+
+ return simple_read_from_buffer(user_buf, count, ppos, buf, len);
+}
+
+static ssize_t ath11k_write_htt_stats_type(struct file *file,
+ const char __user *user_buf,
+ size_t count, loff_t *ppos)
+{
+ struct ath11k *ar = file->private_data;
+ u8 type;
+ int ret;
+
+ ret = kstrtou8_from_user(user_buf, count, 0, &type);
+ if (ret)
+ return ret;
+
+ if (type >= ATH11K_DBG_HTT_NUM_EXT_STATS)
+ return -E2BIG;
+
+ if (type == ATH11K_DBG_HTT_EXT_STATS_RESET)
+ return -EPERM;
+
+ ar->debug.htt_stats.type = type;
+
+ ret = count;
+
+ return ret;
+}
+
+static const struct file_operations fops_htt_stats_type = {
+ .read = ath11k_read_htt_stats_type,
+ .write = ath11k_write_htt_stats_type,
+ .open = simple_open,
+ .owner = THIS_MODULE,
+ .llseek = default_llseek,
+};
+
+static int ath11k_prep_htt_stats_cfg_params(struct ath11k *ar, u8 type,
+ const u8 *mac_addr,
+ struct htt_ext_stats_cfg_params *cfg_params)
+{
+ if (!cfg_params)
+ return -EINVAL;
+
+ switch (type) {
+ case ATH11K_DBG_HTT_EXT_STATS_PDEV_TX_HWQ:
+ case ATH11K_DBG_HTT_EXT_STATS_TX_MU_HWQ:
+ cfg_params->cfg0 = HTT_STAT_DEFAULT_CFG0_ALL_HWQS;
+ break;
+ case ATH11K_DBG_HTT_EXT_STATS_PDEV_TX_SCHED:
+ cfg_params->cfg0 = HTT_STAT_DEFAULT_CFG0_ALL_TXQS;
+ break;
+ case ATH11K_DBG_HTT_EXT_STATS_TQM_CMDQ:
+ cfg_params->cfg0 = HTT_STAT_DEFAULT_CFG0_ALL_CMDQS;
+ break;
+ case ATH11K_DBG_HTT_EXT_STATS_PEER_INFO:
+ cfg_params->cfg0 = HTT_STAT_PEER_INFO_MAC_ADDR;
+ cfg_params->cfg0 |= FIELD_PREP(GENMASK(15, 1),
+ HTT_PEER_STATS_REQ_MODE_FLUSH_TQM);
+ cfg_params->cfg1 = HTT_STAT_DEFAULT_PEER_REQ_TYPE;
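+		/* Pack the 6-byte peer MAC into cfg2 (octets 0-3) and
+		 * cfg3 (octets 4-5), one octet per byte lane; e.g.
+		 * aa:bb:cc:dd:ee:ff gives cfg2 = 0xddccbbaa and
+		 * cfg3 = 0x0000ffee.
+		 */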
+ cfg_params->cfg2 |= FIELD_PREP(GENMASK(7, 0), mac_addr[0]);
+ cfg_params->cfg2 |= FIELD_PREP(GENMASK(15, 8), mac_addr[1]);
+ cfg_params->cfg2 |= FIELD_PREP(GENMASK(23, 16), mac_addr[2]);
+ cfg_params->cfg2 |= FIELD_PREP(GENMASK(31, 24), mac_addr[3]);
+ cfg_params->cfg3 |= FIELD_PREP(GENMASK(7, 0), mac_addr[4]);
+ cfg_params->cfg3 |= FIELD_PREP(GENMASK(15, 8), mac_addr[5]);
+ break;
+ case ATH11K_DBG_HTT_EXT_STATS_RING_IF_INFO:
+ case ATH11K_DBG_HTT_EXT_STATS_SRNG_INFO:
+ cfg_params->cfg0 = HTT_STAT_DEFAULT_CFG0_ALL_RINGS;
+ break;
+ case ATH11K_DBG_HTT_EXT_STATS_ACTIVE_PEERS_LIST:
+ cfg_params->cfg0 = HTT_STAT_DEFAULT_CFG0_ACTIVE_PEERS;
+ break;
+ case ATH11K_DBG_HTT_EXT_STATS_PDEV_CCA_STATS:
+ cfg_params->cfg0 = HTT_STAT_DEFAULT_CFG0_CCA_CUMULATIVE;
+ break;
+ case ATH11K_DBG_HTT_EXT_STATS_TX_SOUNDING_INFO:
+ cfg_params->cfg0 = HTT_STAT_DEFAULT_CFG0_ACTIVE_VDEVS;
+ break;
+ case ATH11K_DBG_HTT_EXT_STATS_PEER_CTRL_PATH_TXRX_STATS:
+ cfg_params->cfg0 = HTT_STAT_PEER_INFO_MAC_ADDR;
+ cfg_params->cfg1 |= FIELD_PREP(GENMASK(7, 0), mac_addr[0]);
+ cfg_params->cfg1 |= FIELD_PREP(GENMASK(15, 8), mac_addr[1]);
+ cfg_params->cfg1 |= FIELD_PREP(GENMASK(23, 16), mac_addr[2]);
+ cfg_params->cfg1 |= FIELD_PREP(GENMASK(31, 24), mac_addr[3]);
+ cfg_params->cfg2 |= FIELD_PREP(GENMASK(7, 0), mac_addr[4]);
+ cfg_params->cfg2 |= FIELD_PREP(GENMASK(15, 8), mac_addr[5]);
+ break;
+ default:
+ break;
+ }
+
+ return 0;
+}
+
+int ath11k_debugfs_htt_stats_req(struct ath11k *ar)
+{
+ struct debug_htt_stats_req *stats_req = ar->debug.htt_stats.stats_req;
+ u8 type = stats_req->type;
+ u64 cookie = 0;
+ int ret, pdev_id = ar->pdev->pdev_id;
+ struct htt_ext_stats_cfg_params cfg_params = { 0 };
+
+ init_completion(&stats_req->cmpln);
+
+ stats_req->done = false;
+ stats_req->pdev_id = pdev_id;
+
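+	/* Firmware echoes this cookie back in the ext stats event, where
+	 * the handler checks the magic (upper 32 bits) and recovers the
+	 * pdev id (lower 32 bits).
+	 */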
+ cookie = FIELD_PREP(HTT_STATS_COOKIE_MSB, HTT_STATS_MAGIC_VALUE) |
+ FIELD_PREP(HTT_STATS_COOKIE_LSB, pdev_id);
+
+ ret = ath11k_prep_htt_stats_cfg_params(ar, type, stats_req->peer_addr,
+ &cfg_params);
+ if (ret) {
+ ath11k_warn(ar->ab, "failed to set htt stats cfg params: %d\n", ret);
+ return ret;
+ }
+
+ ret = ath11k_dp_tx_htt_h2t_ext_stats_req(ar, type, &cfg_params, cookie);
+ if (ret) {
+ ath11k_warn(ar->ab, "failed to send htt stats request: %d\n", ret);
+ return ret;
+ }
+
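+	/* Wait up to 3 seconds per attempt. On timeout, if firmware has
+	 * not yet signalled DONE, mark the request done under the lock and
+	 * fail with -ETIMEDOUT; if the DONE event raced in just as the
+	 * wait expired, loop and reap the completion instead.
+	 */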
+ while (!wait_for_completion_timeout(&stats_req->cmpln, 3 * HZ)) {
+ spin_lock_bh(&ar->debug.htt_stats.lock);
+ if (!stats_req->done) {
+ stats_req->done = true;
+ spin_unlock_bh(&ar->debug.htt_stats.lock);
+ ath11k_warn(ar->ab, "stats request timed out\n");
+ return -ETIMEDOUT;
+ }
+ spin_unlock_bh(&ar->debug.htt_stats.lock);
+ }
+
+ return 0;
+}
+
+static int ath11k_open_htt_stats(struct inode *inode, struct file *file)
+{
+ struct ath11k *ar = inode->i_private;
+ struct debug_htt_stats_req *stats_req;
+ u8 type = ar->debug.htt_stats.type;
+ int ret;
+
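+	/* These stats types need a target peer MAC in the config params,
+	 * which this pdev-level file cannot supply, and reset is driven
+	 * through the separate htt_stats_reset file; refuse them here.
+	 */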
+ if (type == ATH11K_DBG_HTT_EXT_STATS_RESET ||
+ type == ATH11K_DBG_HTT_EXT_STATS_PEER_INFO ||
+ type == ATH11K_DBG_HTT_EXT_STATS_PEER_CTRL_PATH_TXRX_STATS)
+ return -EPERM;
+
+ mutex_lock(&ar->conf_mutex);
+
+ if (ar->state != ATH11K_STATE_ON) {
+ ret = -ENETDOWN;
+ goto err_unlock;
+ }
+
+ if (ar->debug.htt_stats.stats_req) {
+ ret = -EAGAIN;
+ goto err_unlock;
+ }
+
+ stats_req = vzalloc(sizeof(*stats_req) + ATH11K_HTT_STATS_BUF_SIZE);
+ if (!stats_req) {
+ ret = -ENOMEM;
+ goto err_unlock;
+ }
+
+ ar->debug.htt_stats.stats_req = stats_req;
+ stats_req->type = type;
+
+ ret = ath11k_debugfs_htt_stats_req(ar);
+ if (ret < 0)
+ goto out;
+
+ file->private_data = stats_req;
+
+ mutex_unlock(&ar->conf_mutex);
+
+ return 0;
+out:
+ vfree(stats_req);
+ ar->debug.htt_stats.stats_req = NULL;
+err_unlock:
+ mutex_unlock(&ar->conf_mutex);
+
+ return ret;
+}
+
+static int ath11k_release_htt_stats(struct inode *inode, struct file *file)
+{
+ struct ath11k *ar = inode->i_private;
+
+ mutex_lock(&ar->conf_mutex);
+ vfree(file->private_data);
+ ar->debug.htt_stats.stats_req = NULL;
+ mutex_unlock(&ar->conf_mutex);
+
+ return 0;
+}
+
+static ssize_t ath11k_read_htt_stats(struct file *file,
+ char __user *user_buf,
+ size_t count, loff_t *ppos)
+{
+ struct debug_htt_stats_req *stats_req = file->private_data;
+ char *buf;
+ u32 length = 0;
+
+ buf = stats_req->buf;
+ length = min_t(u32, stats_req->buf_len, ATH11K_HTT_STATS_BUF_SIZE);
+ return simple_read_from_buffer(user_buf, count, ppos, buf, length);
+}
+
+static const struct file_operations fops_dump_htt_stats = {
+ .open = ath11k_open_htt_stats,
+ .release = ath11k_release_htt_stats,
+ .read = ath11k_read_htt_stats,
+ .owner = THIS_MODULE,
+ .llseek = default_llseek,
+};
+
+static ssize_t ath11k_read_htt_stats_reset(struct file *file,
+ char __user *user_buf,
+ size_t count, loff_t *ppos)
+{
+ struct ath11k *ar = file->private_data;
+ char buf[32];
+ size_t len;
+
+ len = scnprintf(buf, sizeof(buf), "%u\n", ar->debug.htt_stats.reset);
+
+ return simple_read_from_buffer(user_buf, count, ppos, buf, len);
+}
+
+static ssize_t ath11k_write_htt_stats_reset(struct file *file,
+ const char __user *user_buf,
+ size_t count, loff_t *ppos)
+{
+ struct ath11k *ar = file->private_data;
+ u8 type;
+ struct htt_ext_stats_cfg_params cfg_params = { 0 };
+ int ret;
+
+ ret = kstrtou8_from_user(user_buf, count, 0, &type);
+ if (ret)
+ return ret;
+
+ if (type >= ATH11K_DBG_HTT_NUM_EXT_STATS ||
+ type == ATH11K_DBG_HTT_EXT_STATS_RESET)
+ return -E2BIG;
+
+ mutex_lock(&ar->conf_mutex);
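+	/* The type to clear is encoded as a one-hot bit in cfg1, shifted
+	 * past the reset start offset carried in cfg0.
+	 */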
+ cfg_params.cfg0 = HTT_STAT_DEFAULT_RESET_START_OFFSET;
+ cfg_params.cfg1 = 1 << (cfg_params.cfg0 + type);
+ ret = ath11k_dp_tx_htt_h2t_ext_stats_req(ar,
+ ATH11K_DBG_HTT_EXT_STATS_RESET,
+ &cfg_params,
+ 0ULL);
+ if (ret) {
+ ath11k_warn(ar->ab, "failed to send htt stats request: %d\n", ret);
+ mutex_unlock(&ar->conf_mutex);
+ return ret;
+ }
+
+ ar->debug.htt_stats.reset = type;
+ mutex_unlock(&ar->conf_mutex);
+
+ ret = count;
+
+ return ret;
+}
+
+static const struct file_operations fops_htt_stats_reset = {
+ .read = ath11k_read_htt_stats_reset,
+ .write = ath11k_write_htt_stats_reset,
+ .open = simple_open,
+ .owner = THIS_MODULE,
+ .llseek = default_llseek,
+};
+
+void ath11k_debugfs_htt_stats_init(struct ath11k *ar)
+{
+ spin_lock_init(&ar->debug.htt_stats.lock);
+ debugfs_create_file("htt_stats_type", 0600, ar->debug.debugfs_pdev,
+ ar, &fops_htt_stats_type);
+ debugfs_create_file("htt_stats", 0400, ar->debug.debugfs_pdev,
+ ar, &fops_dump_htt_stats);
+ debugfs_create_file("htt_stats_reset", 0600, ar->debug.debugfs_pdev,
+ ar, &fops_htt_stats_reset);
+}
diff --git a/drivers/net/wireless/ath/ath11k/debugfs_htt_stats.h b/drivers/net/wireless/ath/ath11k/debugfs_htt_stats.h
new file mode 100644
index 0000000000..96219301f0
--- /dev/null
+++ b/drivers/net/wireless/ath/ath11k/debugfs_htt_stats.h
@@ -0,0 +1,2045 @@
+/* SPDX-License-Identifier: BSD-3-Clause-Clear */
+/*
+ * Copyright (c) 2018-2019 The Linux Foundation. All rights reserved.
+ * Copyright (c) 2022 Qualcomm Innovation Center, Inc. All rights reserved.
+ */
+
+#ifndef DEBUG_HTT_STATS_H
+#define DEBUG_HTT_STATS_H
+
+#define HTT_STATS_COOKIE_LSB GENMASK_ULL(31, 0)
+#define HTT_STATS_COOKIE_MSB GENMASK_ULL(63, 32)
+#define HTT_STATS_MAGIC_VALUE 0xF0F0F0F0
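+/* e.g. a request on pdev 1 yields the cookie 0xf0f0f0f000000001 */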
+
+enum htt_tlv_tag_t {
+ HTT_STATS_TX_PDEV_CMN_TAG = 0,
+ HTT_STATS_TX_PDEV_UNDERRUN_TAG = 1,
+ HTT_STATS_TX_PDEV_SIFS_TAG = 2,
+ HTT_STATS_TX_PDEV_FLUSH_TAG = 3,
+ HTT_STATS_TX_PDEV_PHY_ERR_TAG = 4,
+ HTT_STATS_STRING_TAG = 5,
+ HTT_STATS_TX_HWQ_CMN_TAG = 6,
+ HTT_STATS_TX_HWQ_DIFS_LATENCY_TAG = 7,
+ HTT_STATS_TX_HWQ_CMD_RESULT_TAG = 8,
+ HTT_STATS_TX_HWQ_CMD_STALL_TAG = 9,
+ HTT_STATS_TX_HWQ_FES_STATUS_TAG = 10,
+ HTT_STATS_TX_TQM_GEN_MPDU_TAG = 11,
+ HTT_STATS_TX_TQM_LIST_MPDU_TAG = 12,
+ HTT_STATS_TX_TQM_LIST_MPDU_CNT_TAG = 13,
+ HTT_STATS_TX_TQM_CMN_TAG = 14,
+ HTT_STATS_TX_TQM_PDEV_TAG = 15,
+ HTT_STATS_TX_TQM_CMDQ_STATUS_TAG = 16,
+ HTT_STATS_TX_DE_EAPOL_PACKETS_TAG = 17,
+ HTT_STATS_TX_DE_CLASSIFY_FAILED_TAG = 18,
+ HTT_STATS_TX_DE_CLASSIFY_STATS_TAG = 19,
+ HTT_STATS_TX_DE_CLASSIFY_STATUS_TAG = 20,
+ HTT_STATS_TX_DE_ENQUEUE_PACKETS_TAG = 21,
+ HTT_STATS_TX_DE_ENQUEUE_DISCARD_TAG = 22,
+ HTT_STATS_TX_DE_CMN_TAG = 23,
+ HTT_STATS_RING_IF_TAG = 24,
+ HTT_STATS_TX_PDEV_MU_MIMO_STATS_TAG = 25,
+ HTT_STATS_SFM_CMN_TAG = 26,
+ HTT_STATS_SRING_STATS_TAG = 27,
+ HTT_STATS_RX_PDEV_FW_STATS_TAG = 28,
+ HTT_STATS_RX_PDEV_FW_RING_MPDU_ERR_TAG = 29,
+ HTT_STATS_RX_PDEV_FW_MPDU_DROP_TAG = 30,
+ HTT_STATS_RX_SOC_FW_STATS_TAG = 31,
+ HTT_STATS_RX_SOC_FW_REFILL_RING_EMPTY_TAG = 32,
+ HTT_STATS_RX_SOC_FW_REFILL_RING_NUM_REFILL_TAG = 33,
+ HTT_STATS_TX_PDEV_RATE_STATS_TAG = 34,
+ HTT_STATS_RX_PDEV_RATE_STATS_TAG = 35,
+ HTT_STATS_TX_PDEV_SCHEDULER_TXQ_STATS_TAG = 36,
+ HTT_STATS_TX_SCHED_CMN_TAG = 37,
+ HTT_STATS_TX_PDEV_MUMIMO_MPDU_STATS_TAG = 38,
+ HTT_STATS_SCHED_TXQ_CMD_POSTED_TAG = 39,
+ HTT_STATS_RING_IF_CMN_TAG = 40,
+ HTT_STATS_SFM_CLIENT_USER_TAG = 41,
+ HTT_STATS_SFM_CLIENT_TAG = 42,
+ HTT_STATS_TX_TQM_ERROR_STATS_TAG = 43,
+ HTT_STATS_SCHED_TXQ_CMD_REAPED_TAG = 44,
+ HTT_STATS_SRING_CMN_TAG = 45,
+ HTT_STATS_TX_SELFGEN_AC_ERR_STATS_TAG = 46,
+ HTT_STATS_TX_SELFGEN_CMN_STATS_TAG = 47,
+ HTT_STATS_TX_SELFGEN_AC_STATS_TAG = 48,
+ HTT_STATS_TX_SELFGEN_AX_STATS_TAG = 49,
+ HTT_STATS_TX_SELFGEN_AX_ERR_STATS_TAG = 50,
+ HTT_STATS_TX_HWQ_MUMIMO_SCH_STATS_TAG = 51,
+ HTT_STATS_TX_HWQ_MUMIMO_MPDU_STATS_TAG = 52,
+ HTT_STATS_TX_HWQ_MUMIMO_CMN_STATS_TAG = 53,
+ HTT_STATS_HW_INTR_MISC_TAG = 54,
+ HTT_STATS_HW_WD_TIMEOUT_TAG = 55,
+ HTT_STATS_HW_PDEV_ERRS_TAG = 56,
+ HTT_STATS_COUNTER_NAME_TAG = 57,
+ HTT_STATS_TX_TID_DETAILS_TAG = 58,
+ HTT_STATS_RX_TID_DETAILS_TAG = 59,
+ HTT_STATS_PEER_STATS_CMN_TAG = 60,
+ HTT_STATS_PEER_DETAILS_TAG = 61,
+ HTT_STATS_PEER_TX_RATE_STATS_TAG = 62,
+ HTT_STATS_PEER_RX_RATE_STATS_TAG = 63,
+ HTT_STATS_PEER_MSDU_FLOWQ_TAG = 64,
+ HTT_STATS_TX_DE_COMPL_STATS_TAG = 65,
+ HTT_STATS_WHAL_TX_TAG = 66,
+ HTT_STATS_TX_PDEV_SIFS_HIST_TAG = 67,
+ HTT_STATS_RX_PDEV_FW_STATS_PHY_ERR_TAG = 68,
+ HTT_STATS_TX_TID_DETAILS_V1_TAG = 69,
+ HTT_STATS_PDEV_CCA_1SEC_HIST_TAG = 70,
+ HTT_STATS_PDEV_CCA_100MSEC_HIST_TAG = 71,
+ HTT_STATS_PDEV_CCA_STAT_CUMULATIVE_TAG = 72,
+ HTT_STATS_PDEV_CCA_COUNTERS_TAG = 73,
+ HTT_STATS_TX_PDEV_MPDU_STATS_TAG = 74,
+ HTT_STATS_PDEV_TWT_SESSIONS_TAG = 75,
+ HTT_STATS_PDEV_TWT_SESSION_TAG = 76,
+ HTT_STATS_RX_REFILL_RXDMA_ERR_TAG = 77,
+ HTT_STATS_RX_REFILL_REO_ERR_TAG = 78,
+ HTT_STATS_RX_REO_RESOURCE_STATS_TAG = 79,
+ HTT_STATS_TX_SOUNDING_STATS_TAG = 80,
+ HTT_STATS_TX_PDEV_TX_PPDU_STATS_TAG = 81,
+ HTT_STATS_TX_PDEV_TRIED_MPDU_CNT_HIST_TAG = 82,
+ HTT_STATS_TX_HWQ_TRIED_MPDU_CNT_HIST_TAG = 83,
+ HTT_STATS_TX_HWQ_TXOP_USED_CNT_HIST_TAG = 84,
+ HTT_STATS_TX_DE_FW2WBM_RING_FULL_HIST_TAG = 85,
+ HTT_STATS_SCHED_TXQ_SCHED_ORDER_SU_TAG = 86,
+ HTT_STATS_SCHED_TXQ_SCHED_INELIGIBILITY_TAG = 87,
+ HTT_STATS_PDEV_OBSS_PD_TAG = 88,
+ HTT_STATS_HW_WAR_TAG = 89,
+ HTT_STATS_RING_BACKPRESSURE_STATS_TAG = 90,
+ HTT_STATS_PEER_CTRL_PATH_TXRX_STATS_TAG = 101,
+ HTT_STATS_PDEV_TX_RATE_TXBF_STATS_TAG = 108,
+ HTT_STATS_TXBF_OFDMA_NDPA_STATS_TAG = 113,
+ HTT_STATS_TXBF_OFDMA_NDP_STATS_TAG = 114,
+ HTT_STATS_TXBF_OFDMA_BRP_STATS_TAG = 115,
+ HTT_STATS_TXBF_OFDMA_STEER_STATS_TAG = 116,
+ HTT_STATS_PHY_COUNTERS_TAG = 121,
+ HTT_STATS_PHY_STATS_TAG = 122,
+ HTT_STATS_PHY_RESET_COUNTERS_TAG = 123,
+ HTT_STATS_PHY_RESET_STATS_TAG = 124,
+
+ HTT_STATS_MAX_TAG,
+};
+
+#define HTT_STATS_MAX_STRING_SZ32 4
+#define HTT_STATS_MACID_INVALID 0xff
+#define HTT_TX_HWQ_MAX_DIFS_LATENCY_BINS 10
+#define HTT_TX_HWQ_MAX_CMD_RESULT_STATS 13
+#define HTT_TX_HWQ_MAX_CMD_STALL_STATS 5
+#define HTT_TX_HWQ_MAX_FES_RESULT_STATS 10
+
+enum htt_tx_pdev_underrun_enum {
+ HTT_STATS_TX_PDEV_NO_DATA_UNDERRUN = 0,
+ HTT_STATS_TX_PDEV_DATA_UNDERRUN_BETWEEN_MPDU = 1,
+ HTT_STATS_TX_PDEV_DATA_UNDERRUN_WITHIN_MPDU = 2,
+ HTT_TX_PDEV_MAX_URRN_STATS = 3,
+};
+
+#define HTT_TX_PDEV_MAX_FLUSH_REASON_STATS 71
+#define HTT_TX_PDEV_MAX_SIFS_BURST_STATS 9
+#define HTT_TX_PDEV_MAX_SIFS_BURST_HIST_STATS 10
+#define HTT_TX_PDEV_MAX_PHY_ERR_STATS 18
+#define HTT_TX_PDEV_SCHED_TX_MODE_MAX 4
+#define HTT_TX_PDEV_NUM_SCHED_ORDER_LOG 20
+
+#define HTT_RX_STATS_REFILL_MAX_RING 4
+#define HTT_RX_STATS_RXDMA_MAX_ERR 16
+#define HTT_RX_STATS_FW_DROP_REASON_MAX 16
+
+/* Bytes stored in little endian order */
+/* Length should be multiple of DWORD */
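+/* e.g. the ASCII string "abcd" occupies one data word: data[0] == 0x64636261 */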
+struct htt_stats_string_tlv {
+ /* Can be variable length */
+ DECLARE_FLEX_ARRAY(u32, data);
+} __packed;
+
+#define HTT_STATS_MAC_ID GENMASK(7, 0)
+
+/* == TX PDEV STATS == */
+struct htt_tx_pdev_stats_cmn_tlv {
+ u32 mac_id__word;
+ u32 hw_queued;
+ u32 hw_reaped;
+ u32 underrun;
+ u32 hw_paused;
+ u32 hw_flush;
+ u32 hw_filt;
+ u32 tx_abort;
+ u32 mpdu_requeued;
+ u32 tx_xretry;
+ u32 data_rc;
+ u32 mpdu_dropped_xretry;
+ u32 illgl_rate_phy_err;
+ u32 cont_xretry;
+ u32 tx_timeout;
+ u32 pdev_resets;
+ u32 phy_underrun;
+ u32 txop_ovf;
+ u32 seq_posted;
+ u32 seq_failed_queueing;
+ u32 seq_completed;
+ u32 seq_restarted;
+ u32 mu_seq_posted;
+ u32 seq_switch_hw_paused;
+ u32 next_seq_posted_dsr;
+ u32 seq_posted_isr;
+ u32 seq_ctrl_cached;
+ u32 mpdu_count_tqm;
+ u32 msdu_count_tqm;
+ u32 mpdu_removed_tqm;
+ u32 msdu_removed_tqm;
+ u32 mpdus_sw_flush;
+ u32 mpdus_hw_filter;
+ u32 mpdus_truncated;
+ u32 mpdus_ack_failed;
+ u32 mpdus_expired;
+ u32 mpdus_seq_hw_retry;
+ u32 ack_tlv_proc;
+ u32 coex_abort_mpdu_cnt_valid;
+ u32 coex_abort_mpdu_cnt;
+ u32 num_total_ppdus_tried_ota;
+ u32 num_data_ppdus_tried_ota;
+ u32 local_ctrl_mgmt_enqued;
+ u32 local_ctrl_mgmt_freed;
+ u32 local_data_enqued;
+ u32 local_data_freed;
+ u32 mpdu_tried;
+ u32 isr_wait_seq_posted;
+
+ u32 tx_active_dur_us_low;
+ u32 tx_active_dur_us_high;
+};
+
+/* NOTE: Variable length TLV, use length spec to infer array size */
+struct htt_tx_pdev_stats_urrn_tlv_v {
+ /* HTT_TX_PDEV_MAX_URRN_STATS */
+ DECLARE_FLEX_ARRAY(u32, urrn_stats);
+};
+
+/* NOTE: Variable length TLV, use length spec to infer array size */
+struct htt_tx_pdev_stats_flush_tlv_v {
+ /* HTT_TX_PDEV_MAX_FLUSH_REASON_STATS */
+ DECLARE_FLEX_ARRAY(u32, flush_errs);
+};
+
+/* NOTE: Variable length TLV, use length spec to infer array size */
+struct htt_tx_pdev_stats_sifs_tlv_v {
+ /* HTT_TX_PDEV_MAX_SIFS_BURST_STATS */
+ DECLARE_FLEX_ARRAY(u32, sifs_status);
+};
+
+/* NOTE: Variable length TLV, use length spec to infer array size */
+struct htt_tx_pdev_stats_phy_err_tlv_v {
+ /* HTT_TX_PDEV_MAX_PHY_ERR_STATS */
+ DECLARE_FLEX_ARRAY(u32, phy_errs);
+};
+
+/* NOTE: Variable length TLV, use length spec to infer array size */
+struct htt_tx_pdev_stats_sifs_hist_tlv_v {
+	/* HTT_TX_PDEV_MAX_SIFS_BURST_HIST_STATS */
+ DECLARE_FLEX_ARRAY(u32, sifs_hist_status);
+};
+
+struct htt_tx_pdev_stats_tx_ppdu_stats_tlv_v {
+ u32 num_data_ppdus_legacy_su;
+ u32 num_data_ppdus_ac_su;
+ u32 num_data_ppdus_ax_su;
+ u32 num_data_ppdus_ac_su_txbf;
+ u32 num_data_ppdus_ax_su_txbf;
+};
+
+/* NOTE: Variable length TLV, use length spec to infer array size.
+ *
+ * Tried_mpdu_cnt_hist is the histogram of MPDU tries per HWQ.
+ * The tries here is the count of the MPDUs within a PPDU that the
+ * HW had attempted to transmit on air, for the HWSCH Schedule
+ * command submitted by FW. It is not the retry attempts.
+ * The histogram bins are 0-29, 30-59, 60-89 and so on; there are
+ * 10 bins in this histogram, so e.g. a PPDU with 45 tried MPDUs
+ * increments the 30-59 bin. They are defined in FW using the
+ * following macros:
+ * #define WAL_MAX_TRIED_MPDU_CNT_HISTOGRAM 9
+ * #define WAL_TRIED_MPDU_CNT_HISTOGRAM_INTERVAL 30
+ */
+struct htt_tx_pdev_stats_tried_mpdu_cnt_hist_tlv_v {
+ u32 hist_bin_size;
+ u32 tried_mpdu_cnt_hist[]; /* HTT_TX_PDEV_TRIED_MPDU_CNT_HIST */
+};
+
+/* == SOC ERROR STATS == */
+
+/* =============== PDEV ERROR STATS ============== */
+#define HTT_STATS_MAX_HW_INTR_NAME_LEN 8
+struct htt_hw_stats_intr_misc_tlv {
+ /* Stored as little endian */
+ u8 hw_intr_name[HTT_STATS_MAX_HW_INTR_NAME_LEN];
+ u32 mask;
+ u32 count;
+};
+
+#define HTT_STATS_MAX_HW_MODULE_NAME_LEN 8
+struct htt_hw_stats_wd_timeout_tlv {
+ /* Stored as little endian */
+ u8 hw_module_name[HTT_STATS_MAX_HW_MODULE_NAME_LEN];
+ u32 count;
+};
+
+struct htt_hw_stats_pdev_errs_tlv {
+ u32 mac_id__word; /* BIT [ 7 : 0] : mac_id */
+ u32 tx_abort;
+ u32 tx_abort_fail_count;
+ u32 rx_abort;
+ u32 rx_abort_fail_count;
+ u32 warm_reset;
+ u32 cold_reset;
+ u32 tx_flush;
+ u32 tx_glb_reset;
+ u32 tx_txq_reset;
+ u32 rx_timeout_reset;
+};
+
+struct htt_hw_stats_whal_tx_tlv {
+ u32 mac_id__word;
+ u32 last_unpause_ppdu_id;
+ u32 hwsch_unpause_wait_tqm_write;
+ u32 hwsch_dummy_tlv_skipped;
+ u32 hwsch_misaligned_offset_received;
+ u32 hwsch_reset_count;
+ u32 hwsch_dev_reset_war;
+ u32 hwsch_delayed_pause;
+ u32 hwsch_long_delayed_pause;
+ u32 sch_rx_ppdu_no_response;
+ u32 sch_selfgen_response;
+ u32 sch_rx_sifs_resp_trigger;
+};
+
+/* ============ PEER STATS ============ */
+#define HTT_MSDU_FLOW_STATS_TX_FLOW_NO GENMASK(15, 0)
+#define HTT_MSDU_FLOW_STATS_TID_NUM GENMASK(19, 16)
+#define HTT_MSDU_FLOW_STATS_DROP_RULE BIT(20)
+
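+/* tx_flow_no__tid_num__drop_rule below packs three fields into one word;
+ * unpack with FIELD_GET(), e.g. FIELD_GET(HTT_MSDU_FLOW_STATS_TID_NUM, val)
+ * yields the TID.
+ */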
+struct htt_msdu_flow_stats_tlv {
+ u32 last_update_timestamp;
+ u32 last_add_timestamp;
+ u32 last_remove_timestamp;
+ u32 total_processed_msdu_count;
+ u32 cur_msdu_count_in_flowq;
+ u32 sw_peer_id;
+ u32 tx_flow_no__tid_num__drop_rule;
+ u32 last_cycle_enqueue_count;
+ u32 last_cycle_dequeue_count;
+ u32 last_cycle_drop_count;
+ u32 current_drop_th;
+};
+
+#define MAX_HTT_TID_NAME 8
+
+#define HTT_TX_TID_STATS_SW_PEER_ID GENMASK(15, 0)
+#define HTT_TX_TID_STATS_TID_NUM GENMASK(31, 16)
+#define HTT_TX_TID_STATS_NUM_SCHED_PENDING GENMASK(7, 0)
+#define HTT_TX_TID_STATS_NUM_PPDU_IN_HWQ GENMASK(15, 8)
+
+/* Tidq stats */
+struct htt_tx_tid_stats_tlv {
+ /* Stored as little endian */
+ u8 tid_name[MAX_HTT_TID_NAME];
+ u32 sw_peer_id__tid_num;
+ u32 num_sched_pending__num_ppdu_in_hwq;
+ u32 tid_flags;
+ u32 hw_queued;
+ u32 hw_reaped;
+ u32 mpdus_hw_filter;
+
+ u32 qdepth_bytes;
+ u32 qdepth_num_msdu;
+ u32 qdepth_num_mpdu;
+ u32 last_scheduled_tsmp;
+ u32 pause_module_id;
+ u32 block_module_id;
+ u32 tid_tx_airtime;
+};
+
+#define HTT_TX_TID_STATS_V1_SW_PEER_ID GENMASK(15, 0)
+#define HTT_TX_TID_STATS_V1_TID_NUM GENMASK(31, 16)
+#define HTT_TX_TID_STATS_V1_NUM_SCHED_PENDING GENMASK(7, 0)
+#define HTT_TX_TID_STATS_V1_NUM_PPDU_IN_HWQ GENMASK(15, 8)
+
+/* Tidq stats */
+struct htt_tx_tid_stats_v1_tlv {
+ /* Stored as little endian */
+ u8 tid_name[MAX_HTT_TID_NAME];
+ u32 sw_peer_id__tid_num;
+ u32 num_sched_pending__num_ppdu_in_hwq;
+ u32 tid_flags;
+ u32 max_qdepth_bytes;
+ u32 max_qdepth_n_msdus;
+ u32 rsvd;
+
+ u32 qdepth_bytes;
+ u32 qdepth_num_msdu;
+ u32 qdepth_num_mpdu;
+ u32 last_scheduled_tsmp;
+ u32 pause_module_id;
+ u32 block_module_id;
+ u32 tid_tx_airtime;
+ u32 allow_n_flags;
+ u32 sendn_frms_allowed;
+};
+
+#define HTT_RX_TID_STATS_SW_PEER_ID GENMASK(15, 0)
+#define HTT_RX_TID_STATS_TID_NUM GENMASK(31, 16)
+
+struct htt_rx_tid_stats_tlv {
+ u32 sw_peer_id__tid_num;
+ u8 tid_name[MAX_HTT_TID_NAME];
+ u32 dup_in_reorder;
+ u32 dup_past_outside_window;
+ u32 dup_past_within_window;
+ u32 rxdesc_err_decrypt;
+ u32 tid_rx_airtime;
+};
+
+#define HTT_MAX_COUNTER_NAME 8
+struct htt_counter_tlv {
+ u8 counter_name[HTT_MAX_COUNTER_NAME];
+ u32 count;
+};
+
+struct htt_peer_stats_cmn_tlv {
+ u32 ppdu_cnt;
+ u32 mpdu_cnt;
+ u32 msdu_cnt;
+ u32 pause_bitmap;
+ u32 block_bitmap;
+ u32 current_timestamp;
+ u32 peer_tx_airtime;
+ u32 peer_rx_airtime;
+ s32 rssi;
+ u32 peer_enqueued_count_low;
+ u32 peer_enqueued_count_high;
+ u32 peer_dequeued_count_low;
+ u32 peer_dequeued_count_high;
+ u32 peer_dropped_count_low;
+ u32 peer_dropped_count_high;
+ u32 ppdu_transmitted_bytes_low;
+ u32 ppdu_transmitted_bytes_high;
+ u32 peer_ttl_removed_count;
+ u32 inactive_time;
+};
+
+#define HTT_PEER_DETAILS_VDEV_ID GENMASK(7, 0)
+#define HTT_PEER_DETAILS_PDEV_ID GENMASK(15, 8)
+#define HTT_PEER_DETAILS_AST_IDX GENMASK(31, 16)
+
+struct htt_peer_details_tlv {
+ u32 peer_type;
+ u32 sw_peer_id;
+ u32 vdev_pdev_ast_idx;
+ struct htt_mac_addr mac_addr;
+ u32 peer_flags;
+ u32 qpeer_flags;
+};
+
+enum htt_stats_param_type {
+ HTT_STATS_PREAM_OFDM,
+ HTT_STATS_PREAM_CCK,
+ HTT_STATS_PREAM_HT,
+ HTT_STATS_PREAM_VHT,
+ HTT_STATS_PREAM_HE,
+ HTT_STATS_PREAM_RSVD,
+ HTT_STATS_PREAM_RSVD1,
+
+ HTT_STATS_PREAM_COUNT,
+};
+
+#define HTT_TX_PEER_STATS_NUM_MCS_COUNTERS 12
+#define HTT_TX_PEER_STATS_NUM_GI_COUNTERS 4
+#define HTT_TX_PEER_STATS_NUM_DCM_COUNTERS 5
+#define HTT_TX_PEER_STATS_NUM_BW_COUNTERS 4
+#define HTT_TX_PEER_STATS_NUM_SPATIAL_STREAMS 8
+#define HTT_TX_PEER_STATS_NUM_PREAMBLE_TYPES HTT_STATS_PREAM_COUNT
+
+struct htt_tx_peer_rate_stats_tlv {
+ u32 tx_ldpc;
+ u32 rts_cnt;
+ u32 ack_rssi;
+
+ u32 tx_mcs[HTT_TX_PEER_STATS_NUM_MCS_COUNTERS];
+ u32 tx_su_mcs[HTT_TX_PEER_STATS_NUM_MCS_COUNTERS];
+ u32 tx_mu_mcs[HTT_TX_PEER_STATS_NUM_MCS_COUNTERS];
+ /* element 0,1, ...7 -> NSS 1,2, ...8 */
+ u32 tx_nss[HTT_TX_PEER_STATS_NUM_SPATIAL_STREAMS];
+ /* element 0: 20 MHz, 1: 40 MHz, 2: 80 MHz, 3: 160 and 80+80 MHz */
+ u32 tx_bw[HTT_TX_PEER_STATS_NUM_BW_COUNTERS];
+ u32 tx_stbc[HTT_TX_PEER_STATS_NUM_MCS_COUNTERS];
+ u32 tx_pream[HTT_TX_PEER_STATS_NUM_PREAMBLE_TYPES];
+
+ /* Counters to track number of tx packets in each GI
+ * (400us, 800us, 1600us & 3200us) in each mcs (0-11)
+ */
+ u32 tx_gi[HTT_TX_PEER_STATS_NUM_GI_COUNTERS][HTT_TX_PEER_STATS_NUM_MCS_COUNTERS];
+
+ /* Counters to track packets in dcm mcs (MCS 0, 1, 3, 4) */
+ u32 tx_dcm[HTT_TX_PEER_STATS_NUM_DCM_COUNTERS];
+};
+
+#define HTT_RX_PEER_STATS_NUM_MCS_COUNTERS 12
+#define HTT_RX_PEER_STATS_NUM_GI_COUNTERS 4
+#define HTT_RX_PEER_STATS_NUM_DCM_COUNTERS 5
+#define HTT_RX_PEER_STATS_NUM_BW_COUNTERS 4
+#define HTT_RX_PEER_STATS_NUM_SPATIAL_STREAMS 8
+#define HTT_RX_PEER_STATS_NUM_PREAMBLE_TYPES HTT_STATS_PREAM_COUNT
+
+struct htt_rx_peer_rate_stats_tlv {
+ u32 nsts;
+
+ /* Number of rx ldpc packets */
+ u32 rx_ldpc;
+ /* Number of rx rts packets */
+ u32 rts_cnt;
+
+ u32 rssi_mgmt; /* units = dB above noise floor */
+ u32 rssi_data; /* units = dB above noise floor */
+ u32 rssi_comb; /* units = dB above noise floor */
+ u32 rx_mcs[HTT_RX_PEER_STATS_NUM_MCS_COUNTERS];
+ /* element 0,1, ...7 -> NSS 1,2, ...8 */
+ u32 rx_nss[HTT_RX_PEER_STATS_NUM_SPATIAL_STREAMS];
+ u32 rx_dcm[HTT_RX_PEER_STATS_NUM_DCM_COUNTERS];
+ u32 rx_stbc[HTT_RX_PEER_STATS_NUM_MCS_COUNTERS];
+ /* element 0: 20 MHz, 1: 40 MHz, 2: 80 MHz, 3: 160 and 80+80 MHz */
+ u32 rx_bw[HTT_RX_PEER_STATS_NUM_BW_COUNTERS];
+ u32 rx_pream[HTT_RX_PEER_STATS_NUM_PREAMBLE_TYPES];
+ /* units = dB above noise floor */
+ u8 rssi_chain[HTT_RX_PEER_STATS_NUM_SPATIAL_STREAMS]
+ [HTT_RX_PEER_STATS_NUM_BW_COUNTERS];
+
+ /* Counters to track number of rx packets in each GI in each mcs (0-11) */
+ u32 rx_gi[HTT_RX_PEER_STATS_NUM_GI_COUNTERS]
+ [HTT_RX_PEER_STATS_NUM_MCS_COUNTERS];
+};
+
+enum htt_peer_stats_req_mode {
+ HTT_PEER_STATS_REQ_MODE_NO_QUERY,
+ HTT_PEER_STATS_REQ_MODE_QUERY_TQM,
+ HTT_PEER_STATS_REQ_MODE_FLUSH_TQM,
+};
+
+enum htt_peer_stats_tlv_enum {
+ HTT_PEER_STATS_CMN_TLV = 0,
+ HTT_PEER_DETAILS_TLV = 1,
+ HTT_TX_PEER_RATE_STATS_TLV = 2,
+ HTT_RX_PEER_RATE_STATS_TLV = 3,
+ HTT_TX_TID_STATS_TLV = 4,
+ HTT_RX_TID_STATS_TLV = 5,
+ HTT_MSDU_FLOW_STATS_TLV = 6,
+
+ HTT_PEER_STATS_MAX_TLV = 31,
+};
+
+/* =========== MUMIMO HWQ stats =========== */
+/* MU MIMO stats per hwQ */
+struct htt_tx_hwq_mu_mimo_sch_stats_tlv {
+ u32 mu_mimo_sch_posted;
+ u32 mu_mimo_sch_failed;
+ u32 mu_mimo_ppdu_posted;
+};
+
+struct htt_tx_hwq_mu_mimo_mpdu_stats_tlv {
+ u32 mu_mimo_mpdus_queued_usr;
+ u32 mu_mimo_mpdus_tried_usr;
+ u32 mu_mimo_mpdus_failed_usr;
+ u32 mu_mimo_mpdus_requeued_usr;
+ u32 mu_mimo_err_no_ba_usr;
+ u32 mu_mimo_mpdu_underrun_usr;
+ u32 mu_mimo_ampdu_underrun_usr;
+};
+
+#define HTT_TX_HWQ_STATS_MAC_ID GENMASK(7, 0)
+#define HTT_TX_HWQ_STATS_HWQ_ID GENMASK(15, 8)
+
+struct htt_tx_hwq_mu_mimo_cmn_stats_tlv {
+ u32 mac_id__hwq_id__word;
+};
+
+/* == TX HWQ STATS == */
+struct htt_tx_hwq_stats_cmn_tlv {
+ u32 mac_id__hwq_id__word;
+
+ /* PPDU level stats */
+ u32 xretry;
+ u32 underrun_cnt;
+ u32 flush_cnt;
+ u32 filt_cnt;
+ u32 null_mpdu_bmap;
+ u32 user_ack_failure;
+ u32 ack_tlv_proc;
+ u32 sched_id_proc;
+ u32 null_mpdu_tx_count;
+ u32 mpdu_bmap_not_recvd;
+
+ /* Selfgen stats per hwQ */
+ u32 num_bar;
+ u32 rts;
+ u32 cts2self;
+ u32 qos_null;
+
+ /* MPDU level stats */
+ u32 mpdu_tried_cnt;
+ u32 mpdu_queued_cnt;
+ u32 mpdu_ack_fail_cnt;
+ u32 mpdu_filt_cnt;
+ u32 false_mpdu_ack_count;
+
+ u32 txq_timeout;
+};
+
+/* NOTE: Variable length TLV, use length spec to infer array size */
+struct htt_tx_hwq_difs_latency_stats_tlv_v {
+ u32 hist_intvl;
+	/* histogram of ppdu post to hwsch -> cmd status received */
+ u32 difs_latency_hist[]; /* HTT_TX_HWQ_MAX_DIFS_LATENCY_BINS */
+};
+
+/* NOTE: Variable length TLV, use length spec to infer array size */
+struct htt_tx_hwq_cmd_result_stats_tlv_v {
+ /* Histogram of sched cmd result, HTT_TX_HWQ_MAX_CMD_RESULT_STATS */
+ DECLARE_FLEX_ARRAY(u32, cmd_result);
+};
+
+/* NOTE: Variable length TLV, use length spec to infer array size */
+struct htt_tx_hwq_cmd_stall_stats_tlv_v {
+	/* Histogram of various pause conditions, HTT_TX_HWQ_MAX_CMD_STALL_STATS */
+ DECLARE_FLEX_ARRAY(u32, cmd_stall_status);
+};
+
+/* NOTE: Variable length TLV, use length spec to infer array size */
+struct htt_tx_hwq_fes_result_stats_tlv_v {
+ /* Histogram of number of user fes result, HTT_TX_HWQ_MAX_FES_RESULT_STATS */
+ DECLARE_FLEX_ARRAY(u32, fes_result);
+};
+
+/* NOTE: Variable length TLV, use length spec to infer array size
+ *
+ * The hwq_tried_mpdu_cnt_hist is a histogram of MPDU tries per HWQ.
+ * The tries here is the count of the MPDUs within a PPDU that the HW
+ * had attempted to transmit on air, for the HWSCH Schedule command
+ * submitted by FW in this HWQ. It is not the retry attempts. The
+ * histogram bins are 0-29, 30-59, 60-89 and so on; there are 10 bins
+ * in this histogram.
+ * They are defined in FW using the following macros:
+ * #define WAL_MAX_TRIED_MPDU_CNT_HISTOGRAM 9
+ * #define WAL_TRIED_MPDU_CNT_HISTOGRAM_INTERVAL 30
+ */
+struct htt_tx_hwq_tried_mpdu_cnt_hist_tlv_v {
+ u32 hist_bin_size;
+ /* Histogram of number of mpdus on tried mpdu */
+ u32 tried_mpdu_cnt_hist[]; /* HTT_TX_HWQ_TRIED_MPDU_CNT_HIST */
+};
+
+/* NOTE: Variable length TLV, use length spec to infer array size
+ *
+ * The txop_used_cnt_hist is the histogram of txop per burst. After
+ * completing the burst, we identify the txop used in the burst and
+ * increment the corresponding bin.
+ * Each bin represents 1 ms and there are 10 bins in this histogram.
+ * They are defined in FW using the following macros:
+ * #define WAL_MAX_TXOP_USED_CNT_HISTOGRAM 10
+ * #define WAL_TXOP_USED_HISTOGRAM_INTERVAL 1000 (1 ms)
+ */
+struct htt_tx_hwq_txop_used_cnt_hist_tlv_v {
+ /* Histogram of txop used cnt, HTT_TX_HWQ_TXOP_USED_CNT_HIST */
+ DECLARE_FLEX_ARRAY(u32, txop_used_cnt_hist);
+};
+
+/* == TX SELFGEN STATS == */
+struct htt_tx_selfgen_cmn_stats_tlv {
+ u32 mac_id__word;
+ u32 su_bar;
+ u32 rts;
+ u32 cts2self;
+ u32 qos_null;
+ u32 delayed_bar_1; /* MU user 1 */
+ u32 delayed_bar_2; /* MU user 2 */
+ u32 delayed_bar_3; /* MU user 3 */
+ u32 delayed_bar_4; /* MU user 4 */
+ u32 delayed_bar_5; /* MU user 5 */
+ u32 delayed_bar_6; /* MU user 6 */
+ u32 delayed_bar_7; /* MU user 7 */
+};
+
+struct htt_tx_selfgen_ac_stats_tlv {
+ /* 11AC */
+ u32 ac_su_ndpa;
+ u32 ac_su_ndp;
+ u32 ac_mu_mimo_ndpa;
+ u32 ac_mu_mimo_ndp;
+ u32 ac_mu_mimo_brpoll_1; /* MU user 1 */
+ u32 ac_mu_mimo_brpoll_2; /* MU user 2 */
+ u32 ac_mu_mimo_brpoll_3; /* MU user 3 */
+};
+
+struct htt_tx_selfgen_ax_stats_tlv {
+ /* 11AX */
+ u32 ax_su_ndpa;
+ u32 ax_su_ndp;
+ u32 ax_mu_mimo_ndpa;
+ u32 ax_mu_mimo_ndp;
+ u32 ax_mu_mimo_brpoll_1; /* MU user 1 */
+ u32 ax_mu_mimo_brpoll_2; /* MU user 2 */
+ u32 ax_mu_mimo_brpoll_3; /* MU user 3 */
+ u32 ax_mu_mimo_brpoll_4; /* MU user 4 */
+ u32 ax_mu_mimo_brpoll_5; /* MU user 5 */
+ u32 ax_mu_mimo_brpoll_6; /* MU user 6 */
+ u32 ax_mu_mimo_brpoll_7; /* MU user 7 */
+ u32 ax_basic_trigger;
+ u32 ax_bsr_trigger;
+ u32 ax_mu_bar_trigger;
+ u32 ax_mu_rts_trigger;
+ u32 ax_ulmumimo_trigger;
+};
+
+struct htt_tx_selfgen_ac_err_stats_tlv {
+ /* 11AC error stats */
+ u32 ac_su_ndp_err;
+ u32 ac_su_ndpa_err;
+ u32 ac_mu_mimo_ndpa_err;
+ u32 ac_mu_mimo_ndp_err;
+ u32 ac_mu_mimo_brp1_err;
+ u32 ac_mu_mimo_brp2_err;
+ u32 ac_mu_mimo_brp3_err;
+};
+
+struct htt_tx_selfgen_ax_err_stats_tlv {
+ /* 11AX error stats */
+ u32 ax_su_ndp_err;
+ u32 ax_su_ndpa_err;
+ u32 ax_mu_mimo_ndpa_err;
+ u32 ax_mu_mimo_ndp_err;
+ u32 ax_mu_mimo_brp1_err;
+ u32 ax_mu_mimo_brp2_err;
+ u32 ax_mu_mimo_brp3_err;
+ u32 ax_mu_mimo_brp4_err;
+ u32 ax_mu_mimo_brp5_err;
+ u32 ax_mu_mimo_brp6_err;
+ u32 ax_mu_mimo_brp7_err;
+ u32 ax_basic_trigger_err;
+ u32 ax_bsr_trigger_err;
+ u32 ax_mu_bar_trigger_err;
+ u32 ax_mu_rts_trigger_err;
+ u32 ax_ulmumimo_trigger_err;
+};
+
+/* == TX MU STATS == */
+#define HTT_TX_PDEV_STATS_NUM_AC_MUMIMO_USER_STATS 4
+#define HTT_TX_PDEV_STATS_NUM_AX_MUMIMO_USER_STATS 8
+#define HTT_TX_PDEV_STATS_NUM_OFDMA_USER_STATS 74
+#define HTT_TX_PDEV_STATS_NUM_UL_MUMIMO_USER_STATS 8
+
+struct htt_tx_pdev_mu_mimo_sch_stats_tlv {
+ /* mu-mimo sw sched cmd stats */
+ u32 mu_mimo_sch_posted;
+ u32 mu_mimo_sch_failed;
+ /* MU PPDU stats per hwQ */
+ u32 mu_mimo_ppdu_posted;
+ /*
+ * Counts the number of users in each transmission of
+ * the given TX mode.
+ *
+ * Index is the number of users - 1.
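+	 * e.g. ac_mu_mimo_sch_nusers[1] counts two-user transmissions.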
+ */
+ u32 ac_mu_mimo_sch_nusers[HTT_TX_PDEV_STATS_NUM_AC_MUMIMO_USER_STATS];
+ u32 ax_mu_mimo_sch_nusers[HTT_TX_PDEV_STATS_NUM_AX_MUMIMO_USER_STATS];
+ u32 ax_ofdma_sch_nusers[HTT_TX_PDEV_STATS_NUM_OFDMA_USER_STATS];
+ u32 ax_ul_ofdma_basic_sch_nusers[HTT_TX_PDEV_STATS_NUM_OFDMA_USER_STATS];
+ u32 ax_ul_ofdma_bsr_sch_nusers[HTT_TX_PDEV_STATS_NUM_OFDMA_USER_STATS];
+ u32 ax_ul_ofdma_bar_sch_nusers[HTT_TX_PDEV_STATS_NUM_OFDMA_USER_STATS];
+ u32 ax_ul_ofdma_brp_sch_nusers[HTT_TX_PDEV_STATS_NUM_OFDMA_USER_STATS];
+
+ /* UL MU-MIMO */
+ /* ax_ul_mumimo_basic_sch_nusers[i] is the number of basic triggers sent
+ * for (i+1) users
+ */
+ u32 ax_ul_mumimo_basic_sch_nusers[HTT_TX_PDEV_STATS_NUM_UL_MUMIMO_USER_STATS];
+
+ /* ax_ul_mumimo_brp_sch_nusers[i] is the number of brp triggers sent
+ * for (i+1) users
+ */
+ u32 ax_ul_mumimo_brp_sch_nusers[HTT_TX_PDEV_STATS_NUM_UL_MUMIMO_USER_STATS];
+
+ u32 ac_mu_mimo_sch_posted_per_grp_sz[HTT_TX_PDEV_STATS_NUM_AC_MUMIMO_USER_STATS];
+ u32 ax_mu_mimo_sch_posted_per_grp_sz[HTT_TX_PDEV_STATS_NUM_AX_MUMIMO_USER_STATS];
+};
+
+struct htt_tx_pdev_mu_mimo_mpdu_stats_tlv {
+ u32 mu_mimo_mpdus_queued_usr;
+ u32 mu_mimo_mpdus_tried_usr;
+ u32 mu_mimo_mpdus_failed_usr;
+ u32 mu_mimo_mpdus_requeued_usr;
+ u32 mu_mimo_err_no_ba_usr;
+ u32 mu_mimo_mpdu_underrun_usr;
+ u32 mu_mimo_ampdu_underrun_usr;
+
+ u32 ax_mu_mimo_mpdus_queued_usr;
+ u32 ax_mu_mimo_mpdus_tried_usr;
+ u32 ax_mu_mimo_mpdus_failed_usr;
+ u32 ax_mu_mimo_mpdus_requeued_usr;
+ u32 ax_mu_mimo_err_no_ba_usr;
+ u32 ax_mu_mimo_mpdu_underrun_usr;
+ u32 ax_mu_mimo_ampdu_underrun_usr;
+
+ u32 ax_ofdma_mpdus_queued_usr;
+ u32 ax_ofdma_mpdus_tried_usr;
+ u32 ax_ofdma_mpdus_failed_usr;
+ u32 ax_ofdma_mpdus_requeued_usr;
+ u32 ax_ofdma_err_no_ba_usr;
+ u32 ax_ofdma_mpdu_underrun_usr;
+ u32 ax_ofdma_ampdu_underrun_usr;
+};
+
+#define HTT_STATS_TX_SCHED_MODE_MU_MIMO_AC 1
+#define HTT_STATS_TX_SCHED_MODE_MU_MIMO_AX 2
+#define HTT_STATS_TX_SCHED_MODE_MU_OFDMA_AX 3
+
+struct htt_tx_pdev_mpdu_stats_tlv {
+ /* mpdu level stats */
+ u32 mpdus_queued_usr;
+ u32 mpdus_tried_usr;
+ u32 mpdus_failed_usr;
+ u32 mpdus_requeued_usr;
+ u32 err_no_ba_usr;
+ u32 mpdu_underrun_usr;
+ u32 ampdu_underrun_usr;
+ u32 user_index;
+ u32 tx_sched_mode; /* HTT_STATS_TX_SCHED_MODE_xxx */
+};
+
+/* == TX SCHED STATS == */
+/* NOTE: Variable length TLV, use length spec to infer array size */
+struct htt_sched_txq_cmd_posted_tlv_v {
+ /* HTT_TX_PDEV_SCHED_TX_MODE_MAX */
+ DECLARE_FLEX_ARRAY(u32, sched_cmd_posted);
+};
+
+/* NOTE: Variable length TLV, use length spec to infer array size */
+struct htt_sched_txq_cmd_reaped_tlv_v {
+ /* HTT_TX_PDEV_SCHED_TX_MODE_MAX */
+ DECLARE_FLEX_ARRAY(u32, sched_cmd_reaped);
+};
+
+/* NOTE: Variable length TLV, use length spec to infer array size */
+struct htt_sched_txq_sched_order_su_tlv_v {
+ /* HTT_TX_PDEV_NUM_SCHED_ORDER_LOG */
+ DECLARE_FLEX_ARRAY(u32, sched_order_su);
+};
+
+enum htt_sched_txq_sched_ineligibility_tlv_enum {
+ HTT_SCHED_TID_SKIP_SCHED_MASK_DISABLED = 0,
+ HTT_SCHED_TID_SKIP_NOTIFY_MPDU,
+ HTT_SCHED_TID_SKIP_MPDU_STATE_INVALID,
+ HTT_SCHED_TID_SKIP_SCHED_DISABLED,
+ HTT_SCHED_TID_SKIP_TQM_BYPASS_CMD_PENDING,
+ HTT_SCHED_TID_SKIP_SECOND_SU_SCHEDULE,
+
+ HTT_SCHED_TID_SKIP_CMD_SLOT_NOT_AVAIL,
+ HTT_SCHED_TID_SKIP_NO_ENQ,
+ HTT_SCHED_TID_SKIP_LOW_ENQ,
+ HTT_SCHED_TID_SKIP_PAUSED,
+ HTT_SCHED_TID_SKIP_UL,
+ HTT_SCHED_TID_REMOVE_PAUSED,
+ HTT_SCHED_TID_REMOVE_NO_ENQ,
+ HTT_SCHED_TID_REMOVE_UL,
+ HTT_SCHED_TID_QUERY,
+ HTT_SCHED_TID_SU_ONLY,
+ HTT_SCHED_TID_ELIGIBLE,
+ HTT_SCHED_INELIGIBILITY_MAX,
+};
+
+/* NOTE: Variable length TLV, use length spec to infer array size */
+struct htt_sched_txq_sched_ineligibility_tlv_v {
+ /* indexed by htt_sched_txq_sched_ineligibility_tlv_enum */
+ DECLARE_FLEX_ARRAY(u32, sched_ineligibility);
+};
+
+#define HTT_TX_PDEV_STATS_SCHED_PER_TXQ_MAC_ID GENMASK(7, 0)
+#define HTT_TX_PDEV_STATS_SCHED_PER_TXQ_ID GENMASK(15, 8)
+
+struct htt_tx_pdev_stats_sched_per_txq_tlv {
+ u32 mac_id__txq_id__word;
+ u32 sched_policy;
+ u32 last_sched_cmd_posted_timestamp;
+ u32 last_sched_cmd_compl_timestamp;
+ u32 sched_2_tac_lwm_count;
+ u32 sched_2_tac_ring_full;
+ u32 sched_cmd_post_failure;
+ u32 num_active_tids;
+ u32 num_ps_schedules;
+ u32 sched_cmds_pending;
+ u32 num_tid_register;
+ u32 num_tid_unregister;
+ u32 num_qstats_queried;
+ u32 qstats_update_pending;
+ u32 last_qstats_query_timestamp;
+ u32 num_tqm_cmdq_full;
+ u32 num_de_sched_algo_trigger;
+ u32 num_rt_sched_algo_trigger;
+ u32 num_tqm_sched_algo_trigger;
+ u32 notify_sched;
+ u32 dur_based_sendn_term;
+};
+
+struct htt_stats_tx_sched_cmn_tlv {
+ /* BIT [ 7 : 0] :- mac_id
+ * BIT [31 : 8] :- reserved
+ */
+ u32 mac_id__word;
+ /* Current timestamp */
+ u32 current_timestamp;
+};
+
+/* == TQM STATS == */
+#define HTT_TX_TQM_MAX_GEN_MPDU_END_REASON 16
+#define HTT_TX_TQM_MAX_LIST_MPDU_END_REASON 16
+#define HTT_TX_TQM_MAX_LIST_MPDU_CNT_HISTOGRAM_BINS 16
+
+/* NOTE: Variable length TLV, use length spec to infer array size */
+struct htt_tx_tqm_gen_mpdu_stats_tlv_v {
+ /* HTT_TX_TQM_MAX_GEN_MPDU_END_REASON */
+ DECLARE_FLEX_ARRAY(u32, gen_mpdu_end_reason);
+};
+
+/* NOTE: Variable length TLV, use length spec to infer array size */
+struct htt_tx_tqm_list_mpdu_stats_tlv_v {
+ /* HTT_TX_TQM_MAX_LIST_MPDU_END_REASON */
+ DECLARE_FLEX_ARRAY(u32, list_mpdu_end_reason);
+};
+
+/* NOTE: Variable length TLV, use length spec to infer array size */
+struct htt_tx_tqm_list_mpdu_cnt_tlv_v {
+ /* HTT_TX_TQM_MAX_LIST_MPDU_CNT_HISTOGRAM_BINS */
+ DECLARE_FLEX_ARRAY(u32, list_mpdu_cnt_hist);
+};
+
+struct htt_tx_tqm_pdev_stats_tlv_v {
+ u32 msdu_count;
+ u32 mpdu_count;
+ u32 remove_msdu;
+ u32 remove_mpdu;
+ u32 remove_msdu_ttl;
+ u32 send_bar;
+ u32 bar_sync;
+ u32 notify_mpdu;
+ u32 sync_cmd;
+ u32 write_cmd;
+ u32 hwsch_trigger;
+ u32 ack_tlv_proc;
+ u32 gen_mpdu_cmd;
+ u32 gen_list_cmd;
+ u32 remove_mpdu_cmd;
+ u32 remove_mpdu_tried_cmd;
+ u32 mpdu_queue_stats_cmd;
+ u32 mpdu_head_info_cmd;
+ u32 msdu_flow_stats_cmd;
+ u32 remove_msdu_cmd;
+ u32 remove_msdu_ttl_cmd;
+ u32 flush_cache_cmd;
+ u32 update_mpduq_cmd;
+ u32 enqueue;
+ u32 enqueue_notify;
+ u32 notify_mpdu_at_head;
+ u32 notify_mpdu_state_valid;
+	/*
+	 * On receiving TQM_FLOW_NOT_EMPTY_STATUS from TQM (i.e. MSDUs have
+	 * been enqueued and the flow is non-empty), if the number of MSDUs
+	 * exceeds the threshold, the matching notify counter is incremented.
+	 * The UDP_THRESH counters are for UDP MSDUs; NONUDP are for non-UDP
+	 * MSDUs.
+	 * MSDUQ_SWNOTIFY_UDP_THRESH1 threshold - sched_udp_notify1 is incremented
+	 * MSDUQ_SWNOTIFY_UDP_THRESH2 threshold - sched_udp_notify2 is incremented
+	 * MSDUQ_SWNOTIFY_NONUDP_THRESH1 threshold - sched_nonudp_notify1 is incremented
+	 * MSDUQ_SWNOTIFY_NONUDP_THRESH2 threshold - sched_nonudp_notify2 is incremented
+	 *
+	 * A notify signifies that we trigger the scheduler.
+	 */
+ u32 sched_udp_notify1;
+ u32 sched_udp_notify2;
+ u32 sched_nonudp_notify1;
+ u32 sched_nonudp_notify2;
+};
+
+struct htt_tx_tqm_cmn_stats_tlv {
+ u32 mac_id__word;
+ u32 max_cmdq_id;
+ u32 list_mpdu_cnt_hist_intvl;
+
+ /* Global stats */
+ u32 add_msdu;
+ u32 q_empty;
+ u32 q_not_empty;
+ u32 drop_notification;
+ u32 desc_threshold;
+};
+
+struct htt_tx_tqm_error_stats_tlv {
+ /* Error stats */
+ u32 q_empty_failure;
+ u32 q_not_empty_failure;
+ u32 add_msdu_failure;
+};
+
+/* == TQM CMDQ stats == */
+#define HTT_TX_TQM_CMDQ_STATUS_MAC_ID GENMASK(7, 0)
+#define HTT_TX_TQM_CMDQ_STATUS_CMDQ_ID GENMASK(15, 8)
+
+struct htt_tx_tqm_cmdq_status_tlv {
+ u32 mac_id__cmdq_id__word;
+ u32 sync_cmd;
+ u32 write_cmd;
+ u32 gen_mpdu_cmd;
+ u32 mpdu_queue_stats_cmd;
+ u32 mpdu_head_info_cmd;
+ u32 msdu_flow_stats_cmd;
+ u32 remove_mpdu_cmd;
+ u32 remove_msdu_cmd;
+ u32 flush_cache_cmd;
+ u32 update_mpduq_cmd;
+ u32 update_msduq_cmd;
+};
+
+/* == TX-DE STATS == */
+/* Structures for tx de stats */
+struct htt_tx_de_eapol_packets_stats_tlv {
+ u32 m1_packets;
+ u32 m2_packets;
+ u32 m3_packets;
+ u32 m4_packets;
+ u32 g1_packets;
+ u32 g2_packets;
+};
+
+struct htt_tx_de_classify_failed_stats_tlv {
+ u32 ap_bss_peer_not_found;
+ u32 ap_bcast_mcast_no_peer;
+ u32 sta_delete_in_progress;
+ u32 ibss_no_bss_peer;
+ u32 invalid_vdev_type;
+ u32 invalid_ast_peer_entry;
+ u32 peer_entry_invalid;
+ u32 ethertype_not_ip;
+ u32 eapol_lookup_failed;
+ u32 qpeer_not_allow_data;
+ u32 fse_tid_override;
+ u32 ipv6_jumbogram_zero_length;
+ u32 qos_to_non_qos_in_prog;
+};
+
+struct htt_tx_de_classify_stats_tlv {
+ u32 arp_packets;
+ u32 igmp_packets;
+ u32 dhcp_packets;
+ u32 host_inspected;
+ u32 htt_included;
+ u32 htt_valid_mcs;
+ u32 htt_valid_nss;
+ u32 htt_valid_preamble_type;
+ u32 htt_valid_chainmask;
+ u32 htt_valid_guard_interval;
+ u32 htt_valid_retries;
+ u32 htt_valid_bw_info;
+ u32 htt_valid_power;
+ u32 htt_valid_key_flags;
+ u32 htt_valid_no_encryption;
+ u32 fse_entry_count;
+ u32 fse_priority_be;
+ u32 fse_priority_high;
+ u32 fse_priority_low;
+ u32 fse_traffic_ptrn_be;
+ u32 fse_traffic_ptrn_over_sub;
+ u32 fse_traffic_ptrn_bursty;
+ u32 fse_traffic_ptrn_interactive;
+ u32 fse_traffic_ptrn_periodic;
+ u32 fse_hwqueue_alloc;
+ u32 fse_hwqueue_created;
+ u32 fse_hwqueue_send_to_host;
+ u32 mcast_entry;
+ u32 bcast_entry;
+ u32 htt_update_peer_cache;
+ u32 htt_learning_frame;
+ u32 fse_invalid_peer;
+ /*
+ * mec_notify is HTT TX WBM multicast echo check notification
+ * from firmware to host. FW sends SA addresses to host for all
+ * multicast/broadcast packets received on STA side.
+ */
+ u32 mec_notify;
+};
+
+struct htt_tx_de_classify_status_stats_tlv {
+ u32 eok;
+ u32 classify_done;
+ u32 lookup_failed;
+ u32 send_host_dhcp;
+ u32 send_host_mcast;
+ u32 send_host_unknown_dest;
+ u32 send_host;
+ u32 status_invalid;
+};
+
+struct htt_tx_de_enqueue_packets_stats_tlv {
+ u32 enqueued_pkts;
+ u32 to_tqm;
+ u32 to_tqm_bypass;
+};
+
+struct htt_tx_de_enqueue_discard_stats_tlv {
+ u32 discarded_pkts;
+ u32 local_frames;
+ u32 is_ext_msdu;
+};
+
+struct htt_tx_de_compl_stats_tlv {
+ u32 tcl_dummy_frame;
+ u32 tqm_dummy_frame;
+ u32 tqm_notify_frame;
+ u32 fw2wbm_enq;
+ u32 tqm_bypass_frame;
+};
+
+/*
+ * The htt_tx_de_fw2wbm_ring_full_hist_tlv is a histogram of the time we
+ * waited for a buffer in the FW2WBM release ring. Requesting a buffer
+ * may fail when none is available, in which case we sleep for 200 us
+ * and request again. This is a histogram of the total time waited, with
+ * bins of 200 ms; there are 10 bins (2 seconds max), so e.g. a wait of
+ * ~450 ms lands in the third bin.
+ * The bins are defined by the following macros in FW:
+ * #define ENTRIES_PER_BIN_COUNT 1000 // per bin 1000 * 200us = 200ms
+ * #define RING_FULL_BIN_ENTRIES (WAL_TX_DE_FW2WBM_ALLOC_TIMEOUT_COUNT /
+ *                                ENTRIES_PER_BIN_COUNT)
+ */
+struct htt_tx_de_fw2wbm_ring_full_hist_tlv {
+ DECLARE_FLEX_ARRAY(u32, fw2wbm_ring_full_hist);
+};
+
+struct htt_tx_de_cmn_stats_tlv {
+ u32 mac_id__word;
+
+ /* Global Stats */
+ u32 tcl2fw_entry_count;
+ u32 not_to_fw;
+ u32 invalid_pdev_vdev_peer;
+ u32 tcl_res_invalid_addrx;
+ u32 wbm2fw_entry_count;
+ u32 invalid_pdev;
+};
+
+/* == RING-IF STATS == */
+#define HTT_STATS_LOW_WM_BINS 5
+#define HTT_STATS_HIGH_WM_BINS 5
+
+#define HTT_RING_IF_STATS_NUM_ELEMS GENMASK(15, 0)
+#define HTT_RING_IF_STATS_PREFETCH_TAIL_INDEX GENMASK(31, 16)
+#define HTT_RING_IF_STATS_HEAD_IDX GENMASK(15, 0)
+#define HTT_RING_IF_STATS_TAIL_IDX GENMASK(31, 16)
+#define HTT_RING_IF_STATS_SHADOW_HEAD_IDX GENMASK(15, 0)
+#define HTT_RING_IF_STATS_SHADOW_TAIL_IDX GENMASK(31, 16)
+#define HTT_RING_IF_STATS_LWM_THRESH GENMASK(15, 0)
+#define HTT_RING_IF_STATS_HWM_THRESH GENMASK(31, 16)
+
+struct htt_ring_if_stats_tlv {
+ u32 base_addr; /* DWORD aligned base memory address of the ring */
+ u32 elem_size;
+ u32 num_elems__prefetch_tail_idx;
+ u32 head_idx__tail_idx;
+ u32 shadow_head_idx__shadow_tail_idx;
+ u32 num_tail_incr;
+ u32 lwm_thresh__hwm_thresh;
+ u32 overrun_hit_count;
+ u32 underrun_hit_count;
+ u32 prod_blockwait_count;
+ u32 cons_blockwait_count;
+ u32 low_wm_hit_count[HTT_STATS_LOW_WM_BINS];
+ u32 high_wm_hit_count[HTT_STATS_HIGH_WM_BINS];
+};
+
+struct htt_ring_if_cmn_tlv {
+ u32 mac_id__word;
+ u32 num_records;
+};
+
+/* == SFM STATS == */
+/* NOTE: Variable length TLV, use length spec to infer array size */
+struct htt_sfm_client_user_tlv_v {
+ /* Number of DWORDS used per user and per client */
+ DECLARE_FLEX_ARRAY(u32, dwords_used_by_user_n);
+};
+
+struct htt_sfm_client_tlv {
+ /* Client ID */
+ u32 client_id;
+ /* Minimum number of buffers */
+ u32 buf_min;
+ /* Maximum number of buffers */
+ u32 buf_max;
+ /* Number of Busy buffers */
+ u32 buf_busy;
+ /* Number of Allocated buffers */
+ u32 buf_alloc;
+ /* Number of Available/Usable buffers */
+ u32 buf_avail;
+ /* Number of users */
+ u32 num_users;
+};
+
+struct htt_sfm_cmn_tlv {
+ u32 mac_id__word;
+ /* Indicates the total number of 128 byte buffers
+ * in the CMEM that are available for buffer sharing
+ */
+ u32 buf_total;
+	/* Indicates that, for a certain client or all clients,
+	 * there is no dword saved in SFM, refer to SFM_R1_MEM_EMPTY
+	 */
+ u32 mem_empty;
+ /* DEALLOCATE_BUFFERS, refer to register SFM_R0_DEALLOCATE_BUFFERS */
+ u32 deallocate_bufs;
+ /* Number of Records */
+ u32 num_records;
+};
+
+/* == SRNG STATS == */
+#define HTT_SRING_STATS_MAC_ID GENMASK(7, 0)
+#define HTT_SRING_STATS_RING_ID GENMASK(15, 8)
+#define HTT_SRING_STATS_ARENA GENMASK(23, 16)
+#define HTT_SRING_STATS_EP BIT(24)
+#define HTT_SRING_STATS_NUM_AVAIL_WORDS GENMASK(15, 0)
+#define HTT_SRING_STATS_NUM_VALID_WORDS GENMASK(31, 16)
+#define HTT_SRING_STATS_HEAD_PTR GENMASK(15, 0)
+#define HTT_SRING_STATS_TAIL_PTR GENMASK(31, 16)
+#define HTT_SRING_STATS_CONSUMER_EMPTY GENMASK(15, 0)
+#define HTT_SRING_STATS_PRODUCER_FULL GENMASK(31, 16)
+#define HTT_SRING_STATS_PREFETCH_COUNT GENMASK(15, 0)
+#define HTT_SRING_STATS_INTERNAL_TAIL_PTR GENMASK(31, 16)
+
+struct htt_sring_stats_tlv {
+ u32 mac_id__ring_id__arena__ep;
+ u32 base_addr_lsb; /* DWORD aligned base memory address of the ring */
+ u32 base_addr_msb;
+ u32 ring_size;
+ u32 elem_size;
+
+ u32 num_avail_words__num_valid_words;
+ u32 head_ptr__tail_ptr;
+ u32 consumer_empty__producer_full;
+ u32 prefetch_count__internal_tail_ptr;
+};
+
+struct htt_sring_cmn_tlv {
+ u32 num_records;
+};
+
+/* == PDEV TX RATE CTRL STATS == */
+#define HTT_TX_PDEV_STATS_NUM_MCS_COUNTERS 12
+#define HTT_TX_PDEV_STATS_NUM_GI_COUNTERS 4
+#define HTT_TX_PDEV_STATS_NUM_DCM_COUNTERS 5
+#define HTT_TX_PDEV_STATS_NUM_BW_COUNTERS 4
+#define HTT_TX_PDEV_STATS_NUM_SPATIAL_STREAMS 8
+#define HTT_TX_PDEV_STATS_NUM_PREAMBLE_TYPES HTT_STATS_PREAM_COUNT
+#define HTT_TX_PDEV_STATS_NUM_LEGACY_CCK_STATS 4
+#define HTT_TX_PDEV_STATS_NUM_LEGACY_OFDM_STATS 8
+#define HTT_TX_PDEV_STATS_NUM_LTF 4
+
+#define HTT_TX_NUM_OF_SOUNDING_STATS_WORDS \
+ (HTT_TX_PDEV_STATS_NUM_BW_COUNTERS * \
+ HTT_TX_PDEV_STATS_NUM_AX_MUMIMO_USER_STATS)
+
+struct htt_tx_pdev_rate_stats_tlv {
+ u32 mac_id__word;
+ u32 tx_ldpc;
+ u32 rts_cnt;
+ /* RSSI value of last ack packet (units = dB above noise floor) */
+ u32 ack_rssi;
+
+ u32 tx_mcs[HTT_TX_PDEV_STATS_NUM_MCS_COUNTERS];
+
+ u32 tx_su_mcs[HTT_TX_PDEV_STATS_NUM_MCS_COUNTERS];
+ u32 tx_mu_mcs[HTT_TX_PDEV_STATS_NUM_MCS_COUNTERS];
+
+ /* element 0,1, ...7 -> NSS 1,2, ...8 */
+ u32 tx_nss[HTT_TX_PDEV_STATS_NUM_SPATIAL_STREAMS];
+ /* element 0: 20 MHz, 1: 40 MHz, 2: 80 MHz, 3: 160 and 80+80 MHz */
+ u32 tx_bw[HTT_TX_PDEV_STATS_NUM_BW_COUNTERS];
+ u32 tx_stbc[HTT_TX_PDEV_STATS_NUM_MCS_COUNTERS];
+ u32 tx_pream[HTT_TX_PDEV_STATS_NUM_PREAMBLE_TYPES];
+
+ /* Counters to track number of tx packets
+ * in each GI (400us, 800us, 1600us & 3200us) in each mcs (0-11)
+ */
+ u32 tx_gi[HTT_TX_PDEV_STATS_NUM_GI_COUNTERS][HTT_TX_PDEV_STATS_NUM_MCS_COUNTERS];
+
+ /* Counters to track packets in dcm mcs (MCS 0, 1, 3, 4) */
+ u32 tx_dcm[HTT_TX_PDEV_STATS_NUM_DCM_COUNTERS];
+ /* Number of CTS-acknowledged RTS packets */
+ u32 rts_success;
+
+ /*
+ * Counters for legacy 11a and 11b transmissions.
+ *
+ * The index corresponds to:
+ *
+ * CCK: 0: 1 Mbps, 1: 2 Mbps, 2: 5.5 Mbps, 3: 11 Mbps
+ *
+ * OFDM: 0: 6 Mbps, 1: 9 Mbps, 2: 12 Mbps, 3: 18 Mbps,
+ * 4: 24 Mbps, 5: 36 Mbps, 6: 48 Mbps, 7: 54 Mbps
+ */
+ u32 tx_legacy_cck_rate[HTT_TX_PDEV_STATS_NUM_LEGACY_CCK_STATS];
+ u32 tx_legacy_ofdm_rate[HTT_TX_PDEV_STATS_NUM_LEGACY_OFDM_STATS];
+
+ u32 ac_mu_mimo_tx_ldpc;
+ u32 ax_mu_mimo_tx_ldpc;
+ u32 ofdma_tx_ldpc;
+
+ /*
+ * Counters for 11ax HE LTF selection during TX.
+ *
+ * The index corresponds to:
+ *
+ * 0: unused, 1: 1x LTF, 2: 2x LTF, 3: 4x LTF
+ */
+ u32 tx_he_ltf[HTT_TX_PDEV_STATS_NUM_LTF];
+
+ u32 ac_mu_mimo_tx_mcs[HTT_TX_PDEV_STATS_NUM_MCS_COUNTERS];
+ u32 ax_mu_mimo_tx_mcs[HTT_TX_PDEV_STATS_NUM_MCS_COUNTERS];
+ u32 ofdma_tx_mcs[HTT_TX_PDEV_STATS_NUM_MCS_COUNTERS];
+
+ u32 ac_mu_mimo_tx_nss[HTT_TX_PDEV_STATS_NUM_SPATIAL_STREAMS];
+ u32 ax_mu_mimo_tx_nss[HTT_TX_PDEV_STATS_NUM_SPATIAL_STREAMS];
+ u32 ofdma_tx_nss[HTT_TX_PDEV_STATS_NUM_SPATIAL_STREAMS];
+
+ u32 ac_mu_mimo_tx_bw[HTT_TX_PDEV_STATS_NUM_BW_COUNTERS];
+ u32 ax_mu_mimo_tx_bw[HTT_TX_PDEV_STATS_NUM_BW_COUNTERS];
+ u32 ofdma_tx_bw[HTT_TX_PDEV_STATS_NUM_BW_COUNTERS];
+
+ u32 ac_mu_mimo_tx_gi[HTT_TX_PDEV_STATS_NUM_GI_COUNTERS]
+ [HTT_TX_PDEV_STATS_NUM_MCS_COUNTERS];
+ u32 ax_mu_mimo_tx_gi[HTT_TX_PDEV_STATS_NUM_GI_COUNTERS]
+ [HTT_TX_PDEV_STATS_NUM_MCS_COUNTERS];
+ u32 ofdma_tx_gi[HTT_TX_PDEV_STATS_NUM_GI_COUNTERS]
+ [HTT_TX_PDEV_STATS_NUM_MCS_COUNTERS];
+};
+
+/* == PDEV RX RATE CTRL STATS == */
+#define HTT_RX_PDEV_STATS_NUM_LEGACY_CCK_STATS 4
+#define HTT_RX_PDEV_STATS_NUM_LEGACY_OFDM_STATS 8
+#define HTT_RX_PDEV_STATS_NUM_MCS_COUNTERS 12
+#define HTT_RX_PDEV_STATS_NUM_GI_COUNTERS 4
+#define HTT_RX_PDEV_STATS_NUM_DCM_COUNTERS 5
+#define HTT_RX_PDEV_STATS_NUM_BW_COUNTERS 4
+#define HTT_RX_PDEV_STATS_NUM_SPATIAL_STREAMS 8
+#define HTT_RX_PDEV_STATS_NUM_PREAMBLE_TYPES HTT_STATS_PREAM_COUNT
+#define HTT_RX_PDEV_MAX_OFDMA_NUM_USER 8
+#define HTT_RX_PDEV_STATS_RXEVM_MAX_PILOTS_PER_NSS 16
+#define HTT_RX_PDEV_STATS_NUM_RU_SIZE_COUNTERS 6
+#define HTT_RX_PDEV_MAX_ULMUMIMO_NUM_USER 8
+
+struct htt_rx_pdev_rate_stats_tlv {
+ u32 mac_id__word;
+ u32 nsts;
+
+ u32 rx_ldpc;
+ u32 rts_cnt;
+
+ u32 rssi_mgmt; /* units = dB above noise floor */
+ u32 rssi_data; /* units = dB above noise floor */
+ u32 rssi_comb; /* units = dB above noise floor */
+ u32 rx_mcs[HTT_RX_PDEV_STATS_NUM_MCS_COUNTERS];
+ /* element 0,1, ...7 -> NSS 1,2, ...8 */
+ u32 rx_nss[HTT_RX_PDEV_STATS_NUM_SPATIAL_STREAMS];
+ u32 rx_dcm[HTT_RX_PDEV_STATS_NUM_DCM_COUNTERS];
+ u32 rx_stbc[HTT_RX_PDEV_STATS_NUM_MCS_COUNTERS];
+ /* element 0: 20 MHz, 1: 40 MHz, 2: 80 MHz, 3: 160 and 80+80 MHz */
+ u32 rx_bw[HTT_RX_PDEV_STATS_NUM_BW_COUNTERS];
+ u32 rx_pream[HTT_RX_PDEV_STATS_NUM_PREAMBLE_TYPES];
+ u8 rssi_chain[HTT_RX_PDEV_STATS_NUM_SPATIAL_STREAMS]
+ [HTT_RX_PDEV_STATS_NUM_BW_COUNTERS];
+ /* units = dB above noise floor */
+
+ /* Counters to track number of rx packets
+ * in each GI in each mcs (0-11)
+ */
+ u32 rx_gi[HTT_RX_PDEV_STATS_NUM_GI_COUNTERS][HTT_RX_PDEV_STATS_NUM_MCS_COUNTERS];
+	s32 rssi_in_dbm; /* rx signal strength value in dBm */
+
+ u32 rx_11ax_su_ext;
+ u32 rx_11ac_mumimo;
+ u32 rx_11ax_mumimo;
+ u32 rx_11ax_ofdma;
+ u32 txbf;
+ u32 rx_legacy_cck_rate[HTT_RX_PDEV_STATS_NUM_LEGACY_CCK_STATS];
+ u32 rx_legacy_ofdm_rate[HTT_RX_PDEV_STATS_NUM_LEGACY_OFDM_STATS];
+ u32 rx_active_dur_us_low;
+ u32 rx_active_dur_us_high;
+
+ u32 rx_11ax_ul_ofdma;
+
+ u32 ul_ofdma_rx_mcs[HTT_RX_PDEV_STATS_NUM_MCS_COUNTERS];
+ u32 ul_ofdma_rx_gi[HTT_TX_PDEV_STATS_NUM_GI_COUNTERS]
+ [HTT_RX_PDEV_STATS_NUM_MCS_COUNTERS];
+ u32 ul_ofdma_rx_nss[HTT_TX_PDEV_STATS_NUM_SPATIAL_STREAMS];
+ u32 ul_ofdma_rx_bw[HTT_TX_PDEV_STATS_NUM_BW_COUNTERS];
+ u32 ul_ofdma_rx_stbc;
+ u32 ul_ofdma_rx_ldpc;
+
+ /* record the stats for each user index */
+ u32 rx_ulofdma_non_data_ppdu[HTT_RX_PDEV_MAX_OFDMA_NUM_USER]; /* ppdu level */
+ u32 rx_ulofdma_data_ppdu[HTT_RX_PDEV_MAX_OFDMA_NUM_USER]; /* ppdu level */
+ u32 rx_ulofdma_mpdu_ok[HTT_RX_PDEV_MAX_OFDMA_NUM_USER]; /* mpdu level */
+ u32 rx_ulofdma_mpdu_fail[HTT_RX_PDEV_MAX_OFDMA_NUM_USER]; /* mpdu level */
+
+ u32 nss_count;
+ u32 pilot_count;
+ /* RxEVM stats in dB */
+ s32 rx_pilot_evm_db[HTT_RX_PDEV_STATS_NUM_SPATIAL_STREAMS]
+ [HTT_RX_PDEV_STATS_RXEVM_MAX_PILOTS_PER_NSS];
+ /* rx_pilot_evm_db_mean:
+ * EVM mean across pilots, computed as
+ * mean(10*log10(rx_pilot_evm_linear)) = mean(rx_pilot_evm_db)
+ */
+ s32 rx_pilot_evm_db_mean[HTT_RX_PDEV_STATS_NUM_SPATIAL_STREAMS];
+ s8 rx_ul_fd_rssi[HTT_RX_PDEV_STATS_NUM_SPATIAL_STREAMS]
+ [HTT_RX_PDEV_MAX_OFDMA_NUM_USER]; /* dBm units */
+ /* per_chain_rssi_pkt_type:
+ * This field shows what type of rx frame the per-chain RSSI was computed
+ * on, by recording the frame type and sub-type as bit-fields within this
+ * field:
+ * BIT [3 : 0] :- IEEE80211_FC0_TYPE
+ * BIT [7 : 4] :- IEEE80211_FC0_SUBTYPE
+ * BIT [31 : 8] :- Reserved
+ */
+ u32 per_chain_rssi_pkt_type;
+ s8 rx_per_chain_rssi_in_dbm[HTT_RX_PDEV_STATS_NUM_SPATIAL_STREAMS]
+ [HTT_RX_PDEV_STATS_NUM_BW_COUNTERS];
+
+ u32 rx_su_ndpa;
+ u32 rx_11ax_su_txbf_mcs[HTT_RX_PDEV_STATS_NUM_MCS_COUNTERS];
+ u32 rx_mu_ndpa;
+ u32 rx_11ax_mu_txbf_mcs[HTT_RX_PDEV_STATS_NUM_MCS_COUNTERS];
+ u32 rx_br_poll;
+ u32 rx_11ax_dl_ofdma_mcs[HTT_RX_PDEV_STATS_NUM_MCS_COUNTERS];
+ u32 rx_11ax_dl_ofdma_ru[HTT_RX_PDEV_STATS_NUM_RU_SIZE_COUNTERS];
+
+ u32 rx_ulmumimo_non_data_ppdu[HTT_RX_PDEV_MAX_ULMUMIMO_NUM_USER];
+ u32 rx_ulmumimo_data_ppdu[HTT_RX_PDEV_MAX_ULMUMIMO_NUM_USER];
+ u32 rx_ulmumimo_mpdu_ok[HTT_RX_PDEV_MAX_ULMUMIMO_NUM_USER];
+ u32 rx_ulmumimo_mpdu_fail[HTT_RX_PDEV_MAX_ULMUMIMO_NUM_USER];
+ u32 rx_ulofdma_non_data_nusers[HTT_RX_PDEV_MAX_OFDMA_NUM_USER];
+ u32 rx_ulofdma_data_nusers[HTT_RX_PDEV_MAX_OFDMA_NUM_USER];
+};
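+
+/* Editor's sketch (not in the upstream patch): splitting
+ * per_chain_rssi_pkt_type into the frame type/subtype fields documented
+ * above. Assumes <linux/bitfield.h> for FIELD_GET(); the helper names are
+ * hypothetical.
+ */
+static inline u8 htt_rx_per_chain_rssi_fc0_type(u32 pkt_type)
+{
+	return FIELD_GET(GENMASK(3, 0), pkt_type);
+}
+
+static inline u8 htt_rx_per_chain_rssi_fc0_subtype(u32 pkt_type)
+{
+	return FIELD_GET(GENMASK(7, 4), pkt_type);
+}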
+
+/* == RX PDEV/SOC STATS == */
+struct htt_rx_soc_fw_stats_tlv {
+ u32 fw_reo_ring_data_msdu;
+ u32 fw_to_host_data_msdu_bcmc;
+ u32 fw_to_host_data_msdu_uc;
+ u32 ofld_remote_data_buf_recycle_cnt;
+ u32 ofld_remote_free_buf_indication_cnt;
+
+ u32 ofld_buf_to_host_data_msdu_uc;
+ u32 reo_fw_ring_to_host_data_msdu_uc;
+
+ u32 wbm_sw_ring_reap;
+ u32 wbm_forward_to_host_cnt;
+ u32 wbm_target_recycle_cnt;
+
+ u32 target_refill_ring_recycle_cnt;
+};
+
+/* NOTE: Variable length TLV, use length spec to infer array size */
+struct htt_rx_soc_fw_refill_ring_empty_tlv_v {
+ /* HTT_RX_STATS_REFILL_MAX_RING */
+ DECLARE_FLEX_ARRAY(u32, refill_ring_empty_cnt);
+};
+
+/* NOTE: Variable length TLV, use length spec to infer array size */
+struct htt_rx_soc_fw_refill_ring_num_refill_tlv_v {
+ /* HTT_RX_STATS_REFILL_MAX_RING */
+ DECLARE_FLEX_ARRAY(u32, refill_ring_num_refill);
+};
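+
+/* Editor's sketch of the "length spec" convention used by the
+ * variable-length TLVs above (helper name hypothetical): the element
+ * count comes from the TLV payload length carried in the HTT TLV header,
+ * not from the struct definition itself.
+ */
+static inline u16 htt_stats_tlv_num_u32_elems(u16 tlv_payload_len)
+{
+	return tlv_payload_len / sizeof(u32);
+}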
+
+/* RXDMA error code from WBM released packets */
+enum htt_rx_rxdma_error_code_enum {
+ HTT_RX_RXDMA_OVERFLOW_ERR = 0,
+ HTT_RX_RXDMA_MPDU_LENGTH_ERR = 1,
+ HTT_RX_RXDMA_FCS_ERR = 2,
+ HTT_RX_RXDMA_DECRYPT_ERR = 3,
+ HTT_RX_RXDMA_TKIP_MIC_ERR = 4,
+ HTT_RX_RXDMA_UNECRYPTED_ERR = 5,
+ HTT_RX_RXDMA_MSDU_LEN_ERR = 6,
+ HTT_RX_RXDMA_MSDU_LIMIT_ERR = 7,
+ HTT_RX_RXDMA_WIFI_PARSE_ERR = 8,
+ HTT_RX_RXDMA_AMSDU_PARSE_ERR = 9,
+ HTT_RX_RXDMA_SA_TIMEOUT_ERR = 10,
+ HTT_RX_RXDMA_DA_TIMEOUT_ERR = 11,
+ HTT_RX_RXDMA_FLOW_TIMEOUT_ERR = 12,
+ HTT_RX_RXDMA_FLUSH_REQUEST = 13,
+ HTT_RX_RXDMA_ERR_CODE_RVSD0 = 14,
+ HTT_RX_RXDMA_ERR_CODE_RVSD1 = 15,
+
+ /* This MAX_ERR_CODE should not be used in any host/target messages,
+ * so that even though it is defined within a host/target interface
+ * definition header file, it isn't actually part of the host/target
+ * interface, and thus can be modified.
+ */
+ HTT_RX_RXDMA_MAX_ERR_CODE
+};
+
+/* NOTE: Variable length TLV, use length spec to infer array size */
+struct htt_rx_soc_fw_refill_ring_num_rxdma_err_tlv_v {
+ DECLARE_FLEX_ARRAY(u32, rxdma_err); /* HTT_RX_RXDMA_MAX_ERR_CODE */
+};
+
+/* REO error code from WBM released packets */
+enum htt_rx_reo_error_code_enum {
+ HTT_RX_REO_QUEUE_DESC_ADDR_ZERO = 0,
+ HTT_RX_REO_QUEUE_DESC_NOT_VALID = 1,
+ HTT_RX_AMPDU_IN_NON_BA = 2,
+ HTT_RX_NON_BA_DUPLICATE = 3,
+ HTT_RX_BA_DUPLICATE = 4,
+ HTT_RX_REGULAR_FRAME_2K_JUMP = 5,
+ HTT_RX_BAR_FRAME_2K_JUMP = 6,
+ HTT_RX_REGULAR_FRAME_OOR = 7,
+ HTT_RX_BAR_FRAME_OOR = 8,
+ HTT_RX_BAR_FRAME_NO_BA_SESSION = 9,
+ HTT_RX_BAR_FRAME_SN_EQUALS_SSN = 10,
+ HTT_RX_PN_CHECK_FAILED = 11,
+ HTT_RX_2K_ERROR_HANDLING_FLAG_SET = 12,
+ HTT_RX_PN_ERROR_HANDLING_FLAG_SET = 13,
+ HTT_RX_QUEUE_DESCRIPTOR_BLOCKED_SET = 14,
+ HTT_RX_REO_ERR_CODE_RVSD = 15,
+
+ /* This MAX_ERR_CODE should not be used in any host/target messages,
+ * so that even though it is defined within a host/target interface
+ * definition header file, it isn't actually part of the host/target
+ * interface, and thus can be modified.
+ */
+ HTT_RX_REO_MAX_ERR_CODE
+};
+
+/* NOTE: Variable length TLV, use length spec to infer array size */
+struct htt_rx_soc_fw_refill_ring_num_reo_err_tlv_v {
+ DECLARE_FLEX_ARRAY(u32, reo_err); /* HTT_RX_REO_MAX_ERR_CODE */
+};
+
+/* == RX PDEV STATS == */
+#define HTT_STATS_SUBTYPE_MAX 16
+
+struct htt_rx_pdev_fw_stats_tlv {
+ u32 mac_id__word;
+ u32 ppdu_recvd;
+ u32 mpdu_cnt_fcs_ok;
+ u32 mpdu_cnt_fcs_err;
+ u32 tcp_msdu_cnt;
+ u32 tcp_ack_msdu_cnt;
+ u32 udp_msdu_cnt;
+ u32 other_msdu_cnt;
+ u32 fw_ring_mpdu_ind;
+ u32 fw_ring_mgmt_subtype[HTT_STATS_SUBTYPE_MAX];
+ u32 fw_ring_ctrl_subtype[HTT_STATS_SUBTYPE_MAX];
+ u32 fw_ring_mcast_data_msdu;
+ u32 fw_ring_bcast_data_msdu;
+ u32 fw_ring_ucast_data_msdu;
+ u32 fw_ring_null_data_msdu;
+ u32 fw_ring_mpdu_drop;
+ u32 ofld_local_data_ind_cnt;
+ u32 ofld_local_data_buf_recycle_cnt;
+ u32 drx_local_data_ind_cnt;
+ u32 drx_local_data_buf_recycle_cnt;
+ u32 local_nondata_ind_cnt;
+ u32 local_nondata_buf_recycle_cnt;
+
+ u32 fw_status_buf_ring_refill_cnt;
+ u32 fw_status_buf_ring_empty_cnt;
+ u32 fw_pkt_buf_ring_refill_cnt;
+ u32 fw_pkt_buf_ring_empty_cnt;
+ u32 fw_link_buf_ring_refill_cnt;
+ u32 fw_link_buf_ring_empty_cnt;
+
+ u32 host_pkt_buf_ring_refill_cnt;
+ u32 host_pkt_buf_ring_empty_cnt;
+ u32 mon_pkt_buf_ring_refill_cnt;
+ u32 mon_pkt_buf_ring_empty_cnt;
+ u32 mon_status_buf_ring_refill_cnt;
+ u32 mon_status_buf_ring_empty_cnt;
+ u32 mon_desc_buf_ring_refill_cnt;
+ u32 mon_desc_buf_ring_empty_cnt;
+ u32 mon_dest_ring_update_cnt;
+ u32 mon_dest_ring_full_cnt;
+
+ u32 rx_suspend_cnt;
+ u32 rx_suspend_fail_cnt;
+ u32 rx_resume_cnt;
+ u32 rx_resume_fail_cnt;
+ u32 rx_ring_switch_cnt;
+ u32 rx_ring_restore_cnt;
+ u32 rx_flush_cnt;
+ u32 rx_recovery_reset_cnt;
+};
+
+#define HTT_STATS_PHY_ERR_MAX 43
+
+struct htt_rx_pdev_fw_stats_phy_err_tlv {
+ u32 mac_id__word;
+ u32 total_phy_err_cnt;
+ /* Counts of different types of phy errs
+ * The mapping of PHY error types to phy_err array elements is HW dependent.
+ * The only currently-supported mapping is shown below:
+ *
+ * 0 phyrx_err_phy_off Reception aborted due to receiving a PHY_OFF TLV
+ * 1 phyrx_err_synth_off
+ * 2 phyrx_err_ofdma_timing
+ * 3 phyrx_err_ofdma_signal_parity
+ * 4 phyrx_err_ofdma_rate_illegal
+ * 5 phyrx_err_ofdma_length_illegal
+ * 6 phyrx_err_ofdma_restart
+ * 7 phyrx_err_ofdma_service
+ * 8 phyrx_err_ppdu_ofdma_power_drop
+ * 9 phyrx_err_cck_blokker
+ * 10 phyrx_err_cck_timing
+ * 11 phyrx_err_cck_header_crc
+ * 12 phyrx_err_cck_rate_illegal
+ * 13 phyrx_err_cck_length_illegal
+ * 14 phyrx_err_cck_restart
+ * 15 phyrx_err_cck_service
+ * 16 phyrx_err_cck_power_drop
+ * 17 phyrx_err_ht_crc_err
+ * 18 phyrx_err_ht_length_illegal
+ * 19 phyrx_err_ht_rate_illegal
+ * 20 phyrx_err_ht_zlf
+ * 21 phyrx_err_false_radar_ext
+ * 22 phyrx_err_green_field
+ * 23 phyrx_err_bw_gt_dyn_bw
+ * 24 phyrx_err_leg_ht_mismatch
+ * 25 phyrx_err_vht_crc_error
+ * 26 phyrx_err_vht_siga_unsupported
+ * 27 phyrx_err_vht_lsig_len_invalid
+ * 28 phyrx_err_vht_ndp_or_zlf
+ * 29 phyrx_err_vht_nsym_lt_zero
+ * 30 phyrx_err_vht_rx_extra_symbol_mismatch
+ * 31 phyrx_err_vht_rx_skip_group_id0
+ * 32 phyrx_err_vht_rx_skip_group_id1to62
+ * 33 phyrx_err_vht_rx_skip_group_id63
+ * 34 phyrx_err_ofdm_ldpc_decoder_disabled
+ * 35 phyrx_err_defer_nap
+ * 36 phyrx_err_fdomain_timeout
+ * 37 phyrx_err_lsig_rel_check
+ * 38 phyrx_err_bt_collision
+ * 39 phyrx_err_unsupported_mu_feedback
+ * 40 phyrx_err_ppdu_tx_interrupt_rx
+ * 41 phyrx_err_unsupported_cbf
+ * 42 phyrx_err_other
+ */
+ u32 phy_err[HTT_STATS_PHY_ERR_MAX];
+};
+
+/* NOTE: Variable length TLV, use length spec to infer array size */
+struct htt_rx_pdev_fw_ring_mpdu_err_tlv_v {
+ /* Num error MPDU for each RxDMA error type */
+ DECLARE_FLEX_ARRAY(u32, fw_ring_mpdu_err); /* HTT_RX_STATS_RXDMA_MAX_ERR */
+};
+
+/* NOTE: Variable length TLV, use length spec to infer array size */
+struct htt_rx_pdev_fw_mpdu_drop_tlv_v {
+ /* Num MPDU dropped */
+ DECLARE_FLEX_ARRAY(u32, fw_mpdu_drop); /* HTT_RX_STATS_FW_DROP_REASON_MAX */
+};
+
+#define HTT_PDEV_CCA_STATS_TX_FRAME_INFO_PRESENT (0x1)
+#define HTT_PDEV_CCA_STATS_RX_FRAME_INFO_PRESENT (0x2)
+#define HTT_PDEV_CCA_STATS_RX_CLEAR_INFO_PRESENT (0x4)
+#define HTT_PDEV_CCA_STATS_MY_RX_FRAME_INFO_PRESENT (0x8)
+#define HTT_PDEV_CCA_STATS_USEC_CNT_INFO_PRESENT (0x10)
+#define HTT_PDEV_CCA_STATS_MED_RX_IDLE_INFO_PRESENT (0x20)
+#define HTT_PDEV_CCA_STATS_MED_TX_IDLE_GLOBAL_INFO_PRESENT (0x40)
+#define HTT_PDEV_CCA_STATS_CCA_OBBS_USEC_INFO_PRESENT (0x80)
+
+struct htt_pdev_stats_cca_counters_tlv {
+ /* Below values are obtained from the HW Cycles counter registers */
+ u32 tx_frame_usec;
+ u32 rx_frame_usec;
+ u32 rx_clear_usec;
+ u32 my_rx_frame_usec;
+ u32 usec_cnt;
+ u32 med_rx_idle_usec;
+ u32 med_tx_idle_global_usec;
+ u32 cca_obss_usec;
+};
+
+struct htt_pdev_cca_stats_hist_v1_tlv {
+ u32 chan_num;
+	/* num of CCA records (num of htt_pdev_stats_cca_counters_tlv) */
+ u32 num_records;
+ u32 valid_cca_counters_bitmap;
+ u32 collection_interval;
+
+	/* This is followed by an array holding the CCA stats collected over
+	 * the last N intervals (when the indication is for the last N
+	 * intervals of CCA stats): pdev_cca_stats[0] contains the oldest
+	 * record and pdev_cca_stats[N-1] the most recent one.
+	 * htt_pdev_stats_cca_counters_tlv cca_hist_tlv[1];
+	 */
+};
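+
+/* Editor's sketch (not part of the patch): locating the i-th trailing CCA
+ * record, assuming the records immediately follow this header TLV in
+ * oldest-first order as described above. The helper name is hypothetical;
+ * callers would also check valid_cca_counters_bitmap (e.g. the
+ * HTT_PDEV_CCA_STATS_*_INFO_PRESENT flags) before trusting a field.
+ */
+static inline const struct htt_pdev_stats_cca_counters_tlv *
+htt_pdev_cca_hist_record(const struct htt_pdev_cca_stats_hist_v1_tlv *hist,
+			 u32 idx)
+{
+	const struct htt_pdev_stats_cca_counters_tlv *recs =
+			(const void *)(hist + 1);
+
+	return &recs[idx];
+}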
+
+struct htt_pdev_stats_twt_session_tlv {
+ u32 vdev_id;
+ struct htt_mac_addr peer_mac;
+ u32 flow_id_flags;
+
+	/* TWT_DIALOG_ID_UNAVAILABLE is used
+	 * when the TWT session is not initiated by the host
+	 */
+ u32 dialog_id;
+ u32 wake_dura_us;
+ u32 wake_intvl_us;
+ u32 sp_offset_us;
+};
+
+struct htt_pdev_stats_twt_sessions_tlv {
+ u32 pdev_id;
+ u32 num_sessions;
+ struct htt_pdev_stats_twt_session_tlv twt_session[];
+};
+
+enum htt_rx_reo_resource_sample_id_enum {
+ /* Global link descriptor queued in REO */
+ HTT_RX_REO_RESOURCE_GLOBAL_LINK_DESC_COUNT_0 = 0,
+ HTT_RX_REO_RESOURCE_GLOBAL_LINK_DESC_COUNT_1 = 1,
+ HTT_RX_REO_RESOURCE_GLOBAL_LINK_DESC_COUNT_2 = 2,
+	/* Number of queue descriptors of this aging group */
+ HTT_RX_REO_RESOURCE_BUFFERS_USED_AC0 = 3,
+ HTT_RX_REO_RESOURCE_BUFFERS_USED_AC1 = 4,
+ HTT_RX_REO_RESOURCE_BUFFERS_USED_AC2 = 5,
+ HTT_RX_REO_RESOURCE_BUFFERS_USED_AC3 = 6,
+ /* Total number of MSDUs buffered in AC */
+ HTT_RX_REO_RESOURCE_AGING_NUM_QUEUES_AC0 = 7,
+ HTT_RX_REO_RESOURCE_AGING_NUM_QUEUES_AC1 = 8,
+ HTT_RX_REO_RESOURCE_AGING_NUM_QUEUES_AC2 = 9,
+ HTT_RX_REO_RESOURCE_AGING_NUM_QUEUES_AC3 = 10,
+
+ HTT_RX_REO_RESOURCE_STATS_MAX = 16
+};
+
+struct htt_rx_reo_resource_stats_tlv_v {
+	/* Variable number of records, up to HTT_RX_REO_RESOURCE_STATS_MAX */
+ u32 sample_id;
+ u32 total_max;
+ u32 total_avg;
+ u32 total_sample;
+ u32 non_zeros_avg;
+ u32 non_zeros_sample;
+ u32 last_non_zeros_max;
+ u32 last_non_zeros_min;
+ u32 last_non_zeros_avg;
+ u32 last_non_zeros_sample;
+};
+
+/* == TX SOUNDING STATS == */
+
+enum htt_txbf_sound_steer_modes {
+ HTT_IMPLICIT_TXBF_STEER_STATS = 0,
+ HTT_EXPLICIT_TXBF_SU_SIFS_STEER_STATS = 1,
+ HTT_EXPLICIT_TXBF_SU_RBO_STEER_STATS = 2,
+ HTT_EXPLICIT_TXBF_MU_SIFS_STEER_STATS = 3,
+ HTT_EXPLICIT_TXBF_MU_RBO_STEER_STATS = 4,
+ HTT_TXBF_MAX_NUM_OF_MODES = 5
+};
+
+enum htt_stats_sounding_tx_mode {
+ HTT_TX_AC_SOUNDING_MODE = 0,
+ HTT_TX_AX_SOUNDING_MODE = 1,
+};
+
+struct htt_tx_sounding_stats_tlv {
+ u32 tx_sounding_mode; /* HTT_TX_XX_SOUNDING_MODE */
+ /* Counts number of soundings for all steering modes in each bw */
+ u32 cbf_20[HTT_TXBF_MAX_NUM_OF_MODES];
+ u32 cbf_40[HTT_TXBF_MAX_NUM_OF_MODES];
+ u32 cbf_80[HTT_TXBF_MAX_NUM_OF_MODES];
+ u32 cbf_160[HTT_TXBF_MAX_NUM_OF_MODES];
+	/*
+	 * The sounding array is a 2-D array stored as a 1-D array of
+	 * u32. The stats for a particular user/bw combination are
+	 * referenced as follows:
+	 *
+	 *     sounding[(user * max_bw) + bw]
+	 *
+	 * ... where max_bw == 4 (20/40/80/160 MHz)
+	 */
+ u32 sounding[HTT_TX_NUM_OF_SOUNDING_STATS_WORDS];
+};
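+
+/* A minimal sketch of the flat indexing scheme described above (editor's
+ * note, helper name hypothetical), with max_bw ==
+ * HTT_TX_PDEV_STATS_NUM_BW_COUNTERS.
+ */
+static inline u32
+htt_tx_sounding_stat(const struct htt_tx_sounding_stats_tlv *tlv,
+		     u32 user, u32 bw)
+{
+	return tlv->sounding[(user * HTT_TX_PDEV_STATS_NUM_BW_COUNTERS) + bw];
+}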
+
+struct htt_pdev_obss_pd_stats_tlv {
+ u32 num_obss_tx_ppdu_success;
+ u32 num_obss_tx_ppdu_failure;
+ u32 num_sr_tx_transmissions;
+ u32 num_spatial_reuse_opportunities;
+ u32 num_non_srg_opportunities;
+ u32 num_non_srg_ppdu_tried;
+ u32 num_non_srg_ppdu_success;
+ u32 num_srg_opportunities;
+ u32 num_srg_ppdu_tried;
+ u32 num_srg_ppdu_success;
+ u32 num_psr_opportunities;
+ u32 num_psr_ppdu_tried;
+ u32 num_psr_ppdu_success;
+};
+
+struct htt_ring_backpressure_stats_tlv {
+ u32 pdev_id;
+ u32 current_head_idx;
+ u32 current_tail_idx;
+ u32 num_htt_msgs_sent;
+ /* Time in milliseconds for which the ring has been in
+ * its current backpressure condition
+ */
+ u32 backpressure_time_ms;
+	/* backpressure_hist - histogram of continuous backpressure duration:
+	 * index 0: 100 - 200ms, index 1: 200 - 300ms,
+	 * index 2: 300 - 400ms, index 3: 400 - 500ms,
+	 * index 4: beyond 500ms.
+	 */
+ u32 backpressure_hist[5];
+};
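+
+/* Editor's sketch (hypothetical helper, not in the patch): mapping a
+ * continuous backpressure duration in milliseconds onto the histogram
+ * buckets described above. Assumes min_t() from <linux/minmax.h>;
+ * durations below 100ms are not tracked.
+ */
+static inline int htt_backpressure_hist_idx(u32 duration_ms)
+{
+	if (duration_ms < 100)
+		return -1; /* below the first bucket */
+
+	return min_t(u32, duration_ms / 100 - 1, 4);
+}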
+
+#define HTT_TX_TXBF_RATE_STATS_NUM_MCS_COUNTERS 14
+#define HTT_TX_TXBF_RATE_STATS_NUM_BW_COUNTERS 5
+#define HTT_TX_PDEV_STATS_NUM_SPATIAL_STREAMS 8
+
+struct htt_pdev_txrate_txbf_stats_tlv {
+ /* SU TxBF TX MCS stats */
+ u32 tx_su_txbf_mcs[HTT_TX_TXBF_RATE_STATS_NUM_MCS_COUNTERS];
+ /* Implicit BF TX MCS stats */
+ u32 tx_su_ibf_mcs[HTT_TX_TXBF_RATE_STATS_NUM_MCS_COUNTERS];
+ /* Open loop TX MCS stats */
+ u32 tx_su_ol_mcs[HTT_TX_TXBF_RATE_STATS_NUM_MCS_COUNTERS];
+ /* SU TxBF TX NSS stats */
+ u32 tx_su_txbf_nss[HTT_TX_PDEV_STATS_NUM_SPATIAL_STREAMS];
+ /* Implicit BF TX NSS stats */
+ u32 tx_su_ibf_nss[HTT_TX_PDEV_STATS_NUM_SPATIAL_STREAMS];
+ /* Open loop TX NSS stats */
+ u32 tx_su_ol_nss[HTT_TX_PDEV_STATS_NUM_SPATIAL_STREAMS];
+ /* SU TxBF TX BW stats */
+ u32 tx_su_txbf_bw[HTT_TX_TXBF_RATE_STATS_NUM_BW_COUNTERS];
+ /* Implicit BF TX BW stats */
+ u32 tx_su_ibf_bw[HTT_TX_TXBF_RATE_STATS_NUM_BW_COUNTERS];
+ /* Open loop TX BW stats */
+ u32 tx_su_ol_bw[HTT_TX_TXBF_RATE_STATS_NUM_BW_COUNTERS];
+};
+
+struct htt_txbf_ofdma_ndpa_stats_tlv {
+ /* 11AX HE OFDMA NDPA frame queued to the HW */
+ u32 ax_ofdma_ndpa_queued[HTT_TX_PDEV_STATS_NUM_OFDMA_USER_STATS];
+ /* 11AX HE OFDMA NDPA frame sent over the air */
+ u32 ax_ofdma_ndpa_tried[HTT_TX_PDEV_STATS_NUM_OFDMA_USER_STATS];
+ /* 11AX HE OFDMA NDPA frame flushed by HW */
+ u32 ax_ofdma_ndpa_flushed[HTT_TX_PDEV_STATS_NUM_OFDMA_USER_STATS];
+ /* 11AX HE OFDMA NDPA frame completed with error(s) */
+ u32 ax_ofdma_ndpa_err[HTT_TX_PDEV_STATS_NUM_OFDMA_USER_STATS];
+};
+
+struct htt_txbf_ofdma_ndp_stats_tlv {
+ /* 11AX HE OFDMA NDP frame queued to the HW */
+ u32 ax_ofdma_ndp_queued[HTT_TX_PDEV_STATS_NUM_OFDMA_USER_STATS];
+	/* 11AX HE OFDMA NDP frame sent over the air */
+	u32 ax_ofdma_ndp_tried[HTT_TX_PDEV_STATS_NUM_OFDMA_USER_STATS];
+	/* 11AX HE OFDMA NDP frame flushed by HW */
+	u32 ax_ofdma_ndp_flushed[HTT_TX_PDEV_STATS_NUM_OFDMA_USER_STATS];
+	/* 11AX HE OFDMA NDP frame completed with error(s) */
+	u32 ax_ofdma_ndp_err[HTT_TX_PDEV_STATS_NUM_OFDMA_USER_STATS];
+};
+
+struct htt_txbf_ofdma_brp_stats_tlv {
+ /* 11AX HE OFDMA MU BRPOLL frame queued to the HW */
+ u32 ax_ofdma_brpoll_queued[HTT_TX_PDEV_STATS_NUM_OFDMA_USER_STATS];
+ /* 11AX HE OFDMA MU BRPOLL frame sent over the air */
+ u32 ax_ofdma_brpoll_tried[HTT_TX_PDEV_STATS_NUM_OFDMA_USER_STATS];
+ /* 11AX HE OFDMA MU BRPOLL frame flushed by HW */
+ u32 ax_ofdma_brpoll_flushed[HTT_TX_PDEV_STATS_NUM_OFDMA_USER_STATS];
+ /* 11AX HE OFDMA MU BRPOLL frame completed with error(s) */
+ u32 ax_ofdma_brp_err[HTT_TX_PDEV_STATS_NUM_OFDMA_USER_STATS];
+ /* Number of CBF(s) received when 11AX HE OFDMA MU BRPOLL frame
+ * completed with error(s).
+ */
+ u32 ax_ofdma_brp_err_num_cbf_rcvd[HTT_TX_PDEV_STATS_NUM_OFDMA_USER_STATS + 1];
+};
+
+struct htt_txbf_ofdma_steer_stats_tlv {
+ /* 11AX HE OFDMA PPDUs that were sent over the air with steering (TXBF + OFDMA) */
+ u32 ax_ofdma_num_ppdu_steer[HTT_TX_PDEV_STATS_NUM_OFDMA_USER_STATS];
+ /* 11AX HE OFDMA PPDUs that were sent over the air in open loop */
+ u32 ax_ofdma_num_ppdu_ol[HTT_TX_PDEV_STATS_NUM_OFDMA_USER_STATS];
+ /* 11AX HE OFDMA number of users for which CBF prefetch was
+ * initiated to PHY HW during TX.
+ */
+ u32 ax_ofdma_num_usrs_prefetch[HTT_TX_PDEV_STATS_NUM_OFDMA_USER_STATS];
+ /* 11AX HE OFDMA number of users for which sounding was initiated during TX */
+ u32 ax_ofdma_num_usrs_sound[HTT_TX_PDEV_STATS_NUM_OFDMA_USER_STATS];
+ /* 11AX HE OFDMA number of users for which sounding was forced during TX */
+ u32 ax_ofdma_num_usrs_force_sound[HTT_TX_PDEV_STATS_NUM_OFDMA_USER_STATS];
+};
+
+#define HTT_MAX_RX_PKT_CNT 8
+#define HTT_MAX_RX_PKT_CRC_PASS_CNT 8
+#define HTT_MAX_PER_BLK_ERR_CNT 20
+#define HTT_MAX_RX_OTA_ERR_CNT 14
+#define HTT_STATS_MAX_CHAINS 8
+#define ATH11K_STATS_MGMT_FRM_TYPE_MAX 16
+
+struct htt_phy_counters_tlv {
+	/* number of RXTD OFDMA OTA errors, excluding power surge and drop */
+ u32 rx_ofdma_timing_err_cnt;
+	/* rx_cck_fail_cnt:
+	 * number of CCK errors due to rx reception failure caused by a
+	 * CCK timing error
+	 */
+ u32 rx_cck_fail_cnt;
+ /* number of times tx abort initiated by mac */
+ u32 mactx_abort_cnt;
+ /* number of times rx abort initiated by mac */
+ u32 macrx_abort_cnt;
+ /* number of times tx abort initiated by phy */
+ u32 phytx_abort_cnt;
+ /* number of times rx abort initiated by phy */
+ u32 phyrx_abort_cnt;
+	/* number of deferred rx aborts initiated by phy */
+ u32 phyrx_defer_abort_cnt;
+ /* number of sizing events generated at LSTF */
+ u32 rx_gain_adj_lstf_event_cnt;
+ /* number of sizing events generated at non-legacy LTF */
+ u32 rx_gain_adj_non_legacy_cnt;
+ /* rx_pkt_cnt -
+ * Received EOP (end-of-packet) count per packet type;
+ * [0] = 11a; [1] = 11b; [2] = 11n; [3] = 11ac; [4] = 11ax; [5] = GF
+ * [6-7]=RSVD
+ */
+ u32 rx_pkt_cnt[HTT_MAX_RX_PKT_CNT];
+ /* rx_pkt_crc_pass_cnt -
+	 * Received EOP (end-of-packet) count with CRC pass, per packet type;
+ * [0] = 11a; [1] = 11b; [2] = 11n; [3] = 11ac; [4] = 11ax; [5] = GF
+ * [6-7]=RSVD
+ */
+ u32 rx_pkt_crc_pass_cnt[HTT_MAX_RX_PKT_CRC_PASS_CNT];
+ /* per_blk_err_cnt -
+ * Error count per error source;
+ * [0] = unknown; [1] = LSIG; [2] = HTSIG; [3] = VHTSIG; [4] = HESIG;
+ * [5] = RXTD_OTA; [6] = RXTD_FATAL; [7] = DEMF; [8] = ROBE;
+ * [9] = PMI; [10] = TXFD; [11] = TXTD; [12] = PHYRF
+ * [13-19]=RSVD
+ */
+ u32 per_blk_err_cnt[HTT_MAX_PER_BLK_ERR_CNT];
+ /* rx_ota_err_cnt -
+ * RXTD OTA (over-the-air) error count per error reason;
+ * [0] = voting fail; [1] = weak det fail; [2] = strong sig fail;
+ * [3] = cck fail; [4] = power surge; [5] = power drop;
+ * [6] = btcf timing timeout error; [7] = btcf packet detect error;
+ * [8] = coarse timing timeout error
+ * [9-13]=RSVD
+ */
+ u32 rx_ota_err_cnt[HTT_MAX_RX_OTA_ERR_CNT];
+};
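+
+/* Editor's note: a hypothetical enum naming the rx_pkt_cnt /
+ * rx_pkt_crc_pass_cnt indices documented above; it is not part of the
+ * host/target interface.
+ */
+enum htt_rx_pkt_type_idx {
+	HTT_RX_PKT_TYPE_IDX_11A = 0,
+	HTT_RX_PKT_TYPE_IDX_11B = 1,
+	HTT_RX_PKT_TYPE_IDX_11N = 2,
+	HTT_RX_PKT_TYPE_IDX_11AC = 3,
+	HTT_RX_PKT_TYPE_IDX_11AX = 4,
+	HTT_RX_PKT_TYPE_IDX_GF = 5,
+};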
+
+struct htt_phy_stats_tlv {
+ /* per chain hw noise floor values in dBm */
+ s32 nf_chain[HTT_STATS_MAX_CHAINS];
+ /* number of false radars detected */
+ u32 false_radar_cnt;
+ /* number of channel switches happened due to radar detection */
+ u32 radar_cs_cnt;
+	/* ani_level -
+	 * ANI level (noise interference) of the current channel;
+	 * the desense levels range from -5 to 15 dB, with higher
+	 * values indicating more noise interference.
+	 */
+ s32 ani_level;
+ /* running time in minutes since FW boot */
+ u32 fw_run_time;
+};
+
+struct htt_phy_reset_counters_tlv {
+ u32 pdev_id;
+ u32 cf_active_low_fail_cnt;
+ u32 cf_active_low_pass_cnt;
+ u32 phy_off_through_vreg_cnt;
+ u32 force_calibration_cnt;
+ u32 rf_mode_switch_phy_off_cnt;
+};
+
+struct htt_phy_reset_stats_tlv {
+ u32 pdev_id;
+ u32 chan_mhz;
+ u32 chan_band_center_freq1;
+ u32 chan_band_center_freq2;
+ u32 chan_phy_mode;
+ u32 chan_flags;
+ u32 chan_num;
+ u32 reset_cause;
+ u32 prev_reset_cause;
+ u32 phy_warm_reset_src;
+ u32 rx_gain_tbl_mode;
+ u32 xbar_val;
+ u32 force_calibration;
+ u32 phyrf_mode;
+ u32 phy_homechan;
+ u32 phy_tx_ch_mask;
+ u32 phy_rx_ch_mask;
+ u32 phybb_ini_mask;
+ u32 phyrf_ini_mask;
+ u32 phy_dfs_en_mask;
+ u32 phy_sscan_en_mask;
+ u32 phy_synth_sel_mask;
+ u32 phy_adfs_freq;
+ u32 cck_fir_settings;
+ u32 phy_dyn_pri_chan;
+ u32 cca_thresh;
+ u32 dyn_cca_status;
+ u32 rxdesense_thresh_hw;
+ u32 rxdesense_thresh_sw;
+};
+
+struct htt_peer_ctrl_path_txrx_stats_tlv {
+ /* peer mac address */
+ u8 peer_mac_addr[ETH_ALEN];
+ u8 rsvd[2];
+ /* Num of tx mgmt frames with subtype on peer level */
+ u32 peer_tx_mgmt_subtype[ATH11K_STATS_MGMT_FRM_TYPE_MAX];
+ /* Num of rx mgmt frames with subtype on peer level */
+ u32 peer_rx_mgmt_subtype[ATH11K_STATS_MGMT_FRM_TYPE_MAX];
+};
+
+#ifdef CONFIG_ATH11K_DEBUGFS
+
+void ath11k_debugfs_htt_stats_init(struct ath11k *ar);
+void ath11k_debugfs_htt_ext_stats_handler(struct ath11k_base *ab,
+ struct sk_buff *skb);
+int ath11k_debugfs_htt_stats_req(struct ath11k *ar);
+
+#else /* CONFIG_ATH11K_DEBUGFS */
+
+static inline void ath11k_debugfs_htt_stats_init(struct ath11k *ar)
+{
+}
+
+static inline void ath11k_debugfs_htt_ext_stats_handler(struct ath11k_base *ab,
+ struct sk_buff *skb)
+{
+}
+
+static inline int ath11k_debugfs_htt_stats_req(struct ath11k *ar)
+{
+ return 0;
+}
+
+#endif /* CONFIG_ATH11K_DEBUGFS */
+
+#endif
diff --git a/drivers/net/wireless/ath/ath11k/debugfs_sta.c b/drivers/net/wireless/ath/ath11k/debugfs_sta.c
new file mode 100644
index 0000000000..9cc4ef28e7
--- /dev/null
+++ b/drivers/net/wireless/ath/ath11k/debugfs_sta.c
@@ -0,0 +1,888 @@
+// SPDX-License-Identifier: BSD-3-Clause-Clear
+/*
+ * Copyright (c) 2018-2019 The Linux Foundation. All rights reserved.
+ */
+
+#include <linux/vmalloc.h>
+
+#include "debugfs_sta.h"
+#include "core.h"
+#include "peer.h"
+#include "debug.h"
+#include "dp_tx.h"
+#include "debugfs_htt_stats.h"
+
+void ath11k_debugfs_sta_add_tx_stats(struct ath11k_sta *arsta,
+ struct ath11k_per_peer_tx_stats *peer_stats,
+ u8 legacy_rate_idx)
+{
+ struct rate_info *txrate = &arsta->txrate;
+ struct ath11k_htt_tx_stats *tx_stats;
+ int gi, mcs, bw, nss;
+
+ if (!arsta->tx_stats)
+ return;
+
+ tx_stats = arsta->tx_stats;
+ gi = FIELD_GET(RATE_INFO_FLAGS_SHORT_GI, arsta->txrate.flags);
+ mcs = txrate->mcs;
+ bw = ath11k_mac_mac80211_bw_to_ath11k_bw(txrate->bw);
+ nss = txrate->nss - 1;
+
+#define STATS_OP_FMT(name) tx_stats->stats[ATH11K_STATS_TYPE_##name]
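+
+/* For example, STATS_OP_FMT(SUCC).he[0][mcs] expands to
+ * tx_stats->stats[ATH11K_STATS_TYPE_SUCC].he[0][mcs]; row 0 of each rate
+ * table accumulates bytes and row 1 accumulates packets.
+ */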
+
+ if (txrate->flags & RATE_INFO_FLAGS_HE_MCS) {
+ STATS_OP_FMT(SUCC).he[0][mcs] += peer_stats->succ_bytes;
+ STATS_OP_FMT(SUCC).he[1][mcs] += peer_stats->succ_pkts;
+ STATS_OP_FMT(FAIL).he[0][mcs] += peer_stats->failed_bytes;
+ STATS_OP_FMT(FAIL).he[1][mcs] += peer_stats->failed_pkts;
+ STATS_OP_FMT(RETRY).he[0][mcs] += peer_stats->retry_bytes;
+ STATS_OP_FMT(RETRY).he[1][mcs] += peer_stats->retry_pkts;
+ } else if (txrate->flags & RATE_INFO_FLAGS_VHT_MCS) {
+ STATS_OP_FMT(SUCC).vht[0][mcs] += peer_stats->succ_bytes;
+ STATS_OP_FMT(SUCC).vht[1][mcs] += peer_stats->succ_pkts;
+ STATS_OP_FMT(FAIL).vht[0][mcs] += peer_stats->failed_bytes;
+ STATS_OP_FMT(FAIL).vht[1][mcs] += peer_stats->failed_pkts;
+ STATS_OP_FMT(RETRY).vht[0][mcs] += peer_stats->retry_bytes;
+ STATS_OP_FMT(RETRY).vht[1][mcs] += peer_stats->retry_pkts;
+ } else if (txrate->flags & RATE_INFO_FLAGS_MCS) {
+ STATS_OP_FMT(SUCC).ht[0][mcs] += peer_stats->succ_bytes;
+ STATS_OP_FMT(SUCC).ht[1][mcs] += peer_stats->succ_pkts;
+ STATS_OP_FMT(FAIL).ht[0][mcs] += peer_stats->failed_bytes;
+ STATS_OP_FMT(FAIL).ht[1][mcs] += peer_stats->failed_pkts;
+ STATS_OP_FMT(RETRY).ht[0][mcs] += peer_stats->retry_bytes;
+ STATS_OP_FMT(RETRY).ht[1][mcs] += peer_stats->retry_pkts;
+ } else {
+ mcs = legacy_rate_idx;
+
+ STATS_OP_FMT(SUCC).legacy[0][mcs] += peer_stats->succ_bytes;
+ STATS_OP_FMT(SUCC).legacy[1][mcs] += peer_stats->succ_pkts;
+ STATS_OP_FMT(FAIL).legacy[0][mcs] += peer_stats->failed_bytes;
+ STATS_OP_FMT(FAIL).legacy[1][mcs] += peer_stats->failed_pkts;
+ STATS_OP_FMT(RETRY).legacy[0][mcs] += peer_stats->retry_bytes;
+ STATS_OP_FMT(RETRY).legacy[1][mcs] += peer_stats->retry_pkts;
+ }
+
+ if (peer_stats->is_ampdu) {
+ tx_stats->ba_fails += peer_stats->ba_fails;
+
+ if (txrate->flags & RATE_INFO_FLAGS_HE_MCS) {
+ STATS_OP_FMT(AMPDU).he[0][mcs] +=
+ peer_stats->succ_bytes + peer_stats->retry_bytes;
+ STATS_OP_FMT(AMPDU).he[1][mcs] +=
+ peer_stats->succ_pkts + peer_stats->retry_pkts;
+ } else if (txrate->flags & RATE_INFO_FLAGS_MCS) {
+ STATS_OP_FMT(AMPDU).ht[0][mcs] +=
+ peer_stats->succ_bytes + peer_stats->retry_bytes;
+ STATS_OP_FMT(AMPDU).ht[1][mcs] +=
+ peer_stats->succ_pkts + peer_stats->retry_pkts;
+ } else {
+ STATS_OP_FMT(AMPDU).vht[0][mcs] +=
+ peer_stats->succ_bytes + peer_stats->retry_bytes;
+ STATS_OP_FMT(AMPDU).vht[1][mcs] +=
+ peer_stats->succ_pkts + peer_stats->retry_pkts;
+ }
+ STATS_OP_FMT(AMPDU).bw[0][bw] +=
+ peer_stats->succ_bytes + peer_stats->retry_bytes;
+ STATS_OP_FMT(AMPDU).nss[0][nss] +=
+ peer_stats->succ_bytes + peer_stats->retry_bytes;
+ STATS_OP_FMT(AMPDU).gi[0][gi] +=
+ peer_stats->succ_bytes + peer_stats->retry_bytes;
+ STATS_OP_FMT(AMPDU).bw[1][bw] +=
+ peer_stats->succ_pkts + peer_stats->retry_pkts;
+ STATS_OP_FMT(AMPDU).nss[1][nss] +=
+ peer_stats->succ_pkts + peer_stats->retry_pkts;
+ STATS_OP_FMT(AMPDU).gi[1][gi] +=
+ peer_stats->succ_pkts + peer_stats->retry_pkts;
+ } else {
+ tx_stats->ack_fails += peer_stats->ba_fails;
+ }
+
+ STATS_OP_FMT(SUCC).bw[0][bw] += peer_stats->succ_bytes;
+ STATS_OP_FMT(SUCC).nss[0][nss] += peer_stats->succ_bytes;
+ STATS_OP_FMT(SUCC).gi[0][gi] += peer_stats->succ_bytes;
+
+ STATS_OP_FMT(SUCC).bw[1][bw] += peer_stats->succ_pkts;
+ STATS_OP_FMT(SUCC).nss[1][nss] += peer_stats->succ_pkts;
+ STATS_OP_FMT(SUCC).gi[1][gi] += peer_stats->succ_pkts;
+
+ STATS_OP_FMT(FAIL).bw[0][bw] += peer_stats->failed_bytes;
+ STATS_OP_FMT(FAIL).nss[0][nss] += peer_stats->failed_bytes;
+ STATS_OP_FMT(FAIL).gi[0][gi] += peer_stats->failed_bytes;
+
+ STATS_OP_FMT(FAIL).bw[1][bw] += peer_stats->failed_pkts;
+ STATS_OP_FMT(FAIL).nss[1][nss] += peer_stats->failed_pkts;
+ STATS_OP_FMT(FAIL).gi[1][gi] += peer_stats->failed_pkts;
+
+ STATS_OP_FMT(RETRY).bw[0][bw] += peer_stats->retry_bytes;
+ STATS_OP_FMT(RETRY).nss[0][nss] += peer_stats->retry_bytes;
+ STATS_OP_FMT(RETRY).gi[0][gi] += peer_stats->retry_bytes;
+
+ STATS_OP_FMT(RETRY).bw[1][bw] += peer_stats->retry_pkts;
+ STATS_OP_FMT(RETRY).nss[1][nss] += peer_stats->retry_pkts;
+ STATS_OP_FMT(RETRY).gi[1][gi] += peer_stats->retry_pkts;
+
+ tx_stats->tx_duration += peer_stats->duration;
+}
+
+void ath11k_debugfs_sta_update_txcompl(struct ath11k *ar,
+ struct hal_tx_status *ts)
+{
+ ath11k_dp_tx_update_txcompl(ar, ts);
+}
+
+static ssize_t ath11k_dbg_sta_dump_tx_stats(struct file *file,
+ char __user *user_buf,
+ size_t count, loff_t *ppos)
+{
+ struct ieee80211_sta *sta = file->private_data;
+ struct ath11k_sta *arsta = (struct ath11k_sta *)sta->drv_priv;
+ struct ath11k *ar = arsta->arvif->ar;
+ struct ath11k_htt_data_stats *stats;
+ static const char *str_name[ATH11K_STATS_TYPE_MAX] = {"succ", "fail",
+ "retry", "ampdu"};
+ static const char *str[ATH11K_COUNTER_TYPE_MAX] = {"bytes", "packets"};
+ int len = 0, i, j, k, retval = 0;
+ const int size = 2 * 4096;
+ char *buf;
+
+ if (!arsta->tx_stats)
+ return -ENOENT;
+
+ buf = kzalloc(size, GFP_KERNEL);
+ if (!buf)
+ return -ENOMEM;
+
+ mutex_lock(&ar->conf_mutex);
+
+ spin_lock_bh(&ar->data_lock);
+ for (k = 0; k < ATH11K_STATS_TYPE_MAX; k++) {
+ for (j = 0; j < ATH11K_COUNTER_TYPE_MAX; j++) {
+ stats = &arsta->tx_stats->stats[k];
+ len += scnprintf(buf + len, size - len, "%s_%s\n",
+ str_name[k],
+ str[j]);
+ len += scnprintf(buf + len, size - len,
+ " HE MCS %s\n",
+ str[j]);
+ for (i = 0; i < ATH11K_HE_MCS_NUM; i++)
+ len += scnprintf(buf + len, size - len,
+ " %llu ",
+ stats->he[j][i]);
+ len += scnprintf(buf + len, size - len, "\n");
+ len += scnprintf(buf + len, size - len,
+ " VHT MCS %s\n",
+ str[j]);
+ for (i = 0; i < ATH11K_VHT_MCS_NUM; i++)
+ len += scnprintf(buf + len, size - len,
+ " %llu ",
+ stats->vht[j][i]);
+ len += scnprintf(buf + len, size - len, "\n");
+ len += scnprintf(buf + len, size - len, " HT MCS %s\n",
+ str[j]);
+ for (i = 0; i < ATH11K_HT_MCS_NUM; i++)
+ len += scnprintf(buf + len, size - len,
+ " %llu ", stats->ht[j][i]);
+ len += scnprintf(buf + len, size - len, "\n");
+ len += scnprintf(buf + len, size - len,
+ " BW %s (20,40,80,160 MHz)\n", str[j]);
+ len += scnprintf(buf + len, size - len,
+ " %llu %llu %llu %llu\n",
+ stats->bw[j][0], stats->bw[j][1],
+ stats->bw[j][2], stats->bw[j][3]);
+ len += scnprintf(buf + len, size - len,
+ " NSS %s (1x1,2x2,3x3,4x4)\n", str[j]);
+ len += scnprintf(buf + len, size - len,
+ " %llu %llu %llu %llu\n",
+ stats->nss[j][0], stats->nss[j][1],
+ stats->nss[j][2], stats->nss[j][3]);
+ len += scnprintf(buf + len, size - len,
+ " GI %s (0.4us,0.8us,1.6us,3.2us)\n",
+ str[j]);
+ len += scnprintf(buf + len, size - len,
+ " %llu %llu %llu %llu\n",
+ stats->gi[j][0], stats->gi[j][1],
+ stats->gi[j][2], stats->gi[j][3]);
+ len += scnprintf(buf + len, size - len,
+ " legacy rate %s (1,2 ... Mbps)\n ",
+ str[j]);
+ for (i = 0; i < ATH11K_LEGACY_NUM; i++)
+ len += scnprintf(buf + len, size - len, "%llu ",
+ stats->legacy[j][i]);
+ len += scnprintf(buf + len, size - len, "\n");
+ }
+ }
+
+ len += scnprintf(buf + len, size - len,
+ "\nTX duration\n %llu usecs\n",
+ arsta->tx_stats->tx_duration);
+ len += scnprintf(buf + len, size - len,
+ "BA fails\n %llu\n", arsta->tx_stats->ba_fails);
+ len += scnprintf(buf + len, size - len,
+ "ack fails\n %llu\n", arsta->tx_stats->ack_fails);
+ spin_unlock_bh(&ar->data_lock);
+
+ if (len > size)
+ len = size;
+ retval = simple_read_from_buffer(user_buf, count, ppos, buf, len);
+ kfree(buf);
+
+ mutex_unlock(&ar->conf_mutex);
+ return retval;
+}
+
+static const struct file_operations fops_tx_stats = {
+ .read = ath11k_dbg_sta_dump_tx_stats,
+ .open = simple_open,
+ .owner = THIS_MODULE,
+ .llseek = default_llseek,
+};
+
+static ssize_t ath11k_dbg_sta_dump_rx_stats(struct file *file,
+ char __user *user_buf,
+ size_t count, loff_t *ppos)
+{
+ struct ieee80211_sta *sta = file->private_data;
+ struct ath11k_sta *arsta = (struct ath11k_sta *)sta->drv_priv;
+ struct ath11k *ar = arsta->arvif->ar;
+ struct ath11k_rx_peer_stats *rx_stats = arsta->rx_stats;
+ int len = 0, i, retval = 0;
+ const int size = 4096;
+ char *buf;
+
+ if (!rx_stats)
+ return -ENOENT;
+
+ buf = kzalloc(size, GFP_KERNEL);
+ if (!buf)
+ return -ENOMEM;
+
+ mutex_lock(&ar->conf_mutex);
+ spin_lock_bh(&ar->ab->base_lock);
+
+ len += scnprintf(buf + len, size - len, "RX peer stats:\n");
+ len += scnprintf(buf + len, size - len, "Num of MSDUs: %llu\n",
+ rx_stats->num_msdu);
+ len += scnprintf(buf + len, size - len, "Num of MSDUs with TCP L4: %llu\n",
+ rx_stats->tcp_msdu_count);
+ len += scnprintf(buf + len, size - len, "Num of MSDUs with UDP L4: %llu\n",
+ rx_stats->udp_msdu_count);
+ len += scnprintf(buf + len, size - len, "Num of MSDUs part of AMPDU: %llu\n",
+ rx_stats->ampdu_msdu_count);
+ len += scnprintf(buf + len, size - len, "Num of MSDUs not part of AMPDU: %llu\n",
+ rx_stats->non_ampdu_msdu_count);
+ len += scnprintf(buf + len, size - len, "Num of MSDUs using STBC: %llu\n",
+ rx_stats->stbc_count);
+ len += scnprintf(buf + len, size - len, "Num of MSDUs beamformed: %llu\n",
+ rx_stats->beamformed_count);
+ len += scnprintf(buf + len, size - len, "Num of MPDUs with FCS ok: %llu\n",
+ rx_stats->num_mpdu_fcs_ok);
+ len += scnprintf(buf + len, size - len, "Num of MPDUs with FCS error: %llu\n",
+ rx_stats->num_mpdu_fcs_err);
+ len += scnprintf(buf + len, size - len,
+ "GI: 0.8us %llu 0.4us %llu 1.6us %llu 3.2us %llu\n",
+ rx_stats->gi_count[0], rx_stats->gi_count[1],
+ rx_stats->gi_count[2], rx_stats->gi_count[3]);
+ len += scnprintf(buf + len, size - len,
+ "BW: 20Mhz %llu 40Mhz %llu 80Mhz %llu 160Mhz %llu\n",
+ rx_stats->bw_count[0], rx_stats->bw_count[1],
+ rx_stats->bw_count[2], rx_stats->bw_count[3]);
+ len += scnprintf(buf + len, size - len, "BCC %llu LDPC %llu\n",
+ rx_stats->coding_count[0], rx_stats->coding_count[1]);
+ len += scnprintf(buf + len, size - len,
+ "preamble: 11A %llu 11B %llu 11N %llu 11AC %llu 11AX %llu\n",
+ rx_stats->pream_cnt[0], rx_stats->pream_cnt[1],
+ rx_stats->pream_cnt[2], rx_stats->pream_cnt[3],
+ rx_stats->pream_cnt[4]);
+ len += scnprintf(buf + len, size - len,
+ "reception type: SU %llu MU_MIMO %llu MU_OFDMA %llu MU_OFDMA_MIMO %llu\n",
+ rx_stats->reception_type[0], rx_stats->reception_type[1],
+ rx_stats->reception_type[2], rx_stats->reception_type[3]);
+ len += scnprintf(buf + len, size - len, "TID(0-15) Legacy TID(16):");
+ for (i = 0; i <= IEEE80211_NUM_TIDS; i++)
+ len += scnprintf(buf + len, size - len, "%llu ", rx_stats->tid_count[i]);
+ len += scnprintf(buf + len, size - len, "\nMCS(0-11) Legacy MCS(12):");
+ for (i = 0; i < HAL_RX_MAX_MCS + 1; i++)
+ len += scnprintf(buf + len, size - len, "%llu ", rx_stats->mcs_count[i]);
+ len += scnprintf(buf + len, size - len, "\nNSS(1-8):");
+ for (i = 0; i < HAL_RX_MAX_NSS; i++)
+ len += scnprintf(buf + len, size - len, "%llu ", rx_stats->nss_count[i]);
+	len += scnprintf(buf + len, size - len, "\nRX Duration: %llu ",
+ rx_stats->rx_duration);
+ len += scnprintf(buf + len, size - len,
+			 "\nDCM: %llu\nRU 26: %llu 52: %llu 106: %llu 242: %llu 484: %llu 996: %llu\n",
+ rx_stats->dcm_count, rx_stats->ru_alloc_cnt[0],
+ rx_stats->ru_alloc_cnt[1], rx_stats->ru_alloc_cnt[2],
+ rx_stats->ru_alloc_cnt[3], rx_stats->ru_alloc_cnt[4],
+ rx_stats->ru_alloc_cnt[5]);
+
+ len += scnprintf(buf + len, size - len, "\n");
+
+ spin_unlock_bh(&ar->ab->base_lock);
+
+ if (len > size)
+ len = size;
+ retval = simple_read_from_buffer(user_buf, count, ppos, buf, len);
+ kfree(buf);
+
+ mutex_unlock(&ar->conf_mutex);
+ return retval;
+}
+
+static const struct file_operations fops_rx_stats = {
+ .read = ath11k_dbg_sta_dump_rx_stats,
+ .open = simple_open,
+ .owner = THIS_MODULE,
+ .llseek = default_llseek,
+};
+
+static int
+ath11k_dbg_sta_open_htt_peer_stats(struct inode *inode, struct file *file)
+{
+ struct ieee80211_sta *sta = inode->i_private;
+ struct ath11k_sta *arsta = (struct ath11k_sta *)sta->drv_priv;
+ struct ath11k *ar = arsta->arvif->ar;
+ struct debug_htt_stats_req *stats_req;
+ int type = ar->debug.htt_stats.type;
+ int ret;
+
+ if ((type != ATH11K_DBG_HTT_EXT_STATS_PEER_INFO &&
+ type != ATH11K_DBG_HTT_EXT_STATS_PEER_CTRL_PATH_TXRX_STATS) ||
+ type == ATH11K_DBG_HTT_EXT_STATS_RESET)
+ return -EPERM;
+
+ stats_req = vzalloc(sizeof(*stats_req) + ATH11K_HTT_STATS_BUF_SIZE);
+ if (!stats_req)
+ return -ENOMEM;
+
+ mutex_lock(&ar->conf_mutex);
+ ar->debug.htt_stats.stats_req = stats_req;
+ stats_req->type = type;
+ memcpy(stats_req->peer_addr, sta->addr, ETH_ALEN);
+ ret = ath11k_debugfs_htt_stats_req(ar);
+ mutex_unlock(&ar->conf_mutex);
+ if (ret < 0)
+ goto out;
+
+ file->private_data = stats_req;
+ return 0;
+out:
+ vfree(stats_req);
+ ar->debug.htt_stats.stats_req = NULL;
+ return ret;
+}
+
+static int
+ath11k_dbg_sta_release_htt_peer_stats(struct inode *inode, struct file *file)
+{
+ struct ieee80211_sta *sta = inode->i_private;
+ struct ath11k_sta *arsta = (struct ath11k_sta *)sta->drv_priv;
+ struct ath11k *ar = arsta->arvif->ar;
+
+ mutex_lock(&ar->conf_mutex);
+ vfree(file->private_data);
+ ar->debug.htt_stats.stats_req = NULL;
+ mutex_unlock(&ar->conf_mutex);
+
+ return 0;
+}
+
+static ssize_t ath11k_dbg_sta_read_htt_peer_stats(struct file *file,
+ char __user *user_buf,
+ size_t count, loff_t *ppos)
+{
+ struct debug_htt_stats_req *stats_req = file->private_data;
+ char *buf;
+ u32 length = 0;
+
+ buf = stats_req->buf;
+ length = min_t(u32, stats_req->buf_len, ATH11K_HTT_STATS_BUF_SIZE);
+ return simple_read_from_buffer(user_buf, count, ppos, buf, length);
+}
+
+static const struct file_operations fops_htt_peer_stats = {
+ .open = ath11k_dbg_sta_open_htt_peer_stats,
+ .release = ath11k_dbg_sta_release_htt_peer_stats,
+ .read = ath11k_dbg_sta_read_htt_peer_stats,
+ .owner = THIS_MODULE,
+ .llseek = default_llseek,
+};
+
+static ssize_t ath11k_dbg_sta_write_peer_pktlog(struct file *file,
+ const char __user *buf,
+ size_t count, loff_t *ppos)
+{
+ struct ieee80211_sta *sta = file->private_data;
+ struct ath11k_sta *arsta = (struct ath11k_sta *)sta->drv_priv;
+ struct ath11k *ar = arsta->arvif->ar;
+ int ret, enable;
+
+ mutex_lock(&ar->conf_mutex);
+
+ if (ar->state != ATH11K_STATE_ON) {
+ ret = -ENETDOWN;
+ goto out;
+ }
+
+ ret = kstrtoint_from_user(buf, count, 0, &enable);
+ if (ret)
+ goto out;
+
+ ar->debug.pktlog_peer_valid = enable;
+ memcpy(ar->debug.pktlog_peer_addr, sta->addr, ETH_ALEN);
+
+ /* Send peer based pktlog enable/disable */
+ ret = ath11k_wmi_pdev_peer_pktlog_filter(ar, sta->addr, enable);
+ if (ret) {
+ ath11k_warn(ar->ab, "failed to set peer pktlog filter %pM: %d\n",
+ sta->addr, ret);
+ goto out;
+ }
+
+ ath11k_dbg(ar->ab, ATH11K_DBG_WMI, "peer pktlog filter set to %d\n",
+ enable);
+ ret = count;
+
+out:
+ mutex_unlock(&ar->conf_mutex);
+ return ret;
+}
+
+static ssize_t ath11k_dbg_sta_read_peer_pktlog(struct file *file,
+ char __user *ubuf,
+ size_t count, loff_t *ppos)
+{
+ struct ieee80211_sta *sta = file->private_data;
+ struct ath11k_sta *arsta = (struct ath11k_sta *)sta->drv_priv;
+ struct ath11k *ar = arsta->arvif->ar;
+ char buf[32] = {0};
+ int len;
+
+ mutex_lock(&ar->conf_mutex);
+ len = scnprintf(buf, sizeof(buf), "%08x %pM\n",
+ ar->debug.pktlog_peer_valid,
+ ar->debug.pktlog_peer_addr);
+ mutex_unlock(&ar->conf_mutex);
+
+ return simple_read_from_buffer(ubuf, count, ppos, buf, len);
+}
+
+static const struct file_operations fops_peer_pktlog = {
+ .write = ath11k_dbg_sta_write_peer_pktlog,
+ .read = ath11k_dbg_sta_read_peer_pktlog,
+ .open = simple_open,
+ .owner = THIS_MODULE,
+ .llseek = default_llseek,
+};
+
+static ssize_t ath11k_dbg_sta_write_delba(struct file *file,
+ const char __user *user_buf,
+ size_t count, loff_t *ppos)
+{
+ struct ieee80211_sta *sta = file->private_data;
+ struct ath11k_sta *arsta = (struct ath11k_sta *)sta->drv_priv;
+ struct ath11k *ar = arsta->arvif->ar;
+ u32 tid, initiator, reason;
+ int ret;
+ char buf[64] = {0};
+
+ ret = simple_write_to_buffer(buf, sizeof(buf) - 1, ppos,
+ user_buf, count);
+ if (ret <= 0)
+ return ret;
+
+ ret = sscanf(buf, "%u %u %u", &tid, &initiator, &reason);
+ if (ret != 3)
+ return -EINVAL;
+
+ /* Valid TID values are 0 through 15 */
+ if (tid > HAL_DESC_REO_NON_QOS_TID - 1)
+ return -EINVAL;
+
+ mutex_lock(&ar->conf_mutex);
+ if (ar->state != ATH11K_STATE_ON ||
+ arsta->aggr_mode != ATH11K_DBG_AGGR_MODE_MANUAL) {
+ ret = count;
+ goto out;
+ }
+
+ ret = ath11k_wmi_delba_send(ar, arsta->arvif->vdev_id, sta->addr,
+ tid, initiator, reason);
+ if (ret) {
+ ath11k_warn(ar->ab, "failed to send delba: vdev_id %u peer %pM tid %u initiator %u reason %u\n",
+ arsta->arvif->vdev_id, sta->addr, tid, initiator,
+ reason);
+ }
+ ret = count;
+out:
+ mutex_unlock(&ar->conf_mutex);
+ return ret;
+}
+
+static const struct file_operations fops_delba = {
+ .write = ath11k_dbg_sta_write_delba,
+ .open = simple_open,
+ .owner = THIS_MODULE,
+ .llseek = default_llseek,
+};
+
+static ssize_t ath11k_dbg_sta_write_addba_resp(struct file *file,
+ const char __user *user_buf,
+ size_t count, loff_t *ppos)
+{
+ struct ieee80211_sta *sta = file->private_data;
+ struct ath11k_sta *arsta = (struct ath11k_sta *)sta->drv_priv;
+ struct ath11k *ar = arsta->arvif->ar;
+ u32 tid, status;
+ int ret;
+ char buf[64] = {0};
+
+ ret = simple_write_to_buffer(buf, sizeof(buf) - 1, ppos,
+ user_buf, count);
+ if (ret <= 0)
+ return ret;
+
+ ret = sscanf(buf, "%u %u", &tid, &status);
+ if (ret != 2)
+ return -EINVAL;
+
+ /* Valid TID values are 0 through 15 */
+ if (tid > HAL_DESC_REO_NON_QOS_TID - 1)
+ return -EINVAL;
+
+ mutex_lock(&ar->conf_mutex);
+ if (ar->state != ATH11K_STATE_ON ||
+ arsta->aggr_mode != ATH11K_DBG_AGGR_MODE_MANUAL) {
+ ret = count;
+ goto out;
+ }
+
+ ret = ath11k_wmi_addba_set_resp(ar, arsta->arvif->vdev_id, sta->addr,
+ tid, status);
+ if (ret) {
+		ath11k_warn(ar->ab, "failed to send addba response: vdev_id %u peer %pM tid %u status %u\n",
+ arsta->arvif->vdev_id, sta->addr, tid, status);
+ }
+ ret = count;
+out:
+ mutex_unlock(&ar->conf_mutex);
+ return ret;
+}
+
+static const struct file_operations fops_addba_resp = {
+ .write = ath11k_dbg_sta_write_addba_resp,
+ .open = simple_open,
+ .owner = THIS_MODULE,
+ .llseek = default_llseek,
+};
+
+static ssize_t ath11k_dbg_sta_write_addba(struct file *file,
+ const char __user *user_buf,
+ size_t count, loff_t *ppos)
+{
+ struct ieee80211_sta *sta = file->private_data;
+ struct ath11k_sta *arsta = (struct ath11k_sta *)sta->drv_priv;
+ struct ath11k *ar = arsta->arvif->ar;
+ u32 tid, buf_size;
+ int ret;
+ char buf[64] = {0};
+
+ ret = simple_write_to_buffer(buf, sizeof(buf) - 1, ppos,
+ user_buf, count);
+ if (ret <= 0)
+ return ret;
+
+ ret = sscanf(buf, "%u %u", &tid, &buf_size);
+ if (ret != 2)
+ return -EINVAL;
+
+ /* Valid TID values are 0 through 15 */
+ if (tid > HAL_DESC_REO_NON_QOS_TID - 1)
+ return -EINVAL;
+
+ mutex_lock(&ar->conf_mutex);
+ if (ar->state != ATH11K_STATE_ON ||
+ arsta->aggr_mode != ATH11K_DBG_AGGR_MODE_MANUAL) {
+ ret = count;
+ goto out;
+ }
+
+ ret = ath11k_wmi_addba_send(ar, arsta->arvif->vdev_id, sta->addr,
+ tid, buf_size);
+ if (ret) {
+ ath11k_warn(ar->ab, "failed to send addba request: vdev_id %u peer %pM tid %u buf_size %u\n",
+ arsta->arvif->vdev_id, sta->addr, tid, buf_size);
+ }
+
+ ret = count;
+out:
+ mutex_unlock(&ar->conf_mutex);
+ return ret;
+}
+
+static const struct file_operations fops_addba = {
+ .write = ath11k_dbg_sta_write_addba,
+ .open = simple_open,
+ .owner = THIS_MODULE,
+ .llseek = default_llseek,
+};
+
+static ssize_t ath11k_dbg_sta_read_aggr_mode(struct file *file,
+ char __user *user_buf,
+ size_t count, loff_t *ppos)
+{
+ struct ieee80211_sta *sta = file->private_data;
+ struct ath11k_sta *arsta = (struct ath11k_sta *)sta->drv_priv;
+ struct ath11k *ar = arsta->arvif->ar;
+ char buf[64];
+ int len = 0;
+
+ mutex_lock(&ar->conf_mutex);
+ len = scnprintf(buf, sizeof(buf) - len,
+ "aggregation mode: %s\n\n%s\n%s\n",
+ (arsta->aggr_mode == ATH11K_DBG_AGGR_MODE_AUTO) ?
+ "auto" : "manual", "auto = 0", "manual = 1");
+ mutex_unlock(&ar->conf_mutex);
+
+ return simple_read_from_buffer(user_buf, count, ppos, buf, len);
+}
+
+static ssize_t ath11k_dbg_sta_write_aggr_mode(struct file *file,
+ const char __user *user_buf,
+ size_t count, loff_t *ppos)
+{
+ struct ieee80211_sta *sta = file->private_data;
+ struct ath11k_sta *arsta = (struct ath11k_sta *)sta->drv_priv;
+ struct ath11k *ar = arsta->arvif->ar;
+ u32 aggr_mode;
+ int ret;
+
+ if (kstrtouint_from_user(user_buf, count, 0, &aggr_mode))
+ return -EINVAL;
+
+ if (aggr_mode >= ATH11K_DBG_AGGR_MODE_MAX)
+ return -EINVAL;
+
+ mutex_lock(&ar->conf_mutex);
+ if (ar->state != ATH11K_STATE_ON ||
+ aggr_mode == arsta->aggr_mode) {
+ ret = count;
+ goto out;
+ }
+
+ ret = ath11k_wmi_addba_clear_resp(ar, arsta->arvif->vdev_id, sta->addr);
+ if (ret) {
+ ath11k_warn(ar->ab, "failed to clear addba session ret: %d\n",
+ ret);
+ goto out;
+ }
+
+	arsta->aggr_mode = aggr_mode;
+	ret = count;
+out:
+ mutex_unlock(&ar->conf_mutex);
+ return ret;
+}
+
+static const struct file_operations fops_aggr_mode = {
+ .read = ath11k_dbg_sta_read_aggr_mode,
+ .write = ath11k_dbg_sta_write_aggr_mode,
+ .open = simple_open,
+ .owner = THIS_MODULE,
+ .llseek = default_llseek,
+};
+
+static ssize_t
+ath11k_write_htt_peer_stats_reset(struct file *file,
+ const char __user *user_buf,
+ size_t count, loff_t *ppos)
+{
+ struct ieee80211_sta *sta = file->private_data;
+ struct ath11k_sta *arsta = (struct ath11k_sta *)sta->drv_priv;
+ struct ath11k *ar = arsta->arvif->ar;
+ struct htt_ext_stats_cfg_params cfg_params = { 0 };
+ int ret;
+ u8 type;
+
+ ret = kstrtou8_from_user(user_buf, count, 0, &type);
+ if (ret)
+ return ret;
+
+ if (!type)
+ return ret;
+
+ mutex_lock(&ar->conf_mutex);
+ cfg_params.cfg0 = HTT_STAT_PEER_INFO_MAC_ADDR;
+ cfg_params.cfg0 |= FIELD_PREP(GENMASK(15, 1),
+ HTT_PEER_STATS_REQ_MODE_FLUSH_TQM);
+
+ cfg_params.cfg1 = HTT_STAT_DEFAULT_PEER_REQ_TYPE;
+
+ cfg_params.cfg2 |= FIELD_PREP(GENMASK(7, 0), sta->addr[0]);
+ cfg_params.cfg2 |= FIELD_PREP(GENMASK(15, 8), sta->addr[1]);
+ cfg_params.cfg2 |= FIELD_PREP(GENMASK(23, 16), sta->addr[2]);
+ cfg_params.cfg2 |= FIELD_PREP(GENMASK(31, 24), sta->addr[3]);
+
+ cfg_params.cfg3 |= FIELD_PREP(GENMASK(7, 0), sta->addr[4]);
+ cfg_params.cfg3 |= FIELD_PREP(GENMASK(15, 8), sta->addr[5]);
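+	/* e.g. for peer MAC aa:bb:cc:dd:ee:ff the packing above yields
+	 * cfg2 = 0xddccbbaa and cfg3 = 0x0000ffee, before the reset flag
+	 * below is OR'ed in.
+	 */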
+
+ cfg_params.cfg3 |= ATH11K_HTT_PEER_STATS_RESET;
+
+ ret = ath11k_dp_tx_htt_h2t_ext_stats_req(ar,
+ ATH11K_DBG_HTT_EXT_STATS_PEER_INFO,
+ &cfg_params,
+ 0ULL);
+ if (ret) {
+ ath11k_warn(ar->ab, "failed to send htt peer stats request: %d\n", ret);
+ mutex_unlock(&ar->conf_mutex);
+ return ret;
+ }
+
+ mutex_unlock(&ar->conf_mutex);
+
+ ret = count;
+
+ return ret;
+}
+
+static const struct file_operations fops_htt_peer_stats_reset = {
+ .write = ath11k_write_htt_peer_stats_reset,
+ .open = simple_open,
+ .owner = THIS_MODULE,
+ .llseek = default_llseek,
+};
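+
+/* Editor's note: writing a non-zero value to this per-station debugfs file
+ * (its exact path depends on the mac80211 station debugfs layout, e.g.
+ * .../netdev:<iface>/stations/<mac>/htt_peer_stats_reset) issues the
+ * flush-and-reset request above; a zero value is ignored.
+ */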
+
+static ssize_t ath11k_dbg_sta_read_peer_ps_state(struct file *file,
+ char __user *user_buf,
+ size_t count, loff_t *ppos)
+{
+ struct ieee80211_sta *sta = file->private_data;
+ struct ath11k_sta *arsta = (struct ath11k_sta *)sta->drv_priv;
+ struct ath11k *ar = arsta->arvif->ar;
+ char buf[20];
+ int len;
+
+ spin_lock_bh(&ar->data_lock);
+
+ len = scnprintf(buf, sizeof(buf), "%d\n", arsta->peer_ps_state);
+
+ spin_unlock_bh(&ar->data_lock);
+
+ return simple_read_from_buffer(user_buf, count, ppos, buf, len);
+}
+
+static const struct file_operations fops_peer_ps_state = {
+ .open = simple_open,
+ .read = ath11k_dbg_sta_read_peer_ps_state,
+ .owner = THIS_MODULE,
+ .llseek = default_llseek,
+};
+
+static ssize_t ath11k_dbg_sta_read_current_ps_duration(struct file *file,
+ char __user *user_buf,
+ size_t count,
+ loff_t *ppos)
+{
+ struct ieee80211_sta *sta = file->private_data;
+ struct ath11k_sta *arsta = (struct ath11k_sta *)sta->drv_priv;
+ struct ath11k *ar = arsta->arvif->ar;
+ u64 time_since_station_in_power_save;
+ char buf[20];
+ int len;
+
+ spin_lock_bh(&ar->data_lock);
+
+ if (arsta->peer_ps_state == WMI_PEER_PS_STATE_ON &&
+ arsta->peer_current_ps_valid)
+ time_since_station_in_power_save = jiffies_to_msecs(jiffies
+ - arsta->ps_start_jiffies);
+ else
+ time_since_station_in_power_save = 0;
+
+ len = scnprintf(buf, sizeof(buf), "%llu\n",
+ time_since_station_in_power_save);
+ spin_unlock_bh(&ar->data_lock);
+
+ return simple_read_from_buffer(user_buf, count, ppos, buf, len);
+}
+
+static const struct file_operations fops_current_ps_duration = {
+ .open = simple_open,
+ .read = ath11k_dbg_sta_read_current_ps_duration,
+ .owner = THIS_MODULE,
+ .llseek = default_llseek,
+};
+
+static ssize_t ath11k_dbg_sta_read_total_ps_duration(struct file *file,
+ char __user *user_buf,
+ size_t count, loff_t *ppos)
+{
+ struct ieee80211_sta *sta = file->private_data;
+ struct ath11k_sta *arsta = (struct ath11k_sta *)sta->drv_priv;
+ struct ath11k *ar = arsta->arvif->ar;
+ char buf[20];
+ u64 power_save_duration;
+ int len;
+
+ spin_lock_bh(&ar->data_lock);
+
+ if (arsta->peer_ps_state == WMI_PEER_PS_STATE_ON &&
+ arsta->peer_current_ps_valid)
+ power_save_duration = jiffies_to_msecs(jiffies
+ - arsta->ps_start_jiffies)
+ + arsta->ps_total_duration;
+ else
+ power_save_duration = arsta->ps_total_duration;
+
+ len = scnprintf(buf, sizeof(buf), "%llu\n", power_save_duration);
+
+ spin_unlock_bh(&ar->data_lock);
+
+ return simple_read_from_buffer(user_buf, count, ppos, buf, len);
+}
+
+static const struct file_operations fops_total_ps_duration = {
+ .open = simple_open,
+ .read = ath11k_dbg_sta_read_total_ps_duration,
+ .owner = THIS_MODULE,
+ .llseek = default_llseek,
+};
+
+void ath11k_debugfs_sta_op_add(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
+ struct ieee80211_sta *sta, struct dentry *dir)
+{
+ struct ath11k *ar = hw->priv;
+
+ if (ath11k_debugfs_is_extd_tx_stats_enabled(ar))
+ debugfs_create_file("tx_stats", 0400, dir, sta,
+ &fops_tx_stats);
+ if (ath11k_debugfs_is_extd_rx_stats_enabled(ar))
+ debugfs_create_file("rx_stats", 0400, dir, sta,
+ &fops_rx_stats);
+
+ debugfs_create_file("htt_peer_stats", 0400, dir, sta,
+ &fops_htt_peer_stats);
+
+ debugfs_create_file("peer_pktlog", 0644, dir, sta,
+ &fops_peer_pktlog);
+
+ debugfs_create_file("aggr_mode", 0644, dir, sta, &fops_aggr_mode);
+ debugfs_create_file("addba", 0200, dir, sta, &fops_addba);
+ debugfs_create_file("addba_resp", 0200, dir, sta, &fops_addba_resp);
+ debugfs_create_file("delba", 0200, dir, sta, &fops_delba);
+
+ if (test_bit(WMI_TLV_SERVICE_PER_PEER_HTT_STATS_RESET,
+ ar->ab->wmi_ab.svc_map))
+ debugfs_create_file("htt_peer_stats_reset", 0600, dir, sta,
+ &fops_htt_peer_stats_reset);
+
+ debugfs_create_file("peer_ps_state", 0400, dir, sta,
+ &fops_peer_ps_state);
+
+ if (test_bit(WMI_TLV_SERVICE_PEER_POWER_SAVE_DURATION_SUPPORT,
+ ar->ab->wmi_ab.svc_map)) {
+ debugfs_create_file("current_ps_duration", 0440, dir, sta,
+ &fops_current_ps_duration);
+ debugfs_create_file("total_ps_duration", 0440, dir, sta,
+ &fops_total_ps_duration);
+ }
+}
diff --git a/drivers/net/wireless/ath/ath11k/debugfs_sta.h b/drivers/net/wireless/ath/ath11k/debugfs_sta.h
new file mode 100644
index 0000000000..e6c11b3a40
--- /dev/null
+++ b/drivers/net/wireless/ath/ath11k/debugfs_sta.h
@@ -0,0 +1,42 @@
+/* SPDX-License-Identifier: BSD-3-Clause-Clear */
+/*
+ * Copyright (c) 2018-2020 The Linux Foundation. All rights reserved.
+ */
+
+#ifndef _ATH11K_DEBUGFS_STA_H_
+#define _ATH11K_DEBUGFS_STA_H_
+
+#include <net/mac80211.h>
+
+#include "core.h"
+#include "hal_tx.h"
+
+#ifdef CONFIG_ATH11K_DEBUGFS
+
+void ath11k_debugfs_sta_op_add(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
+ struct ieee80211_sta *sta, struct dentry *dir);
+void ath11k_debugfs_sta_add_tx_stats(struct ath11k_sta *arsta,
+ struct ath11k_per_peer_tx_stats *peer_stats,
+ u8 legacy_rate_idx);
+void ath11k_debugfs_sta_update_txcompl(struct ath11k *ar,
+ struct hal_tx_status *ts);
+
+#else /* CONFIG_ATH11K_DEBUGFS */
+
+#define ath11k_debugfs_sta_op_add NULL
+
+static inline void
+ath11k_debugfs_sta_add_tx_stats(struct ath11k_sta *arsta,
+ struct ath11k_per_peer_tx_stats *peer_stats,
+ u8 legacy_rate_idx)
+{
+}
+
+static inline void ath11k_debugfs_sta_update_txcompl(struct ath11k *ar,
+ struct hal_tx_status *ts)
+{
+}
+
+#endif /* CONFIG_ATH11K_DEBUGFS */
+
+#endif /* _ATH11K_DEBUGFS_STA_H_ */
diff --git a/drivers/net/wireless/ath/ath11k/dp.c b/drivers/net/wireless/ath/ath11k/dp.c
new file mode 100644
index 0000000000..d070bcb3fe
--- /dev/null
+++ b/drivers/net/wireless/ath/ath11k/dp.c
@@ -0,0 +1,1190 @@
+// SPDX-License-Identifier: BSD-3-Clause-Clear
+/*
+ * Copyright (c) 2018-2019 The Linux Foundation. All rights reserved.
+ * Copyright (c) 2022 Qualcomm Innovation Center, Inc. All rights reserved.
+ */
+
+#include <crypto/hash.h>
+#include "core.h"
+#include "dp_tx.h"
+#include "hal_tx.h"
+#include "hif.h"
+#include "debug.h"
+#include "dp_rx.h"
+#include "peer.h"
+
+static void ath11k_dp_htt_htc_tx_complete(struct ath11k_base *ab,
+ struct sk_buff *skb)
+{
+ dev_kfree_skb_any(skb);
+}
+
+void ath11k_dp_peer_cleanup(struct ath11k *ar, int vdev_id, const u8 *addr)
+{
+ struct ath11k_base *ab = ar->ab;
+ struct ath11k_peer *peer;
+
+ /* TODO: Any other peer specific DP cleanup */
+
+ spin_lock_bh(&ab->base_lock);
+ peer = ath11k_peer_find(ab, vdev_id, addr);
+ if (!peer) {
+ ath11k_warn(ab, "failed to lookup peer %pM on vdev %d\n",
+ addr, vdev_id);
+ spin_unlock_bh(&ab->base_lock);
+ return;
+ }
+
+ ath11k_peer_rx_tid_cleanup(ar, peer);
+ peer->dp_setup_done = false;
+ crypto_free_shash(peer->tfm_mmic);
+ spin_unlock_bh(&ab->base_lock);
+}
+
+int ath11k_dp_peer_setup(struct ath11k *ar, int vdev_id, const u8 *addr)
+{
+ struct ath11k_base *ab = ar->ab;
+ struct ath11k_peer *peer;
+ u32 reo_dest;
+ int ret = 0, tid;
+
+ /* NOTE: reo_dest ring id starts from 1 unlike mac_id which starts from 0 */
+ reo_dest = ar->dp.mac_id + 1;
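+	/* The routing value OR's the RX hash-enable flag with the REO
+	 * destination ring shifted up by one bit, e.g. mac_id 0 maps to
+	 * reo_dest 1 and yields DP_RX_HASH_ENABLE | (1 << 1).
+	 */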
+ ret = ath11k_wmi_set_peer_param(ar, addr, vdev_id,
+ WMI_PEER_SET_DEFAULT_ROUTING,
+ DP_RX_HASH_ENABLE | (reo_dest << 1));
+
+ if (ret) {
+		ath11k_warn(ab, "failed to set default routing %d for peer %pM vdev_id %d\n",
+ ret, addr, vdev_id);
+ return ret;
+ }
+
+ for (tid = 0; tid <= IEEE80211_NUM_TIDS; tid++) {
+ ret = ath11k_peer_rx_tid_setup(ar, addr, vdev_id, tid, 1, 0,
+ HAL_PN_TYPE_NONE);
+ if (ret) {
+ ath11k_warn(ab, "failed to setup rxd tid queue for tid %d: %d\n",
+ tid, ret);
+ goto peer_clean;
+ }
+ }
+
+ ret = ath11k_peer_rx_frag_setup(ar, addr, vdev_id);
+ if (ret) {
+ ath11k_warn(ab, "failed to setup rx defrag context\n");
+ tid--;
+ goto peer_clean;
+ }
+
+ /* TODO: Setup other peer specific resource used in data path */
+
+ return 0;
+
+peer_clean:
+ spin_lock_bh(&ab->base_lock);
+
+ peer = ath11k_peer_find(ab, vdev_id, addr);
+ if (!peer) {
+ ath11k_warn(ab, "failed to find the peer to del rx tid\n");
+ spin_unlock_bh(&ab->base_lock);
+ return -ENOENT;
+ }
+
+ for (; tid >= 0; tid--)
+ ath11k_peer_rx_tid_delete(ar, peer, tid);
+
+ spin_unlock_bh(&ab->base_lock);
+
+ return ret;
+}
+
+void ath11k_dp_srng_cleanup(struct ath11k_base *ab, struct dp_srng *ring)
+{
+ if (!ring->vaddr_unaligned)
+ return;
+
+ if (ring->cached)
+ kfree(ring->vaddr_unaligned);
+ else
+ dma_free_coherent(ab->dev, ring->size, ring->vaddr_unaligned,
+ ring->paddr_unaligned);
+
+ ring->vaddr_unaligned = NULL;
+}
+
+static int ath11k_dp_srng_find_ring_in_mask(int ring_num, const u8 *grp_mask)
+{
+ int ext_group_num;
+ u8 mask = 1 << ring_num;
+
+ for (ext_group_num = 0; ext_group_num < ATH11K_EXT_IRQ_GRP_NUM_MAX;
+ ext_group_num++) {
+ if (mask & grp_mask[ext_group_num])
+ return ext_group_num;
+ }
+
+ return -ENOENT;
+}
+
+static int ath11k_dp_srng_calculate_msi_group(struct ath11k_base *ab,
+ enum hal_ring_type type, int ring_num)
+{
+ const u8 *grp_mask;
+
+ switch (type) {
+ case HAL_WBM2SW_RELEASE:
+ if (ring_num == DP_RX_RELEASE_RING_NUM) {
+ grp_mask = &ab->hw_params.ring_mask->rx_wbm_rel[0];
+ ring_num = 0;
+ } else {
+ grp_mask = &ab->hw_params.ring_mask->tx[0];
+ }
+ break;
+ case HAL_REO_EXCEPTION:
+ grp_mask = &ab->hw_params.ring_mask->rx_err[0];
+ break;
+ case HAL_REO_DST:
+ grp_mask = &ab->hw_params.ring_mask->rx[0];
+ break;
+ case HAL_REO_STATUS:
+ grp_mask = &ab->hw_params.ring_mask->reo_status[0];
+ break;
+ case HAL_RXDMA_MONITOR_STATUS:
+ case HAL_RXDMA_MONITOR_DST:
+ grp_mask = &ab->hw_params.ring_mask->rx_mon_status[0];
+ break;
+ case HAL_RXDMA_DST:
+ grp_mask = &ab->hw_params.ring_mask->rxdma2host[0];
+ break;
+ case HAL_RXDMA_BUF:
+ grp_mask = &ab->hw_params.ring_mask->host2rxdma[0];
+ break;
+ case HAL_RXDMA_MONITOR_BUF:
+ case HAL_TCL_DATA:
+ case HAL_TCL_CMD:
+ case HAL_REO_CMD:
+ case HAL_SW2WBM_RELEASE:
+ case HAL_WBM_IDLE_LINK:
+ case HAL_TCL_STATUS:
+ case HAL_REO_REINJECT:
+ case HAL_CE_SRC:
+ case HAL_CE_DST:
+ case HAL_CE_DST_STATUS:
+ default:
+ return -ENOENT;
+ }
+
+ return ath11k_dp_srng_find_ring_in_mask(ring_num, grp_mask);
+}
+
+static void ath11k_dp_srng_msi_setup(struct ath11k_base *ab,
+ struct hal_srng_params *ring_params,
+ enum hal_ring_type type, int ring_num)
+{
+ int msi_group_number, msi_data_count;
+ u32 msi_data_start, msi_irq_start, addr_lo, addr_hi;
+ int ret;
+
+ ret = ath11k_get_user_msi_vector(ab, "DP",
+ &msi_data_count, &msi_data_start,
+ &msi_irq_start);
+ if (ret)
+ return;
+
+ msi_group_number = ath11k_dp_srng_calculate_msi_group(ab, type,
+ ring_num);
+ if (msi_group_number < 0) {
+ ath11k_dbg(ab, ATH11K_DBG_PCI,
+ "ring not part of an ext_group; ring_type: %d,ring_num %d",
+ type, ring_num);
+ ring_params->msi_addr = 0;
+ ring_params->msi_data = 0;
+ return;
+ }
+
+ if (msi_group_number > msi_data_count) {
+ ath11k_dbg(ab, ATH11K_DBG_PCI,
+ "multiple msi_groups share one msi, msi_group_num %d",
+ msi_group_number);
+ }
+
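+ /* rings can outnumber the MSI vectors; the modulo below wraps the
+ * group number into the available MSI data range
+ */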
+ ath11k_get_msi_address(ab, &addr_lo, &addr_hi);
+
+ ring_params->msi_addr = addr_lo;
+ ring_params->msi_addr |= (dma_addr_t)(((uint64_t)addr_hi) << 32);
+ ring_params->msi_data = (msi_group_number % msi_data_count)
+ + msi_data_start;
+ ring_params->flags |= HAL_SRNG_FLAGS_MSI_INTR;
+}
+
+int ath11k_dp_srng_setup(struct ath11k_base *ab, struct dp_srng *ring,
+ enum hal_ring_type type, int ring_num,
+ int mac_id, int num_entries)
+{
+ struct hal_srng_params params = { 0 };
+ int entry_sz = ath11k_hal_srng_get_entrysize(ab, type);
+ int max_entries = ath11k_hal_srng_get_max_entries(ab, type);
+ int ret;
+ bool cached = false;
+
+ if (max_entries < 0 || entry_sz < 0)
+ return -EINVAL;
+
+ if (num_entries > max_entries)
+ num_entries = max_entries;
+
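+ /* reserve extra bytes so the ring base address can be aligned to
+ * HAL_RING_BASE_ALIGN below
+ */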
+ ring->size = (num_entries * entry_sz) + HAL_RING_BASE_ALIGN - 1;
+
+ if (ab->hw_params.alloc_cacheable_memory) {
+ /* Allocate the reo dst and tx completion rings from cacheable memory */
+ switch (type) {
+ case HAL_REO_DST:
+ case HAL_WBM2SW_RELEASE:
+ cached = true;
+ break;
+ default:
+ cached = false;
+ }
+
+ if (cached) {
+ ring->vaddr_unaligned = kzalloc(ring->size, GFP_KERNEL);
+ ring->paddr_unaligned = virt_to_phys(ring->vaddr_unaligned);
+ }
+ }
+
+ if (!cached)
+ ring->vaddr_unaligned = dma_alloc_coherent(ab->dev, ring->size,
+ &ring->paddr_unaligned,
+ GFP_KERNEL);
+
+ if (!ring->vaddr_unaligned)
+ return -ENOMEM;
+
+ ring->vaddr = PTR_ALIGN(ring->vaddr_unaligned, HAL_RING_BASE_ALIGN);
+ ring->paddr = ring->paddr_unaligned + ((unsigned long)ring->vaddr -
+ (unsigned long)ring->vaddr_unaligned);
+
+ params.ring_base_vaddr = ring->vaddr;
+ params.ring_base_paddr = ring->paddr;
+ params.num_entries = num_entries;
+ ath11k_dp_srng_msi_setup(ab, &params, type, ring_num + mac_id);
+
+ switch (type) {
+ case HAL_REO_DST:
+ params.intr_batch_cntr_thres_entries =
+ HAL_SRNG_INT_BATCH_THRESHOLD_RX;
+ params.intr_timer_thres_us = HAL_SRNG_INT_TIMER_THRESHOLD_RX;
+ break;
+ case HAL_RXDMA_BUF:
+ case HAL_RXDMA_MONITOR_BUF:
+ case HAL_RXDMA_MONITOR_STATUS:
+ params.low_threshold = num_entries >> 3;
+ params.flags |= HAL_SRNG_FLAGS_LOW_THRESH_INTR_EN;
+ params.intr_batch_cntr_thres_entries = 0;
+ params.intr_timer_thres_us = HAL_SRNG_INT_TIMER_THRESHOLD_RX;
+ break;
+ case HAL_WBM2SW_RELEASE:
+ if (ring_num < 3) {
+ params.intr_batch_cntr_thres_entries =
+ HAL_SRNG_INT_BATCH_THRESHOLD_TX;
+ params.intr_timer_thres_us =
+ HAL_SRNG_INT_TIMER_THRESHOLD_TX;
+ break;
+ }
+ /* fall through when ring_num >= 3 */
+ fallthrough;
+ case HAL_REO_EXCEPTION:
+ case HAL_REO_REINJECT:
+ case HAL_REO_CMD:
+ case HAL_REO_STATUS:
+ case HAL_TCL_DATA:
+ case HAL_TCL_CMD:
+ case HAL_TCL_STATUS:
+ case HAL_WBM_IDLE_LINK:
+ case HAL_SW2WBM_RELEASE:
+ case HAL_RXDMA_DST:
+ case HAL_RXDMA_MONITOR_DST:
+ case HAL_RXDMA_MONITOR_DESC:
+ params.intr_batch_cntr_thres_entries =
+ HAL_SRNG_INT_BATCH_THRESHOLD_OTHER;
+ params.intr_timer_thres_us = HAL_SRNG_INT_TIMER_THRESHOLD_OTHER;
+ break;
+ case HAL_RXDMA_DIR_BUF:
+ break;
+ default:
+ ath11k_warn(ab, "Not a valid ring type in dp :%d\n", type);
+ return -EINVAL;
+ }
+
+ if (cached) {
+ params.flags |= HAL_SRNG_FLAGS_CACHED;
+ ring->cached = 1;
+ }
+
+ ret = ath11k_hal_srng_setup(ab, type, ring_num, mac_id, &params);
+ if (ret < 0) {
+ ath11k_warn(ab, "failed to setup srng: %d ring_id %d\n",
+ ret, ring_num);
+ return ret;
+ }
+
+ ring->ring_id = ret;
+
+ return 0;
+}
+
+void ath11k_dp_stop_shadow_timers(struct ath11k_base *ab)
+{
+ int i;
+
+ if (!ab->hw_params.supports_shadow_regs)
+ return;
+
+ for (i = 0; i < ab->hw_params.max_tx_ring; i++)
+ ath11k_dp_shadow_stop_timer(ab, &ab->dp.tx_ring_timer[i]);
+
+ ath11k_dp_shadow_stop_timer(ab, &ab->dp.reo_cmd_timer);
+}
+
+static void ath11k_dp_srng_common_cleanup(struct ath11k_base *ab)
+{
+ struct ath11k_dp *dp = &ab->dp;
+ int i;
+
+ ath11k_dp_stop_shadow_timers(ab);
+ ath11k_dp_srng_cleanup(ab, &dp->wbm_desc_rel_ring);
+ ath11k_dp_srng_cleanup(ab, &dp->tcl_cmd_ring);
+ ath11k_dp_srng_cleanup(ab, &dp->tcl_status_ring);
+ for (i = 0; i < ab->hw_params.max_tx_ring; i++) {
+ ath11k_dp_srng_cleanup(ab, &dp->tx_ring[i].tcl_data_ring);
+ ath11k_dp_srng_cleanup(ab, &dp->tx_ring[i].tcl_comp_ring);
+ }
+ ath11k_dp_srng_cleanup(ab, &dp->reo_reinject_ring);
+ ath11k_dp_srng_cleanup(ab, &dp->rx_rel_ring);
+ ath11k_dp_srng_cleanup(ab, &dp->reo_except_ring);
+ ath11k_dp_srng_cleanup(ab, &dp->reo_cmd_ring);
+ ath11k_dp_srng_cleanup(ab, &dp->reo_status_ring);
+}
+
+static int ath11k_dp_srng_common_setup(struct ath11k_base *ab)
+{
+ struct ath11k_dp *dp = &ab->dp;
+ struct hal_srng *srng;
+ int i, ret;
+ u8 tcl_num, wbm_num;
+
+ ret = ath11k_dp_srng_setup(ab, &dp->wbm_desc_rel_ring,
+ HAL_SW2WBM_RELEASE, 0, 0,
+ DP_WBM_RELEASE_RING_SIZE);
+ if (ret) {
+ ath11k_warn(ab, "failed to set up wbm2sw_release ring :%d\n",
+ ret);
+ goto err;
+ }
+
+ ret = ath11k_dp_srng_setup(ab, &dp->tcl_cmd_ring, HAL_TCL_CMD, 0, 0,
+ DP_TCL_CMD_RING_SIZE);
+ if (ret) {
+ ath11k_warn(ab, "failed to set up tcl_cmd ring :%d\n", ret);
+ goto err;
+ }
+
+ ret = ath11k_dp_srng_setup(ab, &dp->tcl_status_ring, HAL_TCL_STATUS,
+ 0, 0, DP_TCL_STATUS_RING_SIZE);
+ if (ret) {
+ ath11k_warn(ab, "failed to set up tcl_status ring :%d\n", ret);
+ goto err;
+ }
+
+ for (i = 0; i < ab->hw_params.max_tx_ring; i++) {
+ tcl_num = ab->hw_params.hal_params->tcl2wbm_rbm_map[i].tcl_ring_num;
+ wbm_num = ab->hw_params.hal_params->tcl2wbm_rbm_map[i].wbm_ring_num;
+
+ ret = ath11k_dp_srng_setup(ab, &dp->tx_ring[i].tcl_data_ring,
+ HAL_TCL_DATA, tcl_num, 0,
+ ab->hw_params.tx_ring_size);
+ if (ret) {
+ ath11k_warn(ab, "failed to set up tcl_data ring (%d) :%d\n",
+ i, ret);
+ goto err;
+ }
+
+ ret = ath11k_dp_srng_setup(ab, &dp->tx_ring[i].tcl_comp_ring,
+ HAL_WBM2SW_RELEASE, wbm_num, 0,
+ DP_TX_COMP_RING_SIZE);
+ if (ret) {
+ ath11k_warn(ab, "failed to set up tcl_comp ring (%d) :%d\n",
+ i, ret);
+ goto err;
+ }
+
+ srng = &ab->hal.srng_list[dp->tx_ring[i].tcl_data_ring.ring_id];
+ ath11k_hal_tx_init_data_ring(ab, srng);
+
+ ath11k_dp_shadow_init_timer(ab, &dp->tx_ring_timer[i],
+ ATH11K_SHADOW_DP_TIMER_INTERVAL,
+ dp->tx_ring[i].tcl_data_ring.ring_id);
+ }
+
+ ret = ath11k_dp_srng_setup(ab, &dp->reo_reinject_ring, HAL_REO_REINJECT,
+ 0, 0, DP_REO_REINJECT_RING_SIZE);
+ if (ret) {
+ ath11k_warn(ab, "failed to set up reo_reinject ring :%d\n",
+ ret);
+ goto err;
+ }
+
+ ret = ath11k_dp_srng_setup(ab, &dp->rx_rel_ring, HAL_WBM2SW_RELEASE,
+ DP_RX_RELEASE_RING_NUM, 0, DP_RX_RELEASE_RING_SIZE);
+ if (ret) {
+ ath11k_warn(ab, "failed to set up rx_rel ring :%d\n", ret);
+ goto err;
+ }
+
+ ret = ath11k_dp_srng_setup(ab, &dp->reo_except_ring, HAL_REO_EXCEPTION,
+ 0, 0, DP_REO_EXCEPTION_RING_SIZE);
+ if (ret) {
+ ath11k_warn(ab, "failed to set up reo_exception ring :%d\n",
+ ret);
+ goto err;
+ }
+
+ ret = ath11k_dp_srng_setup(ab, &dp->reo_cmd_ring, HAL_REO_CMD,
+ 0, 0, DP_REO_CMD_RING_SIZE);
+ if (ret) {
+ ath11k_warn(ab, "failed to set up reo_cmd ring :%d\n", ret);
+ goto err;
+ }
+
+ srng = &ab->hal.srng_list[dp->reo_cmd_ring.ring_id];
+ ath11k_hal_reo_init_cmd_ring(ab, srng);
+
+ ath11k_dp_shadow_init_timer(ab, &dp->reo_cmd_timer,
+ ATH11K_SHADOW_CTRL_TIMER_INTERVAL,
+ dp->reo_cmd_ring.ring_id);
+
+ ret = ath11k_dp_srng_setup(ab, &dp->reo_status_ring, HAL_REO_STATUS,
+ 0, 0, DP_REO_STATUS_RING_SIZE);
+ if (ret) {
+ ath11k_warn(ab, "failed to set up reo_status ring :%d\n", ret);
+ goto err;
+ }
+
+ /* When hash based routing of rx packets is enabled, 32 entries are
+ * configured to map the hash values to the rings.
+ */
+ ab->hw_params.hw_ops->reo_setup(ab);
+
+ return 0;
+
+err:
+ ath11k_dp_srng_common_cleanup(ab);
+
+ return ret;
+}
+
+static void ath11k_dp_scatter_idle_link_desc_cleanup(struct ath11k_base *ab)
+{
+ struct ath11k_dp *dp = &ab->dp;
+ struct hal_wbm_idle_scatter_list *slist = dp->scatter_list;
+ int i;
+
+ for (i = 0; i < DP_IDLE_SCATTER_BUFS_MAX; i++) {
+ if (!slist[i].vaddr)
+ continue;
+
+ dma_free_coherent(ab->dev, HAL_WBM_IDLE_SCATTER_BUF_SIZE_MAX,
+ slist[i].vaddr, slist[i].paddr);
+ slist[i].vaddr = NULL;
+ }
+}
+
+static int ath11k_dp_scatter_idle_link_desc_setup(struct ath11k_base *ab,
+ int size,
+ u32 n_link_desc_bank,
+ u32 n_link_desc,
+ u32 last_bank_sz)
+{
+ struct ath11k_dp *dp = &ab->dp;
+ struct dp_link_desc_bank *link_desc_banks = dp->link_desc_banks;
+ struct hal_wbm_idle_scatter_list *slist = dp->scatter_list;
+ u32 n_entries_per_buf;
+ int num_scatter_buf, scatter_idx;
+ struct hal_wbm_link_desc *scatter_buf;
+ int align_bytes, n_entries;
+ dma_addr_t paddr;
+ int rem_entries;
+ int i;
+ int ret = 0;
+ u32 end_offset;
+
+ n_entries_per_buf = HAL_WBM_IDLE_SCATTER_BUF_SIZE /
+ ath11k_hal_srng_get_entrysize(ab, HAL_WBM_IDLE_LINK);
+ num_scatter_buf = DIV_ROUND_UP(size, HAL_WBM_IDLE_SCATTER_BUF_SIZE);
+
+ if (num_scatter_buf > DP_IDLE_SCATTER_BUFS_MAX)
+ return -EINVAL;
+
+ for (i = 0; i < num_scatter_buf; i++) {
+ slist[i].vaddr = dma_alloc_coherent(ab->dev,
+ HAL_WBM_IDLE_SCATTER_BUF_SIZE_MAX,
+ &slist[i].paddr, GFP_KERNEL);
+ if (!slist[i].vaddr) {
+ ret = -ENOMEM;
+ goto err;
+ }
+ }
+
+ scatter_idx = 0;
+ scatter_buf = slist[scatter_idx].vaddr;
+ rem_entries = n_entries_per_buf;
+
+ for (i = 0; i < n_link_desc_bank; i++) {
+ align_bytes = link_desc_banks[i].vaddr -
+ link_desc_banks[i].vaddr_unaligned;
+ n_entries = (DP_LINK_DESC_ALLOC_SIZE_THRESH - align_bytes) /
+ HAL_LINK_DESC_SIZE;
+ paddr = link_desc_banks[i].paddr;
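+ /* spill this bank's entries across the scatter buffers, moving
+ * to the next scatter buffer once the current one is full
+ */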
+ while (n_entries) {
+ ath11k_hal_set_link_desc_addr(scatter_buf, i, paddr);
+ n_entries--;
+ paddr += HAL_LINK_DESC_SIZE;
+ if (rem_entries) {
+ rem_entries--;
+ scatter_buf++;
+ continue;
+ }
+
+ rem_entries = n_entries_per_buf;
+ scatter_idx++;
+ scatter_buf = slist[scatter_idx].vaddr;
+ }
+ }
+
+ end_offset = (scatter_buf - slist[scatter_idx].vaddr) *
+ sizeof(struct hal_wbm_link_desc);
+ ath11k_hal_setup_link_idle_list(ab, slist, num_scatter_buf,
+ n_link_desc, end_offset);
+
+ return 0;
+
+err:
+ ath11k_dp_scatter_idle_link_desc_cleanup(ab);
+
+ return ret;
+}
+
+static void
+ath11k_dp_link_desc_bank_free(struct ath11k_base *ab,
+ struct dp_link_desc_bank *link_desc_banks)
+{
+ int i;
+
+ for (i = 0; i < DP_LINK_DESC_BANKS_MAX; i++) {
+ if (link_desc_banks[i].vaddr_unaligned) {
+ dma_free_coherent(ab->dev,
+ link_desc_banks[i].size,
+ link_desc_banks[i].vaddr_unaligned,
+ link_desc_banks[i].paddr_unaligned);
+ link_desc_banks[i].vaddr_unaligned = NULL;
+ }
+ }
+}
+
+static int ath11k_dp_link_desc_bank_alloc(struct ath11k_base *ab,
+ struct dp_link_desc_bank *desc_bank,
+ int n_link_desc_bank,
+ int last_bank_sz)
+{
+ struct ath11k_dp *dp = &ab->dp;
+ int i;
+ int ret = 0;
+ int desc_sz = DP_LINK_DESC_ALLOC_SIZE_THRESH;
+
+ for (i = 0; i < n_link_desc_bank; i++) {
+ if (i == (n_link_desc_bank - 1) && last_bank_sz)
+ desc_sz = last_bank_sz;
+
+ desc_bank[i].vaddr_unaligned =
+ dma_alloc_coherent(ab->dev, desc_sz,
+ &desc_bank[i].paddr_unaligned,
+ GFP_KERNEL);
+ if (!desc_bank[i].vaddr_unaligned) {
+ ret = -ENOMEM;
+ goto err;
+ }
+
+ desc_bank[i].vaddr = PTR_ALIGN(desc_bank[i].vaddr_unaligned,
+ HAL_LINK_DESC_ALIGN);
+ desc_bank[i].paddr = desc_bank[i].paddr_unaligned +
+ ((unsigned long)desc_bank[i].vaddr -
+ (unsigned long)desc_bank[i].vaddr_unaligned);
+ desc_bank[i].size = desc_sz;
+ }
+
+ return 0;
+
+err:
+ ath11k_dp_link_desc_bank_free(ab, dp->link_desc_banks);
+
+ return ret;
+}
+
+void ath11k_dp_link_desc_cleanup(struct ath11k_base *ab,
+ struct dp_link_desc_bank *desc_bank,
+ u32 ring_type, struct dp_srng *ring)
+{
+ ath11k_dp_link_desc_bank_free(ab, desc_bank);
+
+ if (ring_type != HAL_RXDMA_MONITOR_DESC) {
+ ath11k_dp_srng_cleanup(ab, ring);
+ ath11k_dp_scatter_idle_link_desc_cleanup(ab);
+ }
+}
+
+static int ath11k_wbm_idle_ring_setup(struct ath11k_base *ab, u32 *n_link_desc)
+{
+ struct ath11k_dp *dp = &ab->dp;
+ u32 n_mpdu_link_desc, n_mpdu_queue_desc;
+ u32 n_tx_msdu_link_desc, n_rx_msdu_link_desc;
+ int ret = 0;
+
+ n_mpdu_link_desc = (DP_NUM_TIDS_MAX * DP_AVG_MPDUS_PER_TID_MAX) /
+ HAL_NUM_MPDUS_PER_LINK_DESC;
+
+ n_mpdu_queue_desc = n_mpdu_link_desc /
+ HAL_NUM_MPDU_LINKS_PER_QUEUE_DESC;
+
+ n_tx_msdu_link_desc = (DP_NUM_TIDS_MAX * DP_AVG_FLOWS_PER_TID *
+ DP_AVG_MSDUS_PER_FLOW) /
+ HAL_NUM_TX_MSDUS_PER_LINK_DESC;
+
+ n_rx_msdu_link_desc = (DP_NUM_TIDS_MAX * DP_AVG_MPDUS_PER_TID_MAX *
+ DP_AVG_MSDUS_PER_MPDU) /
+ HAL_NUM_RX_MSDUS_PER_LINK_DESC;
+
+ *n_link_desc = n_mpdu_link_desc + n_mpdu_queue_desc +
+ n_tx_msdu_link_desc + n_rx_msdu_link_desc;
+
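+ /* round the total descriptor count up to the next power of two */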
+ if (*n_link_desc & (*n_link_desc - 1))
+ *n_link_desc = 1 << fls(*n_link_desc);
+
+ ret = ath11k_dp_srng_setup(ab, &dp->wbm_idle_ring,
+ HAL_WBM_IDLE_LINK, 0, 0, *n_link_desc);
+ if (ret) {
+ ath11k_warn(ab, "failed to setup wbm_idle_ring: %d\n", ret);
+ return ret;
+ }
+ return ret;
+}
+
+int ath11k_dp_link_desc_setup(struct ath11k_base *ab,
+ struct dp_link_desc_bank *link_desc_banks,
+ u32 ring_type, struct hal_srng *srng,
+ u32 n_link_desc)
+{
+ u32 tot_mem_sz;
+ u32 n_link_desc_bank, last_bank_sz;
+ u32 entry_sz, align_bytes, n_entries;
+ u32 paddr;
+ u32 *desc;
+ int i, ret;
+
+ tot_mem_sz = n_link_desc * HAL_LINK_DESC_SIZE;
+ tot_mem_sz += HAL_LINK_DESC_ALIGN;
+
+ if (tot_mem_sz <= DP_LINK_DESC_ALLOC_SIZE_THRESH) {
+ n_link_desc_bank = 1;
+ last_bank_sz = tot_mem_sz;
+ } else {
+ n_link_desc_bank = tot_mem_sz /
+ (DP_LINK_DESC_ALLOC_SIZE_THRESH -
+ HAL_LINK_DESC_ALIGN);
+ last_bank_sz = tot_mem_sz %
+ (DP_LINK_DESC_ALLOC_SIZE_THRESH -
+ HAL_LINK_DESC_ALIGN);
+
+ if (last_bank_sz)
+ n_link_desc_bank += 1;
+ }
+
+ if (n_link_desc_bank > DP_LINK_DESC_BANKS_MAX)
+ return -EINVAL;
+
+ ret = ath11k_dp_link_desc_bank_alloc(ab, link_desc_banks,
+ n_link_desc_bank, last_bank_sz);
+ if (ret)
+ return ret;
+
+ /* Setup link desc idle list for HW internal usage */
+ entry_sz = ath11k_hal_srng_get_entrysize(ab, ring_type);
+ tot_mem_sz = entry_sz * n_link_desc;
+
+ /* Setup scatter desc list when the total memory requirement
+ * exceeds DP_LINK_DESC_ALLOC_SIZE_THRESH
+ */
+ if (tot_mem_sz > DP_LINK_DESC_ALLOC_SIZE_THRESH &&
+ ring_type != HAL_RXDMA_MONITOR_DESC) {
+ ret = ath11k_dp_scatter_idle_link_desc_setup(ab, tot_mem_sz,
+ n_link_desc_bank,
+ n_link_desc,
+ last_bank_sz);
+ if (ret) {
+ ath11k_warn(ab, "failed to setup scatting idle list descriptor :%d\n",
+ ret);
+ goto fail_desc_bank_free;
+ }
+
+ return 0;
+ }
+
+ spin_lock_bh(&srng->lock);
+
+ ath11k_hal_srng_access_begin(ab, srng);
+
+ for (i = 0; i < n_link_desc_bank; i++) {
+ align_bytes = link_desc_banks[i].vaddr -
+ link_desc_banks[i].vaddr_unaligned;
+ n_entries = (link_desc_banks[i].size - align_bytes) /
+ HAL_LINK_DESC_SIZE;
+ paddr = link_desc_banks[i].paddr;
+ while (n_entries &&
+ (desc = ath11k_hal_srng_src_get_next_entry(ab, srng))) {
+ ath11k_hal_set_link_desc_addr((struct hal_wbm_link_desc *)desc,
+ i, paddr);
+ n_entries--;
+ paddr += HAL_LINK_DESC_SIZE;
+ }
+ }
+
+ ath11k_hal_srng_access_end(ab, srng);
+
+ spin_unlock_bh(&srng->lock);
+
+ return 0;
+
+fail_desc_bank_free:
+ ath11k_dp_link_desc_bank_free(ab, link_desc_banks);
+
+ return ret;
+}
+
+int ath11k_dp_service_srng(struct ath11k_base *ab,
+ struct ath11k_ext_irq_grp *irq_grp,
+ int budget)
+{
+ struct napi_struct *napi = &irq_grp->napi;
+ const struct ath11k_hw_hal_params *hal_params;
+ int grp_id = irq_grp->grp_id;
+ int work_done = 0;
+ int i, j;
+ int tot_work_done = 0;
+
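+ /* the rx handlers below consume the NAPI budget; bail out early
+ * once it is exhausted so the poll loop can reschedule
+ */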
+ for (i = 0; i < ab->hw_params.max_tx_ring; i++) {
+ if (BIT(ab->hw_params.hal_params->tcl2wbm_rbm_map[i].wbm_ring_num) &
+ ab->hw_params.ring_mask->tx[grp_id])
+ ath11k_dp_tx_completion_handler(ab, i);
+ }
+
+ if (ab->hw_params.ring_mask->rx_err[grp_id]) {
+ work_done = ath11k_dp_process_rx_err(ab, napi, budget);
+ budget -= work_done;
+ tot_work_done += work_done;
+ if (budget <= 0)
+ goto done;
+ }
+
+ if (ab->hw_params.ring_mask->rx_wbm_rel[grp_id]) {
+ work_done = ath11k_dp_rx_process_wbm_err(ab,
+ napi,
+ budget);
+ budget -= work_done;
+ tot_work_done += work_done;
+
+ if (budget <= 0)
+ goto done;
+ }
+
+ if (ab->hw_params.ring_mask->rx[grp_id]) {
+ i = fls(ab->hw_params.ring_mask->rx[grp_id]) - 1;
+ work_done = ath11k_dp_process_rx(ab, i, napi,
+ budget);
+ budget -= work_done;
+ tot_work_done += work_done;
+ if (budget <= 0)
+ goto done;
+ }
+
+ if (ab->hw_params.ring_mask->rx_mon_status[grp_id]) {
+ for (i = 0; i < ab->num_radios; i++) {
+ for (j = 0; j < ab->hw_params.num_rxmda_per_pdev; j++) {
+ int id = i * ab->hw_params.num_rxmda_per_pdev + j;
+
+ if (ab->hw_params.ring_mask->rx_mon_status[grp_id] &
+ BIT(id)) {
+ work_done =
+ ath11k_dp_rx_process_mon_rings(ab,
+ id,
+ napi, budget);
+ budget -= work_done;
+ tot_work_done += work_done;
+
+ if (budget <= 0)
+ goto done;
+ }
+ }
+ }
+ }
+
+ if (ab->hw_params.ring_mask->reo_status[grp_id])
+ ath11k_dp_process_reo_status(ab);
+
+ for (i = 0; i < ab->num_radios; i++) {
+ for (j = 0; j < ab->hw_params.num_rxmda_per_pdev; j++) {
+ int id = i * ab->hw_params.num_rxmda_per_pdev + j;
+
+ if (ab->hw_params.ring_mask->rxdma2host[grp_id] & BIT(id)) {
+ work_done = ath11k_dp_process_rxdma_err(ab, id, budget);
+ budget -= work_done;
+ tot_work_done += work_done;
+ }
+
+ if (budget <= 0)
+ goto done;
+
+ if (ab->hw_params.ring_mask->host2rxdma[grp_id] & BIT(id)) {
+ struct ath11k *ar = ath11k_ab_to_ar(ab, id);
+ struct ath11k_pdev_dp *dp = &ar->dp;
+ struct dp_rxdma_ring *rx_ring = &dp->rx_refill_buf_ring;
+
+ hal_params = ab->hw_params.hal_params;
+ ath11k_dp_rxbufs_replenish(ab, id, rx_ring, 0,
+ hal_params->rx_buf_rbm);
+ }
+ }
+ }
+ /* TODO: Implement handler for other interrupts */
+
+done:
+ return tot_work_done;
+}
+EXPORT_SYMBOL(ath11k_dp_service_srng);
+
+void ath11k_dp_pdev_free(struct ath11k_base *ab)
+{
+ struct ath11k *ar;
+ int i;
+
+ del_timer_sync(&ab->mon_reap_timer);
+
+ for (i = 0; i < ab->num_radios; i++) {
+ ar = ab->pdevs[i].ar;
+ ath11k_dp_rx_pdev_free(ab, i);
+ ath11k_debugfs_unregister(ar);
+ ath11k_dp_rx_pdev_mon_detach(ar);
+ }
+}
+
+void ath11k_dp_pdev_pre_alloc(struct ath11k_base *ab)
+{
+ struct ath11k *ar;
+ struct ath11k_pdev_dp *dp;
+ int i;
+ int j;
+
+ for (i = 0; i < ab->num_radios; i++) {
+ ar = ab->pdevs[i].ar;
+ dp = &ar->dp;
+ dp->mac_id = i;
+ idr_init(&dp->rx_refill_buf_ring.bufs_idr);
+ spin_lock_init(&dp->rx_refill_buf_ring.idr_lock);
+ atomic_set(&dp->num_tx_pending, 0);
+ init_waitqueue_head(&dp->tx_empty_waitq);
+ for (j = 0; j < ab->hw_params.num_rxmda_per_pdev; j++) {
+ idr_init(&dp->rx_mon_status_refill_ring[j].bufs_idr);
+ spin_lock_init(&dp->rx_mon_status_refill_ring[j].idr_lock);
+ }
+ idr_init(&dp->rxdma_mon_buf_ring.bufs_idr);
+ spin_lock_init(&dp->rxdma_mon_buf_ring.idr_lock);
+ }
+}
+
+int ath11k_dp_pdev_alloc(struct ath11k_base *ab)
+{
+ struct ath11k *ar;
+ int ret;
+ int i;
+
+ /* TODO: Per-pdev rx ring unlike tx ring which is mapped to different ACs */
+ for (i = 0; i < ab->num_radios; i++) {
+ ar = ab->pdevs[i].ar;
+ ret = ath11k_dp_rx_pdev_alloc(ab, i);
+ if (ret) {
+ ath11k_warn(ab, "failed to allocate pdev rx for pdev_id :%d\n",
+ i);
+ goto err;
+ }
+ ret = ath11k_dp_rx_pdev_mon_attach(ar);
+ if (ret) {
+ ath11k_warn(ab, "failed to initialize mon pdev %d\n",
+ i);
+ goto err;
+ }
+ }
+
+ return 0;
+
+err:
+ ath11k_dp_pdev_free(ab);
+
+ return ret;
+}
+
+int ath11k_dp_htt_connect(struct ath11k_dp *dp)
+{
+ struct ath11k_htc_svc_conn_req conn_req;
+ struct ath11k_htc_svc_conn_resp conn_resp;
+ int status;
+
+ memset(&conn_req, 0, sizeof(conn_req));
+ memset(&conn_resp, 0, sizeof(conn_resp));
+
+ conn_req.ep_ops.ep_tx_complete = ath11k_dp_htt_htc_tx_complete;
+ conn_req.ep_ops.ep_rx_complete = ath11k_dp_htt_htc_t2h_msg_handler;
+
+ /* connect to control service */
+ conn_req.service_id = ATH11K_HTC_SVC_ID_HTT_DATA_MSG;
+
+ status = ath11k_htc_connect_service(&dp->ab->htc, &conn_req,
+ &conn_resp);
+
+ if (status)
+ return status;
+
+ dp->eid = conn_resp.eid;
+
+ return 0;
+}
+
+static void ath11k_dp_update_vdev_search(struct ath11k_vif *arvif)
+{
+ /* When v2_map_support is true: for STA mode, enable address
+ * search index; TCL uses the ast_hash value in the descriptor.
+ * When v2_map_support is false: for STA mode, don't enable
+ * address search index.
+ */
+ switch (arvif->vdev_type) {
+ case WMI_VDEV_TYPE_STA:
+ if (arvif->ar->ab->hw_params.htt_peer_map_v2) {
+ arvif->hal_addr_search_flags = HAL_TX_ADDRX_EN;
+ arvif->search_type = HAL_TX_ADDR_SEARCH_INDEX;
+ } else {
+ arvif->hal_addr_search_flags = HAL_TX_ADDRY_EN;
+ arvif->search_type = HAL_TX_ADDR_SEARCH_DEFAULT;
+ }
+ break;
+ case WMI_VDEV_TYPE_AP:
+ case WMI_VDEV_TYPE_IBSS:
+ arvif->hal_addr_search_flags = HAL_TX_ADDRX_EN;
+ arvif->search_type = HAL_TX_ADDR_SEARCH_DEFAULT;
+ break;
+ case WMI_VDEV_TYPE_MONITOR:
+ default:
+ return;
+ }
+}
+
+void ath11k_dp_vdev_tx_attach(struct ath11k *ar, struct ath11k_vif *arvif)
+{
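+ /* pack the metadata type, vdev id and pdev id into the TCL
+ * metadata carried by every tx descriptor for this vif
+ */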
+ arvif->tcl_metadata |= FIELD_PREP(HTT_TCL_META_DATA_TYPE, 1) |
+ FIELD_PREP(HTT_TCL_META_DATA_VDEV_ID,
+ arvif->vdev_id) |
+ FIELD_PREP(HTT_TCL_META_DATA_PDEV_ID,
+ ar->pdev->pdev_id);
+
+ /* set HTT extension valid bit to 0 by default */
+ arvif->tcl_metadata &= ~HTT_TCL_META_DATA_VALID_HTT;
+
+ ath11k_dp_update_vdev_search(arvif);
+}
+
+static int ath11k_dp_tx_pending_cleanup(int buf_id, void *skb, void *ctx)
+{
+ struct ath11k_base *ab = (struct ath11k_base *)ctx;
+ struct sk_buff *msdu = skb;
+
+ dma_unmap_single(ab->dev, ATH11K_SKB_CB(msdu)->paddr, msdu->len,
+ DMA_TO_DEVICE);
+
+ dev_kfree_skb_any(msdu);
+
+ return 0;
+}
+
+void ath11k_dp_free(struct ath11k_base *ab)
+{
+ struct ath11k_dp *dp = &ab->dp;
+ int i;
+
+ ath11k_dp_link_desc_cleanup(ab, dp->link_desc_banks,
+ HAL_WBM_IDLE_LINK, &dp->wbm_idle_ring);
+
+ ath11k_dp_srng_common_cleanup(ab);
+
+ ath11k_dp_reo_cmd_list_cleanup(ab);
+
+ for (i = 0; i < ab->hw_params.max_tx_ring; i++) {
+ spin_lock_bh(&dp->tx_ring[i].tx_idr_lock);
+ idr_for_each(&dp->tx_ring[i].txbuf_idr,
+ ath11k_dp_tx_pending_cleanup, ab);
+ idr_destroy(&dp->tx_ring[i].txbuf_idr);
+ spin_unlock_bh(&dp->tx_ring[i].tx_idr_lock);
+ kfree(dp->tx_ring[i].tx_status);
+ }
+
+ /* Deinit any SOC level resource */
+}
+
+int ath11k_dp_alloc(struct ath11k_base *ab)
+{
+ struct ath11k_dp *dp = &ab->dp;
+ struct hal_srng *srng = NULL;
+ size_t size = 0;
+ u32 n_link_desc = 0;
+ int ret;
+ int i;
+
+ dp->ab = ab;
+
+ INIT_LIST_HEAD(&dp->reo_cmd_list);
+ INIT_LIST_HEAD(&dp->reo_cmd_cache_flush_list);
+ INIT_LIST_HEAD(&dp->dp_full_mon_mpdu_list);
+ spin_lock_init(&dp->reo_cmd_lock);
+
+ dp->reo_cmd_cache_flush_count = 0;
+
+ ret = ath11k_wbm_idle_ring_setup(ab, &n_link_desc);
+ if (ret) {
+ ath11k_warn(ab, "failed to setup wbm_idle_ring: %d\n", ret);
+ return ret;
+ }
+
+ srng = &ab->hal.srng_list[dp->wbm_idle_ring.ring_id];
+
+ ret = ath11k_dp_link_desc_setup(ab, dp->link_desc_banks,
+ HAL_WBM_IDLE_LINK, srng, n_link_desc);
+ if (ret) {
+ ath11k_warn(ab, "failed to setup link desc: %d\n", ret);
+ return ret;
+ }
+
+ ret = ath11k_dp_srng_common_setup(ab);
+ if (ret)
+ goto fail_link_desc_cleanup;
+
+ size = sizeof(struct hal_wbm_release_ring) * DP_TX_COMP_RING_SIZE;
+
+ for (i = 0; i < ab->hw_params.max_tx_ring; i++) {
+ idr_init(&dp->tx_ring[i].txbuf_idr);
+ spin_lock_init(&dp->tx_ring[i].tx_idr_lock);
+ dp->tx_ring[i].tcl_data_ring_id = i;
+
+ dp->tx_ring[i].tx_status_head = 0;
+ dp->tx_ring[i].tx_status_tail = DP_TX_COMP_RING_SIZE - 1;
+ dp->tx_ring[i].tx_status = kmalloc(size, GFP_KERNEL);
+ if (!dp->tx_ring[i].tx_status) {
+ ret = -ENOMEM;
+ goto fail_cmn_srng_cleanup;
+ }
+ }
+
+ for (i = 0; i < HAL_DSCP_TID_MAP_TBL_NUM_ENTRIES_MAX; i++)
+ ath11k_hal_tx_set_dscp_tid_map(ab, i);
+
+ /* Init any SOC level resource for DP */
+
+ return 0;
+
+fail_cmn_srng_cleanup:
+ ath11k_dp_srng_common_cleanup(ab);
+
+fail_link_desc_cleanup:
+ ath11k_dp_link_desc_cleanup(ab, dp->link_desc_banks,
+ HAL_WBM_IDLE_LINK, &dp->wbm_idle_ring);
+
+ return ret;
+}
+
+static void ath11k_dp_shadow_timer_handler(struct timer_list *t)
+{
+ struct ath11k_hp_update_timer *update_timer = from_timer(update_timer,
+ t, timer);
+ struct ath11k_base *ab = update_timer->ab;
+ struct hal_srng *srng = &ab->hal.srng_list[update_timer->ring_id];
+
+ spin_lock_bh(&srng->lock);
+
+ /* When the timer fires, the handler checks whether any new TX has
+ * happened. It updates the HP only when there were no TX operations
+ * during the timeout interval and then stops the timer; the timer is
+ * started again when TX happens.
+ */
+ if (update_timer->timer_tx_num != update_timer->tx_num) {
+ update_timer->timer_tx_num = update_timer->tx_num;
+ mod_timer(&update_timer->timer, jiffies +
+ msecs_to_jiffies(update_timer->interval));
+ } else {
+ update_timer->started = false;
+ ath11k_hal_srng_shadow_update_hp_tp(ab, srng);
+ }
+
+ spin_unlock_bh(&srng->lock);
+}
+
+void ath11k_dp_shadow_start_timer(struct ath11k_base *ab,
+ struct hal_srng *srng,
+ struct ath11k_hp_update_timer *update_timer)
+{
+ lockdep_assert_held(&srng->lock);
+
+ if (!ab->hw_params.supports_shadow_regs)
+ return;
+
+ update_timer->tx_num++;
+
+ if (update_timer->started)
+ return;
+
+ update_timer->started = true;
+ update_timer->timer_tx_num = update_timer->tx_num;
+ mod_timer(&update_timer->timer, jiffies +
+ msecs_to_jiffies(update_timer->interval));
+}
+
+void ath11k_dp_shadow_stop_timer(struct ath11k_base *ab,
+ struct ath11k_hp_update_timer *update_timer)
+{
+ if (!ab->hw_params.supports_shadow_regs)
+ return;
+
+ if (!update_timer->init)
+ return;
+
+ del_timer_sync(&update_timer->timer);
+}
+
+void ath11k_dp_shadow_init_timer(struct ath11k_base *ab,
+ struct ath11k_hp_update_timer *update_timer,
+ u32 interval, u32 ring_id)
+{
+ if (!ab->hw_params.supports_shadow_regs)
+ return;
+
+ update_timer->tx_num = 0;
+ update_timer->timer_tx_num = 0;
+ update_timer->ab = ab;
+ update_timer->ring_id = ring_id;
+ update_timer->interval = interval;
+ update_timer->init = true;
+ timer_setup(&update_timer->timer,
+ ath11k_dp_shadow_timer_handler, 0);
+}
diff --git a/drivers/net/wireless/ath/ath11k/dp.h b/drivers/net/wireless/ath/ath11k/dp.h
new file mode 100644
index 0000000000..15815af453
--- /dev/null
+++ b/drivers/net/wireless/ath/ath11k/dp.h
@@ -0,0 +1,1716 @@
+/* SPDX-License-Identifier: BSD-3-Clause-Clear */
+/*
+ * Copyright (c) 2018-2019 The Linux Foundation. All rights reserved.
+ * Copyright (c) 2022 Qualcomm Innovation Center, Inc. All rights reserved.
+ */
+
+#ifndef ATH11K_DP_H
+#define ATH11K_DP_H
+
+#include "hal_rx.h"
+
+#define MAX_RXDMA_PER_PDEV 2
+
+struct ath11k_base;
+struct ath11k_peer;
+struct ath11k_dp;
+struct ath11k_vif;
+struct hal_tcl_status_ring;
+struct ath11k_ext_irq_grp;
+
+struct dp_rx_tid {
+ u8 tid;
+ u32 *vaddr;
+ dma_addr_t paddr;
+ u32 size;
+ u32 ba_win_sz;
+ bool active;
+
+ /* Info related to rx fragments */
+ u32 cur_sn;
+ u16 last_frag_no;
+ u16 rx_frag_bitmap;
+
+ struct sk_buff_head rx_frags;
+ struct hal_reo_dest_ring *dst_ring_desc;
+
+ /* Timer info related to fragments */
+ struct timer_list frag_timer;
+ struct ath11k_base *ab;
+};
+
+#define DP_REO_DESC_FREE_THRESHOLD 64
+#define DP_REO_DESC_FREE_TIMEOUT_MS 1000
+#define DP_MON_PURGE_TIMEOUT_MS 100
+#define DP_MON_SERVICE_BUDGET 128
+
+struct dp_reo_cache_flush_elem {
+ struct list_head list;
+ struct dp_rx_tid data;
+ unsigned long ts;
+};
+
+struct dp_reo_cmd {
+ struct list_head list;
+ struct dp_rx_tid data;
+ int cmd_num;
+ void (*handler)(struct ath11k_dp *, void *,
+ enum hal_reo_cmd_status status);
+};
+
+struct dp_srng {
+ u32 *vaddr_unaligned;
+ u32 *vaddr;
+ dma_addr_t paddr_unaligned;
+ dma_addr_t paddr;
+ int size;
+ u32 ring_id;
+ u8 cached;
+};
+
+struct dp_rxdma_ring {
+ struct dp_srng refill_buf_ring;
+ struct idr bufs_idr;
+ /* Protects bufs_idr */
+ spinlock_t idr_lock;
+ int bufs_max;
+};
+
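+/* advance a tx completion ring index, wrapping at DP_TX_COMP_RING_SIZE */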
+#define ATH11K_TX_COMPL_NEXT(x) (((x) + 1) % DP_TX_COMP_RING_SIZE)
+
+struct dp_tx_ring {
+ u8 tcl_data_ring_id;
+ struct dp_srng tcl_data_ring;
+ struct dp_srng tcl_comp_ring;
+ struct idr txbuf_idr;
+ /* Protects txbuf_idr and num_pending */
+ spinlock_t tx_idr_lock;
+ struct hal_wbm_release_ring *tx_status;
+ int tx_status_head;
+ int tx_status_tail;
+};
+
+enum dp_mon_status_buf_state {
+ /* PPDU id matches in dst ring and status ring */
+ DP_MON_STATUS_MATCH,
+ /* status ring dma is not done */
+ DP_MON_STATUS_NO_DMA,
+ /* status ring is lagging, reap status ring */
+ DP_MON_STATUS_LAG,
+ /* status ring is leading, reap dst ring and drop */
+ DP_MON_STATUS_LEAD,
+ /* replenish monitor status ring */
+ DP_MON_STATUS_REPLINISH,
+};
+
+struct ath11k_pdev_mon_stats {
+ u32 status_ppdu_state;
+ u32 status_ppdu_start;
+ u32 status_ppdu_end;
+ u32 status_ppdu_compl;
+ u32 status_ppdu_start_mis;
+ u32 status_ppdu_end_mis;
+ u32 status_ppdu_done;
+ u32 dest_ppdu_done;
+ u32 dest_mpdu_done;
+ u32 dest_mpdu_drop;
+ u32 dup_mon_linkdesc_cnt;
+ u32 dup_mon_buf_cnt;
+ u32 dest_mon_stuck;
+ u32 dest_mon_not_reaped;
+};
+
+struct dp_full_mon_mpdu {
+ struct list_head list;
+ struct sk_buff *head;
+ struct sk_buff *tail;
+};
+
+struct dp_link_desc_bank {
+ void *vaddr_unaligned;
+ void *vaddr;
+ dma_addr_t paddr_unaligned;
+ dma_addr_t paddr;
+ u32 size;
+};
+
+/* Size to enforce scatter idle list mode */
+#define DP_LINK_DESC_ALLOC_SIZE_THRESH 0x200000
+#define DP_LINK_DESC_BANKS_MAX 8
+
+#define DP_RX_DESC_COOKIE_INDEX_MAX 0x3ffff
+#define DP_RX_DESC_COOKIE_POOL_ID_MAX 0x1c0000
+#define DP_RX_DESC_COOKIE_MAX \
+ (DP_RX_DESC_COOKIE_INDEX_MAX | DP_RX_DESC_COOKIE_POOL_ID_MAX)
+#define DP_NOT_PPDU_ID_WRAP_AROUND 20000
+
+enum ath11k_dp_ppdu_state {
+ DP_PPDU_STATUS_START,
+ DP_PPDU_STATUS_DONE,
+};
+
+struct ath11k_mon_data {
+ struct dp_link_desc_bank link_desc_banks[DP_LINK_DESC_BANKS_MAX];
+ struct hal_rx_mon_ppdu_info mon_ppdu_info;
+
+ u32 mon_ppdu_status;
+ u32 mon_last_buf_cookie;
+ u64 mon_last_linkdesc_paddr;
+ u16 chan_noise_floor;
+ bool hold_mon_dst_ring;
+ enum dp_mon_status_buf_state buf_state;
+ dma_addr_t mon_status_paddr;
+ struct dp_full_mon_mpdu *mon_mpdu;
+ struct hal_sw_mon_ring_entries sw_mon_entries;
+ struct ath11k_pdev_mon_stats rx_mon_stats;
+ /* lock for monitor data */
+ spinlock_t mon_lock;
+ struct sk_buff_head rx_status_q;
+};
+
+struct ath11k_pdev_dp {
+ u32 mac_id;
+ u32 mon_dest_ring_stuck_cnt;
+ atomic_t num_tx_pending;
+ wait_queue_head_t tx_empty_waitq;
+ struct dp_rxdma_ring rx_refill_buf_ring;
+ struct dp_srng rx_mac_buf_ring[MAX_RXDMA_PER_PDEV];
+ struct dp_srng rxdma_err_dst_ring[MAX_RXDMA_PER_PDEV];
+ struct dp_srng rxdma_mon_dst_ring;
+ struct dp_srng rxdma_mon_desc_ring;
+
+ struct dp_rxdma_ring rxdma_mon_buf_ring;
+ struct dp_rxdma_ring rx_mon_status_refill_ring[MAX_RXDMA_PER_PDEV];
+ struct ieee80211_rx_status rx_status;
+ struct ath11k_mon_data mon_data;
+};
+
+#define DP_NUM_CLIENTS_MAX 64
+#define DP_AVG_TIDS_PER_CLIENT 2
+#define DP_NUM_TIDS_MAX (DP_NUM_CLIENTS_MAX * DP_AVG_TIDS_PER_CLIENT)
+#define DP_AVG_MSDUS_PER_FLOW 128
+#define DP_AVG_FLOWS_PER_TID 2
+#define DP_AVG_MPDUS_PER_TID_MAX 128
+#define DP_AVG_MSDUS_PER_MPDU 4
+
+#define DP_RX_HASH_ENABLE 1 /* Enable hash based Rx steering */
+
+#define DP_BA_WIN_SZ_MAX 256
+
+#define DP_TCL_NUM_RING_MAX 3
+#define DP_TCL_NUM_RING_MAX_QCA6390 1
+
+#define DP_IDLE_SCATTER_BUFS_MAX 16
+
+#define DP_WBM_RELEASE_RING_SIZE 64
+#define DP_TCL_DATA_RING_SIZE 512
+#define DP_TCL_DATA_RING_SIZE_WCN6750 2048
+#define DP_TX_COMP_RING_SIZE 32768
+#define DP_TX_IDR_SIZE DP_TX_COMP_RING_SIZE
+#define DP_TCL_CMD_RING_SIZE 32
+#define DP_TCL_STATUS_RING_SIZE 32
+#define DP_REO_DST_RING_MAX 4
+#define DP_REO_DST_RING_SIZE 2048
+#define DP_REO_REINJECT_RING_SIZE 32
+#define DP_RX_RELEASE_RING_SIZE 1024
+#define DP_REO_EXCEPTION_RING_SIZE 128
+#define DP_REO_CMD_RING_SIZE 256
+#define DP_REO_STATUS_RING_SIZE 2048
+#define DP_RXDMA_BUF_RING_SIZE 4096
+#define DP_RXDMA_REFILL_RING_SIZE 2048
+#define DP_RXDMA_ERR_DST_RING_SIZE 1024
+#define DP_RXDMA_MON_STATUS_RING_SIZE 1024
+#define DP_RXDMA_MONITOR_BUF_RING_SIZE 4096
+#define DP_RXDMA_MONITOR_DST_RING_SIZE 2048
+#define DP_RXDMA_MONITOR_DESC_RING_SIZE 4096
+
+#define DP_RX_RELEASE_RING_NUM 3
+
+#define DP_RX_BUFFER_SIZE 2048
+#define DP_RX_BUFFER_SIZE_LITE 1024
+#define DP_RX_BUFFER_ALIGN_SIZE 128
+
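+/* rx buffer cookies carry the buffer id and the owning pdev id */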
+#define DP_RXDMA_BUF_COOKIE_BUF_ID GENMASK(17, 0)
+#define DP_RXDMA_BUF_COOKIE_PDEV_ID GENMASK(20, 18)
+
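+/* firmware mac ids are 1-based while host mac ids are 0-based */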
+#define DP_HW2SW_MACID(mac_id) ((mac_id) ? ((mac_id) - 1) : 0)
+#define DP_SW2HW_MACID(mac_id) ((mac_id) + 1)
+
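+/* layout of the host tx descriptor id carried through TCL and WBM */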
+#define DP_TX_DESC_ID_MAC_ID GENMASK(1, 0)
+#define DP_TX_DESC_ID_MSDU_ID GENMASK(18, 2)
+#define DP_TX_DESC_ID_POOL_ID GENMASK(20, 19)
+
+#define ATH11K_SHADOW_DP_TIMER_INTERVAL 20
+#define ATH11K_SHADOW_CTRL_TIMER_INTERVAL 10
+
+struct ath11k_hp_update_timer {
+ struct timer_list timer;
+ bool started;
+ bool init;
+ u32 tx_num;
+ u32 timer_tx_num;
+ u32 ring_id;
+ u32 interval;
+ struct ath11k_base *ab;
+};
+
+struct ath11k_dp {
+ struct ath11k_base *ab;
+ enum ath11k_htc_ep_id eid;
+ struct completion htt_tgt_version_received;
+ u8 htt_tgt_ver_major;
+ u8 htt_tgt_ver_minor;
+ struct dp_link_desc_bank link_desc_banks[DP_LINK_DESC_BANKS_MAX];
+ struct dp_srng wbm_idle_ring;
+ struct dp_srng wbm_desc_rel_ring;
+ struct dp_srng tcl_cmd_ring;
+ struct dp_srng tcl_status_ring;
+ struct dp_srng reo_reinject_ring;
+ struct dp_srng rx_rel_ring;
+ struct dp_srng reo_except_ring;
+ struct dp_srng reo_cmd_ring;
+ struct dp_srng reo_status_ring;
+ struct dp_srng reo_dst_ring[DP_REO_DST_RING_MAX];
+ struct dp_tx_ring tx_ring[DP_TCL_NUM_RING_MAX];
+ struct hal_wbm_idle_scatter_list scatter_list[DP_IDLE_SCATTER_BUFS_MAX];
+ struct list_head reo_cmd_list;
+ struct list_head reo_cmd_cache_flush_list;
+ struct list_head dp_full_mon_mpdu_list;
+ u32 reo_cmd_cache_flush_count;
+ /* protects access to the fields below:
+ * - reo_cmd_list
+ * - reo_cmd_cache_flush_list
+ * - reo_cmd_cache_flush_count
+ */
+ spinlock_t reo_cmd_lock;
+ struct ath11k_hp_update_timer reo_cmd_timer;
+ struct ath11k_hp_update_timer tx_ring_timer[DP_TCL_NUM_RING_MAX];
+};
+
+/* HTT definitions */
+
+#define HTT_TCL_META_DATA_TYPE BIT(0)
+#define HTT_TCL_META_DATA_VALID_HTT BIT(1)
+
+/* vdev meta data */
+#define HTT_TCL_META_DATA_VDEV_ID GENMASK(9, 2)
+#define HTT_TCL_META_DATA_PDEV_ID GENMASK(11, 10)
+#define HTT_TCL_META_DATA_HOST_INSPECTED BIT(12)
+
+/* peer meta data */
+#define HTT_TCL_META_DATA_PEER_ID GENMASK(15, 2)
+
+#define HTT_TX_WBM_COMP_STATUS_OFFSET 8
+
+#define HTT_INVALID_PEER_ID 0xffff
+
+/* HTT tx completion is overlaid in wbm_release_ring */
+#define HTT_TX_WBM_COMP_INFO0_STATUS GENMASK(12, 9)
+#define HTT_TX_WBM_COMP_INFO0_REINJECT_REASON GENMASK(16, 13)
+
+#define HTT_TX_WBM_COMP_INFO1_ACK_RSSI GENMASK(31, 24)
+#define HTT_TX_WBM_COMP_INFO2_SW_PEER_ID GENMASK(15, 0)
+#define HTT_TX_WBM_COMP_INFO2_VALID BIT(21)
+
+struct htt_tx_wbm_completion {
+ u32 info0;
+ u32 info1;
+ u32 info2;
+ u32 info3;
+} __packed;
+
+enum htt_h2t_msg_type {
+ HTT_H2T_MSG_TYPE_VERSION_REQ = 0,
+ HTT_H2T_MSG_TYPE_SRING_SETUP = 0xb,
+ HTT_H2T_MSG_TYPE_RX_RING_SELECTION_CFG = 0xc,
+ HTT_H2T_MSG_TYPE_EXT_STATS_CFG = 0x10,
+ HTT_H2T_MSG_TYPE_PPDU_STATS_CFG = 0x11,
+ HTT_H2T_MSG_TYPE_RX_FULL_MONITOR_MODE = 0x17,
+};
+
+#define HTT_VER_REQ_INFO_MSG_ID GENMASK(7, 0)
+
+struct htt_ver_req_cmd {
+ u32 ver_reg_info;
+} __packed;
+
+enum htt_srng_ring_type {
+ HTT_HW_TO_SW_RING,
+ HTT_SW_TO_HW_RING,
+ HTT_SW_TO_SW_RING,
+};
+
+enum htt_srng_ring_id {
+ HTT_RXDMA_HOST_BUF_RING,
+ HTT_RXDMA_MONITOR_STATUS_RING,
+ HTT_RXDMA_MONITOR_BUF_RING,
+ HTT_RXDMA_MONITOR_DESC_RING,
+ HTT_RXDMA_MONITOR_DEST_RING,
+ HTT_HOST1_TO_FW_RXBUF_RING,
+ HTT_HOST2_TO_FW_RXBUF_RING,
+ HTT_RXDMA_NON_MONITOR_DEST_RING,
+};
+
+/* host -> target HTT_SRING_SETUP message
+ *
+ * After the target is booted up, the host can send an SRING setup message
+ * for each host facing LMAC SRING. The target sets up HW registers based
+ * on the setup message and confirms back to the host if response_required
+ * is set. The host should wait for the confirmation message before sending
+ * a new SRING setup message.
+ *
+ * The message would appear as follows:
+ *
+ * |31 24|23 20|19|18 16|15|14 8|7 0|
+ * |--------------- +-----------------+----------------+------------------|
+ * | ring_type | ring_id | pdev_id | msg_type |
+ * |----------------------------------------------------------------------|
+ * | ring_base_addr_lo |
+ * |----------------------------------------------------------------------|
+ * | ring_base_addr_hi |
+ * |----------------------------------------------------------------------|
+ * |ring_misc_cfg_flag|ring_entry_size| ring_size |
+ * |----------------------------------------------------------------------|
+ * | ring_head_offset32_remote_addr_lo |
+ * |----------------------------------------------------------------------|
+ * | ring_head_offset32_remote_addr_hi |
+ * |----------------------------------------------------------------------|
+ * | ring_tail_offset32_remote_addr_lo |
+ * |----------------------------------------------------------------------|
+ * | ring_tail_offset32_remote_addr_hi |
+ * |----------------------------------------------------------------------|
+ * | ring_msi_addr_lo |
+ * |----------------------------------------------------------------------|
+ * | ring_msi_addr_hi |
+ * |----------------------------------------------------------------------|
+ * | ring_msi_data |
+ * |----------------------------------------------------------------------|
+ * | intr_timer_th |IM| intr_batch_counter_th |
+ * |----------------------------------------------------------------------|
+ * | reserved |RR|PTCF| intr_low_threshold |
+ * |----------------------------------------------------------------------|
+ * Where
+ * IM = sw_intr_mode
+ * RR = response_required
+ * PTCF = prefetch_timer_cfg
+ *
+ * The message is interpreted as follows:
+ * dword0 - b'0:7 - msg_type: This will be set to
+ * HTT_H2T_MSG_TYPE_SRING_SETUP
+ * b'8:15 - pdev_id:
+ * 0 (for rings at SOC/UMAC level),
+ * 1/2/3 mac id (for rings at LMAC level)
+ * b'16:23 - ring_id: identifies which ring is to be set up,
+ * more details can be found in enum htt_srng_ring_id
+ * b'24:31 - ring_type: identifies the type of host ring,
+ * more details can be found in enum htt_srng_ring_type
+ * dword1 - b'0:31 - ring_base_addr_lo: Lower 32bits of ring base address
+ * dword2 - b'0:31 - ring_base_addr_hi: Upper 32bits of ring base address
+ * dword3 - b'0:15 - ring_size: size of the ring in units of 4-byte words
+ * b'16:23 - ring_entry_size: Size of each entry in 4-byte word units
+ * b'24:31 - ring_misc_cfg_flag: Valid only for HW_TO_SW_RING and
+ * SW_TO_HW_RING.
+ * Refer to HTT_SRING_SETUP_RING_MISC_CFG_RING defs.
+ * dword4 - b'0:31 - ring_head_off32_remote_addr_lo:
+ * Lower 32 bits of memory address of the remote variable
+ * storing the 4-byte word offset that identifies the head
+ * element within the ring.
+ * (The head offset variable has type u32.)
+ * Valid for HW_TO_SW and SW_TO_SW rings.
+ * dword5 - b'0:31 - ring_head_off32_remote_addr_hi:
+ * Upper 32 bits of memory address of the remote variable
+ * storing the 4-byte word offset that identifies the head
+ * element within the ring.
+ * (The head offset variable has type u32.)
+ * Valid for HW_TO_SW and SW_TO_SW rings.
+ * dword6 - b'0:31 - ring_tail_off32_remote_addr_lo:
+ * Lower 32 bits of memory address of the remote variable
+ * storing the 4-byte word offset that identifies the tail
+ * element within the ring.
+ * (The tail offset variable has type u32.)
+ * Valid for HW_TO_SW and SW_TO_SW rings.
+ * dword7 - b'0:31 - ring_tail_off32_remote_addr_hi:
+ * Upper 32 bits of memory address of the remote variable
+ * storing the 4-byte word offset that identifies the tail
+ * element within the ring.
+ * (The tail offset variable has type u32.)
+ * Valid for HW_TO_SW and SW_TO_SW rings.
+ * dword8 - b'0:31 - ring_msi_addr_lo: Lower 32bits of MSI cfg address
+ * valid only for HW_TO_SW_RING and SW_TO_HW_RING
+ * dword9 - b'0:31 - ring_msi_addr_hi: Upper 32bits of MSI cfg address
+ * valid only for HW_TO_SW_RING and SW_TO_HW_RING
+ * dword10 - b'0:31 - ring_msi_data: MSI data
+ * Refer to HTT_SRING_SETUP_RING_MSC_CFG_xxx defs
+ * valid only for HW_TO_SW_RING and SW_TO_HW_RING
+ * dword11 - b'0:14 - intr_batch_counter_th:
+ * batch counter threshold is in units of 4-byte words.
+ * HW internally maintains and increments batch count.
+ * (see SRING spec for detail description).
+ * When batch count reaches threshold value, an interrupt
+ * is generated by HW.
+ * b'15 - sw_intr_mode:
+ * This configuration shall be static.
+ * Only programmed at power up.
+ * 0: generate pulse style sw interrupts
+ * 1: generate level style sw interrupts
+ * b'16:31 - intr_timer_th:
+ * The timer init value when timer is idle or is
+ * initialized to start downcounting.
+ * In 8us units (to cover a range of 0 to 524 ms)
+ * dword12 - b'0:15 - intr_low_threshold:
+ * Used only by Consumer ring to generate ring_sw_int_p.
+ * Ring entries low threshold water mark, that is used
+ * in combination with the interrupt timer as well as
+ * the clearing of the level interrupt.
+ * b'16:18 - prefetch_timer_cfg:
+ * Used only by Consumer ring to set timer mode to
+ * support Application prefetch handling.
+ * The external tail offset/pointer will be updated
+ * at the following intervals:
+ * 3'b000: (Prefetch feature disabled; used only for debug)
+ * 3'b001: 1 usec
+ * 3'b010: 4 usec
+ * 3'b011: 8 usec (default)
+ * 3'b100: 16 usec
+ * Others: Reserved
+ * b'19 - response_required:
+ * Host needs HTT_T2H_MSG_TYPE_SRING_SETUP_DONE as response
+ * b'20:31 - reserved: reserved for future use
+ */
+
+#define HTT_SRNG_SETUP_CMD_INFO0_MSG_TYPE GENMASK(7, 0)
+#define HTT_SRNG_SETUP_CMD_INFO0_PDEV_ID GENMASK(15, 8)
+#define HTT_SRNG_SETUP_CMD_INFO0_RING_ID GENMASK(23, 16)
+#define HTT_SRNG_SETUP_CMD_INFO0_RING_TYPE GENMASK(31, 24)
+
+#define HTT_SRNG_SETUP_CMD_INFO1_RING_SIZE GENMASK(15, 0)
+#define HTT_SRNG_SETUP_CMD_INFO1_RING_ENTRY_SIZE GENMASK(23, 16)
+#define HTT_SRNG_SETUP_CMD_INFO1_RING_LOOP_CNT_DIS BIT(25)
+#define HTT_SRNG_SETUP_CMD_INFO1_RING_FLAGS_MSI_SWAP BIT(27)
+#define HTT_SRNG_SETUP_CMD_INFO1_RING_FLAGS_HOST_FW_SWAP BIT(28)
+#define HTT_SRNG_SETUP_CMD_INFO1_RING_FLAGS_TLV_SWAP BIT(29)
+
+#define HTT_SRNG_SETUP_CMD_INTR_INFO_BATCH_COUNTER_THRESH GENMASK(14, 0)
+#define HTT_SRNG_SETUP_CMD_INTR_INFO_SW_INTR_MODE BIT(15)
+#define HTT_SRNG_SETUP_CMD_INTR_INFO_INTR_TIMER_THRESH GENMASK(31, 16)
+
+#define HTT_SRNG_SETUP_CMD_INFO2_INTR_LOW_THRESH GENMASK(15, 0)
+#define HTT_SRNG_SETUP_CMD_INFO2_PRE_FETCH_TIMER_CFG BIT(16)
+#define HTT_SRNG_SETUP_CMD_INFO2_RESPONSE_REQUIRED BIT(19)
+
+struct htt_srng_setup_cmd {
+ u32 info0;
+ u32 ring_base_addr_lo;
+ u32 ring_base_addr_hi;
+ u32 info1;
+ u32 ring_head_off32_remote_addr_lo;
+ u32 ring_head_off32_remote_addr_hi;
+ u32 ring_tail_off32_remote_addr_lo;
+ u32 ring_tail_off32_remote_addr_hi;
+ u32 ring_msi_addr_lo;
+ u32 ring_msi_addr_hi;
+ u32 msi_data;
+ u32 intr_info;
+ u32 info2;
+} __packed;
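+
+/* A minimal sketch (not part of the driver) of composing dword0 of this
+ * command with the masks above, assuming FIELD_PREP from
+ * <linux/bitfield.h> and local pdev_id, ring_id and ring_type values:
+ *
+ * cmd->info0 = FIELD_PREP(HTT_SRNG_SETUP_CMD_INFO0_MSG_TYPE,
+ *                         HTT_H2T_MSG_TYPE_SRING_SETUP) |
+ *              FIELD_PREP(HTT_SRNG_SETUP_CMD_INFO0_PDEV_ID, pdev_id) |
+ *              FIELD_PREP(HTT_SRNG_SETUP_CMD_INFO0_RING_ID, ring_id) |
+ *              FIELD_PREP(HTT_SRNG_SETUP_CMD_INFO0_RING_TYPE, ring_type);
+ */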
+
+/* host -> target FW PPDU_STATS config message
+ *
+ * @details
+ * The following field definitions describe the format of the HTT host
+ * to target FW for PPDU_STATS_CFG msg.
+ * The message allows the host to configure the PPDU_STATS_IND messages
+ * produced by the target.
+ *
+ * |31 24|23 16|15 8|7 0|
+ * |-----------------------------------------------------------|
+ * | REQ bit mask | pdev_mask | msg type |
+ * |-----------------------------------------------------------|
+ * Header fields:
+ * - MSG_TYPE
+ * Bits 7:0
+ * Purpose: identifies this is a req to configure ppdu_stats_ind from target
+ * Value: 0x11
+ * - PDEV_MASK
+ * Bits 8:15
+ * Purpose: identifies which pdevs this PPDU stats configuration applies to
+ * Value: This is an overloaded field; refer to the usage and
+ * interpretation of PDEV in the interface document.
+ * Bit 8 : Reserved for SOC stats
+ * Bit 9 - 15 : Indicates PDEV_MASK in DBDC
+ * Indicates MACID_MASK in DBS
+ * - REQ_TLV_BIT_MASK
+ * Bits 16:31
+ * Purpose: each set bit indicates the corresponding PPDU stats TLV type
+ * needs to be included in the target's PPDU_STATS_IND messages.
+ * Value: refer to htt_ppdu_stats_tlv_tag_t
+ *
+ */
+
+struct htt_ppdu_stats_cfg_cmd {
+ u32 msg;
+} __packed;
+
+#define HTT_PPDU_STATS_CFG_MSG_TYPE GENMASK(7, 0)
+#define HTT_PPDU_STATS_CFG_SOC_STATS BIT(8)
+#define HTT_PPDU_STATS_CFG_PDEV_ID GENMASK(15, 9)
+#define HTT_PPDU_STATS_CFG_TLV_TYPE_BITMASK GENMASK(31, 16)
+
+enum htt_ppdu_stats_tag_type {
+ HTT_PPDU_STATS_TAG_COMMON,
+ HTT_PPDU_STATS_TAG_USR_COMMON,
+ HTT_PPDU_STATS_TAG_USR_RATE,
+ HTT_PPDU_STATS_TAG_USR_MPDU_ENQ_BITMAP_64,
+ HTT_PPDU_STATS_TAG_USR_MPDU_ENQ_BITMAP_256,
+ HTT_PPDU_STATS_TAG_SCH_CMD_STATUS,
+ HTT_PPDU_STATS_TAG_USR_COMPLTN_COMMON,
+ HTT_PPDU_STATS_TAG_USR_COMPLTN_BA_BITMAP_64,
+ HTT_PPDU_STATS_TAG_USR_COMPLTN_BA_BITMAP_256,
+ HTT_PPDU_STATS_TAG_USR_COMPLTN_ACK_BA_STATUS,
+ HTT_PPDU_STATS_TAG_USR_COMPLTN_FLUSH,
+ HTT_PPDU_STATS_TAG_USR_COMMON_ARRAY,
+ HTT_PPDU_STATS_TAG_INFO,
+ HTT_PPDU_STATS_TAG_TX_MGMTCTRL_PAYLOAD,
+
+ /* New TLV's are added above to this line */
+ HTT_PPDU_STATS_TAG_MAX,
+};
+
+#define HTT_PPDU_STATS_TAG_DEFAULT (BIT(HTT_PPDU_STATS_TAG_COMMON) \
+ | BIT(HTT_PPDU_STATS_TAG_USR_COMMON) \
+ | BIT(HTT_PPDU_STATS_TAG_USR_RATE) \
+ | BIT(HTT_PPDU_STATS_TAG_SCH_CMD_STATUS) \
+ | BIT(HTT_PPDU_STATS_TAG_USR_COMPLTN_COMMON) \
+ | BIT(HTT_PPDU_STATS_TAG_USR_COMPLTN_ACK_BA_STATUS) \
+ | BIT(HTT_PPDU_STATS_TAG_USR_COMPLTN_FLUSH) \
+ | BIT(HTT_PPDU_STATS_TAG_USR_COMMON_ARRAY))
+
+#define HTT_PPDU_STATS_TAG_PKTLOG (BIT(HTT_PPDU_STATS_TAG_USR_MPDU_ENQ_BITMAP_64) | \
+ BIT(HTT_PPDU_STATS_TAG_USR_MPDU_ENQ_BITMAP_256) | \
+ BIT(HTT_PPDU_STATS_TAG_USR_COMPLTN_BA_BITMAP_64) | \
+ BIT(HTT_PPDU_STATS_TAG_USR_COMPLTN_BA_BITMAP_256) | \
+ BIT(HTT_PPDU_STATS_TAG_INFO) | \
+ BIT(HTT_PPDU_STATS_TAG_TX_MGMTCTRL_PAYLOAD) | \
+ HTT_PPDU_STATS_TAG_DEFAULT)
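+
+/* A minimal sketch (not part of the driver) of composing the config word
+ * with the masks above, assuming FIELD_PREP from <linux/bitfield.h> and a
+ * local pdev_mask value:
+ *
+ * cmd->msg = FIELD_PREP(HTT_PPDU_STATS_CFG_MSG_TYPE,
+ *                       HTT_H2T_MSG_TYPE_PPDU_STATS_CFG) |
+ *            FIELD_PREP(HTT_PPDU_STATS_CFG_PDEV_ID, pdev_mask) |
+ *            FIELD_PREP(HTT_PPDU_STATS_CFG_TLV_TYPE_BITMASK,
+ *                       HTT_PPDU_STATS_TAG_DEFAULT);
+ */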
+
+/* HTT_H2T_MSG_TYPE_RX_RING_SELECTION_CFG Message
+ *
+ * details:
+ * HTT_H2T_MSG_TYPE_RX_RING_SELECTION_CFG message is sent by host to
+ * configure RXDMA rings.
+ * The configuration is per ring based and includes both packet subtypes
+ * and PPDU/MPDU TLVs.
+ *
+ * The message would appear as follows:
+ *
+ * |31 26|25|24|23 16|15 8|7 0|
+ * |-----------------+----------------+----------------+---------------|
+ * | rsvd1 |PS|SS| ring_id | pdev_id | msg_type |
+ * |-------------------------------------------------------------------|
+ * | rsvd2 | ring_buffer_size |
+ * |-------------------------------------------------------------------|
+ * | packet_type_enable_flags_0 |
+ * |-------------------------------------------------------------------|
+ * | packet_type_enable_flags_1 |
+ * |-------------------------------------------------------------------|
+ * | packet_type_enable_flags_2 |
+ * |-------------------------------------------------------------------|
+ * | packet_type_enable_flags_3 |
+ * |-------------------------------------------------------------------|
+ * | tlv_filter_in_flags |
+ * |-------------------------------------------------------------------|
+ * Where:
+ * PS = pkt_swap
+ * SS = status_swap
+ * The message is interpreted as follows:
+ * dword0 - b'0:7 - msg_type: This will be set to
+ * HTT_H2T_MSG_TYPE_RX_RING_SELECTION_CFG
+ * b'8:15 - pdev_id:
+ * 0 (for rings at SOC/UMAC level),
+ * 1/2/3 mac id (for rings at LMAC level)
+ * b'16:23 - ring_id: identifies the ring to configure,
+ * more details can be found in enum htt_srng_ring_id
+ * b'24 - status_swap: set to 1 to swap the status TLV
+ * b'25 - pkt_swap: set to 1 to swap the packet TLV
+ * b'26:31 - rsvd1: reserved for future use
+ * dword1 - b'0:15 - ring_buffer_size: size of buffers referenced by rx ring,
+ * in byte units.
+ * Valid only for HW_TO_SW_RING and SW_TO_HW_RING
+ * - b'16:31 - rsvd2: Reserved for future use
+ * dword2 - b'0:31 - packet_type_enable_flags_0:
+ * Enable MGMT packet from 0b0000 to 0b1001
+ * bits from low to high: FP, MD, MO - 3 bits
+ * FP: Filter_Pass
+ * MD: Monitor_Direct
+ * MO: Monitor_Other
+ * 10 mgmt subtypes * 3 bits -> 30 bits
+ * Refer to PKT_TYPE_ENABLE_FLAG0_xxx_MGMT_xxx defs
+ * dword3 - b'0:31 - packet_type_enable_flags_1:
+ * Enable MGMT packet from 0b1010 to 0b1111
+ * bits from low to high: FP, MD, MO - 3 bits
+ * Refer to PKT_TYPE_ENABLE_FLAG1_xxx_MGMT_xxx defs
+ * dword4 - b'0:31 - packet_type_enable_flags_2:
+ * Enable CTRL packet from 0b0000 to 0b1001
+ * bits from low to high: FP, MD, MO - 3 bits
+ * Refer to PKT_TYPE_ENABLE_FLAG2_xxx_CTRL_xxx defs
+ * dword5 - b'0:31 - packet_type_enable_flags_3:
+ * Enable CTRL packet from 0b1010 to 0b1111,
+ * MCAST_DATA, UCAST_DATA, NULL_DATA
+ * bits from low to high: FP, MD, MO - 3 bits
+ * Refer to PKT_TYPE_ENABLE_FLAG3_xxx_CTRL_xxx defs
+ * dword6 - b'0:31 - tlv_filter_in_flags:
+ * Filter in Attention/MPDU/PPDU/Header/User tlvs
+ * Refer to CFG_TLV_FILTER_IN_FLAG defs
+ */
+
+#define HTT_RX_RING_SELECTION_CFG_CMD_INFO0_MSG_TYPE GENMASK(7, 0)
+#define HTT_RX_RING_SELECTION_CFG_CMD_INFO0_PDEV_ID GENMASK(15, 8)
+#define HTT_RX_RING_SELECTION_CFG_CMD_INFO0_RING_ID GENMASK(23, 16)
+#define HTT_RX_RING_SELECTION_CFG_CMD_INFO0_SS BIT(24)
+#define HTT_RX_RING_SELECTION_CFG_CMD_INFO0_PS BIT(25)
+
+#define HTT_RX_RING_SELECTION_CFG_CMD_INFO1_BUF_SIZE GENMASK(15, 0)
+
+enum htt_rx_filter_tlv_flags {
+ HTT_RX_FILTER_TLV_FLAGS_MPDU_START = BIT(0),
+ HTT_RX_FILTER_TLV_FLAGS_MSDU_START = BIT(1),
+ HTT_RX_FILTER_TLV_FLAGS_RX_PACKET = BIT(2),
+ HTT_RX_FILTER_TLV_FLAGS_MSDU_END = BIT(3),
+ HTT_RX_FILTER_TLV_FLAGS_MPDU_END = BIT(4),
+ HTT_RX_FILTER_TLV_FLAGS_PACKET_HEADER = BIT(5),
+ HTT_RX_FILTER_TLV_FLAGS_PER_MSDU_HEADER = BIT(6),
+ HTT_RX_FILTER_TLV_FLAGS_ATTENTION = BIT(7),
+ HTT_RX_FILTER_TLV_FLAGS_PPDU_START = BIT(8),
+ HTT_RX_FILTER_TLV_FLAGS_PPDU_END = BIT(9),
+ HTT_RX_FILTER_TLV_FLAGS_PPDU_END_USER_STATS = BIT(10),
+ HTT_RX_FILTER_TLV_FLAGS_PPDU_END_USER_STATS_EXT = BIT(11),
+ HTT_RX_FILTER_TLV_FLAGS_PPDU_END_STATUS_DONE = BIT(12),
+};
+
+enum htt_rx_mgmt_pkt_filter_tlv_flags0 {
+ HTT_RX_FP_MGMT_PKT_FILTER_TLV_FLAGS0_ASSOC_REQ = BIT(0),
+ HTT_RX_MD_MGMT_PKT_FILTER_TLV_FLAGS0_ASSOC_REQ = BIT(1),
+ HTT_RX_MO_MGMT_PKT_FILTER_TLV_FLAGS0_ASSOC_REQ = BIT(2),
+ HTT_RX_FP_MGMT_PKT_FILTER_TLV_FLAGS0_ASSOC_RESP = BIT(3),
+ HTT_RX_MD_MGMT_PKT_FILTER_TLV_FLAGS0_ASSOC_RESP = BIT(4),
+ HTT_RX_MO_MGMT_PKT_FILTER_TLV_FLAGS0_ASSOC_RESP = BIT(5),
+ HTT_RX_FP_MGMT_PKT_FILTER_TLV_FLAGS0_REASSOC_REQ = BIT(6),
+ HTT_RX_MD_MGMT_PKT_FILTER_TLV_FLAGS0_REASSOC_REQ = BIT(7),
+ HTT_RX_MO_MGMT_PKT_FILTER_TLV_FLAGS0_REASSOC_REQ = BIT(8),
+ HTT_RX_FP_MGMT_PKT_FILTER_TLV_FLAGS0_REASSOC_RESP = BIT(9),
+ HTT_RX_MD_MGMT_PKT_FILTER_TLV_FLAGS0_REASSOC_RESP = BIT(10),
+ HTT_RX_MO_MGMT_PKT_FILTER_TLV_FLAGS0_REASSOC_RESP = BIT(11),
+ HTT_RX_FP_MGMT_PKT_FILTER_TLV_FLAGS0_PROBE_REQ = BIT(12),
+ HTT_RX_MD_MGMT_PKT_FILTER_TLV_FLAGS0_PROBE_REQ = BIT(13),
+ HTT_RX_MO_MGMT_PKT_FILTER_TLV_FLAGS0_PROBE_REQ = BIT(14),
+ HTT_RX_FP_MGMT_PKT_FILTER_TLV_FLAGS0_PROBE_RESP = BIT(15),
+ HTT_RX_MD_MGMT_PKT_FILTER_TLV_FLAGS0_PROBE_RESP = BIT(16),
+ HTT_RX_MO_MGMT_PKT_FILTER_TLV_FLAGS0_PROBE_RESP = BIT(17),
+ HTT_RX_FP_MGMT_PKT_FILTER_TLV_FLAGS0_PROBE_TIMING_ADV = BIT(18),
+ HTT_RX_MD_MGMT_PKT_FILTER_TLV_FLAGS0_PROBE_TIMING_ADV = BIT(19),
+ HTT_RX_MO_MGMT_PKT_FILTER_TLV_FLAGS0_PROBE_TIMING_ADV = BIT(20),
+ HTT_RX_FP_MGMT_PKT_FILTER_TLV_FLAGS0_RESERVED_7 = BIT(21),
+ HTT_RX_MD_MGMT_PKT_FILTER_TLV_FLAGS0_RESERVED_7 = BIT(22),
+ HTT_RX_MO_MGMT_PKT_FILTER_TLV_FLAGS0_RESERVED_7 = BIT(23),
+ HTT_RX_FP_MGMT_PKT_FILTER_TLV_FLAGS0_BEACON = BIT(24),
+ HTT_RX_MD_MGMT_PKT_FILTER_TLV_FLAGS0_BEACON = BIT(25),
+ HTT_RX_MO_MGMT_PKT_FILTER_TLV_FLAGS0_BEACON = BIT(26),
+ HTT_RX_FP_MGMT_PKT_FILTER_TLV_FLAGS0_ATIM = BIT(27),
+ HTT_RX_MD_MGMT_PKT_FILTER_TLV_FLAGS0_ATIM = BIT(28),
+ HTT_RX_MO_MGMT_PKT_FILTER_TLV_FLAGS0_ATIM = BIT(29),
+};
+
+enum htt_rx_mgmt_pkt_filter_tlv_flags1 {
+ HTT_RX_FP_MGMT_PKT_FILTER_TLV_FLAGS1_DISASSOC = BIT(0),
+ HTT_RX_MD_MGMT_PKT_FILTER_TLV_FLAGS1_DISASSOC = BIT(1),
+ HTT_RX_MO_MGMT_PKT_FILTER_TLV_FLAGS1_DISASSOC = BIT(2),
+ HTT_RX_FP_MGMT_PKT_FILTER_TLV_FLAGS1_AUTH = BIT(3),
+ HTT_RX_MD_MGMT_PKT_FILTER_TLV_FLAGS1_AUTH = BIT(4),
+ HTT_RX_MO_MGMT_PKT_FILTER_TLV_FLAGS1_AUTH = BIT(5),
+ HTT_RX_FP_MGMT_PKT_FILTER_TLV_FLAGS1_DEAUTH = BIT(6),
+ HTT_RX_MD_MGMT_PKT_FILTER_TLV_FLAGS1_DEAUTH = BIT(7),
+ HTT_RX_MO_MGMT_PKT_FILTER_TLV_FLAGS1_DEAUTH = BIT(8),
+ HTT_RX_FP_MGMT_PKT_FILTER_TLV_FLAGS1_ACTION = BIT(9),
+ HTT_RX_MD_MGMT_PKT_FILTER_TLV_FLAGS1_ACTION = BIT(10),
+ HTT_RX_MO_MGMT_PKT_FILTER_TLV_FLAGS1_ACTION = BIT(11),
+ HTT_RX_FP_MGMT_PKT_FILTER_TLV_FLAGS1_ACTION_NOACK = BIT(12),
+ HTT_RX_MD_MGMT_PKT_FILTER_TLV_FLAGS1_ACTION_NOACK = BIT(13),
+ HTT_RX_MO_MGMT_PKT_FILTER_TLV_FLAGS1_ACTION_NOACK = BIT(14),
+ HTT_RX_FP_MGMT_PKT_FILTER_TLV_FLAGS1_RESERVED_15 = BIT(15),
+ HTT_RX_MD_MGMT_PKT_FILTER_TLV_FLAGS1_RESERVED_15 = BIT(16),
+ HTT_RX_MO_MGMT_PKT_FILTER_TLV_FLAGS1_RESERVED_15 = BIT(17),
+};
+
+enum htt_rx_ctrl_pkt_filter_tlv_flags2 {
+ HTT_RX_FP_CTRL_PKT_FILTER_TLV_FLAGS2_CTRL_RESERVED_1 = BIT(0),
+ HTT_RX_MD_CTRL_PKT_FILTER_TLV_FLAGS2_CTRL_RESERVED_1 = BIT(1),
+ HTT_RX_MO_CTRL_PKT_FILTER_TLV_FLAGS2_CTRL_RESERVED_1 = BIT(2),
+ HTT_RX_FP_CTRL_PKT_FILTER_TLV_FLAGS2_CTRL_RESERVED_2 = BIT(3),
+ HTT_RX_MD_CTRL_PKT_FILTER_TLV_FLAGS2_CTRL_RESERVED_2 = BIT(4),
+ HTT_RX_MO_CTRL_PKT_FILTER_TLV_FLAGS2_CTRL_RESERVED_2 = BIT(5),
+ HTT_RX_FP_CTRL_PKT_FILTER_TLV_FLAGS2_CTRL_TRIGGER = BIT(6),
+ HTT_RX_MD_CTRL_PKT_FILTER_TLV_FLAGS2_CTRL_TRIGGER = BIT(7),
+ HTT_RX_MO_CTRL_PKT_FILTER_TLV_FLAGS2_CTRL_TRIGGER = BIT(8),
+ HTT_RX_FP_CTRL_PKT_FILTER_TLV_FLAGS2_CTRL_RESERVED_4 = BIT(9),
+ HTT_RX_MD_CTRL_PKT_FILTER_TLV_FLAGS2_CTRL_RESERVED_4 = BIT(10),
+ HTT_RX_MO_CTRL_PKT_FILTER_TLV_FLAGS2_CTRL_RESERVED_4 = BIT(11),
+ HTT_RX_FP_CTRL_PKT_FILTER_TLV_FLAGS2_CTRL_BF_REP_POLL = BIT(12),
+ HTT_RX_MD_CTRL_PKT_FILTER_TLV_FLAGS2_CTRL_BF_REP_POLL = BIT(13),
+ HTT_RX_MO_CTRL_PKT_FILTER_TLV_FLAGS2_CTRL_BF_REP_POLL = BIT(14),
+ HTT_RX_FP_CTRL_PKT_FILTER_TLV_FLAGS2_CTRL_VHT_NDP = BIT(15),
+ HTT_RX_MD_CTRL_PKT_FILTER_TLV_FLAGS2_CTRL_VHT_NDP = BIT(16),
+ HTT_RX_MO_CTRL_PKT_FILTER_TLV_FLAGS2_CTRL_VHT_NDP = BIT(17),
+ HTT_RX_FP_CTRL_PKT_FILTER_TLV_FLAGS2_CTRL_FRAME_EXT = BIT(18),
+ HTT_RX_MD_CTRL_PKT_FILTER_TLV_FLAGS2_CTRL_FRAME_EXT = BIT(19),
+ HTT_RX_MO_CTRL_PKT_FILTER_TLV_FLAGS2_CTRL_FRAME_EXT = BIT(20),
+ HTT_RX_FP_CTRL_PKT_FILTER_TLV_FLAGS2_CTRL_WRAPPER = BIT(21),
+ HTT_RX_MD_CTRL_PKT_FILTER_TLV_FLAGS2_CTRL_WRAPPER = BIT(22),
+ HTT_RX_MO_CTRL_PKT_FILTER_TLV_FLAGS2_CTRL_WRAPPER = BIT(23),
+ HTT_RX_FP_CTRL_PKT_FILTER_TLV_FLAGS2_BAR = BIT(24),
+ HTT_RX_MD_CTRL_PKT_FILTER_TLV_FLAGS2_BAR = BIT(25),
+ HTT_RX_MO_CTRL_PKT_FILTER_TLV_FLAGS2_BAR = BIT(26),
+ HTT_RX_FP_CTRL_PKT_FILTER_TLV_FLAGS2_BA = BIT(27),
+ HTT_RX_MD_CTRL_PKT_FILTER_TLV_FLAGS2_BA = BIT(28),
+ HTT_RX_MO_CTRL_PKT_FILTER_TLV_FLAGS2_BA = BIT(29),
+};
+
+enum htt_rx_ctrl_pkt_filter_tlv_flags3 {
+ HTT_RX_FP_CTRL_PKT_FILTER_TLV_FLAGS3_PSPOLL = BIT(0),
+ HTT_RX_MD_CTRL_PKT_FILTER_TLV_FLAGS3_PSPOLL = BIT(1),
+ HTT_RX_MO_CTRL_PKT_FILTER_TLV_FLAGS3_PSPOLL = BIT(2),
+ HTT_RX_FP_CTRL_PKT_FILTER_TLV_FLAGS3_RTS = BIT(3),
+ HTT_RX_MD_CTRL_PKT_FILTER_TLV_FLAGS3_RTS = BIT(4),
+ HTT_RX_MO_CTRL_PKT_FILTER_TLV_FLAGS3_RTS = BIT(5),
+ HTT_RX_FP_CTRL_PKT_FILTER_TLV_FLAGS3_CTS = BIT(6),
+ HTT_RX_MD_CTRL_PKT_FILTER_TLV_FLAGS3_CTS = BIT(7),
+ HTT_RX_MO_CTRL_PKT_FILTER_TLV_FLAGS3_CTS = BIT(8),
+ HTT_RX_FP_CTRL_PKT_FILTER_TLV_FLAGS3_ACK = BIT(9),
+ HTT_RX_MD_CTRL_PKT_FILTER_TLV_FLAGS3_ACK = BIT(10),
+ HTT_RX_MO_CTRL_PKT_FILTER_TLV_FLAGS3_ACK = BIT(11),
+ HTT_RX_FP_CTRL_PKT_FILTER_TLV_FLAGS3_CFEND = BIT(12),
+ HTT_RX_MD_CTRL_PKT_FILTER_TLV_FLAGS3_CFEND = BIT(13),
+ HTT_RX_MO_CTRL_PKT_FILTER_TLV_FLAGS3_CFEND = BIT(14),
+ HTT_RX_FP_CTRL_PKT_FILTER_TLV_FLAGS3_CFEND_ACK = BIT(15),
+ HTT_RX_MD_CTRL_PKT_FILTER_TLV_FLAGS3_CFEND_ACK = BIT(16),
+ HTT_RX_MO_CTRL_PKT_FILTER_TLV_FLAGS3_CFEND_ACK = BIT(17),
+};
+
+enum htt_rx_data_pkt_filter_tlv_flasg3 {
+ HTT_RX_FP_DATA_PKT_FILTER_TLV_FLASG3_MCAST = BIT(18),
+ HTT_RX_MD_DATA_PKT_FILTER_TLV_FLASG3_MCAST = BIT(19),
+ HTT_RX_MO_DATA_PKT_FILTER_TLV_FLASG3_MCAST = BIT(20),
+ HTT_RX_FP_DATA_PKT_FILTER_TLV_FLASG3_UCAST = BIT(21),
+ HTT_RX_MD_DATA_PKT_FILTER_TLV_FLASG3_UCAST = BIT(22),
+ HTT_RX_MO_DATA_PKT_FILTER_TLV_FLASG3_UCAST = BIT(23),
+ HTT_RX_FP_DATA_PKT_FILTER_TLV_FLASG3_NULL_DATA = BIT(24),
+ HTT_RX_MD_DATA_PKT_FILTER_TLV_FLASG3_NULL_DATA = BIT(25),
+ HTT_RX_MO_DATA_PKT_FILTER_TLV_FLASG3_NULL_DATA = BIT(26),
+};
+
+#define HTT_RX_FP_MGMT_FILTER_FLAGS0 \
+ (HTT_RX_FP_MGMT_PKT_FILTER_TLV_FLAGS0_ASSOC_REQ \
+ | HTT_RX_FP_MGMT_PKT_FILTER_TLV_FLAGS0_ASSOC_RESP \
+ | HTT_RX_FP_MGMT_PKT_FILTER_TLV_FLAGS0_REASSOC_REQ \
+ | HTT_RX_FP_MGMT_PKT_FILTER_TLV_FLAGS0_REASSOC_RESP \
+ | HTT_RX_FP_MGMT_PKT_FILTER_TLV_FLAGS0_PROBE_REQ \
+ | HTT_RX_FP_MGMT_PKT_FILTER_TLV_FLAGS0_PROBE_RESP \
+ | HTT_RX_FP_MGMT_PKT_FILTER_TLV_FLAGS0_PROBE_TIMING_ADV \
+ | HTT_RX_FP_MGMT_PKT_FILTER_TLV_FLAGS0_BEACON \
+ | HTT_RX_FP_MGMT_PKT_FILTER_TLV_FLAGS0_ATIM)
+
+#define HTT_RX_MD_MGMT_FILTER_FLAGS0 \
+ (HTT_RX_MD_MGMT_PKT_FILTER_TLV_FLAGS0_ASSOC_REQ \
+ | HTT_RX_MD_MGMT_PKT_FILTER_TLV_FLAGS0_ASSOC_RESP \
+ | HTT_RX_MD_MGMT_PKT_FILTER_TLV_FLAGS0_REASSOC_REQ \
+ | HTT_RX_MD_MGMT_PKT_FILTER_TLV_FLAGS0_REASSOC_RESP \
+ | HTT_RX_MD_MGMT_PKT_FILTER_TLV_FLAGS0_PROBE_REQ \
+ | HTT_RX_MD_MGMT_PKT_FILTER_TLV_FLAGS0_PROBE_RESP \
+ | HTT_RX_MD_MGMT_PKT_FILTER_TLV_FLAGS0_PROBE_TIMING_ADV \
+ | HTT_RX_MD_MGMT_PKT_FILTER_TLV_FLAGS0_BEACON \
+ | HTT_RX_MD_MGMT_PKT_FILTER_TLV_FLAGS0_ATIM)
+
+#define HTT_RX_MO_MGMT_FILTER_FLAGS0 \
+ (HTT_RX_MO_MGMT_PKT_FILTER_TLV_FLAGS0_ASSOC_REQ \
+ | HTT_RX_MO_MGMT_PKT_FILTER_TLV_FLAGS0_ASSOC_RESP \
+ | HTT_RX_MO_MGMT_PKT_FILTER_TLV_FLAGS0_REASSOC_REQ \
+ | HTT_RX_MO_MGMT_PKT_FILTER_TLV_FLAGS0_REASSOC_RESP \
+ | HTT_RX_MO_MGMT_PKT_FILTER_TLV_FLAGS0_PROBE_REQ \
+ | HTT_RX_MO_MGMT_PKT_FILTER_TLV_FLAGS0_PROBE_RESP \
+ | HTT_RX_MO_MGMT_PKT_FILTER_TLV_FLAGS0_PROBE_TIMING_ADV \
+ | HTT_RX_MO_MGMT_PKT_FILTER_TLV_FLAGS0_BEACON \
+ | HTT_RX_MO_MGMT_PKT_FILTER_TLV_FLAGS0_ATIM)
+
+#define HTT_RX_FP_MGMT_FILTER_FLAGS1 (HTT_RX_FP_MGMT_PKT_FILTER_TLV_FLAGS1_DISASSOC \
+ | HTT_RX_FP_MGMT_PKT_FILTER_TLV_FLAGS1_AUTH \
+ | HTT_RX_FP_MGMT_PKT_FILTER_TLV_FLAGS1_DEAUTH \
+ | HTT_RX_FP_MGMT_PKT_FILTER_TLV_FLAGS1_ACTION \
+ | HTT_RX_FP_MGMT_PKT_FILTER_TLV_FLAGS1_ACTION_NOACK)
+
+#define HTT_RX_MD_MGMT_FILTER_FLAGS1 (HTT_RX_MD_MGMT_PKT_FILTER_TLV_FLAGS1_DISASSOC \
+ | HTT_RX_MD_MGMT_PKT_FILTER_TLV_FLAGS1_AUTH \
+ | HTT_RX_MD_MGMT_PKT_FILTER_TLV_FLAGS1_DEAUTH \
+ | HTT_RX_MD_MGMT_PKT_FILTER_TLV_FLAGS1_ACTION \
+ | HTT_RX_MD_MGMT_PKT_FILTER_TLV_FLAGS1_ACTION_NOACK)
+
+#define HTT_RX_MO_MGMT_FILTER_FLAGS1 (HTT_RX_MO_MGMT_PKT_FILTER_TLV_FLAGS1_DISASSOC \
+ | HTT_RX_MO_MGMT_PKT_FILTER_TLV_FLAGS1_AUTH \
+ | HTT_RX_MO_MGMT_PKT_FILTER_TLV_FLAGS1_DEAUTH \
+ | HTT_RX_MO_MGMT_PKT_FILTER_TLV_FLAGS1_ACTION \
+ | HTT_RX_MO_MGMT_PKT_FILTER_TLV_FLAGS1_ACTION_NOACK)
+
+#define HTT_RX_FP_CTRL_FILTER_FLASG2 (HTT_RX_FP_CTRL_PKT_FILTER_TLV_FLAGS2_CTRL_WRAPPER \
+ | HTT_RX_FP_CTRL_PKT_FILTER_TLV_FLAGS2_BAR \
+ | HTT_RX_FP_CTRL_PKT_FILTER_TLV_FLAGS2_BA)
+
+#define HTT_RX_MD_CTRL_FILTER_FLASG2 (HTT_RX_MD_CTRL_PKT_FILTER_TLV_FLAGS2_CTRL_WRAPPER \
+ | HTT_RX_MD_CTRL_PKT_FILTER_TLV_FLAGS2_BAR \
+ | HTT_RX_MD_CTRL_PKT_FILTER_TLV_FLAGS2_BA)
+
+#define HTT_RX_MO_CTRL_FILTER_FLASG2 (HTT_RX_MO_CTRL_PKT_FILTER_TLV_FLAGS2_CTRL_WRAPPER \
+ | HTT_RX_MO_CTRL_PKT_FILTER_TLV_FLAGS2_BAR \
+ | HTT_RX_MO_CTRL_PKT_FILTER_TLV_FLAGS2_BA)
+
+#define HTT_RX_FP_CTRL_FILTER_FLASG3 (HTT_RX_FP_CTRL_PKT_FILTER_TLV_FLAGS3_PSPOLL \
+ | HTT_RX_FP_CTRL_PKT_FILTER_TLV_FLAGS3_RTS \
+ | HTT_RX_FP_CTRL_PKT_FILTER_TLV_FLAGS3_CTS \
+ | HTT_RX_FP_CTRL_PKT_FILTER_TLV_FLAGS3_ACK \
+ | HTT_RX_FP_CTRL_PKT_FILTER_TLV_FLAGS3_CFEND \
+ | HTT_RX_FP_CTRL_PKT_FILTER_TLV_FLAGS3_CFEND_ACK)
+
+#define HTT_RX_MD_CTRL_FILTER_FLASG3 (HTT_RX_MD_CTRL_PKT_FILTER_TLV_FLAGS3_PSPOLL \
+ | HTT_RX_MD_CTRL_PKT_FILTER_TLV_FLAGS3_RTS \
+ | HTT_RX_MD_CTRL_PKT_FILTER_TLV_FLAGS3_CTS \
+ | HTT_RX_MD_CTRL_PKT_FILTER_TLV_FLAGS3_ACK \
+ | HTT_RX_MD_CTRL_PKT_FILTER_TLV_FLAGS3_CFEND \
+ | HTT_RX_MD_CTRL_PKT_FILTER_TLV_FLAGS3_CFEND_ACK)
+
+#define HTT_RX_MO_CTRL_FILTER_FLASG3 (HTT_RX_MO_CTRL_PKT_FILTER_TLV_FLAGS3_PSPOLL \
+ | HTT_RX_MO_CTRL_PKT_FILTER_TLV_FLAGS3_RTS \
+ | HTT_RX_MO_CTRL_PKT_FILTER_TLV_FLAGS3_CTS \
+ | HTT_RX_MO_CTRL_PKT_FILTER_TLV_FLAGS3_ACK \
+ | HTT_RX_MO_CTRL_PKT_FILTER_TLV_FLAGS3_CFEND \
+ | HTT_RX_MO_CTRL_PKT_FILTER_TLV_FLAGS3_CFEND_ACK)
+
+#define HTT_RX_FP_DATA_FILTER_FLASG3 (HTT_RX_FP_DATA_PKT_FILTER_TLV_FLASG3_MCAST \
+ | HTT_RX_FP_DATA_PKT_FILTER_TLV_FLASG3_UCAST \
+ | HTT_RX_FP_DATA_PKT_FILTER_TLV_FLASG3_NULL_DATA)
+
+#define HTT_RX_MD_DATA_FILTER_FLASG3 (HTT_RX_MD_DATA_PKT_FILTER_TLV_FLASG3_MCAST \
+ | HTT_RX_MD_DATA_PKT_FILTER_TLV_FLASG3_UCAST \
+ | HTT_RX_MD_DATA_PKT_FILTER_TLV_FLASG3_NULL_DATA)
+
+#define HTT_RX_MO_DATA_FILTER_FLASG3 (HTT_RX_MO_DATA_PKT_FILTER_TLV_FLASG3_MCAST \
+ | HTT_RX_MO_DATA_PKT_FILTER_TLV_FLASG3_UCAST \
+ | HTT_RX_MO_DATA_PKT_FILTER_TLV_FLASG3_NULL_DATA)
+
+#define HTT_RX_MON_FP_MGMT_FILTER_FLAGS0 \
+ (HTT_RX_FP_MGMT_FILTER_FLAGS0 | \
+ HTT_RX_FP_MGMT_PKT_FILTER_TLV_FLAGS0_RESERVED_7)
+
+#define HTT_RX_MON_MO_MGMT_FILTER_FLAGS0 \
+ (HTT_RX_MO_MGMT_FILTER_FLAGS0 | \
+ HTT_RX_MO_MGMT_PKT_FILTER_TLV_FLAGS0_RESERVED_7)
+
+#define HTT_RX_MON_FP_MGMT_FILTER_FLAGS1 \
+ (HTT_RX_FP_MGMT_FILTER_FLAGS1 | \
+ HTT_RX_FP_MGMT_PKT_FILTER_TLV_FLAGS1_RESERVED_15)
+
+#define HTT_RX_MON_MO_MGMT_FILTER_FLAGS1 \
+ (HTT_RX_MO_MGMT_FILTER_FLAGS1 | \
+ HTT_RX_MO_MGMT_PKT_FILTER_TLV_FLAGS1_RESERVED_15)
+
+#define HTT_RX_MON_FP_CTRL_FILTER_FLASG2 \
+ (HTT_RX_FP_CTRL_FILTER_FLASG2 | \
+ HTT_RX_FP_CTRL_PKT_FILTER_TLV_FLAGS2_CTRL_RESERVED_1 | \
+ HTT_RX_FP_CTRL_PKT_FILTER_TLV_FLAGS2_CTRL_RESERVED_2 | \
+ HTT_RX_FP_CTRL_PKT_FILTER_TLV_FLAGS2_CTRL_TRIGGER | \
+ HTT_RX_FP_CTRL_PKT_FILTER_TLV_FLAGS2_CTRL_RESERVED_4 | \
+ HTT_RX_FP_CTRL_PKT_FILTER_TLV_FLAGS2_CTRL_BF_REP_POLL | \
+ HTT_RX_FP_CTRL_PKT_FILTER_TLV_FLAGS2_CTRL_VHT_NDP | \
+ HTT_RX_FP_CTRL_PKT_FILTER_TLV_FLAGS2_CTRL_FRAME_EXT)
+
+#define HTT_RX_MON_MO_CTRL_FILTER_FLASG2 \
+ (HTT_RX_MO_CTRL_FILTER_FLASG2 | \
+ HTT_RX_MO_CTRL_PKT_FILTER_TLV_FLAGS2_CTRL_RESERVED_1 | \
+ HTT_RX_MO_CTRL_PKT_FILTER_TLV_FLAGS2_CTRL_RESERVED_2 | \
+ HTT_RX_MO_CTRL_PKT_FILTER_TLV_FLAGS2_CTRL_TRIGGER | \
+ HTT_RX_MO_CTRL_PKT_FILTER_TLV_FLAGS2_CTRL_RESERVED_4 | \
+ HTT_RX_MO_CTRL_PKT_FILTER_TLV_FLAGS2_CTRL_BF_REP_POLL | \
+ HTT_RX_MO_CTRL_PKT_FILTER_TLV_FLAGS2_CTRL_VHT_NDP | \
+ HTT_RX_MO_CTRL_PKT_FILTER_TLV_FLAGS2_CTRL_FRAME_EXT)
+
+#define HTT_RX_MON_FP_CTRL_FILTER_FLASG3 HTT_RX_FP_CTRL_FILTER_FLASG3
+
+#define HTT_RX_MON_MO_CTRL_FILTER_FLASG3 HTT_RX_MO_CTRL_FILTER_FLASG3
+
+#define HTT_RX_MON_FP_DATA_FILTER_FLASG3 HTT_RX_FP_DATA_FILTER_FLASG3
+
+#define HTT_RX_MON_MO_DATA_FILTER_FLASG3 HTT_RX_MO_DATA_FILTER_FLASG3
+
+#define HTT_RX_MON_FILTER_TLV_FLAGS \
+ (HTT_RX_FILTER_TLV_FLAGS_MPDU_START | \
+ HTT_RX_FILTER_TLV_FLAGS_PPDU_START | \
+ HTT_RX_FILTER_TLV_FLAGS_PPDU_END | \
+ HTT_RX_FILTER_TLV_FLAGS_PPDU_END_USER_STATS | \
+ HTT_RX_FILTER_TLV_FLAGS_PPDU_END_USER_STATS_EXT | \
+ HTT_RX_FILTER_TLV_FLAGS_PPDU_END_STATUS_DONE)
+
+#define HTT_RX_MON_FILTER_TLV_FLAGS_MON_STATUS_RING \
+ (HTT_RX_FILTER_TLV_FLAGS_MPDU_START | \
+ HTT_RX_FILTER_TLV_FLAGS_PPDU_START | \
+ HTT_RX_FILTER_TLV_FLAGS_PPDU_END | \
+ HTT_RX_FILTER_TLV_FLAGS_PPDU_END_USER_STATS | \
+ HTT_RX_FILTER_TLV_FLAGS_PPDU_END_USER_STATS_EXT | \
+ HTT_RX_FILTER_TLV_FLAGS_PPDU_END_STATUS_DONE)
+
+#define HTT_RX_MON_FILTER_TLV_FLAGS_MON_BUF_RING \
+ (HTT_RX_FILTER_TLV_FLAGS_MPDU_START | \
+ HTT_RX_FILTER_TLV_FLAGS_MSDU_START | \
+ HTT_RX_FILTER_TLV_FLAGS_RX_PACKET | \
+ HTT_RX_FILTER_TLV_FLAGS_MSDU_END | \
+ HTT_RX_FILTER_TLV_FLAGS_MPDU_END | \
+ HTT_RX_FILTER_TLV_FLAGS_PACKET_HEADER | \
+ HTT_RX_FILTER_TLV_FLAGS_PER_MSDU_HEADER | \
+ HTT_RX_FILTER_TLV_FLAGS_ATTENTION)
+
+struct htt_rx_ring_selection_cfg_cmd {
+ u32 info0;
+ u32 info1;
+ u32 pkt_type_en_flags0;
+ u32 pkt_type_en_flags1;
+ u32 pkt_type_en_flags2;
+ u32 pkt_type_en_flags3;
+ u32 rx_filter_tlv;
+} __packed;
+
+struct htt_rx_ring_tlv_filter {
+ u32 rx_filter; /* see htt_rx_filter_tlv_flags */
+ u32 pkt_filter_flags0; /* MGMT */
+ u32 pkt_filter_flags1; /* MGMT */
+ u32 pkt_filter_flags2; /* CTRL */
+ u32 pkt_filter_flags3; /* CTRL & DATA */
+};
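+
+/* Illustrative use (an assumed sketch, not part of the original header):
+ * a monitor status ring configuration would typically combine the MON_*
+ * aggregates defined above, e.g.
+ *
+ *	struct htt_rx_ring_tlv_filter tlv_filter = { 0 };
+ *
+ *	tlv_filter.rx_filter = HTT_RX_MON_FILTER_TLV_FLAGS_MON_STATUS_RING;
+ *	tlv_filter.pkt_filter_flags0 = HTT_RX_MON_FP_MGMT_FILTER_FLAGS0;
+ *	tlv_filter.pkt_filter_flags1 = HTT_RX_MON_FP_MGMT_FILTER_FLAGS1;
+ *	tlv_filter.pkt_filter_flags2 = HTT_RX_MON_FP_CTRL_FILTER_FLASG2;
+ *	tlv_filter.pkt_filter_flags3 = HTT_RX_MON_FP_CTRL_FILTER_FLASG3 |
+ *				       HTT_RX_MON_FP_DATA_FILTER_FLASG3;
+ */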
+
+#define HTT_RX_FULL_MON_MODE_CFG_CMD_INFO0_MSG_TYPE GENMASK(7, 0)
+#define HTT_RX_FULL_MON_MODE_CFG_CMD_INFO0_PDEV_ID GENMASK(15, 8)
+
+#define HTT_RX_FULL_MON_MODE_CFG_CMD_CFG_ENABLE BIT(0)
+#define HTT_RX_FULL_MON_MODE_CFG_CMD_CFG_ZERO_MPDUS_END BIT(1)
+#define HTT_RX_FULL_MON_MODE_CFG_CMD_CFG_NON_ZERO_MPDUS_END BIT(2)
+#define HTT_RX_FULL_MON_MODE_CFG_CMD_CFG_RELEASE_RING GENMASK(10, 3)
+
+/* Enumeration for full monitor mode destination ring select
+ * 0 - REO destination ring select
+ * 1 - FW destination ring select
+ * 2 - SW destination ring select
+ * 3 - Release destination ring select
+ */
+enum htt_rx_full_mon_release_ring {
+ HTT_RX_MON_RING_REO,
+ HTT_RX_MON_RING_FW,
+ HTT_RX_MON_RING_SW,
+ HTT_RX_MON_RING_RELEASE,
+};
+
+struct htt_rx_full_monitor_mode_cfg_cmd {
+ u32 info0;
+ u32 cfg;
+} __packed;
+
+/* HTT message target->host */
+
+enum htt_t2h_msg_type {
+ HTT_T2H_MSG_TYPE_VERSION_CONF,
+ HTT_T2H_MSG_TYPE_PEER_MAP = 0x3,
+ HTT_T2H_MSG_TYPE_PEER_UNMAP = 0x4,
+ HTT_T2H_MSG_TYPE_RX_ADDBA = 0x5,
+ HTT_T2H_MSG_TYPE_PKTLOG = 0x8,
+ HTT_T2H_MSG_TYPE_SEC_IND = 0xb,
+ HTT_T2H_MSG_TYPE_PEER_MAP2 = 0x1e,
+ HTT_T2H_MSG_TYPE_PEER_UNMAP2 = 0x1f,
+ HTT_T2H_MSG_TYPE_PPDU_STATS_IND = 0x1d,
+ HTT_T2H_MSG_TYPE_EXT_STATS_CONF = 0x1c,
+ HTT_T2H_MSG_TYPE_BKPRESSURE_EVENT_IND = 0x24,
+};
+
+#define HTT_TARGET_VERSION_MAJOR 3
+
+#define HTT_T2H_MSG_TYPE GENMASK(7, 0)
+#define HTT_T2H_VERSION_CONF_MINOR GENMASK(15, 8)
+#define HTT_T2H_VERSION_CONF_MAJOR GENMASK(23, 16)
+
+struct htt_t2h_version_conf_msg {
+ u32 version;
+} __packed;
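+
+/* Illustrative decode (an assumed sketch): every T2H message carries its
+ * type in the low byte of the first word, and the VERSION_CONF payload
+ * packs the minor/major HTT version into the same word:
+ *
+ *	type = FIELD_GET(HTT_T2H_MSG_TYPE, *(u32 *)skb->data);
+ *	minor = FIELD_GET(HTT_T2H_VERSION_CONF_MINOR, msg->version);
+ *	major = FIELD_GET(HTT_T2H_VERSION_CONF_MAJOR, msg->version);
+ */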
+
+#define HTT_T2H_PEER_MAP_INFO_VDEV_ID GENMASK(15, 8)
+#define HTT_T2H_PEER_MAP_INFO_PEER_ID GENMASK(31, 16)
+#define HTT_T2H_PEER_MAP_INFO1_MAC_ADDR_H16 GENMASK(15, 0)
+#define HTT_T2H_PEER_MAP_INFO1_HW_PEER_ID GENMASK(31, 16)
+#define HTT_T2H_PEER_MAP_INFO2_AST_HASH_VAL GENMASK(15, 0)
+#define HTT_T2H_PEER_MAP_INFO2_NEXT_HOP_M BIT(16)
+#define HTT_T2H_PEER_MAP_INFO2_NEXT_HOP_S 16
+
+struct htt_t2h_peer_map_event {
+ u32 info;
+ u32 mac_addr_l32;
+ u32 info1;
+ u32 info2;
+} __packed;
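+
+/* Illustrative decode (an assumed sketch): the peer MAC address is split
+ * across mac_addr_l32 and the low 16 bits of info1, and can be
+ * reassembled with ath11k_dp_get_mac_addr() declared later in this file:
+ *
+ *	u16 h16 = FIELD_GET(HTT_T2H_PEER_MAP_INFO1_MAC_ADDR_H16, ev->info1);
+ *	u8 mac[ETH_ALEN];
+ *
+ *	ath11k_dp_get_mac_addr(ev->mac_addr_l32, h16, mac);
+ */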
+
+#define HTT_T2H_PEER_UNMAP_INFO_VDEV_ID HTT_T2H_PEER_MAP_INFO_VDEV_ID
+#define HTT_T2H_PEER_UNMAP_INFO_PEER_ID HTT_T2H_PEER_MAP_INFO_PEER_ID
+#define HTT_T2H_PEER_UNMAP_INFO1_MAC_ADDR_H16 \
+ HTT_T2H_PEER_MAP_INFO1_MAC_ADDR_H16
+#define HTT_T2H_PEER_MAP_INFO1_NEXT_HOP_M HTT_T2H_PEER_MAP_INFO2_NEXT_HOP_M
+#define HTT_T2H_PEER_MAP_INFO1_NEXT_HOP_S HTT_T2H_PEER_MAP_INFO2_NEXT_HOP_S
+
+struct htt_t2h_peer_unmap_event {
+ u32 info;
+ u32 mac_addr_l32;
+ u32 info1;
+} __packed;
+
+struct htt_resp_msg {
+ union {
+ struct htt_t2h_version_conf_msg version_msg;
+ struct htt_t2h_peer_map_event peer_map_ev;
+ struct htt_t2h_peer_unmap_event peer_unmap_ev;
+ };
+} __packed;
+
+#define HTT_BACKPRESSURE_EVENT_PDEV_ID_M GENMASK(15, 8)
+#define HTT_BACKPRESSURE_EVENT_RING_TYPE_M GENMASK(23, 16)
+#define HTT_BACKPRESSURE_EVENT_RING_ID_M GENMASK(31, 24)
+
+#define HTT_BACKPRESSURE_EVENT_HP_M GENMASK(15, 0)
+#define HTT_BACKPRESSURE_EVENT_TP_M GENMASK(31, 16)
+
+#define HTT_BACKPRESSURE_UMAC_RING_TYPE 0
+#define HTT_BACKPRESSURE_LMAC_RING_TYPE 1
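+
+/* Illustrative decode (an assumed sketch) of the first backpressure
+ * event word using the masks above:
+ *
+ *	pdev_id = FIELD_GET(HTT_BACKPRESSURE_EVENT_PDEV_ID_M, *msg);
+ *	ring_type = FIELD_GET(HTT_BACKPRESSURE_EVENT_RING_TYPE_M, *msg);
+ *	ring_id = FIELD_GET(HTT_BACKPRESSURE_EVENT_RING_ID_M, *msg);
+ */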
+
+enum htt_backpressure_umac_ringid {
+ HTT_SW_RING_IDX_REO_REO2SW1_RING,
+ HTT_SW_RING_IDX_REO_REO2SW2_RING,
+ HTT_SW_RING_IDX_REO_REO2SW3_RING,
+ HTT_SW_RING_IDX_REO_REO2SW4_RING,
+ HTT_SW_RING_IDX_REO_WBM2REO_LINK_RING,
+ HTT_SW_RING_IDX_REO_REO2TCL_RING,
+ HTT_SW_RING_IDX_REO_REO2FW_RING,
+ HTT_SW_RING_IDX_REO_REO_RELEASE_RING,
+ HTT_SW_RING_IDX_WBM_PPE_RELEASE_RING,
+ HTT_SW_RING_IDX_TCL_TCL2TQM_RING,
+ HTT_SW_RING_IDX_WBM_TQM_RELEASE_RING,
+ HTT_SW_RING_IDX_WBM_REO_RELEASE_RING,
+ HTT_SW_RING_IDX_WBM_WBM2SW0_RELEASE_RING,
+ HTT_SW_RING_IDX_WBM_WBM2SW1_RELEASE_RING,
+ HTT_SW_RING_IDX_WBM_WBM2SW2_RELEASE_RING,
+ HTT_SW_RING_IDX_WBM_WBM2SW3_RELEASE_RING,
+ HTT_SW_RING_IDX_REO_REO_CMD_RING,
+ HTT_SW_RING_IDX_REO_REO_STATUS_RING,
+ HTT_SW_UMAC_RING_IDX_MAX,
+};
+
+enum htt_backpressure_lmac_ringid {
+ HTT_SW_RING_IDX_FW2RXDMA_BUF_RING,
+ HTT_SW_RING_IDX_FW2RXDMA_STATUS_RING,
+ HTT_SW_RING_IDX_FW2RXDMA_LINK_RING,
+ HTT_SW_RING_IDX_SW2RXDMA_BUF_RING,
+ HTT_SW_RING_IDX_WBM2RXDMA_LINK_RING,
+ HTT_SW_RING_IDX_RXDMA2FW_RING,
+ HTT_SW_RING_IDX_RXDMA2SW_RING,
+ HTT_SW_RING_IDX_RXDMA2RELEASE_RING,
+ HTT_SW_RING_IDX_RXDMA2REO_RING,
+ HTT_SW_RING_IDX_MONITOR_STATUS_RING,
+ HTT_SW_RING_IDX_MONITOR_BUF_RING,
+ HTT_SW_RING_IDX_MONITOR_DESC_RING,
+ HTT_SW_RING_IDX_MONITOR_DEST_RING,
+ HTT_SW_LMAC_RING_IDX_MAX,
+};
+
+/* ppdu stats
+ *
+ * @details
+ * The following field definitions describe the format of the HTT target
+ * to host ppdu stats indication message.
+ *
+ *
+ * |31 16|15 12|11 10|9 8|7 0 |
+ * |----------------------------------------------------------------------|
+ * | payload_size | rsvd |pdev_id|mac_id | msg type |
+ * |----------------------------------------------------------------------|
+ * | ppdu_id |
+ * |----------------------------------------------------------------------|
+ * | Timestamp in us |
+ * |----------------------------------------------------------------------|
+ * | reserved |
+ * |----------------------------------------------------------------------|
+ * | type-specific stats info |
+ * | (see htt_ppdu_stats.h) |
+ * |----------------------------------------------------------------------|
+ * Header fields:
+ * - MSG_TYPE
+ * Bits 7:0
+ * Purpose: Identifies this as a PPDU STATS indication
+ * message.
+ * Value: 0x1d
+ * - mac_id
+ * Bits 9:8
+ * Purpose: mac_id of this ppdu_id
+ * Value: 0-3
+ * - pdev_id
+ * Bits 11:10
+ * Purpose: pdev_id of this ppdu_id
+ * Value: 0-3
+ * 0 (for rings at SOC level),
+ * 1/2/3 PDEV -> 0/1/2
+ * - payload_size
+ * Bits 31:16
+ * Purpose: total tlv size
+ * Value: payload_size in bytes
+ */
+
+#define HTT_T2H_PPDU_STATS_INFO_PDEV_ID GENMASK(11, 10)
+#define HTT_T2H_PPDU_STATS_INFO_PAYLOAD_SIZE GENMASK(31, 16)
+
+struct ath11k_htt_ppdu_stats_msg {
+ u32 info;
+ u32 ppdu_id;
+ u32 timestamp;
+ u32 rsvd;
+ u8 data[];
+} __packed;
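+
+/* Illustrative decode of the info word (an assumed sketch), matching the
+ * layout documented above:
+ *
+ *	pdev_id = FIELD_GET(HTT_T2H_PPDU_STATS_INFO_PDEV_ID, msg->info);
+ *	payload_size = FIELD_GET(HTT_T2H_PPDU_STATS_INFO_PAYLOAD_SIZE,
+ *				 msg->info);
+ */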
+
+struct htt_tlv {
+ u32 header;
+ u8 value[];
+} __packed;
+
+#define HTT_TLV_TAG GENMASK(11, 0)
+#define HTT_TLV_LEN GENMASK(23, 12)
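+
+/* Illustrative TLV walk (an assumed sketch): each TLV packs its tag and
+ * the length of value[] (excluding this 4-byte header) into the header
+ * word:
+ *
+ *	tag = FIELD_GET(HTT_TLV_TAG, tlv->header);
+ *	len = FIELD_GET(HTT_TLV_LEN, tlv->header);
+ *	next = (struct htt_tlv *)((u8 *)tlv + sizeof(*tlv) + len);
+ */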
+
+enum HTT_PPDU_STATS_BW {
+ HTT_PPDU_STATS_BANDWIDTH_5MHZ = 0,
+ HTT_PPDU_STATS_BANDWIDTH_10MHZ = 1,
+ HTT_PPDU_STATS_BANDWIDTH_20MHZ = 2,
+ HTT_PPDU_STATS_BANDWIDTH_40MHZ = 3,
+ HTT_PPDU_STATS_BANDWIDTH_80MHZ = 4,
+ HTT_PPDU_STATS_BANDWIDTH_160MHZ = 5, /* includes 80+80 */
+ HTT_PPDU_STATS_BANDWIDTH_DYN = 6,
+};
+
+#define HTT_PPDU_STATS_CMN_FLAGS_FRAME_TYPE_M GENMASK(7, 0)
+#define HTT_PPDU_STATS_CMN_FLAGS_QUEUE_TYPE_M GENMASK(15, 8)
+/* bw - HTT_PPDU_STATS_BW */
+#define HTT_PPDU_STATS_CMN_FLAGS_BW_M GENMASK(19, 16)
+
+struct htt_ppdu_stats_common {
+ u32 ppdu_id;
+ u16 sched_cmdid;
+ u8 ring_id;
+ u8 num_users;
+ u32 flags; /* %HTT_PPDU_STATS_CMN_FLAGS_ */
+ u32 chain_mask;
+ u32 fes_duration_us; /* frame exchange sequence */
+ u32 ppdu_sch_eval_start_tstmp_us;
+ u32 ppdu_sch_end_tstmp_us;
+ u32 ppdu_start_tstmp_us;
+ /* BIT [15 : 0] - phy mode (WLAN_PHY_MODE) with which ppdu was transmitted
+ * BIT [31 : 16] - bandwidth (in MHz) with which ppdu was transmitted
+ */
+ u16 phy_mode;
+ u16 bw_mhz;
+} __packed;
+
+enum htt_ppdu_stats_gi {
+ HTT_PPDU_STATS_SGI_0_8_US,
+ HTT_PPDU_STATS_SGI_0_4_US,
+ HTT_PPDU_STATS_SGI_1_6_US,
+ HTT_PPDU_STATS_SGI_3_2_US,
+};
+
+#define HTT_PPDU_STATS_USER_RATE_INFO0_USER_POS_M GENMASK(3, 0)
+#define HTT_PPDU_STATS_USER_RATE_INFO0_MU_GROUP_ID_M GENMASK(11, 4)
+
+#define HTT_PPDU_STATS_USER_RATE_INFO1_RESP_TYPE_VALD_M BIT(0)
+#define HTT_PPDU_STATS_USER_RATE_INFO1_PPDU_TYPE_M GENMASK(5, 1)
+
+#define HTT_PPDU_STATS_USER_RATE_FLAGS_LTF_SIZE_M GENMASK(1, 0)
+#define HTT_PPDU_STATS_USER_RATE_FLAGS_STBC_M BIT(2)
+#define HTT_PPDU_STATS_USER_RATE_FLAGS_HE_RE_M BIT(3)
+#define HTT_PPDU_STATS_USER_RATE_FLAGS_TXBF_M GENMASK(7, 4)
+#define HTT_PPDU_STATS_USER_RATE_FLAGS_BW_M GENMASK(11, 8)
+#define HTT_PPDU_STATS_USER_RATE_FLAGS_NSS_M GENMASK(15, 12)
+#define HTT_PPDU_STATS_USER_RATE_FLAGS_MCS_M GENMASK(19, 16)
+#define HTT_PPDU_STATS_USER_RATE_FLAGS_PREAMBLE_M GENMASK(23, 20)
+#define HTT_PPDU_STATS_USER_RATE_FLAGS_GI_M GENMASK(27, 24)
+#define HTT_PPDU_STATS_USER_RATE_FLAGS_DCM_M BIT(28)
+#define HTT_PPDU_STATS_USER_RATE_FLAGS_LDPC_M BIT(29)
+
+#define HTT_USR_RATE_PREAMBLE(_val) \
+ FIELD_GET(HTT_PPDU_STATS_USER_RATE_FLAGS_PREAMBLE_M, _val)
+#define HTT_USR_RATE_BW(_val) \
+ FIELD_GET(HTT_PPDU_STATS_USER_RATE_FLAGS_BW_M, _val)
+#define HTT_USR_RATE_NSS(_val) \
+ FIELD_GET(HTT_PPDU_STATS_USER_RATE_FLAGS_NSS_M, _val)
+#define HTT_USR_RATE_MCS(_val) \
+ FIELD_GET(HTT_PPDU_STATS_USER_RATE_FLAGS_MCS_M, _val)
+#define HTT_USR_RATE_GI(_val) \
+ FIELD_GET(HTT_PPDU_STATS_USER_RATE_FLAGS_GI_M, _val)
+#define HTT_USR_RATE_DCM(_val) \
+ FIELD_GET(HTT_PPDU_STATS_USER_RATE_FLAGS_DCM_M, _val)
+
+#define HTT_PPDU_STATS_USER_RATE_RESP_FLAGS_LTF_SIZE_M GENMASK(1, 0)
+#define HTT_PPDU_STATS_USER_RATE_RESP_FLAGS_STBC_M BIT(2)
+#define HTT_PPDU_STATS_USER_RATE_RESP_FLAGS_HE_RE_M BIT(3)
+#define HTT_PPDU_STATS_USER_RATE_RESP_FLAGS_TXBF_M GENMASK(7, 4)
+#define HTT_PPDU_STATS_USER_RATE_RESP_FLAGS_BW_M GENMASK(11, 8)
+#define HTT_PPDU_STATS_USER_RATE_RESP_FLAGS_NSS_M GENMASK(15, 12)
+#define HTT_PPDU_STATS_USER_RATE_RESP_FLAGS_MCS_M GENMASK(19, 16)
+#define HTT_PPDU_STATS_USER_RATE_RESP_FLAGS_PREAMBLE_M GENMASK(23, 20)
+#define HTT_PPDU_STATS_USER_RATE_RESP_FLAGS_GI_M GENMASK(27, 24)
+#define HTT_PPDU_STATS_USER_RATE_RESP_FLAGS_DCM_M BIT(28)
+#define HTT_PPDU_STATS_USER_RATE_RESP_FLAGS_LDPC_M BIT(29)
+
+struct htt_ppdu_stats_user_rate {
+ u8 tid_num;
+ u8 reserved0;
+ u16 sw_peer_id;
+ u32 info0; /* %HTT_PPDU_STATS_USER_RATE_INFO0_*/
+ u16 ru_end;
+ u16 ru_start;
+ u16 resp_ru_end;
+ u16 resp_ru_start;
+ u32 info1; /* %HTT_PPDU_STATS_USER_RATE_INFO1_ */
+ u32 rate_flags; /* %HTT_PPDU_STATS_USER_RATE_FLAGS_ */
+ /* Note: resp_rate_flags is only valid if resp_type is UL */
+ u32 resp_rate_flags; /* %HTT_PPDU_STATS_USER_RATE_RESP_FLAGS_ */
+} __packed;
+
+#define HTT_PPDU_STATS_TX_INFO_FLAGS_RATECODE_M GENMASK(7, 0)
+#define HTT_PPDU_STATS_TX_INFO_FLAGS_IS_AMPDU_M BIT(8)
+#define HTT_PPDU_STATS_TX_INFO_FLAGS_BA_ACK_FAILED_M GENMASK(10, 9)
+#define HTT_PPDU_STATS_TX_INFO_FLAGS_BW_M GENMASK(13, 11)
+#define HTT_PPDU_STATS_TX_INFO_FLAGS_SGI_M BIT(14)
+#define HTT_PPDU_STATS_TX_INFO_FLAGS_PEERID_M GENMASK(31, 16)
+
+#define HTT_TX_INFO_IS_AMSDU(_flags) \
+ FIELD_GET(HTT_PPDU_STATS_TX_INFO_FLAGS_IS_AMPDU_M, _flags)
+#define HTT_TX_INFO_BA_ACK_FAILED(_flags) \
+ FIELD_GET(HTT_PPDU_STATS_TX_INFO_FLAGS_BA_ACK_FAILED_M, _flags)
+#define HTT_TX_INFO_RATECODE(_flags) \
+ FIELD_GET(HTT_PPDU_STATS_TX_INFO_FLAGS_RATECODE_M, _flags)
+#define HTT_TX_INFO_PEERID(_flags) \
+ FIELD_GET(HTT_PPDU_STATS_TX_INFO_FLAGS_PEERID_M, _flags)
+
+struct htt_tx_ppdu_stats_info {
+ struct htt_tlv tlv_hdr;
+ u32 tx_success_bytes;
+ u32 tx_retry_bytes;
+ u32 tx_failed_bytes;
+ u32 flags; /* %HTT_PPDU_STATS_TX_INFO_FLAGS_ */
+ u16 tx_success_msdus;
+ u16 tx_retry_msdus;
+ u16 tx_failed_msdus;
+ u16 tx_duration; /* in units of us */
+} __packed;
+
+enum htt_ppdu_stats_usr_compln_status {
+ HTT_PPDU_STATS_USER_STATUS_OK,
+ HTT_PPDU_STATS_USER_STATUS_FILTERED,
+ HTT_PPDU_STATS_USER_STATUS_RESP_TIMEOUT,
+ HTT_PPDU_STATS_USER_STATUS_RESP_MISMATCH,
+ HTT_PPDU_STATS_USER_STATUS_ABORT,
+};
+
+#define HTT_PPDU_STATS_USR_CMPLTN_CMN_FLAGS_LONG_RETRY_M GENMASK(3, 0)
+#define HTT_PPDU_STATS_USR_CMPLTN_CMN_FLAGS_SHORT_RETRY_M GENMASK(7, 4)
+#define HTT_PPDU_STATS_USR_CMPLTN_CMN_FLAGS_IS_AMPDU_M BIT(8)
+#define HTT_PPDU_STATS_USR_CMPLTN_CMN_FLAGS_RESP_TYPE_M GENMASK(12, 9)
+
+#define HTT_USR_CMPLTN_IS_AMPDU(_val) \
+ FIELD_GET(HTT_PPDU_STATS_USR_CMPLTN_CMN_FLAGS_IS_AMPDU_M, _val)
+#define HTT_USR_CMPLTN_LONG_RETRY(_val) \
+ FIELD_GET(HTT_PPDU_STATS_USR_CMPLTN_CMN_FLAGS_LONG_RETRY_M, _val)
+#define HTT_USR_CMPLTN_SHORT_RETRY(_val) \
+ FIELD_GET(HTT_PPDU_STATS_USR_CMPLTN_CMN_FLAGS_SHORT_RETRY_M, _val)
+
+struct htt_ppdu_stats_usr_cmpltn_cmn {
+ u8 status;
+ u8 tid_num;
+ u16 sw_peer_id;
+ /* RSSI value of last ack packet (units = dB above noise floor) */
+ u32 ack_rssi;
+ u16 mpdu_tried;
+ u16 mpdu_success;
+ u32 flags; /* %HTT_PPDU_STATS_USR_CMPLTN_CMN_FLAGS_ */
+} __packed;
+
+#define HTT_PPDU_STATS_ACK_BA_INFO_NUM_MPDU_M GENMASK(8, 0)
+#define HTT_PPDU_STATS_ACK_BA_INFO_NUM_MSDU_M GENMASK(24, 9)
+#define HTT_PPDU_STATS_ACK_BA_INFO_TID_NUM GENMASK(31, 25)
+
+#define HTT_PPDU_STATS_NON_QOS_TID 16
+
+struct htt_ppdu_stats_usr_cmpltn_ack_ba_status {
+ u32 ppdu_id;
+ u16 sw_peer_id;
+ u16 reserved0;
+ u32 info; /* %HTT_PPDU_STATS_ACK_BA_INFO_ */
+ u16 current_seq;
+ u16 start_seq;
+ u32 success_bytes;
+} __packed;
+
+struct htt_ppdu_stats_usr_cmn_array {
+ struct htt_tlv tlv_hdr;
+ u32 num_ppdu_stats;
+ /* tx_ppdu_info is a variable-length array of struct
+ * htt_tx_ppdu_stats_info elements; its total length is
+ * num_ppdu_stats * sizeof(struct htt_tx_ppdu_stats_info).
+ */
+ struct htt_tx_ppdu_stats_info tx_ppdu_info[];
+} __packed;
+
+struct htt_ppdu_user_stats {
+ u16 peer_id;
+ u32 tlv_flags;
+ bool is_valid_peer_id;
+ struct htt_ppdu_stats_user_rate rate;
+ struct htt_ppdu_stats_usr_cmpltn_cmn cmpltn_cmn;
+ struct htt_ppdu_stats_usr_cmpltn_ack_ba_status ack_ba;
+};
+
+#define HTT_PPDU_STATS_MAX_USERS 8
+#define HTT_PPDU_DESC_MAX_DEPTH 16
+
+struct htt_ppdu_stats {
+ struct htt_ppdu_stats_common common;
+ struct htt_ppdu_user_stats user_stats[HTT_PPDU_STATS_MAX_USERS];
+};
+
+struct htt_ppdu_stats_info {
+ u32 ppdu_id;
+ struct htt_ppdu_stats ppdu_stats;
+ struct list_head list;
+};
+
+/* @brief target -> host packet log message
+ *
+ * @details
+ * The following field definitions describe the format of the packet log
+ * message sent from the target to the host.
+ * The message consists of a 4-octet header, followed by a variable
+ * number of 32-bit values.
+ *
+ * |31 16|15 12|11 10|9 8|7 0|
+ * |------------------------------------------------------------------|
+ * | payload_size | rsvd |pdev_id|mac_id| msg type |
+ * |------------------------------------------------------------------|
+ * | payload |
+ * |------------------------------------------------------------------|
+ * - MSG_TYPE
+ * Bits 7:0
+ * Purpose: identifies this as a pktlog message
+ * Value: HTT_T2H_MSG_TYPE_PKTLOG
+ * - mac_id
+ * Bits 9:8
+ * Purpose: identifies which MAC/PHY instance generated this pktlog info
+ * Value: 0-3
+ * - pdev_id
+ * Bits 11:10
+ * Purpose: pdev_id
+ * Value: 0-3
+ * 0 (for rings at SOC level),
+ * 1/2/3 PDEV -> 0/1/2
+ * - payload_size
+ * Bits 31:16
+ * Purpose: explicitly specify the payload size
+ * Value: payload size in bytes (payload size is a multiple of 4 bytes)
+ */
+struct htt_pktlog_msg {
+ u32 hdr;
+ u8 payload[];
+};
+
+/* @brief host -> target FW extended statistics retrieve
+ *
+ * @details
+ * The following field definitions describe the format of the HTT host
+ * to target FW extended stats retrieve message.
+ * The message specifies the type of stats the host wants to retrieve.
+ *
+ * |31 24|23 16|15 8|7 0|
+ * |-----------------------------------------------------------|
+ * | reserved | stats type | pdev_mask | msg type |
+ * |-----------------------------------------------------------|
+ * | config param [0] |
+ * |-----------------------------------------------------------|
+ * | config param [1] |
+ * |-----------------------------------------------------------|
+ * | config param [2] |
+ * |-----------------------------------------------------------|
+ * | config param [3] |
+ * |-----------------------------------------------------------|
+ * | reserved |
+ * |-----------------------------------------------------------|
+ * | cookie LSBs |
+ * |-----------------------------------------------------------|
+ * | cookie MSBs |
+ * |-----------------------------------------------------------|
+ * Header fields:
+ * - MSG_TYPE
+ * Bits 7:0
+ * Purpose: identifies this as an extended stats upload request message
+ * Value: 0x10
+ * - PDEV_MASK
+ * Bits 15:8
+ * Purpose: identifies the mask of PDEVs to retrieve stats from
+ * Value: This is an overloaded field; refer to the usage and
+ * interpretation of PDEV in the interface document.
+ * Bit 8 : Reserved for SOC stats
+ * Bits 9 - 15 : Indicates PDEV_MASK in DBDC
+ * Indicates MACID_MASK in DBS
+ * - STATS_TYPE
+ * Bits 23:16
+ * Purpose: identifies which FW statistics to upload
+ * Value: Defined by htt_dbg_ext_stats_type (see htt_stats.h)
+ * - Reserved
+ * Bits 31:24
+ * - CONFIG_PARAM [0]
+ * Bits 31:0
+ * Purpose: give an opaque configuration value to the specified stats type
+ * Value: stats-type specific configuration value
+ * Refer to htt_stats.h for interpretation for each stats sub_type
+ * - CONFIG_PARAM [1]
+ * Bits 31:0
+ * Purpose: give an opaque configuration value to the specified stats type
+ * Value: stats-type specific configuration value
+ * Refer to htt_stats.h for interpretation for each stats sub_type
+ * - CONFIG_PARAM [2]
+ * Bits 31:0
+ * Purpose: give an opaque configuration value to the specified stats type
+ * Value: stats-type specific configuration value
+ * Refer to htt_stats.h for interpretation for each stats sub_type
+ * - CONFIG_PARAM [3]
+ * Bits 31:0
+ * Purpose: give an opaque configuration value to the specified stats type
+ * Value: stats-type specific configuration value
+ * Refer to htt_stats.h for interpretation for each stats sub_type
+ * - Reserved [31:0] for future use.
+ * - COOKIE_LSBS
+ * Bits 31:0
+ * Purpose: Provide a mechanism to match a target->host stats confirmation
+ * message with its preceding host->target stats request message.
+ * Value: LSBs of the opaque cookie specified by the host-side requestor
+ * - COOKIE_MSBS
+ * Bits 31:0
+ * Purpose: Provide a mechanism to match a target->host stats confirmation
+ * message with its preceding host->target stats request message.
+ * Value: MSBs of the opaque cookie specified by the host-side requestor
+ */
+
+struct htt_ext_stats_cfg_hdr {
+ u8 msg_type;
+ u8 pdev_mask;
+ u8 stats_type;
+ u8 reserved;
+} __packed;
+
+struct htt_ext_stats_cfg_cmd {
+ struct htt_ext_stats_cfg_hdr hdr;
+ u32 cfg_param0;
+ u32 cfg_param1;
+ u32 cfg_param2;
+ u32 cfg_param3;
+ u32 reserved;
+ u32 cookie_lsb;
+ u32 cookie_msb;
+} __packed;
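+
+/* Illustrative fill (an assumed sketch) matching the message layout
+ * documented above; 0x10 is the ext stats request message type, and
+ * pdev_idx, type and cookie are placeholder values:
+ *
+ *	cmd->hdr.msg_type = 0x10;
+ *	cmd->hdr.pdev_mask = 1 << pdev_idx;
+ *	cmd->hdr.stats_type = type;
+ *	cmd->cookie_lsb = lower_32_bits(cookie);
+ *	cmd->cookie_msb = upper_32_bits(cookie);
+ */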
+
+/* htt stats config default params */
+#define HTT_STAT_DEFAULT_RESET_START_OFFSET 0
+#define HTT_STAT_DEFAULT_CFG0_ALL_HWQS 0xffffffff
+#define HTT_STAT_DEFAULT_CFG0_ALL_TXQS 0xffffffff
+#define HTT_STAT_DEFAULT_CFG0_ALL_CMDQS 0xffff
+#define HTT_STAT_DEFAULT_CFG0_ALL_RINGS 0xffff
+#define HTT_STAT_DEFAULT_CFG0_ACTIVE_PEERS 0xff
+#define HTT_STAT_DEFAULT_CFG0_CCA_CUMULATIVE 0x00
+#define HTT_STAT_DEFAULT_CFG0_ACTIVE_VDEVS 0x00
+
+/* HTT_DBG_EXT_STATS_PEER_INFO
+ * PARAMS:
+ * @config_param0:
+ * [Bit0] - [0] for sw_peer_id, [1] for mac_addr based request
+ * [Bit15 : Bit 1] htt_peer_stats_req_mode_t
+ * [Bit31 : Bit16] sw_peer_id
+ * @config_param1:
+ * peer_stats_req_type_mask:32 (enum htt_peer_stats_tlv_enum)
+ * bit 0 - htt_peer_stats_cmn_tlv
+ * bit 1 - htt_peer_details_tlv
+ * bit 2 - htt_tx_peer_rate_stats_tlv
+ * bit 3 - htt_rx_peer_rate_stats_tlv
+ * bit 4 - htt_tx_tid_stats_tlv/htt_tx_tid_stats_v1_tlv
+ * bit 5 - htt_rx_tid_stats_tlv
+ * bit 6 - htt_msdu_flow_stats_tlv
+ * @config_param2: [Bit31 : Bit0] mac_addr31to0
+ * @config_param3: [Bit15 : Bit0] mac_addr47to32
+ * [Bit31 : Bit16] reserved
+ */
+#define HTT_STAT_PEER_INFO_MAC_ADDR BIT(0)
+#define HTT_STAT_DEFAULT_PEER_REQ_TYPE 0x7f
+
+/* Used to set different configs for the specified stats type. */
+struct htt_ext_stats_cfg_params {
+ u32 cfg0;
+ u32 cfg1;
+ u32 cfg2;
+ u32 cfg3;
+};
+
+/* @brief target -> host extended statistics upload
+ *
+ * @details
+ * The following field definitions describe the format of the HTT target
+ * to host stats upload confirmation message.
+ * The message contains a cookie echoed from the HTT host->target stats
+ * upload request, which identifies which request the confirmation is
+ * for. A single stats request can span multiple HTT stats indications
+ * due to the HTT message size limitation, so every HTT ext stats
+ * indication carries its stats as tag-length-value information elements.
+ * The tag-length header for each HTT stats IND message also includes a
+ * status field, to indicate whether the request for the stat type in
+ * question was fully met, partially met, unable to be met, or invalid
+ * (if the stat type in question is disabled in the target).
+ * A Done bit set to 1 indicates the end of the stats info elements.
+ *
+ *
+ * |31 16|15 12|11|10 8|7 5|4 0|
+ * |--------------------------------------------------------------|
+ * | reserved | msg type |
+ * |--------------------------------------------------------------|
+ * | cookie LSBs |
+ * |--------------------------------------------------------------|
+ * | cookie MSBs |
+ * |--------------------------------------------------------------|
+ * | stats entry length | rsvd | D| S | stat type |
+ * |--------------------------------------------------------------|
+ * | type-specific stats info |
+ * | (see htt_stats.h) |
+ * |--------------------------------------------------------------|
+ * Header fields:
+ * - MSG_TYPE
+ * Bits 7:0
+ * Purpose: Identifies this as an extended statistics upload confirmation
+ * message.
+ * Value: 0x1c
+ * - COOKIE_LSBS
+ * Bits 31:0
+ * Purpose: Provide a mechanism to match a target->host stats confirmation
+ * message with its preceding host->target stats request message.
+ * Value: LSBs of the opaque cookie specified by the host-side requestor
+ * - COOKIE_MSBS
+ * Bits 31:0
+ * Purpose: Provide a mechanism to match a target->host stats confirmation
+ * message with its preceding host->target stats request message.
+ * Value: MSBs of the opaque cookie specified by the host-side requestor
+ *
+ * Stats Information Element tag-length header fields:
+ * - STAT_TYPE
+ * Bits 7:0
+ * Purpose: identifies the type of statistics info held in the
+ * following information element
+ * Value: htt_dbg_ext_stats_type
+ * - STATUS
+ * Bits 10:8
+ * Purpose: indicate whether the requested stats are present
+ * Value: htt_dbg_ext_stats_status
+ * - DONE
+ * Bit 11
+ * Purpose:
+ * Indicates the completion of the stats entry; this will be the last
+ * stats conf HTT segment for the requested stats type.
+ * Value:
+ * 0 -> the stats retrieval is ongoing
+ * 1 -> the stats retrieval is complete
+ * - LENGTH
+ * Bits 31:16
+ * Purpose: indicate the stats information size
+ * Value: This field specifies the number of bytes of stats information
+ * that follows the element tag-length header.
+ * It is expected but not required that this length is a multiple of
+ * 4 bytes.
+ */
+
+#define HTT_T2H_EXT_STATS_INFO1_DONE BIT(11)
+#define HTT_T2H_EXT_STATS_INFO1_LENGTH GENMASK(31, 16)
+
+struct ath11k_htt_extd_stats_msg {
+ u32 info0;
+ u64 cookie;
+ u32 info1;
+ u8 data[];
+} __packed;
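+
+/* Illustrative decode (an assumed sketch): a handler would typically
+ * consume TLV segments from data[] until the Done bit is set:
+ *
+ *	done = FIELD_GET(HTT_T2H_EXT_STATS_INFO1_DONE, msg->info1);
+ *	len = FIELD_GET(HTT_T2H_EXT_STATS_INFO1_LENGTH, msg->info1);
+ */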
+
+#define HTT_MAC_ADDR_L32_0 GENMASK(7, 0)
+#define HTT_MAC_ADDR_L32_1 GENMASK(15, 8)
+#define HTT_MAC_ADDR_L32_2 GENMASK(23, 16)
+#define HTT_MAC_ADDR_L32_3 GENMASK(31, 24)
+#define HTT_MAC_ADDR_H16_0 GENMASK(7, 0)
+#define HTT_MAC_ADDR_H16_1 GENMASK(15, 8)
+
+struct htt_mac_addr {
+ u32 mac_addr_l32;
+ u32 mac_addr_h16;
+};
+
+static inline void ath11k_dp_get_mac_addr(u32 addr_l32, u16 addr_h16, u8 *addr)
+{
+ if (IS_ENABLED(CONFIG_CPU_BIG_ENDIAN)) {
+ addr_l32 = swab32(addr_l32);
+ addr_h16 = swab16(addr_h16);
+ }
+
+ memcpy(addr, &addr_l32, 4);
+ memcpy(addr + 4, &addr_h16, ETH_ALEN - 4);
+}
+
+int ath11k_dp_service_srng(struct ath11k_base *ab,
+ struct ath11k_ext_irq_grp *irq_grp,
+ int budget);
+int ath11k_dp_htt_connect(struct ath11k_dp *dp);
+void ath11k_dp_vdev_tx_attach(struct ath11k *ar, struct ath11k_vif *arvif);
+void ath11k_dp_free(struct ath11k_base *ab);
+int ath11k_dp_alloc(struct ath11k_base *ab);
+int ath11k_dp_pdev_alloc(struct ath11k_base *ab);
+void ath11k_dp_pdev_pre_alloc(struct ath11k_base *ab);
+void ath11k_dp_pdev_free(struct ath11k_base *ab);
+int ath11k_dp_tx_htt_srng_setup(struct ath11k_base *ab, u32 ring_id,
+ int mac_id, enum hal_ring_type ring_type);
+int ath11k_dp_peer_setup(struct ath11k *ar, int vdev_id, const u8 *addr);
+void ath11k_dp_peer_cleanup(struct ath11k *ar, int vdev_id, const u8 *addr);
+void ath11k_dp_srng_cleanup(struct ath11k_base *ab, struct dp_srng *ring);
+int ath11k_dp_srng_setup(struct ath11k_base *ab, struct dp_srng *ring,
+ enum hal_ring_type type, int ring_num,
+ int mac_id, int num_entries);
+void ath11k_dp_link_desc_cleanup(struct ath11k_base *ab,
+ struct dp_link_desc_bank *desc_bank,
+ u32 ring_type, struct dp_srng *ring);
+int ath11k_dp_link_desc_setup(struct ath11k_base *ab,
+ struct dp_link_desc_bank *link_desc_banks,
+ u32 ring_type, struct hal_srng *srng,
+ u32 n_link_desc);
+void ath11k_dp_shadow_start_timer(struct ath11k_base *ab,
+ struct hal_srng *srng,
+ struct ath11k_hp_update_timer *update_timer);
+void ath11k_dp_shadow_stop_timer(struct ath11k_base *ab,
+ struct ath11k_hp_update_timer *update_timer);
+void ath11k_dp_shadow_init_timer(struct ath11k_base *ab,
+ struct ath11k_hp_update_timer *update_timer,
+ u32 interval, u32 ring_id);
+void ath11k_dp_stop_shadow_timers(struct ath11k_base *ab);
+
+#endif
diff --git a/drivers/net/wireless/ath/ath11k/dp_rx.c b/drivers/net/wireless/ath/ath11k/dp_rx.c
new file mode 100644
index 0000000000..a993e74bba
--- /dev/null
+++ b/drivers/net/wireless/ath/ath11k/dp_rx.c
@@ -0,0 +1,5745 @@
+// SPDX-License-Identifier: BSD-3-Clause-Clear
+/*
+ * Copyright (c) 2018-2019 The Linux Foundation. All rights reserved.
+ */
+
+#include <linux/ieee80211.h>
+#include <linux/kernel.h>
+#include <linux/skbuff.h>
+#include <crypto/hash.h>
+#include "core.h"
+#include "debug.h"
+#include "debugfs_htt_stats.h"
+#include "debugfs_sta.h"
+#include "hal_desc.h"
+#include "hw.h"
+#include "dp_rx.h"
+#include "hal_rx.h"
+#include "dp_tx.h"
+#include "peer.h"
+
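+/* Note: despite the _MS suffix, (2 * HZ) below is two seconds expressed
+ * in jiffies, ready for jiffies-based timer arithmetic.
+ */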
+#define ATH11K_DP_RX_FRAGMENT_TIMEOUT_MS (2 * HZ)
+
+static inline
+u8 *ath11k_dp_rx_h_80211_hdr(struct ath11k_base *ab, struct hal_rx_desc *desc)
+{
+ return ab->hw_params.hw_ops->rx_desc_get_hdr_status(desc);
+}
+
+static inline
+enum hal_encrypt_type ath11k_dp_rx_h_mpdu_start_enctype(struct ath11k_base *ab,
+ struct hal_rx_desc *desc)
+{
+ if (!ab->hw_params.hw_ops->rx_desc_encrypt_valid(desc))
+ return HAL_ENCRYPT_TYPE_OPEN;
+
+ return ab->hw_params.hw_ops->rx_desc_get_encrypt_type(desc);
+}
+
+static inline u8 ath11k_dp_rx_h_msdu_start_decap_type(struct ath11k_base *ab,
+ struct hal_rx_desc *desc)
+{
+ return ab->hw_params.hw_ops->rx_desc_get_decap_type(desc);
+}
+
+static inline
+bool ath11k_dp_rx_h_msdu_start_ldpc_support(struct ath11k_base *ab,
+ struct hal_rx_desc *desc)
+{
+ return ab->hw_params.hw_ops->rx_desc_get_ldpc_support(desc);
+}
+
+static inline
+u8 ath11k_dp_rx_h_msdu_start_mesh_ctl_present(struct ath11k_base *ab,
+ struct hal_rx_desc *desc)
+{
+ return ab->hw_params.hw_ops->rx_desc_get_mesh_ctl(desc);
+}
+
+static inline
+bool ath11k_dp_rx_h_mpdu_start_seq_ctrl_valid(struct ath11k_base *ab,
+ struct hal_rx_desc *desc)
+{
+ return ab->hw_params.hw_ops->rx_desc_get_mpdu_seq_ctl_vld(desc);
+}
+
+static inline bool ath11k_dp_rx_h_mpdu_start_fc_valid(struct ath11k_base *ab,
+ struct hal_rx_desc *desc)
+{
+ return ab->hw_params.hw_ops->rx_desc_get_mpdu_fc_valid(desc);
+}
+
+static inline bool ath11k_dp_rx_h_mpdu_start_more_frags(struct ath11k_base *ab,
+ struct sk_buff *skb)
+{
+ struct ieee80211_hdr *hdr;
+
+ hdr = (struct ieee80211_hdr *)(skb->data + ab->hw_params.hal_desc_sz);
+ return ieee80211_has_morefrags(hdr->frame_control);
+}
+
+static inline u16 ath11k_dp_rx_h_mpdu_start_frag_no(struct ath11k_base *ab,
+ struct sk_buff *skb)
+{
+ struct ieee80211_hdr *hdr;
+
+ hdr = (struct ieee80211_hdr *)(skb->data + ab->hw_params.hal_desc_sz);
+ return le16_to_cpu(hdr->seq_ctrl) & IEEE80211_SCTL_FRAG;
+}
+
+static inline u16 ath11k_dp_rx_h_mpdu_start_seq_no(struct ath11k_base *ab,
+ struct hal_rx_desc *desc)
+{
+ return ab->hw_params.hw_ops->rx_desc_get_mpdu_start_seq_no(desc);
+}
+
+static inline void *ath11k_dp_rx_get_attention(struct ath11k_base *ab,
+ struct hal_rx_desc *desc)
+{
+ return ab->hw_params.hw_ops->rx_desc_get_attention(desc);
+}
+
+static inline bool ath11k_dp_rx_h_attn_msdu_done(struct rx_attention *attn)
+{
+ return !!FIELD_GET(RX_ATTENTION_INFO2_MSDU_DONE,
+ __le32_to_cpu(attn->info2));
+}
+
+static inline bool ath11k_dp_rx_h_attn_l4_cksum_fail(struct rx_attention *attn)
+{
+ return !!FIELD_GET(RX_ATTENTION_INFO1_TCP_UDP_CKSUM_FAIL,
+ __le32_to_cpu(attn->info1));
+}
+
+static inline bool ath11k_dp_rx_h_attn_ip_cksum_fail(struct rx_attention *attn)
+{
+ return !!FIELD_GET(RX_ATTENTION_INFO1_IP_CKSUM_FAIL,
+ __le32_to_cpu(attn->info1));
+}
+
+static inline bool ath11k_dp_rx_h_attn_is_decrypted(struct rx_attention *attn)
+{
+ return (FIELD_GET(RX_ATTENTION_INFO2_DCRYPT_STATUS_CODE,
+ __le32_to_cpu(attn->info2)) ==
+ RX_DESC_DECRYPT_STATUS_CODE_OK);
+}
+
+static u32 ath11k_dp_rx_h_attn_mpdu_err(struct rx_attention *attn)
+{
+ u32 info = __le32_to_cpu(attn->info1);
+ u32 errmap = 0;
+
+ if (info & RX_ATTENTION_INFO1_FCS_ERR)
+ errmap |= DP_RX_MPDU_ERR_FCS;
+
+ if (info & RX_ATTENTION_INFO1_DECRYPT_ERR)
+ errmap |= DP_RX_MPDU_ERR_DECRYPT;
+
+ if (info & RX_ATTENTION_INFO1_TKIP_MIC_ERR)
+ errmap |= DP_RX_MPDU_ERR_TKIP_MIC;
+
+ if (info & RX_ATTENTION_INFO1_A_MSDU_ERROR)
+ errmap |= DP_RX_MPDU_ERR_AMSDU_ERR;
+
+ if (info & RX_ATTENTION_INFO1_OVERFLOW_ERR)
+ errmap |= DP_RX_MPDU_ERR_OVERFLOW;
+
+ if (info & RX_ATTENTION_INFO1_MSDU_LEN_ERR)
+ errmap |= DP_RX_MPDU_ERR_MSDU_LEN;
+
+ if (info & RX_ATTENTION_INFO1_MPDU_LEN_ERR)
+ errmap |= DP_RX_MPDU_ERR_MPDU_LEN;
+
+ return errmap;
+}
+
+static bool ath11k_dp_rx_h_attn_msdu_len_err(struct ath11k_base *ab,
+ struct hal_rx_desc *desc)
+{
+ struct rx_attention *rx_attention;
+ u32 errmap;
+
+ rx_attention = ath11k_dp_rx_get_attention(ab, desc);
+ errmap = ath11k_dp_rx_h_attn_mpdu_err(rx_attention);
+
+ return errmap & DP_RX_MPDU_ERR_MSDU_LEN;
+}
+
+static inline u16 ath11k_dp_rx_h_msdu_start_msdu_len(struct ath11k_base *ab,
+ struct hal_rx_desc *desc)
+{
+ return ab->hw_params.hw_ops->rx_desc_get_msdu_len(desc);
+}
+
+static inline u8 ath11k_dp_rx_h_msdu_start_sgi(struct ath11k_base *ab,
+ struct hal_rx_desc *desc)
+{
+ return ab->hw_params.hw_ops->rx_desc_get_msdu_sgi(desc);
+}
+
+static inline u8 ath11k_dp_rx_h_msdu_start_rate_mcs(struct ath11k_base *ab,
+ struct hal_rx_desc *desc)
+{
+ return ab->hw_params.hw_ops->rx_desc_get_msdu_rate_mcs(desc);
+}
+
+static inline u8 ath11k_dp_rx_h_msdu_start_rx_bw(struct ath11k_base *ab,
+ struct hal_rx_desc *desc)
+{
+ return ab->hw_params.hw_ops->rx_desc_get_msdu_rx_bw(desc);
+}
+
+static inline u32 ath11k_dp_rx_h_msdu_start_freq(struct ath11k_base *ab,
+ struct hal_rx_desc *desc)
+{
+ return ab->hw_params.hw_ops->rx_desc_get_msdu_freq(desc);
+}
+
+static inline u8 ath11k_dp_rx_h_msdu_start_pkt_type(struct ath11k_base *ab,
+ struct hal_rx_desc *desc)
+{
+ return ab->hw_params.hw_ops->rx_desc_get_msdu_pkt_type(desc);
+}
+
+static inline u8 ath11k_dp_rx_h_msdu_start_nss(struct ath11k_base *ab,
+ struct hal_rx_desc *desc)
+{
+ return hweight8(ab->hw_params.hw_ops->rx_desc_get_msdu_nss(desc));
+}
+
+static inline u8 ath11k_dp_rx_h_mpdu_start_tid(struct ath11k_base *ab,
+ struct hal_rx_desc *desc)
+{
+ return ab->hw_params.hw_ops->rx_desc_get_mpdu_tid(desc);
+}
+
+static inline u16 ath11k_dp_rx_h_mpdu_start_peer_id(struct ath11k_base *ab,
+ struct hal_rx_desc *desc)
+{
+ return ab->hw_params.hw_ops->rx_desc_get_mpdu_peer_id(desc);
+}
+
+static inline u8 ath11k_dp_rx_h_msdu_end_l3pad(struct ath11k_base *ab,
+ struct hal_rx_desc *desc)
+{
+ return ab->hw_params.hw_ops->rx_desc_get_l3_pad_bytes(desc);
+}
+
+static inline bool ath11k_dp_rx_h_msdu_end_first_msdu(struct ath11k_base *ab,
+ struct hal_rx_desc *desc)
+{
+ return ab->hw_params.hw_ops->rx_desc_get_first_msdu(desc);
+}
+
+static bool ath11k_dp_rx_h_msdu_end_last_msdu(struct ath11k_base *ab,
+ struct hal_rx_desc *desc)
+{
+ return ab->hw_params.hw_ops->rx_desc_get_last_msdu(desc);
+}
+
+static void ath11k_dp_rx_desc_end_tlv_copy(struct ath11k_base *ab,
+ struct hal_rx_desc *fdesc,
+ struct hal_rx_desc *ldesc)
+{
+ ab->hw_params.hw_ops->rx_desc_copy_attn_end_tlv(fdesc, ldesc);
+}
+
+static inline u32 ath11k_dp_rxdesc_get_mpdulen_err(struct rx_attention *attn)
+{
+ return FIELD_GET(RX_ATTENTION_INFO1_MPDU_LEN_ERR,
+ __le32_to_cpu(attn->info1));
+}
+
+static inline u8 *ath11k_dp_rxdesc_get_80211hdr(struct ath11k_base *ab,
+ struct hal_rx_desc *rx_desc)
+{
+ u8 *rx_pkt_hdr;
+
+ rx_pkt_hdr = ab->hw_params.hw_ops->rx_desc_get_msdu_payload(rx_desc);
+
+ return rx_pkt_hdr;
+}
+
+static inline bool ath11k_dp_rxdesc_mpdu_valid(struct ath11k_base *ab,
+ struct hal_rx_desc *rx_desc)
+{
+ u32 tlv_tag;
+
+ tlv_tag = ab->hw_params.hw_ops->rx_desc_get_mpdu_start_tag(rx_desc);
+
+ return tlv_tag == HAL_RX_MPDU_START;
+}
+
+static inline u32 ath11k_dp_rxdesc_get_ppduid(struct ath11k_base *ab,
+ struct hal_rx_desc *rx_desc)
+{
+ return ab->hw_params.hw_ops->rx_desc_get_mpdu_ppdu_id(rx_desc);
+}
+
+static inline void ath11k_dp_rxdesc_set_msdu_len(struct ath11k_base *ab,
+ struct hal_rx_desc *desc,
+ u16 len)
+{
+ ab->hw_params.hw_ops->rx_desc_set_msdu_len(desc, len);
+}
+
+static bool ath11k_dp_rx_h_attn_is_mcbc(struct ath11k_base *ab,
+ struct hal_rx_desc *desc)
+{
+ struct rx_attention *attn = ath11k_dp_rx_get_attention(ab, desc);
+
+ return ath11k_dp_rx_h_msdu_end_first_msdu(ab, desc) &&
+ (!!FIELD_GET(RX_ATTENTION_INFO1_MCAST_BCAST,
+ __le32_to_cpu(attn->info1)));
+}
+
+static bool ath11k_dp_rxdesc_mac_addr2_valid(struct ath11k_base *ab,
+ struct hal_rx_desc *desc)
+{
+ return ab->hw_params.hw_ops->rx_desc_mac_addr2_valid(desc);
+}
+
+static u8 *ath11k_dp_rxdesc_mpdu_start_addr2(struct ath11k_base *ab,
+ struct hal_rx_desc *desc)
+{
+ return ab->hw_params.hw_ops->rx_desc_mpdu_start_addr2(desc);
+}
+
+static void ath11k_dp_service_mon_ring(struct timer_list *t)
+{
+ struct ath11k_base *ab = from_timer(ab, t, mon_reap_timer);
+ int i;
+
+ for (i = 0; i < ab->hw_params.num_rxmda_per_pdev; i++)
+ ath11k_dp_rx_process_mon_rings(ab, i, NULL, DP_MON_SERVICE_BUDGET);
+
+ mod_timer(&ab->mon_reap_timer, jiffies +
+ msecs_to_jiffies(ATH11K_MON_TIMER_INTERVAL));
+}
+
+static int ath11k_dp_purge_mon_ring(struct ath11k_base *ab)
+{
+ int i, reaped = 0;
+ unsigned long timeout = jiffies + msecs_to_jiffies(DP_MON_PURGE_TIMEOUT_MS);
+
+ do {
+ for (i = 0; i < ab->hw_params.num_rxmda_per_pdev; i++)
+ reaped += ath11k_dp_rx_process_mon_rings(ab, i,
+ NULL,
+ DP_MON_SERVICE_BUDGET);
+
+ /* nothing more to reap */
+ if (reaped < DP_MON_SERVICE_BUDGET)
+ return 0;
+
+ } while (time_before(jiffies, timeout));
+
+ ath11k_warn(ab, "dp mon ring purge timeout");
+
+ return -ETIMEDOUT;
+}
+
+/* Returns number of Rx buffers replenished */
+int ath11k_dp_rxbufs_replenish(struct ath11k_base *ab, int mac_id,
+ struct dp_rxdma_ring *rx_ring,
+ int req_entries,
+ enum hal_rx_buf_return_buf_manager mgr)
+{
+ struct hal_srng *srng;
+ u32 *desc;
+ struct sk_buff *skb;
+ int num_free;
+ int num_remain;
+ int buf_id;
+ u32 cookie;
+ dma_addr_t paddr;
+
+ req_entries = min(req_entries, rx_ring->bufs_max);
+
+ srng = &ab->hal.srng_list[rx_ring->refill_buf_ring.ring_id];
+
+ spin_lock_bh(&srng->lock);
+
+ ath11k_hal_srng_access_begin(ab, srng);
+
+ num_free = ath11k_hal_srng_src_num_free(ab, srng, true);
+ if (!req_entries && (num_free > (rx_ring->bufs_max * 3) / 4))
+ req_entries = num_free;
+
+ req_entries = min(num_free, req_entries);
+ num_remain = req_entries;
+
+ while (num_remain > 0) {
+ skb = dev_alloc_skb(DP_RX_BUFFER_SIZE +
+ DP_RX_BUFFER_ALIGN_SIZE);
+ if (!skb)
+ break;
+
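+ /* Trim any unaligned head so skb->data, and hence the DMA
+ * address programmed into the ring below, is aligned to
+ * DP_RX_BUFFER_ALIGN_SIZE.
+ */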
+ if (!IS_ALIGNED((unsigned long)skb->data,
+ DP_RX_BUFFER_ALIGN_SIZE)) {
+ skb_pull(skb,
+ PTR_ALIGN(skb->data, DP_RX_BUFFER_ALIGN_SIZE) -
+ skb->data);
+ }
+
+ paddr = dma_map_single(ab->dev, skb->data,
+ skb->len + skb_tailroom(skb),
+ DMA_FROM_DEVICE);
+ if (dma_mapping_error(ab->dev, paddr))
+ goto fail_free_skb;
+
+ spin_lock_bh(&rx_ring->idr_lock);
+ buf_id = idr_alloc(&rx_ring->bufs_idr, skb, 1,
+ (rx_ring->bufs_max * 3) + 1, GFP_ATOMIC);
+ spin_unlock_bh(&rx_ring->idr_lock);
+ if (buf_id <= 0)
+ goto fail_dma_unmap;
+
+ desc = ath11k_hal_srng_src_get_next_entry(ab, srng);
+ if (!desc)
+ goto fail_idr_remove;
+
+ ATH11K_SKB_RXCB(skb)->paddr = paddr;
+
+ cookie = FIELD_PREP(DP_RXDMA_BUF_COOKIE_PDEV_ID, mac_id) |
+ FIELD_PREP(DP_RXDMA_BUF_COOKIE_BUF_ID, buf_id);
+
+ num_remain--;
+
+ ath11k_hal_rx_buf_addr_info_set(desc, paddr, cookie, mgr);
+ }
+
+ ath11k_hal_srng_access_end(ab, srng);
+
+ spin_unlock_bh(&srng->lock);
+
+ return req_entries - num_remain;
+
+fail_idr_remove:
+ spin_lock_bh(&rx_ring->idr_lock);
+ idr_remove(&rx_ring->bufs_idr, buf_id);
+ spin_unlock_bh(&rx_ring->idr_lock);
+fail_dma_unmap:
+ dma_unmap_single(ab->dev, paddr, skb->len + skb_tailroom(skb),
+ DMA_FROM_DEVICE);
+fail_free_skb:
+ dev_kfree_skb_any(skb);
+
+ ath11k_hal_srng_access_end(ab, srng);
+
+ spin_unlock_bh(&srng->lock);
+
+ return req_entries - num_remain;
+}
+
+static int ath11k_dp_rxdma_buf_ring_free(struct ath11k *ar,
+ struct dp_rxdma_ring *rx_ring)
+{
+ struct sk_buff *skb;
+ int buf_id;
+
+ spin_lock_bh(&rx_ring->idr_lock);
+ idr_for_each_entry(&rx_ring->bufs_idr, skb, buf_id) {
+ idr_remove(&rx_ring->bufs_idr, buf_id);
+ /* TODO: Understand where the internal driver does this dma_unmap
+ * of the rxdma buffer.
+ */
+ dma_unmap_single(ar->ab->dev, ATH11K_SKB_RXCB(skb)->paddr,
+ skb->len + skb_tailroom(skb), DMA_FROM_DEVICE);
+ dev_kfree_skb_any(skb);
+ }
+
+ idr_destroy(&rx_ring->bufs_idr);
+ spin_unlock_bh(&rx_ring->idr_lock);
+
+ return 0;
+}
+
+static int ath11k_dp_rxdma_pdev_buf_free(struct ath11k *ar)
+{
+ struct ath11k_pdev_dp *dp = &ar->dp;
+ struct ath11k_base *ab = ar->ab;
+ struct dp_rxdma_ring *rx_ring = &dp->rx_refill_buf_ring;
+ int i;
+
+ ath11k_dp_rxdma_buf_ring_free(ar, rx_ring);
+
+ rx_ring = &dp->rxdma_mon_buf_ring;
+ ath11k_dp_rxdma_buf_ring_free(ar, rx_ring);
+
+ for (i = 0; i < ab->hw_params.num_rxmda_per_pdev; i++) {
+ rx_ring = &dp->rx_mon_status_refill_ring[i];
+ ath11k_dp_rxdma_buf_ring_free(ar, rx_ring);
+ }
+
+ return 0;
+}
+
+static int ath11k_dp_rxdma_ring_buf_setup(struct ath11k *ar,
+ struct dp_rxdma_ring *rx_ring,
+ u32 ringtype)
+{
+ struct ath11k_pdev_dp *dp = &ar->dp;
+ int num_entries;
+
+ num_entries = rx_ring->refill_buf_ring.size /
+ ath11k_hal_srng_get_entrysize(ar->ab, ringtype);
+
+ rx_ring->bufs_max = num_entries;
+ ath11k_dp_rxbufs_replenish(ar->ab, dp->mac_id, rx_ring, num_entries,
+ ar->ab->hw_params.hal_params->rx_buf_rbm);
+ return 0;
+}
+
+static int ath11k_dp_rxdma_pdev_buf_setup(struct ath11k *ar)
+{
+ struct ath11k_pdev_dp *dp = &ar->dp;
+ struct ath11k_base *ab = ar->ab;
+ struct dp_rxdma_ring *rx_ring = &dp->rx_refill_buf_ring;
+ int i;
+
+ ath11k_dp_rxdma_ring_buf_setup(ar, rx_ring, HAL_RXDMA_BUF);
+
+ if (ar->ab->hw_params.rxdma1_enable) {
+ rx_ring = &dp->rxdma_mon_buf_ring;
+ ath11k_dp_rxdma_ring_buf_setup(ar, rx_ring, HAL_RXDMA_MONITOR_BUF);
+ }
+
+ for (i = 0; i < ab->hw_params.num_rxmda_per_pdev; i++) {
+ rx_ring = &dp->rx_mon_status_refill_ring[i];
+ ath11k_dp_rxdma_ring_buf_setup(ar, rx_ring, HAL_RXDMA_MONITOR_STATUS);
+ }
+
+ return 0;
+}
+
+static void ath11k_dp_rx_pdev_srng_free(struct ath11k *ar)
+{
+ struct ath11k_pdev_dp *dp = &ar->dp;
+ struct ath11k_base *ab = ar->ab;
+ int i;
+
+ ath11k_dp_srng_cleanup(ab, &dp->rx_refill_buf_ring.refill_buf_ring);
+
+ for (i = 0; i < ab->hw_params.num_rxmda_per_pdev; i++) {
+ if (ab->hw_params.rx_mac_buf_ring)
+ ath11k_dp_srng_cleanup(ab, &dp->rx_mac_buf_ring[i]);
+
+ ath11k_dp_srng_cleanup(ab, &dp->rxdma_err_dst_ring[i]);
+ ath11k_dp_srng_cleanup(ab,
+ &dp->rx_mon_status_refill_ring[i].refill_buf_ring);
+ }
+
+ ath11k_dp_srng_cleanup(ab, &dp->rxdma_mon_buf_ring.refill_buf_ring);
+}
+
+void ath11k_dp_pdev_reo_cleanup(struct ath11k_base *ab)
+{
+ struct ath11k_dp *dp = &ab->dp;
+ int i;
+
+ for (i = 0; i < DP_REO_DST_RING_MAX; i++)
+ ath11k_dp_srng_cleanup(ab, &dp->reo_dst_ring[i]);
+}
+
+int ath11k_dp_pdev_reo_setup(struct ath11k_base *ab)
+{
+ struct ath11k_dp *dp = &ab->dp;
+ int ret;
+ int i;
+
+ for (i = 0; i < DP_REO_DST_RING_MAX; i++) {
+ ret = ath11k_dp_srng_setup(ab, &dp->reo_dst_ring[i],
+ HAL_REO_DST, i, 0,
+ DP_REO_DST_RING_SIZE);
+ if (ret) {
+ ath11k_warn(ab, "failed to setup reo_dst_ring\n");
+ goto err_reo_cleanup;
+ }
+ }
+
+ return 0;
+
+err_reo_cleanup:
+ ath11k_dp_pdev_reo_cleanup(ab);
+
+ return ret;
+}
+
+static int ath11k_dp_rx_pdev_srng_alloc(struct ath11k *ar)
+{
+ struct ath11k_pdev_dp *dp = &ar->dp;
+ struct ath11k_base *ab = ar->ab;
+ struct dp_srng *srng = NULL;
+ int i;
+ int ret;
+
+ ret = ath11k_dp_srng_setup(ar->ab,
+ &dp->rx_refill_buf_ring.refill_buf_ring,
+ HAL_RXDMA_BUF, 0,
+ dp->mac_id, DP_RXDMA_BUF_RING_SIZE);
+ if (ret) {
+ ath11k_warn(ar->ab, "failed to setup rx_refill_buf_ring\n");
+ return ret;
+ }
+
+ if (ar->ab->hw_params.rx_mac_buf_ring) {
+ for (i = 0; i < ab->hw_params.num_rxmda_per_pdev; i++) {
+ ret = ath11k_dp_srng_setup(ar->ab,
+ &dp->rx_mac_buf_ring[i],
+ HAL_RXDMA_BUF, 1,
+ dp->mac_id + i, 1024);
+ if (ret) {
+ ath11k_warn(ar->ab, "failed to setup rx_mac_buf_ring %d\n",
+ i);
+ return ret;
+ }
+ }
+ }
+
+ for (i = 0; i < ab->hw_params.num_rxmda_per_pdev; i++) {
+ ret = ath11k_dp_srng_setup(ar->ab, &dp->rxdma_err_dst_ring[i],
+ HAL_RXDMA_DST, 0, dp->mac_id + i,
+ DP_RXDMA_ERR_DST_RING_SIZE);
+ if (ret) {
+ ath11k_warn(ar->ab, "failed to setup rxdma_err_dst_ring %d\n", i);
+ return ret;
+ }
+ }
+
+ for (i = 0; i < ab->hw_params.num_rxmda_per_pdev; i++) {
+ srng = &dp->rx_mon_status_refill_ring[i].refill_buf_ring;
+ ret = ath11k_dp_srng_setup(ar->ab,
+ srng,
+ HAL_RXDMA_MONITOR_STATUS, 0, dp->mac_id + i,
+ DP_RXDMA_MON_STATUS_RING_SIZE);
+ if (ret) {
+ ath11k_warn(ar->ab,
+ "failed to setup rx_mon_status_refill_ring %d\n", i);
+ return ret;
+ }
+ }
+
+ /* If rxdma1_enable is false, there is no need to set up
+ * rxdma_mon_buf_ring, rxdma_mon_dst_ring and
+ * rxdma_mon_desc_ring; only the mon status buffer reap timer
+ * needs to be initialized (e.g. for QCA6390).
+ */
+ if (!ar->ab->hw_params.rxdma1_enable) {
+ /* init mon status buffer reap timer */
+ timer_setup(&ar->ab->mon_reap_timer,
+ ath11k_dp_service_mon_ring, 0);
+ return 0;
+ }
+
+ ret = ath11k_dp_srng_setup(ar->ab,
+ &dp->rxdma_mon_buf_ring.refill_buf_ring,
+ HAL_RXDMA_MONITOR_BUF, 0, dp->mac_id,
+ DP_RXDMA_MONITOR_BUF_RING_SIZE);
+ if (ret) {
+ ath11k_warn(ar->ab,
+ "failed to setup HAL_RXDMA_MONITOR_BUF\n");
+ return ret;
+ }
+
+ ret = ath11k_dp_srng_setup(ar->ab, &dp->rxdma_mon_dst_ring,
+ HAL_RXDMA_MONITOR_DST, 0, dp->mac_id,
+ DP_RXDMA_MONITOR_DST_RING_SIZE);
+ if (ret) {
+ ath11k_warn(ar->ab,
+ "failed to setup HAL_RXDMA_MONITOR_DST\n");
+ return ret;
+ }
+
+ ret = ath11k_dp_srng_setup(ar->ab, &dp->rxdma_mon_desc_ring,
+ HAL_RXDMA_MONITOR_DESC, 0, dp->mac_id,
+ DP_RXDMA_MONITOR_DESC_RING_SIZE);
+ if (ret) {
+ ath11k_warn(ar->ab,
+ "failed to setup HAL_RXDMA_MONITOR_DESC\n");
+ return ret;
+ }
+
+ return 0;
+}
+
+void ath11k_dp_reo_cmd_list_cleanup(struct ath11k_base *ab)
+{
+ struct ath11k_dp *dp = &ab->dp;
+ struct dp_reo_cmd *cmd, *tmp;
+ struct dp_reo_cache_flush_elem *cmd_cache, *tmp_cache;
+ struct dp_rx_tid *rx_tid;
+
+ spin_lock_bh(&dp->reo_cmd_lock);
+ list_for_each_entry_safe(cmd, tmp, &dp->reo_cmd_list, list) {
+ list_del(&cmd->list);
+ rx_tid = &cmd->data;
+ if (rx_tid->vaddr) {
+ dma_unmap_single(ab->dev, rx_tid->paddr,
+ rx_tid->size, DMA_BIDIRECTIONAL);
+ kfree(rx_tid->vaddr);
+ rx_tid->vaddr = NULL;
+ }
+ kfree(cmd);
+ }
+
+ list_for_each_entry_safe(cmd_cache, tmp_cache,
+ &dp->reo_cmd_cache_flush_list, list) {
+ list_del(&cmd_cache->list);
+ dp->reo_cmd_cache_flush_count--;
+ rx_tid = &cmd_cache->data;
+ if (rx_tid->vaddr) {
+ dma_unmap_single(ab->dev, rx_tid->paddr,
+ rx_tid->size, DMA_BIDIRECTIONAL);
+ kfree(rx_tid->vaddr);
+ rx_tid->vaddr = NULL;
+ }
+ kfree(cmd_cache);
+ }
+ spin_unlock_bh(&dp->reo_cmd_lock);
+}
+
+static void ath11k_dp_reo_cmd_free(struct ath11k_dp *dp, void *ctx,
+ enum hal_reo_cmd_status status)
+{
+ struct dp_rx_tid *rx_tid = ctx;
+
+ if (status != HAL_REO_CMD_SUCCESS)
+ ath11k_warn(dp->ab, "failed to flush rx tid hw desc, tid %d status %d\n",
+ rx_tid->tid, status);
+ if (rx_tid->vaddr) {
+ dma_unmap_single(dp->ab->dev, rx_tid->paddr, rx_tid->size,
+ DMA_BIDIRECTIONAL);
+ kfree(rx_tid->vaddr);
+ rx_tid->vaddr = NULL;
+ }
+}
+
+static void ath11k_dp_reo_cache_flush(struct ath11k_base *ab,
+ struct dp_rx_tid *rx_tid)
+{
+ struct ath11k_hal_reo_cmd cmd = {0};
+ unsigned long tot_desc_sz, desc_sz;
+ int ret;
+
+ tot_desc_sz = rx_tid->size;
+ desc_sz = ath11k_hal_reo_qdesc_size(0, HAL_DESC_REO_NON_QOS_TID);
+
+ while (tot_desc_sz > desc_sz) {
+ tot_desc_sz -= desc_sz;
+ cmd.addr_lo = lower_32_bits(rx_tid->paddr + tot_desc_sz);
+ cmd.addr_hi = upper_32_bits(rx_tid->paddr);
+ ret = ath11k_dp_tx_send_reo_cmd(ab, rx_tid,
+ HAL_REO_CMD_FLUSH_CACHE, &cmd,
+ NULL);
+ if (ret)
+ ath11k_warn(ab,
+ "failed to send HAL_REO_CMD_FLUSH_CACHE, tid %d (%d)\n",
+ rx_tid->tid, ret);
+ }
+
+ memset(&cmd, 0, sizeof(cmd));
+ cmd.addr_lo = lower_32_bits(rx_tid->paddr);
+ cmd.addr_hi = upper_32_bits(rx_tid->paddr);
+ cmd.flag |= HAL_REO_CMD_FLG_NEED_STATUS;
+ ret = ath11k_dp_tx_send_reo_cmd(ab, rx_tid,
+ HAL_REO_CMD_FLUSH_CACHE,
+ &cmd, ath11k_dp_reo_cmd_free);
+ if (ret) {
+ ath11k_err(ab, "failed to send HAL_REO_CMD_FLUSH_CACHE cmd, tid %d (%d)\n",
+ rx_tid->tid, ret);
+ dma_unmap_single(ab->dev, rx_tid->paddr, rx_tid->size,
+ DMA_BIDIRECTIONAL);
+ kfree(rx_tid->vaddr);
+ rx_tid->vaddr = NULL;
+ }
+}
+
+static void ath11k_dp_rx_tid_del_func(struct ath11k_dp *dp, void *ctx,
+ enum hal_reo_cmd_status status)
+{
+ struct ath11k_base *ab = dp->ab;
+ struct dp_rx_tid *rx_tid = ctx;
+ struct dp_reo_cache_flush_elem *elem, *tmp;
+
+ if (status == HAL_REO_CMD_DRAIN) {
+ goto free_desc;
+ } else if (status != HAL_REO_CMD_SUCCESS) {
+ /* Shouldn't happen! Cleanup in case of other failure? */
+ ath11k_warn(ab, "failed to delete rx tid %d hw descriptor %d\n",
+ rx_tid->tid, status);
+ return;
+ }
+
+ elem = kzalloc(sizeof(*elem), GFP_ATOMIC);
+ if (!elem)
+ goto free_desc;
+
+ elem->ts = jiffies;
+ memcpy(&elem->data, rx_tid, sizeof(*rx_tid));
+
+ spin_lock_bh(&dp->reo_cmd_lock);
+ list_add_tail(&elem->list, &dp->reo_cmd_cache_flush_list);
+ dp->reo_cmd_cache_flush_count++;
+
+ /* Flush and invalidate aged REO desc from HW cache */
+ list_for_each_entry_safe(elem, tmp, &dp->reo_cmd_cache_flush_list,
+ list) {
+ if (dp->reo_cmd_cache_flush_count > DP_REO_DESC_FREE_THRESHOLD ||
+ time_after(jiffies, elem->ts +
+ msecs_to_jiffies(DP_REO_DESC_FREE_TIMEOUT_MS))) {
+ list_del(&elem->list);
+ dp->reo_cmd_cache_flush_count--;
+ spin_unlock_bh(&dp->reo_cmd_lock);
+
+ ath11k_dp_reo_cache_flush(ab, &elem->data);
+ kfree(elem);
+ spin_lock_bh(&dp->reo_cmd_lock);
+ }
+ }
+ spin_unlock_bh(&dp->reo_cmd_lock);
+
+ return;
+free_desc:
+ dma_unmap_single(ab->dev, rx_tid->paddr, rx_tid->size,
+ DMA_BIDIRECTIONAL);
+ kfree(rx_tid->vaddr);
+ rx_tid->vaddr = NULL;
+}
+
+void ath11k_peer_rx_tid_delete(struct ath11k *ar,
+ struct ath11k_peer *peer, u8 tid)
+{
+ struct ath11k_hal_reo_cmd cmd = {0};
+ struct dp_rx_tid *rx_tid = &peer->rx_tid[tid];
+ int ret;
+
+ if (!rx_tid->active)
+ return;
+
+ rx_tid->active = false;
+
+ cmd.flag = HAL_REO_CMD_FLG_NEED_STATUS;
+ cmd.addr_lo = lower_32_bits(rx_tid->paddr);
+ cmd.addr_hi = upper_32_bits(rx_tid->paddr);
+ cmd.upd0 |= HAL_REO_CMD_UPD0_VLD;
+ ret = ath11k_dp_tx_send_reo_cmd(ar->ab, rx_tid,
+ HAL_REO_CMD_UPDATE_RX_QUEUE, &cmd,
+ ath11k_dp_rx_tid_del_func);
+ if (ret) {
+ if (ret != -ESHUTDOWN)
+ ath11k_err(ar->ab, "failed to send HAL_REO_CMD_UPDATE_RX_QUEUE cmd, tid %d (%d)\n",
+ tid, ret);
+ dma_unmap_single(ar->ab->dev, rx_tid->paddr, rx_tid->size,
+ DMA_BIDIRECTIONAL);
+ kfree(rx_tid->vaddr);
+ rx_tid->vaddr = NULL;
+ }
+
+ rx_tid->paddr = 0;
+ rx_tid->size = 0;
+}
+
+static int ath11k_dp_rx_link_desc_return(struct ath11k_base *ab,
+ u32 *link_desc,
+ enum hal_wbm_rel_bm_act action)
+{
+ struct ath11k_dp *dp = &ab->dp;
+ struct hal_srng *srng;
+ u32 *desc;
+ int ret = 0;
+
+ srng = &ab->hal.srng_list[dp->wbm_desc_rel_ring.ring_id];
+
+ spin_lock_bh(&srng->lock);
+
+ ath11k_hal_srng_access_begin(ab, srng);
+
+ desc = ath11k_hal_srng_src_get_next_entry(ab, srng);
+ if (!desc) {
+ ret = -ENOBUFS;
+ goto exit;
+ }
+
+ ath11k_hal_rx_msdu_link_desc_set(ab, (void *)desc, (void *)link_desc,
+ action);
+
+exit:
+ ath11k_hal_srng_access_end(ab, srng);
+
+ spin_unlock_bh(&srng->lock);
+
+ return ret;
+}
+
+static void ath11k_dp_rx_frags_cleanup(struct dp_rx_tid *rx_tid, bool rel_link_desc)
+{
+ struct ath11k_base *ab = rx_tid->ab;
+
+ lockdep_assert_held(&ab->base_lock);
+
+ if (rx_tid->dst_ring_desc) {
+ if (rel_link_desc)
+ ath11k_dp_rx_link_desc_return(ab, (u32 *)rx_tid->dst_ring_desc,
+ HAL_WBM_REL_BM_ACT_PUT_IN_IDLE);
+ kfree(rx_tid->dst_ring_desc);
+ rx_tid->dst_ring_desc = NULL;
+ }
+
+ rx_tid->cur_sn = 0;
+ rx_tid->last_frag_no = 0;
+ rx_tid->rx_frag_bitmap = 0;
+ __skb_queue_purge(&rx_tid->rx_frags);
+}
+
+void ath11k_peer_frags_flush(struct ath11k *ar, struct ath11k_peer *peer)
+{
+ struct dp_rx_tid *rx_tid;
+ int i;
+
+ lockdep_assert_held(&ar->ab->base_lock);
+
+ for (i = 0; i <= IEEE80211_NUM_TIDS; i++) {
+ rx_tid = &peer->rx_tid[i];
+
+ spin_unlock_bh(&ar->ab->base_lock);
+ del_timer_sync(&rx_tid->frag_timer);
+ spin_lock_bh(&ar->ab->base_lock);
+
+ ath11k_dp_rx_frags_cleanup(rx_tid, true);
+ }
+}
+
+void ath11k_peer_rx_tid_cleanup(struct ath11k *ar, struct ath11k_peer *peer)
+{
+ struct dp_rx_tid *rx_tid;
+ int i;
+
+ lockdep_assert_held(&ar->ab->base_lock);
+
+ for (i = 0; i <= IEEE80211_NUM_TIDS; i++) {
+ rx_tid = &peer->rx_tid[i];
+
+ ath11k_peer_rx_tid_delete(ar, peer, i);
+ ath11k_dp_rx_frags_cleanup(rx_tid, true);
+
+ spin_unlock_bh(&ar->ab->base_lock);
+ del_timer_sync(&rx_tid->frag_timer);
+ spin_lock_bh(&ar->ab->base_lock);
+ }
+}
+
+static int ath11k_peer_rx_tid_reo_update(struct ath11k *ar,
+ struct ath11k_peer *peer,
+ struct dp_rx_tid *rx_tid,
+ u32 ba_win_sz, u16 ssn,
+ bool update_ssn)
+{
+ struct ath11k_hal_reo_cmd cmd = {0};
+ int ret;
+
+ cmd.addr_lo = lower_32_bits(rx_tid->paddr);
+ cmd.addr_hi = upper_32_bits(rx_tid->paddr);
+ cmd.flag = HAL_REO_CMD_FLG_NEED_STATUS;
+ cmd.upd0 = HAL_REO_CMD_UPD0_BA_WINDOW_SIZE;
+ cmd.ba_window_size = ba_win_sz;
+
+ if (update_ssn) {
+ cmd.upd0 |= HAL_REO_CMD_UPD0_SSN;
+ cmd.upd2 = FIELD_PREP(HAL_REO_CMD_UPD2_SSN, ssn);
+ }
+
+ ret = ath11k_dp_tx_send_reo_cmd(ar->ab, rx_tid,
+ HAL_REO_CMD_UPDATE_RX_QUEUE, &cmd,
+ NULL);
+ if (ret) {
+ ath11k_warn(ar->ab, "failed to update rx tid queue, tid %d (%d)\n",
+ rx_tid->tid, ret);
+ return ret;
+ }
+
+ rx_tid->ba_win_sz = ba_win_sz;
+
+ return 0;
+}
+
+static void ath11k_dp_rx_tid_mem_free(struct ath11k_base *ab,
+ const u8 *peer_mac, int vdev_id, u8 tid)
+{
+ struct ath11k_peer *peer;
+ struct dp_rx_tid *rx_tid;
+
+ spin_lock_bh(&ab->base_lock);
+
+ peer = ath11k_peer_find(ab, vdev_id, peer_mac);
+ if (!peer) {
+ ath11k_warn(ab, "failed to find the peer to free up rx tid mem\n");
+ goto unlock_exit;
+ }
+
+ rx_tid = &peer->rx_tid[tid];
+ if (!rx_tid->active)
+ goto unlock_exit;
+
+ dma_unmap_single(ab->dev, rx_tid->paddr, rx_tid->size,
+ DMA_BIDIRECTIONAL);
+ kfree(rx_tid->vaddr);
+ rx_tid->vaddr = NULL;
+
+ rx_tid->active = false;
+
+unlock_exit:
+ spin_unlock_bh(&ab->base_lock);
+}
+
+int ath11k_peer_rx_tid_setup(struct ath11k *ar, const u8 *peer_mac, int vdev_id,
+ u8 tid, u32 ba_win_sz, u16 ssn,
+ enum hal_pn_type pn_type)
+{
+ struct ath11k_base *ab = ar->ab;
+ struct ath11k_peer *peer;
+ struct dp_rx_tid *rx_tid;
+ u32 hw_desc_sz;
+ u32 *addr_aligned;
+ void *vaddr;
+ dma_addr_t paddr;
+ int ret;
+
+ spin_lock_bh(&ab->base_lock);
+
+ peer = ath11k_peer_find(ab, vdev_id, peer_mac);
+ if (!peer) {
+ ath11k_warn(ab, "failed to find the peer %pM to set up rx tid\n",
+ peer_mac);
+ spin_unlock_bh(&ab->base_lock);
+ return -ENOENT;
+ }
+
+ rx_tid = &peer->rx_tid[tid];
+ /* Update the tid queue if it is already set up */
+ if (rx_tid->active) {
+ paddr = rx_tid->paddr;
+ ret = ath11k_peer_rx_tid_reo_update(ar, peer, rx_tid,
+ ba_win_sz, ssn, true);
+ spin_unlock_bh(&ab->base_lock);
+ if (ret) {
+ ath11k_warn(ab, "failed to update reo for peer %pM rx tid %d\n: %d",
+ peer_mac, tid, ret);
+ return ret;
+ }
+
+ ret = ath11k_wmi_peer_rx_reorder_queue_setup(ar, vdev_id,
+ peer_mac, paddr,
+ tid, 1, ba_win_sz);
+ if (ret)
+ ath11k_warn(ab, "failed to send wmi rx reorder queue for peer %pM tid %d: %d\n",
+ peer_mac, tid, ret);
+ return ret;
+ }
+
+ rx_tid->tid = tid;
+
+ rx_tid->ba_win_sz = ba_win_sz;
+
+ /* TODO: Optimize the memory allocation for qos tid based on
+ * the actual BA window size in REO tid update path.
+ */
+ if (tid == HAL_DESC_REO_NON_QOS_TID)
+ hw_desc_sz = ath11k_hal_reo_qdesc_size(ba_win_sz, tid);
+ else
+ hw_desc_sz = ath11k_hal_reo_qdesc_size(DP_BA_WIN_SZ_MAX, tid);
+
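+ /* Over-allocate by HAL_LINK_DESC_ALIGN - 1 bytes so the descriptor
+ * can be aligned with PTR_ALIGN() below; the unaligned pointer is
+ * kept in rx_tid->vaddr for kfree().
+ */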
+ vaddr = kzalloc(hw_desc_sz + HAL_LINK_DESC_ALIGN - 1, GFP_ATOMIC);
+ if (!vaddr) {
+ spin_unlock_bh(&ab->base_lock);
+ return -ENOMEM;
+ }
+
+ addr_aligned = PTR_ALIGN(vaddr, HAL_LINK_DESC_ALIGN);
+
+ ath11k_hal_reo_qdesc_setup(addr_aligned, tid, ba_win_sz,
+ ssn, pn_type);
+
+ paddr = dma_map_single(ab->dev, addr_aligned, hw_desc_sz,
+ DMA_BIDIRECTIONAL);
+
+ ret = dma_mapping_error(ab->dev, paddr);
+ if (ret) {
+ spin_unlock_bh(&ab->base_lock);
+ ath11k_warn(ab, "failed to setup dma map for peer %pM rx tid %d: %d\n",
+ peer_mac, tid, ret);
+ goto err_mem_free;
+ }
+
+ rx_tid->vaddr = vaddr;
+ rx_tid->paddr = paddr;
+ rx_tid->size = hw_desc_sz;
+ rx_tid->active = true;
+
+ spin_unlock_bh(&ab->base_lock);
+
+ ret = ath11k_wmi_peer_rx_reorder_queue_setup(ar, vdev_id, peer_mac,
+ paddr, tid, 1, ba_win_sz);
+ if (ret) {
+ ath11k_warn(ar->ab, "failed to setup rx reorder queue for peer %pM tid %d: %d\n",
+ peer_mac, tid, ret);
+ ath11k_dp_rx_tid_mem_free(ab, peer_mac, vdev_id, tid);
+ }
+
+ return ret;
+
+err_mem_free:
+ kfree(rx_tid->vaddr);
+ rx_tid->vaddr = NULL;
+
+ return ret;
+}
+
+int ath11k_dp_rx_ampdu_start(struct ath11k *ar,
+ struct ieee80211_ampdu_params *params)
+{
+ struct ath11k_base *ab = ar->ab;
+ struct ath11k_sta *arsta = (void *)params->sta->drv_priv;
+ int vdev_id = arsta->arvif->vdev_id;
+ int ret;
+
+ ret = ath11k_peer_rx_tid_setup(ar, params->sta->addr, vdev_id,
+ params->tid, params->buf_size,
+ params->ssn, arsta->pn_type);
+ if (ret)
+ ath11k_warn(ab, "failed to setup rx tid %d\n", ret);
+
+ return ret;
+}
+
+int ath11k_dp_rx_ampdu_stop(struct ath11k *ar,
+ struct ieee80211_ampdu_params *params)
+{
+ struct ath11k_base *ab = ar->ab;
+ struct ath11k_peer *peer;
+ struct ath11k_sta *arsta = (void *)params->sta->drv_priv;
+ int vdev_id = arsta->arvif->vdev_id;
+ dma_addr_t paddr;
+ bool active;
+ int ret;
+
+ spin_lock_bh(&ab->base_lock);
+
+ peer = ath11k_peer_find(ab, vdev_id, params->sta->addr);
+ if (!peer) {
+ ath11k_warn(ab, "failed to find the peer to stop rx aggregation\n");
+ spin_unlock_bh(&ab->base_lock);
+ return -ENOENT;
+ }
+
+ paddr = peer->rx_tid[params->tid].paddr;
+ active = peer->rx_tid[params->tid].active;
+
+ if (!active) {
+ spin_unlock_bh(&ab->base_lock);
+ return 0;
+ }
+
+ ret = ath11k_peer_rx_tid_reo_update(ar, peer, peer->rx_tid, 1, 0, false);
+ spin_unlock_bh(&ab->base_lock);
+ if (ret) {
+ ath11k_warn(ab, "failed to update reo for rx tid %d: %d\n",
+ params->tid, ret);
+ return ret;
+ }
+
+ ret = ath11k_wmi_peer_rx_reorder_queue_setup(ar, vdev_id,
+ params->sta->addr, paddr,
+ params->tid, 1, 1);
+ if (ret)
+ ath11k_warn(ab, "failed to send wmi to delete rx tid %d\n",
+ ret);
+
+ return ret;
+}
+
+int ath11k_dp_peer_rx_pn_replay_config(struct ath11k_vif *arvif,
+ const u8 *peer_addr,
+ enum set_key_cmd key_cmd,
+ struct ieee80211_key_conf *key)
+{
+ struct ath11k *ar = arvif->ar;
+ struct ath11k_base *ab = ar->ab;
+ struct ath11k_hal_reo_cmd cmd = {0};
+ struct ath11k_peer *peer;
+ struct dp_rx_tid *rx_tid;
+ u8 tid;
+ int ret = 0;
+
+ /* NOTE: Enable PN/TSC replay check offload only for unicast frames.
+ * We use mac80211 PN/TSC replay check functionality for bcast/mcast
+ * for now.
+ */
+ if (!(key->flags & IEEE80211_KEY_FLAG_PAIRWISE))
+ return 0;
+
+ cmd.flag |= HAL_REO_CMD_FLG_NEED_STATUS;
+ cmd.upd0 |= HAL_REO_CMD_UPD0_PN |
+ HAL_REO_CMD_UPD0_PN_SIZE |
+ HAL_REO_CMD_UPD0_PN_VALID |
+ HAL_REO_CMD_UPD0_PN_CHECK |
+ HAL_REO_CMD_UPD0_SVLD;
+
+ switch (key->cipher) {
+ case WLAN_CIPHER_SUITE_TKIP:
+ case WLAN_CIPHER_SUITE_CCMP:
+ case WLAN_CIPHER_SUITE_CCMP_256:
+ case WLAN_CIPHER_SUITE_GCMP:
+ case WLAN_CIPHER_SUITE_GCMP_256:
+ if (key_cmd == SET_KEY) {
+ cmd.upd1 |= HAL_REO_CMD_UPD1_PN_CHECK;
+ cmd.pn_size = 48;
+ }
+ break;
+ default:
+ break;
+ }
+
+ spin_lock_bh(&ab->base_lock);
+
+ peer = ath11k_peer_find(ab, arvif->vdev_id, peer_addr);
+ if (!peer) {
+ ath11k_warn(ab, "failed to find the peer to configure pn replay detection\n");
+ spin_unlock_bh(&ab->base_lock);
+ return -ENOENT;
+ }
+
+ for (tid = 0; tid <= IEEE80211_NUM_TIDS; tid++) {
+ rx_tid = &peer->rx_tid[tid];
+ if (!rx_tid->active)
+ continue;
+ cmd.addr_lo = lower_32_bits(rx_tid->paddr);
+ cmd.addr_hi = upper_32_bits(rx_tid->paddr);
+ ret = ath11k_dp_tx_send_reo_cmd(ab, rx_tid,
+ HAL_REO_CMD_UPDATE_RX_QUEUE,
+ &cmd, NULL);
+ if (ret) {
+ ath11k_warn(ab, "failed to configure rx tid %d queue for pn replay detection %d\n",
+ tid, ret);
+ break;
+ }
+ }
+
+ spin_unlock_bh(&ab->base_lock);
+
+ return ret;
+}
+
+static inline int ath11k_get_ppdu_user_index(struct htt_ppdu_stats *ppdu_stats,
+ u16 peer_id)
+{
+ int i;
+
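+ /* Return the slot already holding this peer_id, or the first unused
+ * slot; user_stats entries are filled in order, so an invalid
+ * peer_id marks the end of the used range.
+ */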
+ for (i = 0; i < HTT_PPDU_STATS_MAX_USERS - 1; i++) {
+ if (ppdu_stats->user_stats[i].is_valid_peer_id) {
+ if (peer_id == ppdu_stats->user_stats[i].peer_id)
+ return i;
+ } else {
+ return i;
+ }
+ }
+
+ return -EINVAL;
+}
+
+static int ath11k_htt_tlv_ppdu_stats_parse(struct ath11k_base *ab,
+ u16 tag, u16 len, const void *ptr,
+ void *data)
+{
+ struct htt_ppdu_stats_info *ppdu_info;
+ struct htt_ppdu_user_stats *user_stats;
+ int cur_user;
+ u16 peer_id;
+
+ ppdu_info = (struct htt_ppdu_stats_info *)data;
+
+ switch (tag) {
+ case HTT_PPDU_STATS_TAG_COMMON:
+ if (len < sizeof(struct htt_ppdu_stats_common)) {
+ ath11k_warn(ab, "Invalid len %d for the tag 0x%x\n",
+ len, tag);
+ return -EINVAL;
+ }
+ memcpy((void *)&ppdu_info->ppdu_stats.common, ptr,
+ sizeof(struct htt_ppdu_stats_common));
+ break;
+ case HTT_PPDU_STATS_TAG_USR_RATE:
+ if (len < sizeof(struct htt_ppdu_stats_user_rate)) {
+ ath11k_warn(ab, "Invalid len %d for the tag 0x%x\n",
+ len, tag);
+ return -EINVAL;
+ }
+
+ peer_id = ((struct htt_ppdu_stats_user_rate *)ptr)->sw_peer_id;
+ cur_user = ath11k_get_ppdu_user_index(&ppdu_info->ppdu_stats,
+ peer_id);
+ if (cur_user < 0)
+ return -EINVAL;
+ user_stats = &ppdu_info->ppdu_stats.user_stats[cur_user];
+ user_stats->peer_id = peer_id;
+ user_stats->is_valid_peer_id = true;
+ memcpy((void *)&user_stats->rate, ptr,
+ sizeof(struct htt_ppdu_stats_user_rate));
+ user_stats->tlv_flags |= BIT(tag);
+ break;
+ case HTT_PPDU_STATS_TAG_USR_COMPLTN_COMMON:
+ if (len < sizeof(struct htt_ppdu_stats_usr_cmpltn_cmn)) {
+ ath11k_warn(ab, "Invalid len %d for the tag 0x%x\n",
+ len, tag);
+ return -EINVAL;
+ }
+
+ peer_id = ((struct htt_ppdu_stats_usr_cmpltn_cmn *)ptr)->sw_peer_id;
+ cur_user = ath11k_get_ppdu_user_index(&ppdu_info->ppdu_stats,
+ peer_id);
+ if (cur_user < 0)
+ return -EINVAL;
+ user_stats = &ppdu_info->ppdu_stats.user_stats[cur_user];
+ user_stats->peer_id = peer_id;
+ user_stats->is_valid_peer_id = true;
+ memcpy((void *)&user_stats->cmpltn_cmn, ptr,
+ sizeof(struct htt_ppdu_stats_usr_cmpltn_cmn));
+ user_stats->tlv_flags |= BIT(tag);
+ break;
+ case HTT_PPDU_STATS_TAG_USR_COMPLTN_ACK_BA_STATUS:
+ if (len <
+ sizeof(struct htt_ppdu_stats_usr_cmpltn_ack_ba_status)) {
+ ath11k_warn(ab, "Invalid len %d for the tag 0x%x\n",
+ len, tag);
+ return -EINVAL;
+ }
+
+ peer_id =
+ ((struct htt_ppdu_stats_usr_cmpltn_ack_ba_status *)ptr)->sw_peer_id;
+ cur_user = ath11k_get_ppdu_user_index(&ppdu_info->ppdu_stats,
+ peer_id);
+ if (cur_user < 0)
+ return -EINVAL;
+ user_stats = &ppdu_info->ppdu_stats.user_stats[cur_user];
+ user_stats->peer_id = peer_id;
+ user_stats->is_valid_peer_id = true;
+ memcpy((void *)&user_stats->ack_ba, ptr,
+ sizeof(struct htt_ppdu_stats_usr_cmpltn_ack_ba_status));
+ user_stats->tlv_flags |= BIT(tag);
+ break;
+ }
+ return 0;
+}
+
+int ath11k_dp_htt_tlv_iter(struct ath11k_base *ab, const void *ptr, size_t len,
+ int (*iter)(struct ath11k_base *ar, u16 tag, u16 len,
+ const void *ptr, void *data),
+ void *data)
+{
+ const struct htt_tlv *tlv;
+ const void *begin = ptr;
+ u16 tlv_tag, tlv_len;
+ int ret = -EINVAL;
+
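+ /* Walk the TLV stream: each TLV is a 32-bit header encoding tag and
+ * length, followed by the payload. Bail out on truncated input.
+ */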
+ while (len > 0) {
+ if (len < sizeof(*tlv)) {
+ ath11k_err(ab, "htt tlv parse failure at byte %zd (%zu bytes left, %zu expected)\n",
+ ptr - begin, len, sizeof(*tlv));
+ return -EINVAL;
+ }
+ tlv = (struct htt_tlv *)ptr;
+ tlv_tag = FIELD_GET(HTT_TLV_TAG, tlv->header);
+ tlv_len = FIELD_GET(HTT_TLV_LEN, tlv->header);
+ ptr += sizeof(*tlv);
+ len -= sizeof(*tlv);
+
+ if (tlv_len > len) {
+ ath11k_err(ab, "htt tlv parse failure of tag %u at byte %zd (%zu bytes left, %u expected)\n",
+ tlv_tag, ptr - begin, len, tlv_len);
+ return -EINVAL;
+ }
+ ret = iter(ab, tlv_tag, tlv_len, ptr, data);
+ if (ret == -ENOMEM)
+ return ret;
+
+ ptr += tlv_len;
+ len -= tlv_len;
+ }
+ return 0;
+}
+
+static void
+ath11k_update_per_peer_tx_stats(struct ath11k *ar,
+ struct htt_ppdu_stats *ppdu_stats, u8 user)
+{
+ struct ath11k_base *ab = ar->ab;
+ struct ath11k_peer *peer;
+ struct ieee80211_sta *sta;
+ struct ath11k_sta *arsta;
+ struct htt_ppdu_stats_user_rate *user_rate;
+ struct ath11k_per_peer_tx_stats *peer_stats = &ar->peer_tx_stats;
+ struct htt_ppdu_user_stats *usr_stats = &ppdu_stats->user_stats[user];
+ struct htt_ppdu_stats_common *common = &ppdu_stats->common;
+ int ret;
+ u8 flags, mcs, nss, bw, sgi, dcm, rate_idx = 0;
+ u32 succ_bytes = 0;
+ u16 rate = 0, succ_pkts = 0;
+ u32 tx_duration = 0;
+ u8 tid = HTT_PPDU_STATS_NON_QOS_TID;
+ bool is_ampdu = false;
+
+ if (!usr_stats)
+ return;
+
+ if (!(usr_stats->tlv_flags & BIT(HTT_PPDU_STATS_TAG_USR_RATE)))
+ return;
+
+ if (usr_stats->tlv_flags & BIT(HTT_PPDU_STATS_TAG_USR_COMPLTN_COMMON))
+ is_ampdu =
+ HTT_USR_CMPLTN_IS_AMPDU(usr_stats->cmpltn_cmn.flags);
+
+ if (usr_stats->tlv_flags &
+ BIT(HTT_PPDU_STATS_TAG_USR_COMPLTN_ACK_BA_STATUS)) {
+ succ_bytes = usr_stats->ack_ba.success_bytes;
+ succ_pkts = FIELD_GET(HTT_PPDU_STATS_ACK_BA_INFO_NUM_MSDU_M,
+ usr_stats->ack_ba.info);
+ tid = FIELD_GET(HTT_PPDU_STATS_ACK_BA_INFO_TID_NUM,
+ usr_stats->ack_ba.info);
+ }
+
+ if (common->fes_duration_us)
+ tx_duration = common->fes_duration_us;
+
+ user_rate = &usr_stats->rate;
+ flags = HTT_USR_RATE_PREAMBLE(user_rate->rate_flags);
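+ /* HTT reports nss as (streams - 1) and bandwidth offset by 2
+ * relative to the ath11k bw enum (20 MHz == 0), hence the
+ * adjustments below.
+ */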
+ bw = HTT_USR_RATE_BW(user_rate->rate_flags) - 2;
+ nss = HTT_USR_RATE_NSS(user_rate->rate_flags) + 1;
+ mcs = HTT_USR_RATE_MCS(user_rate->rate_flags);
+ sgi = HTT_USR_RATE_GI(user_rate->rate_flags);
+ dcm = HTT_USR_RATE_DCM(user_rate->rate_flags);
+
+ /* Note: If the host has configured fixed rates, or in certain other
+ * special cases, broadcast/management frames are sent at different
+ * rates. Should firmware rate control be skipped for those?
+ */
+
+ if (flags == WMI_RATE_PREAMBLE_HE && mcs > ATH11K_HE_MCS_MAX) {
+ ath11k_warn(ab, "Invalid HE mcs %d peer stats", mcs);
+ return;
+ }
+
+ if (flags == WMI_RATE_PREAMBLE_VHT && mcs > ATH11K_VHT_MCS_MAX) {
+ ath11k_warn(ab, "Invalid VHT mcs %d peer stats", mcs);
+ return;
+ }
+
+ if (flags == WMI_RATE_PREAMBLE_HT && (mcs > ATH11K_HT_MCS_MAX || nss < 1)) {
+ ath11k_warn(ab, "Invalid HT mcs %d nss %d peer stats",
+ mcs, nss);
+ return;
+ }
+
+ if (flags == WMI_RATE_PREAMBLE_CCK || flags == WMI_RATE_PREAMBLE_OFDM) {
+ ret = ath11k_mac_hw_ratecode_to_legacy_rate(mcs,
+ flags,
+ &rate_idx,
+ &rate);
+ if (ret < 0)
+ return;
+ }
+
+ rcu_read_lock();
+ spin_lock_bh(&ab->base_lock);
+ peer = ath11k_peer_find_by_id(ab, usr_stats->peer_id);
+
+ if (!peer || !peer->sta) {
+ spin_unlock_bh(&ab->base_lock);
+ rcu_read_unlock();
+ return;
+ }
+
+ sta = peer->sta;
+ arsta = (struct ath11k_sta *)sta->drv_priv;
+
+ memset(&arsta->txrate, 0, sizeof(arsta->txrate));
+
+ switch (flags) {
+ case WMI_RATE_PREAMBLE_OFDM:
+ arsta->txrate.legacy = rate;
+ break;
+ case WMI_RATE_PREAMBLE_CCK:
+ arsta->txrate.legacy = rate;
+ break;
+ case WMI_RATE_PREAMBLE_HT:
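+ /* HT rate_idx folds nss into the MCS index: 8 MCS values per
+ * spatial stream.
+ */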
+ arsta->txrate.mcs = mcs + 8 * (nss - 1);
+ arsta->txrate.flags = RATE_INFO_FLAGS_MCS;
+ if (sgi)
+ arsta->txrate.flags |= RATE_INFO_FLAGS_SHORT_GI;
+ break;
+ case WMI_RATE_PREAMBLE_VHT:
+ arsta->txrate.mcs = mcs;
+ arsta->txrate.flags = RATE_INFO_FLAGS_VHT_MCS;
+ if (sgi)
+ arsta->txrate.flags |= RATE_INFO_FLAGS_SHORT_GI;
+ break;
+ case WMI_RATE_PREAMBLE_HE:
+ arsta->txrate.mcs = mcs;
+ arsta->txrate.flags = RATE_INFO_FLAGS_HE_MCS;
+ arsta->txrate.he_dcm = dcm;
+ arsta->txrate.he_gi = ath11k_mac_he_gi_to_nl80211_he_gi(sgi);
+ arsta->txrate.he_ru_alloc = ath11k_mac_phy_he_ru_to_nl80211_he_ru_alloc
+ ((user_rate->ru_end -
+ user_rate->ru_start) + 1);
+ break;
+ }
+
+ arsta->txrate.nss = nss;
+
+ arsta->txrate.bw = ath11k_mac_bw_to_mac80211_bw(bw);
+ arsta->tx_duration += tx_duration;
+ memcpy(&arsta->last_txrate, &arsta->txrate, sizeof(struct rate_info));
+
+ /* PPDU stats reported for mgmt packet doesn't have valid tx bytes.
+ * So skip peer stats update for mgmt packets.
+ */
+ if (tid < HTT_PPDU_STATS_NON_QOS_TID) {
+ memset(peer_stats, 0, sizeof(*peer_stats));
+ peer_stats->succ_pkts = succ_pkts;
+ peer_stats->succ_bytes = succ_bytes;
+ peer_stats->is_ampdu = is_ampdu;
+ peer_stats->duration = tx_duration;
+ peer_stats->ba_fails =
+ HTT_USR_CMPLTN_LONG_RETRY(usr_stats->cmpltn_cmn.flags) +
+ HTT_USR_CMPLTN_SHORT_RETRY(usr_stats->cmpltn_cmn.flags);
+
+ if (ath11k_debugfs_is_extd_tx_stats_enabled(ar))
+ ath11k_debugfs_sta_add_tx_stats(arsta, peer_stats, rate_idx);
+ }
+
+ spin_unlock_bh(&ab->base_lock);
+ rcu_read_unlock();
+}
+
+static void ath11k_htt_update_ppdu_stats(struct ath11k *ar,
+ struct htt_ppdu_stats *ppdu_stats)
+{
+ u8 user;
+
+ for (user = 0; user < HTT_PPDU_STATS_MAX_USERS - 1; user++)
+ ath11k_update_per_peer_tx_stats(ar, ppdu_stats, user);
+}
+
+static
+struct htt_ppdu_stats_info *ath11k_dp_htt_get_ppdu_desc(struct ath11k *ar,
+ u32 ppdu_id)
+{
+ struct htt_ppdu_stats_info *ppdu_info;
+
+ lockdep_assert_held(&ar->data_lock);
+
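+ /* Reuse an in-flight entry for this ppdu_id if one exists;
+ * otherwise, once the list grows too deep, retire the oldest entry
+ * (flushing its stats) before allocating a new one.
+ */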
+ if (!list_empty(&ar->ppdu_stats_info)) {
+ list_for_each_entry(ppdu_info, &ar->ppdu_stats_info, list) {
+ if (ppdu_info->ppdu_id == ppdu_id)
+ return ppdu_info;
+ }
+
+ if (ar->ppdu_stat_list_depth > HTT_PPDU_DESC_MAX_DEPTH) {
+ ppdu_info = list_first_entry(&ar->ppdu_stats_info,
+ typeof(*ppdu_info), list);
+ list_del(&ppdu_info->list);
+ ar->ppdu_stat_list_depth--;
+ ath11k_htt_update_ppdu_stats(ar, &ppdu_info->ppdu_stats);
+ kfree(ppdu_info);
+ }
+ }
+
+ ppdu_info = kzalloc(sizeof(*ppdu_info), GFP_ATOMIC);
+ if (!ppdu_info)
+ return NULL;
+
+ list_add_tail(&ppdu_info->list, &ar->ppdu_stats_info);
+ ar->ppdu_stat_list_depth++;
+
+ return ppdu_info;
+}
+
+static int ath11k_htt_pull_ppdu_stats(struct ath11k_base *ab,
+ struct sk_buff *skb)
+{
+ struct ath11k_htt_ppdu_stats_msg *msg;
+ struct htt_ppdu_stats_info *ppdu_info;
+ struct ath11k *ar;
+ int ret;
+ u8 pdev_id;
+ u32 ppdu_id, len;
+
+ msg = (struct ath11k_htt_ppdu_stats_msg *)skb->data;
+ len = FIELD_GET(HTT_T2H_PPDU_STATS_INFO_PAYLOAD_SIZE, msg->info);
+ pdev_id = FIELD_GET(HTT_T2H_PPDU_STATS_INFO_PDEV_ID, msg->info);
+ ppdu_id = msg->ppdu_id;
+
+ rcu_read_lock();
+ ar = ath11k_mac_get_ar_by_pdev_id(ab, pdev_id);
+ if (!ar) {
+ ret = -EINVAL;
+ goto out;
+ }
+
+ if (ath11k_debugfs_is_pktlog_lite_mode_enabled(ar))
+ trace_ath11k_htt_ppdu_stats(ar, skb->data, len);
+
+ spin_lock_bh(&ar->data_lock);
+ ppdu_info = ath11k_dp_htt_get_ppdu_desc(ar, ppdu_id);
+ if (!ppdu_info) {
+ ret = -EINVAL;
+ goto out_unlock_data;
+ }
+
+ ppdu_info->ppdu_id = ppdu_id;
+ ret = ath11k_dp_htt_tlv_iter(ab, msg->data, len,
+ ath11k_htt_tlv_ppdu_stats_parse,
+ (void *)ppdu_info);
+ if (ret) {
+ ath11k_warn(ab, "Failed to parse tlv %d\n", ret);
+ goto out_unlock_data;
+ }
+
+out_unlock_data:
+ spin_unlock_bh(&ar->data_lock);
+
+out:
+ rcu_read_unlock();
+
+ return ret;
+}
+
+static void ath11k_htt_pktlog(struct ath11k_base *ab, struct sk_buff *skb)
+{
+ struct htt_pktlog_msg *data = (struct htt_pktlog_msg *)skb->data;
+ struct ath_pktlog_hdr *hdr = (struct ath_pktlog_hdr *)data;
+ struct ath11k *ar;
+ u8 pdev_id;
+
+ pdev_id = FIELD_GET(HTT_T2H_PPDU_STATS_INFO_PDEV_ID, data->hdr);
+
+ rcu_read_lock();
+
+ ar = ath11k_mac_get_ar_by_pdev_id(ab, pdev_id);
+ if (!ar) {
+ ath11k_warn(ab, "invalid pdev id %d on htt pktlog\n", pdev_id);
+ goto out;
+ }
+
+ trace_ath11k_htt_pktlog(ar, data->payload, hdr->size,
+ ar->ab->pktlog_defs_checksum);
+
+out:
+ rcu_read_unlock();
+}
+
+static void ath11k_htt_backpressure_event_handler(struct ath11k_base *ab,
+ struct sk_buff *skb)
+{
+ u32 *data = (u32 *)skb->data;
+ u8 pdev_id, ring_type, ring_id, pdev_idx;
+ u16 hp, tp;
+ u32 backpressure_time;
+ struct ath11k_bp_stats *bp_stats;
+
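+ /* The event payload is three 32-bit words: pdev/ring
+ * identification, the ring's head and tail pointers, and the
+ * backpressure duration.
+ */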
+ pdev_id = FIELD_GET(HTT_BACKPRESSURE_EVENT_PDEV_ID_M, *data);
+ ring_type = FIELD_GET(HTT_BACKPRESSURE_EVENT_RING_TYPE_M, *data);
+ ring_id = FIELD_GET(HTT_BACKPRESSURE_EVENT_RING_ID_M, *data);
+ ++data;
+
+ hp = FIELD_GET(HTT_BACKPRESSURE_EVENT_HP_M, *data);
+ tp = FIELD_GET(HTT_BACKPRESSURE_EVENT_TP_M, *data);
+ ++data;
+
+ backpressure_time = *data;
+
+ ath11k_dbg(ab, ATH11K_DBG_DP_HTT, "backpressure event, pdev %d, ring type %d,ring id %d, hp %d tp %d, backpressure time %d\n",
+ pdev_id, ring_type, ring_id, hp, tp, backpressure_time);
+
+ if (ring_type == HTT_BACKPRESSURE_UMAC_RING_TYPE) {
+ if (ring_id >= HTT_SW_UMAC_RING_IDX_MAX)
+ return;
+
+ bp_stats = &ab->soc_stats.bp_stats.umac_ring_bp_stats[ring_id];
+ } else if (ring_type == HTT_BACKPRESSURE_LMAC_RING_TYPE) {
+ pdev_idx = DP_HW2SW_MACID(pdev_id);
+
+ if (ring_id >= HTT_SW_LMAC_RING_IDX_MAX || pdev_idx >= MAX_RADIOS)
+ return;
+
+ bp_stats = &ab->soc_stats.bp_stats.lmac_ring_bp_stats[ring_id][pdev_idx];
+ } else {
+ ath11k_warn(ab, "unknown ring type received in htt bp event %d\n",
+ ring_type);
+ return;
+ }
+
+ spin_lock_bh(&ab->base_lock);
+ bp_stats->hp = hp;
+ bp_stats->tp = tp;
+ bp_stats->count++;
+ bp_stats->jiffies = jiffies;
+ spin_unlock_bh(&ab->base_lock);
+}
+
+void ath11k_dp_htt_htc_t2h_msg_handler(struct ath11k_base *ab,
+ struct sk_buff *skb)
+{
+ struct ath11k_dp *dp = &ab->dp;
+ struct htt_resp_msg *resp = (struct htt_resp_msg *)skb->data;
+ enum htt_t2h_msg_type type = FIELD_GET(HTT_T2H_MSG_TYPE, *(u32 *)resp);
+ u16 peer_id;
+ u8 vdev_id;
+ u8 mac_addr[ETH_ALEN];
+ u16 peer_mac_h16;
+ u16 ast_hash;
+ u16 hw_peer_id;
+
+ ath11k_dbg(ab, ATH11K_DBG_DP_HTT, "dp_htt rx msg type :0x%0x\n", type);
+
+ switch (type) {
+ case HTT_T2H_MSG_TYPE_VERSION_CONF:
+ dp->htt_tgt_ver_major = FIELD_GET(HTT_T2H_VERSION_CONF_MAJOR,
+ resp->version_msg.version);
+ dp->htt_tgt_ver_minor = FIELD_GET(HTT_T2H_VERSION_CONF_MINOR,
+ resp->version_msg.version);
+ complete(&dp->htt_tgt_version_received);
+ break;
+ case HTT_T2H_MSG_TYPE_PEER_MAP:
+ vdev_id = FIELD_GET(HTT_T2H_PEER_MAP_INFO_VDEV_ID,
+ resp->peer_map_ev.info);
+ peer_id = FIELD_GET(HTT_T2H_PEER_MAP_INFO_PEER_ID,
+ resp->peer_map_ev.info);
+ peer_mac_h16 = FIELD_GET(HTT_T2H_PEER_MAP_INFO1_MAC_ADDR_H16,
+ resp->peer_map_ev.info1);
+ ath11k_dp_get_mac_addr(resp->peer_map_ev.mac_addr_l32,
+ peer_mac_h16, mac_addr);
+ ath11k_peer_map_event(ab, vdev_id, peer_id, mac_addr, 0, 0);
+ break;
+ case HTT_T2H_MSG_TYPE_PEER_MAP2:
+ vdev_id = FIELD_GET(HTT_T2H_PEER_MAP_INFO_VDEV_ID,
+ resp->peer_map_ev.info);
+ peer_id = FIELD_GET(HTT_T2H_PEER_MAP_INFO_PEER_ID,
+ resp->peer_map_ev.info);
+ peer_mac_h16 = FIELD_GET(HTT_T2H_PEER_MAP_INFO1_MAC_ADDR_H16,
+ resp->peer_map_ev.info1);
+ ath11k_dp_get_mac_addr(resp->peer_map_ev.mac_addr_l32,
+ peer_mac_h16, mac_addr);
+ ast_hash = FIELD_GET(HTT_T2H_PEER_MAP_INFO2_AST_HASH_VAL,
+ resp->peer_map_ev.info2);
+ hw_peer_id = FIELD_GET(HTT_T2H_PEER_MAP_INFO1_HW_PEER_ID,
+ resp->peer_map_ev.info1);
+ ath11k_peer_map_event(ab, vdev_id, peer_id, mac_addr, ast_hash,
+ hw_peer_id);
+ break;
+ case HTT_T2H_MSG_TYPE_PEER_UNMAP:
+ case HTT_T2H_MSG_TYPE_PEER_UNMAP2:
+ peer_id = FIELD_GET(HTT_T2H_PEER_UNMAP_INFO_PEER_ID,
+ resp->peer_unmap_ev.info);
+ ath11k_peer_unmap_event(ab, peer_id);
+ break;
+ case HTT_T2H_MSG_TYPE_PPDU_STATS_IND:
+ ath11k_htt_pull_ppdu_stats(ab, skb);
+ break;
+ case HTT_T2H_MSG_TYPE_EXT_STATS_CONF:
+ ath11k_debugfs_htt_ext_stats_handler(ab, skb);
+ break;
+ case HTT_T2H_MSG_TYPE_PKTLOG:
+ ath11k_htt_pktlog(ab, skb);
+ break;
+ case HTT_T2H_MSG_TYPE_BKPRESSURE_EVENT_IND:
+ ath11k_htt_backpressure_event_handler(ab, skb);
+ break;
+ default:
+ ath11k_warn(ab, "htt event %d not handled\n", type);
+ break;
+ }
+
+ dev_kfree_skb_any(skb);
+}
+
+static int ath11k_dp_rx_msdu_coalesce(struct ath11k *ar,
+ struct sk_buff_head *msdu_list,
+ struct sk_buff *first, struct sk_buff *last,
+ u8 l3pad_bytes, int msdu_len)
+{
+ struct ath11k_base *ab = ar->ab;
+ struct sk_buff *skb;
+ struct ath11k_skb_rxcb *rxcb = ATH11K_SKB_RXCB(first);
+ int buf_first_hdr_len, buf_first_len;
+ struct hal_rx_desc *ldesc;
+ int space_extra, rem_len, buf_len;
+ u32 hal_rx_desc_sz = ar->ab->hw_params.hal_desc_sz;
+
+ /* As the msdu is spread across multiple rx buffers,
+ * find the offset to the start of msdu for computing
+ * the length of the msdu in the first buffer.
+ */
+ buf_first_hdr_len = hal_rx_desc_sz + l3pad_bytes;
+ buf_first_len = DP_RX_BUFFER_SIZE - buf_first_hdr_len;
+
+ if (WARN_ON_ONCE(msdu_len <= buf_first_len)) {
+ skb_put(first, buf_first_hdr_len + msdu_len);
+ skb_pull(first, buf_first_hdr_len);
+ return 0;
+ }
+
+ ldesc = (struct hal_rx_desc *)last->data;
+ rxcb->is_first_msdu = ath11k_dp_rx_h_msdu_end_first_msdu(ab, ldesc);
+ rxcb->is_last_msdu = ath11k_dp_rx_h_msdu_end_last_msdu(ab, ldesc);
+
+ /* MSDU spans over multiple buffers because the length of the MSDU
+ * exceeds DP_RX_BUFFER_SIZE - HAL_RX_DESC_SIZE. So assume the data
+ * in the first buf is of length DP_RX_BUFFER_SIZE - HAL_RX_DESC_SIZE.
+ */
+ skb_put(first, DP_RX_BUFFER_SIZE);
+ skb_pull(first, buf_first_hdr_len);
+
+ /* When an MSDU spreads over multiple buffers, the ATTENTION,
+ * MSDU_END and MPDU_END TLVs are valid only in the last buffer.
+ * Copy those TLVs.
+ */
+ ath11k_dp_rx_desc_end_tlv_copy(ab, rxcb->rx_desc, ldesc);
+
+ space_extra = msdu_len - (buf_first_len + skb_tailroom(first));
+ if (space_extra > 0 &&
+ (pskb_expand_head(first, 0, space_extra, GFP_ATOMIC) < 0)) {
+ /* Free up all buffers of the MSDU */
+ while ((skb = __skb_dequeue(msdu_list)) != NULL) {
+ rxcb = ATH11K_SKB_RXCB(skb);
+ if (!rxcb->is_continuation) {
+ dev_kfree_skb_any(skb);
+ break;
+ }
+ dev_kfree_skb_any(skb);
+ }
+ return -ENOMEM;
+ }
+
+ rem_len = msdu_len - buf_first_len;
+ while ((skb = __skb_dequeue(msdu_list)) != NULL && rem_len > 0) {
+ rxcb = ATH11K_SKB_RXCB(skb);
+ if (rxcb->is_continuation)
+ buf_len = DP_RX_BUFFER_SIZE - hal_rx_desc_sz;
+ else
+ buf_len = rem_len;
+
+ if (buf_len > (DP_RX_BUFFER_SIZE - hal_rx_desc_sz)) {
+ WARN_ON_ONCE(1);
+ dev_kfree_skb_any(skb);
+ return -EINVAL;
+ }
+
+ skb_put(skb, buf_len + hal_rx_desc_sz);
+ skb_pull(skb, hal_rx_desc_sz);
+ skb_copy_from_linear_data(skb, skb_put(first, buf_len),
+ buf_len);
+ dev_kfree_skb_any(skb);
+
+ rem_len -= buf_len;
+ if (!rxcb->is_continuation)
+ break;
+ }
+
+ return 0;
+}
+
+static struct sk_buff *ath11k_dp_rx_get_msdu_last_buf(struct sk_buff_head *msdu_list,
+ struct sk_buff *first)
+{
+ struct sk_buff *skb;
+ struct ath11k_skb_rxcb *rxcb = ATH11K_SKB_RXCB(first);
+
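+ /* A non-continuation first buffer means the MSDU is contained in a
+ * single buffer; otherwise the last buffer is the first entry in
+ * the list without the continuation flag set.
+ */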
+ if (!rxcb->is_continuation)
+ return first;
+
+ skb_queue_walk(msdu_list, skb) {
+ rxcb = ATH11K_SKB_RXCB(skb);
+ if (!rxcb->is_continuation)
+ return skb;
+ }
+
+ return NULL;
+}
+
+static void ath11k_dp_rx_h_csum_offload(struct ath11k *ar, struct sk_buff *msdu)
+{
+ struct ath11k_skb_rxcb *rxcb = ATH11K_SKB_RXCB(msdu);
+ struct rx_attention *rx_attention;
+ bool ip_csum_fail, l4_csum_fail;
+
+ rx_attention = ath11k_dp_rx_get_attention(ar->ab, rxcb->rx_desc);
+ ip_csum_fail = ath11k_dp_rx_h_attn_ip_cksum_fail(rx_attention);
+ l4_csum_fail = ath11k_dp_rx_h_attn_l4_cksum_fail(rx_attention);
+
+ msdu->ip_summed = (ip_csum_fail || l4_csum_fail) ?
+ CHECKSUM_NONE : CHECKSUM_UNNECESSARY;
+}
+
+static int ath11k_dp_rx_crypto_mic_len(struct ath11k *ar,
+ enum hal_encrypt_type enctype)
+{
+ switch (enctype) {
+ case HAL_ENCRYPT_TYPE_OPEN:
+ case HAL_ENCRYPT_TYPE_TKIP_NO_MIC:
+ case HAL_ENCRYPT_TYPE_TKIP_MIC:
+ return 0;
+ case HAL_ENCRYPT_TYPE_CCMP_128:
+ return IEEE80211_CCMP_MIC_LEN;
+ case HAL_ENCRYPT_TYPE_CCMP_256:
+ return IEEE80211_CCMP_256_MIC_LEN;
+ case HAL_ENCRYPT_TYPE_GCMP_128:
+ case HAL_ENCRYPT_TYPE_AES_GCMP_256:
+ return IEEE80211_GCMP_MIC_LEN;
+ case HAL_ENCRYPT_TYPE_WEP_40:
+ case HAL_ENCRYPT_TYPE_WEP_104:
+ case HAL_ENCRYPT_TYPE_WEP_128:
+ case HAL_ENCRYPT_TYPE_WAPI_GCM_SM4:
+ case HAL_ENCRYPT_TYPE_WAPI:
+ break;
+ }
+
+ ath11k_warn(ar->ab, "unsupported encryption type %d for mic len\n", enctype);
+ return 0;
+}
+
+static int ath11k_dp_rx_crypto_param_len(struct ath11k *ar,
+ enum hal_encrypt_type enctype)
+{
+ switch (enctype) {
+ case HAL_ENCRYPT_TYPE_OPEN:
+ return 0;
+ case HAL_ENCRYPT_TYPE_TKIP_NO_MIC:
+ case HAL_ENCRYPT_TYPE_TKIP_MIC:
+ return IEEE80211_TKIP_IV_LEN;
+ case HAL_ENCRYPT_TYPE_CCMP_128:
+ return IEEE80211_CCMP_HDR_LEN;
+ case HAL_ENCRYPT_TYPE_CCMP_256:
+ return IEEE80211_CCMP_256_HDR_LEN;
+ case HAL_ENCRYPT_TYPE_GCMP_128:
+ case HAL_ENCRYPT_TYPE_AES_GCMP_256:
+ return IEEE80211_GCMP_HDR_LEN;
+ case HAL_ENCRYPT_TYPE_WEP_40:
+ case HAL_ENCRYPT_TYPE_WEP_104:
+ case HAL_ENCRYPT_TYPE_WEP_128:
+ case HAL_ENCRYPT_TYPE_WAPI_GCM_SM4:
+ case HAL_ENCRYPT_TYPE_WAPI:
+ break;
+ }
+
+ ath11k_warn(ar->ab, "unsupported encryption type %d\n", enctype);
+ return 0;
+}
+
+static int ath11k_dp_rx_crypto_icv_len(struct ath11k *ar,
+ enum hal_encrypt_type enctype)
+{
+ switch (enctype) {
+ case HAL_ENCRYPT_TYPE_OPEN:
+ case HAL_ENCRYPT_TYPE_CCMP_128:
+ case HAL_ENCRYPT_TYPE_CCMP_256:
+ case HAL_ENCRYPT_TYPE_GCMP_128:
+ case HAL_ENCRYPT_TYPE_AES_GCMP_256:
+ return 0;
+ case HAL_ENCRYPT_TYPE_TKIP_NO_MIC:
+ case HAL_ENCRYPT_TYPE_TKIP_MIC:
+ return IEEE80211_TKIP_ICV_LEN;
+ case HAL_ENCRYPT_TYPE_WEP_40:
+ case HAL_ENCRYPT_TYPE_WEP_104:
+ case HAL_ENCRYPT_TYPE_WEP_128:
+ case HAL_ENCRYPT_TYPE_WAPI_GCM_SM4:
+ case HAL_ENCRYPT_TYPE_WAPI:
+ break;
+ }
+
+ ath11k_warn(ar->ab, "unsupported encryption type %d\n", enctype);
+ return 0;
+}
+
+static void ath11k_dp_rx_h_undecap_nwifi(struct ath11k *ar,
+ struct sk_buff *msdu,
+ u8 *first_hdr,
+ enum hal_encrypt_type enctype,
+ struct ieee80211_rx_status *status)
+{
+ struct ath11k_skb_rxcb *rxcb = ATH11K_SKB_RXCB(msdu);
+ u8 decap_hdr[DP_MAX_NWIFI_HDR_LEN];
+ struct ieee80211_hdr *hdr;
+ size_t hdr_len;
+ u8 da[ETH_ALEN];
+ u8 sa[ETH_ALEN];
+ u16 qos_ctl = 0;
+ u8 *qos;
+
+ /* copy SA & DA and pull decapped header */
+ hdr = (struct ieee80211_hdr *)msdu->data;
+ hdr_len = ieee80211_hdrlen(hdr->frame_control);
+ ether_addr_copy(da, ieee80211_get_DA(hdr));
+ ether_addr_copy(sa, ieee80211_get_SA(hdr));
+ skb_pull(msdu, ieee80211_hdrlen(hdr->frame_control));
+
+ if (rxcb->is_first_msdu) {
+ /* original 802.11 header is valid for the first msdu
+ * hence we can reuse the same header
+ */
+ hdr = (struct ieee80211_hdr *)first_hdr;
+ hdr_len = ieee80211_hdrlen(hdr->frame_control);
+
+ /* Each A-MSDU subframe will be reported as a separate MSDU,
+ * so strip the A-MSDU bit from QoS Ctl.
+ */
+ if (ieee80211_is_data_qos(hdr->frame_control)) {
+ qos = ieee80211_get_qos_ctl(hdr);
+ qos[0] &= ~IEEE80211_QOS_CTL_A_MSDU_PRESENT;
+ }
+ } else {
+ /* Rebuild qos header if this is a middle/last msdu */
+ hdr->frame_control |= __cpu_to_le16(IEEE80211_STYPE_QOS_DATA);
+
+ /* Reset the order bit as the HT_Control header is stripped */
+ hdr->frame_control &= ~(__cpu_to_le16(IEEE80211_FCTL_ORDER));
+
+ qos_ctl = rxcb->tid;
+
+ if (ath11k_dp_rx_h_msdu_start_mesh_ctl_present(ar->ab, rxcb->rx_desc))
+ qos_ctl |= IEEE80211_QOS_CTL_MESH_CONTROL_PRESENT;
+
+ /* TODO Add other QoS ctl fields when required */
+
+ /* copy decap header before overwriting for reuse below */
+ memcpy(decap_hdr, (uint8_t *)hdr, hdr_len);
+ }
+
+ if (!(status->flag & RX_FLAG_IV_STRIPPED)) {
+ memcpy(skb_push(msdu,
+ ath11k_dp_rx_crypto_param_len(ar, enctype)),
+ (void *)hdr + hdr_len,
+ ath11k_dp_rx_crypto_param_len(ar, enctype));
+ }
+
+ if (!rxcb->is_first_msdu) {
+ memcpy(skb_push(msdu,
+ IEEE80211_QOS_CTL_LEN), &qos_ctl,
+ IEEE80211_QOS_CTL_LEN);
+ memcpy(skb_push(msdu, hdr_len), decap_hdr, hdr_len);
+ return;
+ }
+
+ memcpy(skb_push(msdu, hdr_len), hdr, hdr_len);
+
+ /* original 802.11 header has a different DA and in
+ * case of 4addr it may also have different SA
+ */
+ hdr = (struct ieee80211_hdr *)msdu->data;
+ ether_addr_copy(ieee80211_get_DA(hdr), da);
+ ether_addr_copy(ieee80211_get_SA(hdr), sa);
+}
+
+static void ath11k_dp_rx_h_undecap_raw(struct ath11k *ar, struct sk_buff *msdu,
+ enum hal_encrypt_type enctype,
+ struct ieee80211_rx_status *status,
+ bool decrypted)
+{
+ struct ath11k_skb_rxcb *rxcb = ATH11K_SKB_RXCB(msdu);
+ struct ieee80211_hdr *hdr;
+ size_t hdr_len;
+ size_t crypto_len;
+
+ if (!rxcb->is_first_msdu ||
+ !(rxcb->is_first_msdu && rxcb->is_last_msdu)) {
+ WARN_ON_ONCE(1);
+ return;
+ }
+
+ skb_trim(msdu, msdu->len - FCS_LEN);
+
+ if (!decrypted)
+ return;
+
+ hdr = (void *)msdu->data;
+
+ /* Tail */
+ if (status->flag & RX_FLAG_IV_STRIPPED) {
+ skb_trim(msdu, msdu->len -
+ ath11k_dp_rx_crypto_mic_len(ar, enctype));
+
+ skb_trim(msdu, msdu->len -
+ ath11k_dp_rx_crypto_icv_len(ar, enctype));
+ } else {
+ /* MIC */
+ if (status->flag & RX_FLAG_MIC_STRIPPED)
+ skb_trim(msdu, msdu->len -
+ ath11k_dp_rx_crypto_mic_len(ar, enctype));
+
+ /* ICV */
+ if (status->flag & RX_FLAG_ICV_STRIPPED)
+ skb_trim(msdu, msdu->len -
+ ath11k_dp_rx_crypto_icv_len(ar, enctype));
+ }
+
+ /* MMIC */
+ if ((status->flag & RX_FLAG_MMIC_STRIPPED) &&
+ !ieee80211_has_morefrags(hdr->frame_control) &&
+ enctype == HAL_ENCRYPT_TYPE_TKIP_MIC)
+ skb_trim(msdu, msdu->len - IEEE80211_CCMP_MIC_LEN);
+
+ /* Head */
+ if (status->flag & RX_FLAG_IV_STRIPPED) {
+ hdr_len = ieee80211_hdrlen(hdr->frame_control);
+ crypto_len = ath11k_dp_rx_crypto_param_len(ar, enctype);
+
+ memmove((void *)msdu->data + crypto_len,
+ (void *)msdu->data, hdr_len);
+ skb_pull(msdu, crypto_len);
+ }
+}
+
+static void *ath11k_dp_rx_h_find_rfc1042(struct ath11k *ar,
+ struct sk_buff *msdu,
+ enum hal_encrypt_type enctype)
+{
+ struct ath11k_skb_rxcb *rxcb = ATH11K_SKB_RXCB(msdu);
+ struct ieee80211_hdr *hdr;
+ size_t hdr_len, crypto_len;
+ void *rfc1042;
+ bool is_amsdu;
+
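+ /* The RFC 1042/LLC header follows the 802.11 header (plus the IV in
+ * the first MSDU of an MPDU) and, within an A-MSDU, the subframe
+ * header as well.
+ */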
+ is_amsdu = !(rxcb->is_first_msdu && rxcb->is_last_msdu);
+ hdr = (struct ieee80211_hdr *)ath11k_dp_rx_h_80211_hdr(ar->ab, rxcb->rx_desc);
+ rfc1042 = hdr;
+
+ if (rxcb->is_first_msdu) {
+ hdr_len = ieee80211_hdrlen(hdr->frame_control);
+ crypto_len = ath11k_dp_rx_crypto_param_len(ar, enctype);
+
+ rfc1042 += hdr_len + crypto_len;
+ }
+
+ if (is_amsdu)
+ rfc1042 += sizeof(struct ath11k_dp_amsdu_subframe_hdr);
+
+ return rfc1042;
+}
+
+static void ath11k_dp_rx_h_undecap_eth(struct ath11k *ar,
+ struct sk_buff *msdu,
+ u8 *first_hdr,
+ enum hal_encrypt_type enctype,
+ struct ieee80211_rx_status *status)
+{
+ struct ieee80211_hdr *hdr;
+ struct ethhdr *eth;
+ size_t hdr_len;
+ u8 da[ETH_ALEN];
+ u8 sa[ETH_ALEN];
+ void *rfc1042;
+
+ rfc1042 = ath11k_dp_rx_h_find_rfc1042(ar, msdu, enctype);
+ if (WARN_ON_ONCE(!rfc1042))
+ return;
+
+ /* pull decapped header and copy SA & DA */
+ eth = (struct ethhdr *)msdu->data;
+ ether_addr_copy(da, eth->h_dest);
+ ether_addr_copy(sa, eth->h_source);
+ skb_pull(msdu, sizeof(struct ethhdr));
+
+ /* push rfc1042/llc/snap */
+ memcpy(skb_push(msdu, sizeof(struct ath11k_dp_rfc1042_hdr)), rfc1042,
+ sizeof(struct ath11k_dp_rfc1042_hdr));
+
+ /* push original 802.11 header */
+ hdr = (struct ieee80211_hdr *)first_hdr;
+ hdr_len = ieee80211_hdrlen(hdr->frame_control);
+
+ if (!(status->flag & RX_FLAG_IV_STRIPPED)) {
+ memcpy(skb_push(msdu,
+ ath11k_dp_rx_crypto_param_len(ar, enctype)),
+ (void *)hdr + hdr_len,
+ ath11k_dp_rx_crypto_param_len(ar, enctype));
+ }
+
+ memcpy(skb_push(msdu, hdr_len), hdr, hdr_len);
+
+ /* original 802.11 header has a different DA and in
+ * case of 4addr it may also have different SA
+ */
+ hdr = (struct ieee80211_hdr *)msdu->data;
+ ether_addr_copy(ieee80211_get_DA(hdr), da);
+ ether_addr_copy(ieee80211_get_SA(hdr), sa);
+}
+
+static void ath11k_dp_rx_h_undecap(struct ath11k *ar, struct sk_buff *msdu,
+ struct hal_rx_desc *rx_desc,
+ enum hal_encrypt_type enctype,
+ struct ieee80211_rx_status *status,
+ bool decrypted)
+{
+ u8 *first_hdr;
+ u8 decap;
+ struct ethhdr *ehdr;
+
+ first_hdr = ath11k_dp_rx_h_80211_hdr(ar->ab, rx_desc);
+ decap = ath11k_dp_rx_h_msdu_start_decap_type(ar->ab, rx_desc);
+
+ switch (decap) {
+ case DP_RX_DECAP_TYPE_NATIVE_WIFI:
+ ath11k_dp_rx_h_undecap_nwifi(ar, msdu, first_hdr,
+ enctype, status);
+ break;
+ case DP_RX_DECAP_TYPE_RAW:
+ ath11k_dp_rx_h_undecap_raw(ar, msdu, enctype, status,
+ decrypted);
+ break;
+ case DP_RX_DECAP_TYPE_ETHERNET2_DIX:
+ ehdr = (struct ethhdr *)msdu->data;
+
+ /* mac80211 allows fast path only for authorized STA */
+ if (ehdr->h_proto == cpu_to_be16(ETH_P_PAE)) {
+ ATH11K_SKB_RXCB(msdu)->is_eapol = true;
+ ath11k_dp_rx_h_undecap_eth(ar, msdu, first_hdr,
+ enctype, status);
+ break;
+ }
+
+ /* PN for mcast packets will be validated in mac80211;
+ * remove eth header and add 802.11 header.
+ */
+ if (ATH11K_SKB_RXCB(msdu)->is_mcbc && decrypted)
+ ath11k_dp_rx_h_undecap_eth(ar, msdu, first_hdr,
+ enctype, status);
+ break;
+ case DP_RX_DECAP_TYPE_8023:
+ /* TODO: Handle undecap for these formats */
+ break;
+ }
+}
+
+static struct ath11k_peer *
+ath11k_dp_rx_h_find_peer(struct ath11k_base *ab, struct sk_buff *msdu)
+{
+ struct ath11k_skb_rxcb *rxcb = ATH11K_SKB_RXCB(msdu);
+ struct hal_rx_desc *rx_desc = rxcb->rx_desc;
+ struct ath11k_peer *peer = NULL;
+
+ lockdep_assert_held(&ab->base_lock);
+
+ if (rxcb->peer_id)
+ peer = ath11k_peer_find_by_id(ab, rxcb->peer_id);
+
+ if (peer)
+ return peer;
+
+ if (!rx_desc || !(ath11k_dp_rxdesc_mac_addr2_valid(ab, rx_desc)))
+ return NULL;
+
+ peer = ath11k_peer_find_by_addr(ab,
+ ath11k_dp_rxdesc_mpdu_start_addr2(ab, rx_desc));
+ return peer;
+}
+
+static void ath11k_dp_rx_h_mpdu(struct ath11k *ar,
+ struct sk_buff *msdu,
+ struct hal_rx_desc *rx_desc,
+ struct ieee80211_rx_status *rx_status)
+{
+ bool fill_crypto_hdr;
+ enum hal_encrypt_type enctype;
+ bool is_decrypted = false;
+ struct ath11k_skb_rxcb *rxcb;
+ struct ieee80211_hdr *hdr;
+ struct ath11k_peer *peer;
+ struct rx_attention *rx_attention;
+ u32 err_bitmap;
+
+ /* PN for multicast packets will be checked in mac80211 */
+ rxcb = ATH11K_SKB_RXCB(msdu);
+ fill_crypto_hdr = ath11k_dp_rx_h_attn_is_mcbc(ar->ab, rx_desc);
+ rxcb->is_mcbc = fill_crypto_hdr;
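+ /* For mcast/bcast frames the HW leaves the crypto header in place
+ * and does not validate the PN, so mac80211 handles both
+ * (fill_crypto_hdr).
+ */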
+
+ if (rxcb->is_mcbc) {
+ rxcb->peer_id = ath11k_dp_rx_h_mpdu_start_peer_id(ar->ab, rx_desc);
+ rxcb->seq_no = ath11k_dp_rx_h_mpdu_start_seq_no(ar->ab, rx_desc);
+ }
+
+ spin_lock_bh(&ar->ab->base_lock);
+ peer = ath11k_dp_rx_h_find_peer(ar->ab, msdu);
+ if (peer) {
+ if (rxcb->is_mcbc)
+ enctype = peer->sec_type_grp;
+ else
+ enctype = peer->sec_type;
+ } else {
+ enctype = ath11k_dp_rx_h_mpdu_start_enctype(ar->ab, rx_desc);
+ }
+ spin_unlock_bh(&ar->ab->base_lock);
+
+ rx_attention = ath11k_dp_rx_get_attention(ar->ab, rx_desc);
+ err_bitmap = ath11k_dp_rx_h_attn_mpdu_err(rx_attention);
+ if (enctype != HAL_ENCRYPT_TYPE_OPEN && !err_bitmap)
+ is_decrypted = ath11k_dp_rx_h_attn_is_decrypted(rx_attention);
+
+ /* Clear per-MPDU flags while leaving per-PPDU flags intact */
+ rx_status->flag &= ~(RX_FLAG_FAILED_FCS_CRC |
+ RX_FLAG_MMIC_ERROR |
+ RX_FLAG_DECRYPTED |
+ RX_FLAG_IV_STRIPPED |
+ RX_FLAG_MMIC_STRIPPED);
+
+ if (err_bitmap & DP_RX_MPDU_ERR_FCS)
+ rx_status->flag |= RX_FLAG_FAILED_FCS_CRC;
+ if (err_bitmap & DP_RX_MPDU_ERR_TKIP_MIC)
+ rx_status->flag |= RX_FLAG_MMIC_ERROR;
+
+ if (is_decrypted) {
+ rx_status->flag |= RX_FLAG_DECRYPTED | RX_FLAG_MMIC_STRIPPED;
+
+ if (fill_crypto_hdr)
+ rx_status->flag |= RX_FLAG_MIC_STRIPPED |
+ RX_FLAG_ICV_STRIPPED;
+ else
+ rx_status->flag |= RX_FLAG_IV_STRIPPED |
+ RX_FLAG_PN_VALIDATED;
+ }
+
+ ath11k_dp_rx_h_csum_offload(ar, msdu);
+ ath11k_dp_rx_h_undecap(ar, msdu, rx_desc,
+ enctype, rx_status, is_decrypted);
+
+ if (!is_decrypted || fill_crypto_hdr)
+ return;
+
+ if (ath11k_dp_rx_h_msdu_start_decap_type(ar->ab, rx_desc) !=
+ DP_RX_DECAP_TYPE_ETHERNET2_DIX) {
+ hdr = (void *)msdu->data;
+ hdr->frame_control &= ~__cpu_to_le16(IEEE80211_FCTL_PROTECTED);
+ }
+}
+
+static void ath11k_dp_rx_h_rate(struct ath11k *ar, struct hal_rx_desc *rx_desc,
+ struct ieee80211_rx_status *rx_status)
+{
+ struct ieee80211_supported_band *sband;
+ enum rx_msdu_start_pkt_type pkt_type;
+ u8 bw;
+ u8 rate_mcs, nss;
+ u8 sgi;
+ bool is_cck, is_ldpc;
+
+ pkt_type = ath11k_dp_rx_h_msdu_start_pkt_type(ar->ab, rx_desc);
+ bw = ath11k_dp_rx_h_msdu_start_rx_bw(ar->ab, rx_desc);
+ rate_mcs = ath11k_dp_rx_h_msdu_start_rate_mcs(ar->ab, rx_desc);
+ nss = ath11k_dp_rx_h_msdu_start_nss(ar->ab, rx_desc);
+ sgi = ath11k_dp_rx_h_msdu_start_sgi(ar->ab, rx_desc);
+
+ switch (pkt_type) {
+ case RX_MSDU_START_PKT_TYPE_11A:
+ case RX_MSDU_START_PKT_TYPE_11B:
+ is_cck = (pkt_type == RX_MSDU_START_PKT_TYPE_11B);
+ sband = &ar->mac.sbands[rx_status->band];
+ rx_status->rate_idx = ath11k_mac_hw_rate_to_idx(sband, rate_mcs,
+ is_cck);
+ break;
+ case RX_MSDU_START_PKT_TYPE_11N:
+ rx_status->encoding = RX_ENC_HT;
+ if (rate_mcs > ATH11K_HT_MCS_MAX) {
+ ath11k_warn(ar->ab,
+ "Received with invalid mcs in HT mode %d\n",
+ rate_mcs);
+ break;
+ }
+ rx_status->rate_idx = rate_mcs + (8 * (nss - 1));
+ if (sgi)
+ rx_status->enc_flags |= RX_ENC_FLAG_SHORT_GI;
+ rx_status->bw = ath11k_mac_bw_to_mac80211_bw(bw);
+ break;
+ case RX_MSDU_START_PKT_TYPE_11AC:
+ rx_status->encoding = RX_ENC_VHT;
+ rx_status->rate_idx = rate_mcs;
+ if (rate_mcs > ATH11K_VHT_MCS_MAX) {
+ ath11k_warn(ar->ab,
+ "Received with invalid mcs in VHT mode %d\n",
+ rate_mcs);
+ break;
+ }
+ rx_status->nss = nss;
+ if (sgi)
+ rx_status->enc_flags |= RX_ENC_FLAG_SHORT_GI;
+ rx_status->bw = ath11k_mac_bw_to_mac80211_bw(bw);
+ is_ldpc = ath11k_dp_rx_h_msdu_start_ldpc_support(ar->ab, rx_desc);
+ if (is_ldpc)
+ rx_status->enc_flags |= RX_ENC_FLAG_LDPC;
+ break;
+ case RX_MSDU_START_PKT_TYPE_11AX:
+ rx_status->rate_idx = rate_mcs;
+ if (rate_mcs > ATH11K_HE_MCS_MAX) {
+ ath11k_warn(ar->ab,
+ "Received with invalid mcs in HE mode %d\n",
+ rate_mcs);
+ break;
+ }
+ rx_status->encoding = RX_ENC_HE;
+ rx_status->nss = nss;
+ rx_status->he_gi = ath11k_mac_he_gi_to_nl80211_he_gi(sgi);
+ rx_status->bw = ath11k_mac_bw_to_mac80211_bw(bw);
+ break;
+ }
+}
+
+static void ath11k_dp_rx_h_ppdu(struct ath11k *ar, struct hal_rx_desc *rx_desc,
+ struct ieee80211_rx_status *rx_status)
+{
+ u8 channel_num;
+ u32 center_freq, meta_data;
+ struct ieee80211_channel *channel;
+
+ rx_status->freq = 0;
+ rx_status->rate_idx = 0;
+ rx_status->nss = 0;
+ rx_status->encoding = RX_ENC_LEGACY;
+ rx_status->bw = RATE_INFO_BW_20;
+
+ rx_status->flag |= RX_FLAG_NO_SIGNAL_VAL;
+
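+ /* The msdu_start freq field packs the channel number in the low
+ * 16 bits and the center frequency in the upper 16 bits.
+ */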
+ meta_data = ath11k_dp_rx_h_msdu_start_freq(ar->ab, rx_desc);
+ channel_num = meta_data;
+ center_freq = meta_data >> 16;
+
+ if (center_freq >= ATH11K_MIN_6G_FREQ &&
+ center_freq <= ATH11K_MAX_6G_FREQ) {
+ rx_status->band = NL80211_BAND_6GHZ;
+ rx_status->freq = center_freq;
+ } else if (channel_num >= 1 && channel_num <= 14) {
+ rx_status->band = NL80211_BAND_2GHZ;
+ } else if (channel_num >= 36 && channel_num <= 177) {
+ rx_status->band = NL80211_BAND_5GHZ;
+ } else {
+ spin_lock_bh(&ar->data_lock);
+ channel = ar->rx_channel;
+ if (channel) {
+ rx_status->band = channel->band;
+ channel_num =
+ ieee80211_frequency_to_channel(channel->center_freq);
+ }
+ spin_unlock_bh(&ar->data_lock);
+ ath11k_dbg_dump(ar->ab, ATH11K_DBG_DATA, NULL, "rx_desc: ",
+ rx_desc, sizeof(struct hal_rx_desc));
+ }
+
+ if (rx_status->band != NL80211_BAND_6GHZ)
+ rx_status->freq = ieee80211_channel_to_frequency(channel_num,
+ rx_status->band);
+
+ ath11k_dp_rx_h_rate(ar, rx_desc, rx_status);
+}
+
+static void ath11k_dp_rx_deliver_msdu(struct ath11k *ar, struct napi_struct *napi,
+ struct sk_buff *msdu,
+ struct ieee80211_rx_status *status)
+{
+ static const struct ieee80211_radiotap_he known = {
+ .data1 = cpu_to_le16(IEEE80211_RADIOTAP_HE_DATA1_DATA_MCS_KNOWN |
+ IEEE80211_RADIOTAP_HE_DATA1_BW_RU_ALLOC_KNOWN),
+ .data2 = cpu_to_le16(IEEE80211_RADIOTAP_HE_DATA2_GI_KNOWN),
+ };
+ struct ieee80211_rx_status *rx_status;
+ struct ieee80211_radiotap_he *he = NULL;
+ struct ieee80211_sta *pubsta = NULL;
+ struct ath11k_peer *peer;
+ struct ath11k_skb_rxcb *rxcb = ATH11K_SKB_RXCB(msdu);
+ u8 decap = DP_RX_DECAP_TYPE_RAW;
+ bool is_mcbc = rxcb->is_mcbc;
+ bool is_eapol = rxcb->is_eapol;
+
+ if (status->encoding == RX_ENC_HE &&
+ !(status->flag & RX_FLAG_RADIOTAP_HE) &&
+ !(status->flag & RX_FLAG_SKIP_MONITOR)) {
+ he = skb_push(msdu, sizeof(known));
+ memcpy(he, &known, sizeof(known));
+ status->flag |= RX_FLAG_RADIOTAP_HE;
+ }
+
+ if (!(status->flag & RX_FLAG_ONLY_MONITOR))
+ decap = ath11k_dp_rx_h_msdu_start_decap_type(ar->ab, rxcb->rx_desc);
+
+ spin_lock_bh(&ar->ab->base_lock);
+ peer = ath11k_dp_rx_h_find_peer(ar->ab, msdu);
+ if (peer && peer->sta)
+ pubsta = peer->sta;
+ spin_unlock_bh(&ar->ab->base_lock);
+
+ ath11k_dbg(ar->ab, ATH11K_DBG_DATA,
+ "rx skb %p len %u peer %pM %d %s sn %u %s%s%s%s%s%s%s %srate_idx %u vht_nss %u freq %u band %u flag 0x%x fcs-err %i mic-err %i amsdu-more %i\n",
+ msdu,
+ msdu->len,
+ peer ? peer->addr : NULL,
+ rxcb->tid,
+ is_mcbc ? "mcast" : "ucast",
+ rxcb->seq_no,
+ (status->encoding == RX_ENC_LEGACY) ? "legacy" : "",
+ (status->encoding == RX_ENC_HT) ? "ht" : "",
+ (status->encoding == RX_ENC_VHT) ? "vht" : "",
+ (status->encoding == RX_ENC_HE) ? "he" : "",
+ (status->bw == RATE_INFO_BW_40) ? "40" : "",
+ (status->bw == RATE_INFO_BW_80) ? "80" : "",
+ (status->bw == RATE_INFO_BW_160) ? "160" : "",
+ status->enc_flags & RX_ENC_FLAG_SHORT_GI ? "sgi " : "",
+ status->rate_idx,
+ status->nss,
+ status->freq,
+ status->band, status->flag,
+ !!(status->flag & RX_FLAG_FAILED_FCS_CRC),
+ !!(status->flag & RX_FLAG_MMIC_ERROR),
+ !!(status->flag & RX_FLAG_AMSDU_MORE));
+
+ ath11k_dbg_dump(ar->ab, ATH11K_DBG_DP_RX, NULL, "dp rx msdu: ",
+ msdu->data, msdu->len);
+
+ rx_status = IEEE80211_SKB_RXCB(msdu);
+ *rx_status = *status;
+
+ /* TODO: trace rx packet */
+
+ /* The PN for multicast packets is not validated in HW,
+ * so skip the 802.3 rx path for them.
+ * Also, fast_rx expects the STA to be authorized, hence
+ * EAPOL packets are sent via the slow path.
+ */
+ if (decap == DP_RX_DECAP_TYPE_ETHERNET2_DIX && !is_eapol &&
+ !(is_mcbc && rx_status->flag & RX_FLAG_DECRYPTED))
+ rx_status->flag |= RX_FLAG_8023;
+
+ ieee80211_rx_napi(ar->hw, pubsta, msdu, napi);
+}
+
+static int ath11k_dp_rx_process_msdu(struct ath11k *ar,
+ struct sk_buff *msdu,
+ struct sk_buff_head *msdu_list,
+ struct ieee80211_rx_status *rx_status)
+{
+ struct ath11k_base *ab = ar->ab;
+ struct hal_rx_desc *rx_desc, *lrx_desc;
+ struct rx_attention *rx_attention;
+ struct ath11k_skb_rxcb *rxcb;
+ struct sk_buff *last_buf;
+ u8 l3_pad_bytes;
+ u8 *hdr_status;
+ u16 msdu_len;
+ int ret;
+ u32 hal_rx_desc_sz = ar->ab->hw_params.hal_desc_sz;
+
+ last_buf = ath11k_dp_rx_get_msdu_last_buf(msdu_list, msdu);
+ if (!last_buf) {
+ ath11k_warn(ab,
+ "No valid Rx buffer to access Atten/MSDU_END/MPDU_END tlvs\n");
+ ret = -EIO;
+ goto free_out;
+ }
+
+ rx_desc = (struct hal_rx_desc *)msdu->data;
+ if (ath11k_dp_rx_h_attn_msdu_len_err(ab, rx_desc)) {
+ ath11k_warn(ar->ab, "msdu len not valid\n");
+ ret = -EIO;
+ goto free_out;
+ }
+
+ lrx_desc = (struct hal_rx_desc *)last_buf->data;
+ rx_attention = ath11k_dp_rx_get_attention(ab, lrx_desc);
+ if (!ath11k_dp_rx_h_attn_msdu_done(rx_attention)) {
+ ath11k_warn(ab, "msdu_done bit in attention is not set\n");
+ ret = -EIO;
+ goto free_out;
+ }
+
+ rxcb = ATH11K_SKB_RXCB(msdu);
+ rxcb->rx_desc = rx_desc;
+ msdu_len = ath11k_dp_rx_h_msdu_start_msdu_len(ab, rx_desc);
+ l3_pad_bytes = ath11k_dp_rx_h_msdu_end_l3pad(ab, lrx_desc);
+
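+ /* Fragments only need the rx descriptor stripped; single-buffer
+ * MSDUs are trimmed in place; multi-buffer MSDUs must first be
+ * coalesced into one skb.
+ */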
+ if (rxcb->is_frag) {
+ skb_pull(msdu, hal_rx_desc_sz);
+ } else if (!rxcb->is_continuation) {
+ if ((msdu_len + hal_rx_desc_sz) > DP_RX_BUFFER_SIZE) {
+ hdr_status = ath11k_dp_rx_h_80211_hdr(ab, rx_desc);
+ ret = -EINVAL;
+ ath11k_warn(ab, "invalid msdu len %u\n", msdu_len);
+ ath11k_dbg_dump(ab, ATH11K_DBG_DATA, NULL, "", hdr_status,
+ sizeof(struct ieee80211_hdr));
+ ath11k_dbg_dump(ab, ATH11K_DBG_DATA, NULL, "", rx_desc,
+ sizeof(struct hal_rx_desc));
+ goto free_out;
+ }
+ skb_put(msdu, hal_rx_desc_sz + l3_pad_bytes + msdu_len);
+ skb_pull(msdu, hal_rx_desc_sz + l3_pad_bytes);
+ } else {
+ ret = ath11k_dp_rx_msdu_coalesce(ar, msdu_list,
+ msdu, last_buf,
+ l3_pad_bytes, msdu_len);
+ if (ret) {
+ ath11k_warn(ab,
+ "failed to coalesce msdu rx buffer%d\n", ret);
+ goto free_out;
+ }
+ }
+
+ ath11k_dp_rx_h_ppdu(ar, rx_desc, rx_status);
+ ath11k_dp_rx_h_mpdu(ar, msdu, rx_desc, rx_status);
+
+ rx_status->flag |= RX_FLAG_SKIP_MONITOR | RX_FLAG_DUP_VALIDATED;
+
+ return 0;
+
+free_out:
+ return ret;
+}
+
+static void ath11k_dp_rx_process_received_packets(struct ath11k_base *ab,
+ struct napi_struct *napi,
+ struct sk_buff_head *msdu_list,
+ int mac_id)
+{
+ struct sk_buff *msdu;
+ struct ath11k *ar;
+ struct ieee80211_rx_status rx_status = {0};
+ int ret;
+
+ if (skb_queue_empty(msdu_list))
+ return;
+
+ if (unlikely(!rcu_access_pointer(ab->pdevs_active[mac_id]))) {
+ __skb_queue_purge(msdu_list);
+ return;
+ }
+
+ ar = ab->pdevs[mac_id].ar;
+ if (unlikely(test_bit(ATH11K_CAC_RUNNING, &ar->dev_flags))) {
+ __skb_queue_purge(msdu_list);
+ return;
+ }
+
+ while ((msdu = __skb_dequeue(msdu_list))) {
+ ret = ath11k_dp_rx_process_msdu(ar, msdu, msdu_list, &rx_status);
+ if (unlikely(ret)) {
+ ath11k_dbg(ab, ATH11K_DBG_DATA,
+ "Unable to process msdu %d", ret);
+ dev_kfree_skb_any(msdu);
+ continue;
+ }
+
+ ath11k_dp_rx_deliver_msdu(ar, napi, msdu, &rx_status);
+ }
+}
+
+int ath11k_dp_process_rx(struct ath11k_base *ab, int ring_id,
+ struct napi_struct *napi, int budget)
+{
+ struct ath11k_dp *dp = &ab->dp;
+ struct dp_rxdma_ring *rx_ring;
+ int num_buffs_reaped[MAX_RADIOS] = {0};
+ struct sk_buff_head msdu_list[MAX_RADIOS];
+ struct ath11k_skb_rxcb *rxcb;
+ int total_msdu_reaped = 0;
+ struct hal_srng *srng;
+ struct sk_buff *msdu;
+ bool done = false;
+ int buf_id, mac_id;
+ struct ath11k *ar;
+ struct hal_reo_dest_ring *desc;
+ enum hal_reo_dest_ring_push_reason push_reason;
+ u32 cookie;
+ int i;
+
+ for (i = 0; i < MAX_RADIOS; i++)
+ __skb_queue_head_init(&msdu_list[i]);
+
+ srng = &ab->hal.srng_list[dp->reo_dst_ring[ring_id].ring_id];
+
+ spin_lock_bh(&srng->lock);
+
+try_again:
+ ath11k_hal_srng_access_begin(ab, srng);
+
+ while (likely(desc =
+ (struct hal_reo_dest_ring *)ath11k_hal_srng_dst_get_next_entry(ab,
+ srng))) {
+ cookie = FIELD_GET(BUFFER_ADDR_INFO1_SW_COOKIE,
+ desc->buf_addr_info.info1);
+ buf_id = FIELD_GET(DP_RXDMA_BUF_COOKIE_BUF_ID,
+ cookie);
+ mac_id = FIELD_GET(DP_RXDMA_BUF_COOKIE_PDEV_ID, cookie);
+
+ if (unlikely(buf_id == 0))
+ continue;
+
+ ar = ab->pdevs[mac_id].ar;
+ rx_ring = &ar->dp.rx_refill_buf_ring;
+ spin_lock_bh(&rx_ring->idr_lock);
+ msdu = idr_find(&rx_ring->bufs_idr, buf_id);
+ if (unlikely(!msdu)) {
+ ath11k_warn(ab, "frame rx with invalid buf_id %d\n",
+ buf_id);
+ spin_unlock_bh(&rx_ring->idr_lock);
+ continue;
+ }
+
+ idr_remove(&rx_ring->bufs_idr, buf_id);
+ spin_unlock_bh(&rx_ring->idr_lock);
+
+ rxcb = ATH11K_SKB_RXCB(msdu);
+ dma_unmap_single(ab->dev, rxcb->paddr,
+ msdu->len + skb_tailroom(msdu),
+ DMA_FROM_DEVICE);
+
+ num_buffs_reaped[mac_id]++;
+
+ push_reason = FIELD_GET(HAL_REO_DEST_RING_INFO0_PUSH_REASON,
+ desc->info0);
+ if (unlikely(push_reason !=
+ HAL_REO_DEST_RING_PUSH_REASON_ROUTING_INSTRUCTION)) {
+ dev_kfree_skb_any(msdu);
+ ab->soc_stats.hal_reo_error[dp->reo_dst_ring[ring_id].ring_id]++;
+ continue;
+ }
+
+ rxcb->is_first_msdu = !!(desc->rx_msdu_info.info0 &
+ RX_MSDU_DESC_INFO0_FIRST_MSDU_IN_MPDU);
+ rxcb->is_last_msdu = !!(desc->rx_msdu_info.info0 &
+ RX_MSDU_DESC_INFO0_LAST_MSDU_IN_MPDU);
+ rxcb->is_continuation = !!(desc->rx_msdu_info.info0 &
+ RX_MSDU_DESC_INFO0_MSDU_CONTINUATION);
+ rxcb->peer_id = FIELD_GET(RX_MPDU_DESC_META_DATA_PEER_ID,
+ desc->rx_mpdu_info.meta_data);
+ rxcb->seq_no = FIELD_GET(RX_MPDU_DESC_INFO0_SEQ_NUM,
+ desc->rx_mpdu_info.info0);
+ rxcb->tid = FIELD_GET(HAL_REO_DEST_RING_INFO0_RX_QUEUE_NUM,
+ desc->info0);
+
+ rxcb->mac_id = mac_id;
+ __skb_queue_tail(&msdu_list[mac_id], msdu);
+
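+ /* An MSDU spanning multiple buffers is counted once, when its
+ * final (non-continuation) buffer is reaped.
+ */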
+ if (rxcb->is_continuation) {
+ done = false;
+ } else {
+ total_msdu_reaped++;
+ done = true;
+ }
+
+ if (total_msdu_reaped >= budget)
+ break;
+ }
+
+ /* HW might have updated the head pointer after we cached it.
+ * In that case, even though there are entries in the ring, we'll
+ * get a NULL rx desc. Give the read another try with the updated
+ * cached head pointer so that we can reap the complete MPDU in the
+ * current rx processing pass.
+ */
+ if (unlikely(!done && ath11k_hal_srng_dst_num_free(ab, srng, true))) {
+ ath11k_hal_srng_access_end(ab, srng);
+ goto try_again;
+ }
+
+ ath11k_hal_srng_access_end(ab, srng);
+
+ spin_unlock_bh(&srng->lock);
+
+ if (unlikely(!total_msdu_reaped))
+ goto exit;
+
+ for (i = 0; i < ab->num_radios; i++) {
+ if (!num_buffs_reaped[i])
+ continue;
+
+ ath11k_dp_rx_process_received_packets(ab, napi, &msdu_list[i], i);
+
+ ar = ab->pdevs[i].ar;
+ rx_ring = &ar->dp.rx_refill_buf_ring;
+
+ ath11k_dp_rxbufs_replenish(ab, i, rx_ring, num_buffs_reaped[i],
+ ab->hw_params.hal_params->rx_buf_rbm);
+ }
+exit:
+ return total_msdu_reaped;
+}
+
+static void ath11k_dp_rx_update_peer_stats(struct ath11k_sta *arsta,
+ struct hal_rx_mon_ppdu_info *ppdu_info)
+{
+ struct ath11k_rx_peer_stats *rx_stats = arsta->rx_stats;
+ u32 num_msdu;
+ int i;
+
+ if (!rx_stats)
+ return;
+
+ arsta->rssi_comb = ppdu_info->rssi_comb;
+ ewma_avg_rssi_add(&arsta->avg_rssi, ppdu_info->rssi_comb);
+
+ num_msdu = ppdu_info->tcp_msdu_count + ppdu_info->tcp_ack_msdu_count +
+ ppdu_info->udp_msdu_count + ppdu_info->other_msdu_count;
+
+ rx_stats->num_msdu += num_msdu;
+ rx_stats->tcp_msdu_count += ppdu_info->tcp_msdu_count +
+ ppdu_info->tcp_ack_msdu_count;
+ rx_stats->udp_msdu_count += ppdu_info->udp_msdu_count;
+ rx_stats->other_msdu_count += ppdu_info->other_msdu_count;
+
+ if (ppdu_info->preamble_type == HAL_RX_PREAMBLE_11A ||
+ ppdu_info->preamble_type == HAL_RX_PREAMBLE_11B) {
+ ppdu_info->nss = 1;
+ ppdu_info->mcs = HAL_RX_MAX_MCS;
+ ppdu_info->tid = IEEE80211_NUM_TIDS;
+ }
+
+ if (ppdu_info->nss > 0 && ppdu_info->nss <= HAL_RX_MAX_NSS)
+ rx_stats->nss_count[ppdu_info->nss - 1] += num_msdu;
+
+ if (ppdu_info->mcs <= HAL_RX_MAX_MCS)
+ rx_stats->mcs_count[ppdu_info->mcs] += num_msdu;
+
+ if (ppdu_info->gi < HAL_RX_GI_MAX)
+ rx_stats->gi_count[ppdu_info->gi] += num_msdu;
+
+ if (ppdu_info->bw < HAL_RX_BW_MAX)
+ rx_stats->bw_count[ppdu_info->bw] += num_msdu;
+
+ if (ppdu_info->ldpc < HAL_RX_SU_MU_CODING_MAX)
+ rx_stats->coding_count[ppdu_info->ldpc] += num_msdu;
+
+ if (ppdu_info->tid <= IEEE80211_NUM_TIDS)
+ rx_stats->tid_count[ppdu_info->tid] += num_msdu;
+
+ if (ppdu_info->preamble_type < HAL_RX_PREAMBLE_MAX)
+ rx_stats->pream_cnt[ppdu_info->preamble_type] += num_msdu;
+
+ if (ppdu_info->reception_type < HAL_RX_RECEPTION_TYPE_MAX)
+ rx_stats->reception_type[ppdu_info->reception_type] += num_msdu;
+
+ if (ppdu_info->is_stbc)
+ rx_stats->stbc_count += num_msdu;
+
+ if (ppdu_info->beamformed)
+ rx_stats->beamformed_count += num_msdu;
+
+ if (ppdu_info->num_mpdu_fcs_ok > 1)
+ rx_stats->ampdu_msdu_count += num_msdu;
+ else
+ rx_stats->non_ampdu_msdu_count += num_msdu;
+
+ rx_stats->num_mpdu_fcs_ok += ppdu_info->num_mpdu_fcs_ok;
+ rx_stats->num_mpdu_fcs_err += ppdu_info->num_mpdu_fcs_err;
+ rx_stats->dcm_count += ppdu_info->dcm;
+ rx_stats->ru_alloc_cnt[ppdu_info->ru_alloc] += num_msdu;
+
+ arsta->rssi_comb = ppdu_info->rssi_comb;
+
+ BUILD_BUG_ON(ARRAY_SIZE(arsta->chain_signal) >
+ ARRAY_SIZE(ppdu_info->rssi_chain_pri20));
+
+ for (i = 0; i < ARRAY_SIZE(arsta->chain_signal); i++)
+ arsta->chain_signal[i] = ppdu_info->rssi_chain_pri20[i];
+
+ rx_stats->rx_duration += ppdu_info->rx_duration;
+ arsta->rx_duration = rx_stats->rx_duration;
+}
+
+static struct sk_buff *ath11k_dp_rx_alloc_mon_status_buf(struct ath11k_base *ab,
+ struct dp_rxdma_ring *rx_ring,
+ int *buf_id)
+{
+ struct sk_buff *skb;
+ dma_addr_t paddr;
+
+ skb = dev_alloc_skb(DP_RX_BUFFER_SIZE +
+ DP_RX_BUFFER_ALIGN_SIZE);
+
+ if (!skb)
+ goto fail_alloc_skb;
+
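+ /* Advance skb->data to the next DP_RX_BUFFER_ALIGN_SIZE boundary;
+ * the extra headroom for this was reserved in the allocation above.
+ */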
+ if (!IS_ALIGNED((unsigned long)skb->data,
+ DP_RX_BUFFER_ALIGN_SIZE)) {
+ skb_pull(skb, PTR_ALIGN(skb->data, DP_RX_BUFFER_ALIGN_SIZE) -
+ skb->data);
+ }
+
+ paddr = dma_map_single(ab->dev, skb->data,
+ skb->len + skb_tailroom(skb),
+ DMA_FROM_DEVICE);
+ if (unlikely(dma_mapping_error(ab->dev, paddr)))
+ goto fail_free_skb;
+
+ spin_lock_bh(&rx_ring->idr_lock);
+ *buf_id = idr_alloc(&rx_ring->bufs_idr, skb, 0,
+ rx_ring->bufs_max, GFP_ATOMIC);
+ spin_unlock_bh(&rx_ring->idr_lock);
+ if (*buf_id < 0)
+ goto fail_dma_unmap;
+
+ ATH11K_SKB_RXCB(skb)->paddr = paddr;
+ return skb;
+
+fail_dma_unmap:
+ dma_unmap_single(ab->dev, paddr, skb->len + skb_tailroom(skb),
+ DMA_FROM_DEVICE);
+fail_free_skb:
+ dev_kfree_skb_any(skb);
+fail_alloc_skb:
+ return NULL;
+}
+
+int ath11k_dp_rx_mon_status_bufs_replenish(struct ath11k_base *ab, int mac_id,
+ struct dp_rxdma_ring *rx_ring,
+ int req_entries,
+ enum hal_rx_buf_return_buf_manager mgr)
+{
+ struct hal_srng *srng;
+ u32 *desc;
+ struct sk_buff *skb;
+ int num_free;
+ int num_remain;
+ int buf_id;
+ u32 cookie;
+ dma_addr_t paddr;
+
+ req_entries = min(req_entries, rx_ring->bufs_max);
+
+ srng = &ab->hal.srng_list[rx_ring->refill_buf_ring.ring_id];
+
+ spin_lock_bh(&srng->lock);
+
+ ath11k_hal_srng_access_begin(ab, srng);
+
+ num_free = ath11k_hal_srng_src_num_free(ab, srng, true);
+
+ req_entries = min(num_free, req_entries);
+ num_remain = req_entries;
+
+ while (num_remain > 0) {
+ skb = ath11k_dp_rx_alloc_mon_status_buf(ab, rx_ring,
+ &buf_id);
+ if (!skb)
+ break;
+ paddr = ATH11K_SKB_RXCB(skb)->paddr;
+
+ desc = ath11k_hal_srng_src_get_next_entry(ab, srng);
+ if (!desc)
+ goto fail_desc_get;
+
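+		/* Encode the pdev id and the IDR buffer id into the cookie
+		 * so the reap path can map this ring entry back to its skb.
+		 */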
+ cookie = FIELD_PREP(DP_RXDMA_BUF_COOKIE_PDEV_ID, mac_id) |
+ FIELD_PREP(DP_RXDMA_BUF_COOKIE_BUF_ID, buf_id);
+
+ num_remain--;
+
+ ath11k_hal_rx_buf_addr_info_set(desc, paddr, cookie, mgr);
+ }
+
+ ath11k_hal_srng_access_end(ab, srng);
+
+ spin_unlock_bh(&srng->lock);
+
+ return req_entries - num_remain;
+
+fail_desc_get:
+ spin_lock_bh(&rx_ring->idr_lock);
+ idr_remove(&rx_ring->bufs_idr, buf_id);
+ spin_unlock_bh(&rx_ring->idr_lock);
+ dma_unmap_single(ab->dev, paddr, skb->len + skb_tailroom(skb),
+ DMA_FROM_DEVICE);
+ dev_kfree_skb_any(skb);
+ ath11k_hal_srng_access_end(ab, srng);
+ spin_unlock_bh(&srng->lock);
+
+ return req_entries - num_remain;
+}
+
+#define ATH11K_DP_RX_FULL_MON_PPDU_ID_WRAP 32535
+
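+/* Classify the status ring as leading or lagging the destination ring by
+ * comparing PPDU ids; a difference larger than the wrap threshold above is
+ * treated as a PPDU id counter wrap-around, which inverts the comparison.
+ */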
+static void
+ath11k_dp_rx_mon_update_status_buf_state(struct ath11k_mon_data *pmon,
+ struct hal_tlv_hdr *tlv)
+{
+ struct hal_rx_ppdu_start *ppdu_start;
+ u16 ppdu_id_diff, ppdu_id, tlv_len;
+ u8 *ptr;
+
+	/* The PPDU id is part of the second tlv; move ptr to the second tlv */
+ tlv_len = FIELD_GET(HAL_TLV_HDR_LEN, tlv->tl);
+ ptr = (u8 *)tlv;
+ ptr += sizeof(*tlv) + tlv_len;
+ tlv = (struct hal_tlv_hdr *)ptr;
+
+ if (FIELD_GET(HAL_TLV_HDR_TAG, tlv->tl) != HAL_RX_PPDU_START)
+ return;
+
+ ptr += sizeof(*tlv);
+ ppdu_start = (struct hal_rx_ppdu_start *)ptr;
+ ppdu_id = FIELD_GET(HAL_RX_PPDU_START_INFO0_PPDU_ID,
+ __le32_to_cpu(ppdu_start->info0));
+
+ if (pmon->sw_mon_entries.ppdu_id < ppdu_id) {
+ pmon->buf_state = DP_MON_STATUS_LEAD;
+ ppdu_id_diff = ppdu_id - pmon->sw_mon_entries.ppdu_id;
+ if (ppdu_id_diff > ATH11K_DP_RX_FULL_MON_PPDU_ID_WRAP)
+ pmon->buf_state = DP_MON_STATUS_LAG;
+ } else if (pmon->sw_mon_entries.ppdu_id > ppdu_id) {
+ pmon->buf_state = DP_MON_STATUS_LAG;
+ ppdu_id_diff = pmon->sw_mon_entries.ppdu_id - ppdu_id;
+ if (ppdu_id_diff > ATH11K_DP_RX_FULL_MON_PPDU_ID_WRAP)
+ pmon->buf_state = DP_MON_STATUS_LEAD;
+ }
+}
+
+static int ath11k_dp_rx_reap_mon_status_ring(struct ath11k_base *ab, int mac_id,
+ int *budget, struct sk_buff_head *skb_list)
+{
+ struct ath11k *ar;
+ const struct ath11k_hw_hal_params *hal_params;
+ struct ath11k_pdev_dp *dp;
+ struct dp_rxdma_ring *rx_ring;
+ struct ath11k_mon_data *pmon;
+ struct hal_srng *srng;
+ void *rx_mon_status_desc;
+ struct sk_buff *skb;
+ struct ath11k_skb_rxcb *rxcb;
+ struct hal_tlv_hdr *tlv;
+ u32 cookie;
+ int buf_id, srng_id;
+ dma_addr_t paddr;
+ u8 rbm;
+ int num_buffs_reaped = 0;
+
+ ar = ab->pdevs[ath11k_hw_mac_id_to_pdev_id(&ab->hw_params, mac_id)].ar;
+ dp = &ar->dp;
+ pmon = &dp->mon_data;
+ srng_id = ath11k_hw_mac_id_to_srng_id(&ab->hw_params, mac_id);
+ rx_ring = &dp->rx_mon_status_refill_ring[srng_id];
+
+ srng = &ab->hal.srng_list[rx_ring->refill_buf_ring.ring_id];
+
+ spin_lock_bh(&srng->lock);
+
+ ath11k_hal_srng_access_begin(ab, srng);
+ while (*budget) {
+ *budget -= 1;
+ rx_mon_status_desc =
+ ath11k_hal_srng_src_peek(ab, srng);
+ if (!rx_mon_status_desc) {
+ pmon->buf_state = DP_MON_STATUS_REPLINISH;
+ break;
+ }
+
+ ath11k_hal_rx_buf_addr_info_get(rx_mon_status_desc, &paddr,
+ &cookie, &rbm);
+ if (paddr) {
+ buf_id = FIELD_GET(DP_RXDMA_BUF_COOKIE_BUF_ID, cookie);
+
+ spin_lock_bh(&rx_ring->idr_lock);
+ skb = idr_find(&rx_ring->bufs_idr, buf_id);
+ spin_unlock_bh(&rx_ring->idr_lock);
+
+ if (!skb) {
+ ath11k_warn(ab, "rx monitor status with invalid buf_id %d\n",
+ buf_id);
+ pmon->buf_state = DP_MON_STATUS_REPLINISH;
+ goto move_next;
+ }
+
+ rxcb = ATH11K_SKB_RXCB(skb);
+
+ dma_sync_single_for_cpu(ab->dev, rxcb->paddr,
+ skb->len + skb_tailroom(skb),
+ DMA_FROM_DEVICE);
+
+ tlv = (struct hal_tlv_hdr *)skb->data;
+ if (FIELD_GET(HAL_TLV_HDR_TAG, tlv->tl) !=
+ HAL_RX_STATUS_BUFFER_DONE) {
+ ath11k_warn(ab, "mon status DONE not set %lx, buf_id %d\n",
+ FIELD_GET(HAL_TLV_HDR_TAG,
+ tlv->tl), buf_id);
+				/* If the DONE status is missing, hold on to
+				 * this status ring entry until the DMA for
+				 * this buffer completes: keep the HP of the
+				 * mon_status ring unchanged and break out
+				 * here so the same buffer is checked again
+				 * on the next pass.
+				 */
+ pmon->buf_state = DP_MON_STATUS_NO_DMA;
+ break;
+ }
+
+ spin_lock_bh(&rx_ring->idr_lock);
+ idr_remove(&rx_ring->bufs_idr, buf_id);
+ spin_unlock_bh(&rx_ring->idr_lock);
+ if (ab->hw_params.full_monitor_mode) {
+ ath11k_dp_rx_mon_update_status_buf_state(pmon, tlv);
+ if (paddr == pmon->mon_status_paddr)
+ pmon->buf_state = DP_MON_STATUS_MATCH;
+ }
+
+ dma_unmap_single(ab->dev, rxcb->paddr,
+ skb->len + skb_tailroom(skb),
+ DMA_FROM_DEVICE);
+
+ __skb_queue_tail(skb_list, skb);
+ } else {
+ pmon->buf_state = DP_MON_STATUS_REPLINISH;
+ }
+move_next:
+ skb = ath11k_dp_rx_alloc_mon_status_buf(ab, rx_ring,
+ &buf_id);
+
+ if (!skb) {
+ hal_params = ab->hw_params.hal_params;
+ ath11k_hal_rx_buf_addr_info_set(rx_mon_status_desc, 0, 0,
+ hal_params->rx_buf_rbm);
+ num_buffs_reaped++;
+ break;
+ }
+ rxcb = ATH11K_SKB_RXCB(skb);
+
+ cookie = FIELD_PREP(DP_RXDMA_BUF_COOKIE_PDEV_ID, mac_id) |
+ FIELD_PREP(DP_RXDMA_BUF_COOKIE_BUF_ID, buf_id);
+
+ ath11k_hal_rx_buf_addr_info_set(rx_mon_status_desc, rxcb->paddr,
+ cookie,
+ ab->hw_params.hal_params->rx_buf_rbm);
+ ath11k_hal_srng_src_get_next_entry(ab, srng);
+ num_buffs_reaped++;
+ }
+ ath11k_hal_srng_access_end(ab, srng);
+ spin_unlock_bh(&srng->lock);
+
+ return num_buffs_reaped;
+}
+
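+/* Reassembly timer: if the fragment bitmap is still incomplete when the
+ * timer fires, discard the partially received fragments.
+ */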
+static void ath11k_dp_rx_frag_timer(struct timer_list *timer)
+{
+ struct dp_rx_tid *rx_tid = from_timer(rx_tid, timer, frag_timer);
+
+ spin_lock_bh(&rx_tid->ab->base_lock);
+ if (rx_tid->last_frag_no &&
+ rx_tid->rx_frag_bitmap == GENMASK(rx_tid->last_frag_no, 0)) {
+ spin_unlock_bh(&rx_tid->ab->base_lock);
+ return;
+ }
+ ath11k_dp_rx_frags_cleanup(rx_tid, true);
+ spin_unlock_bh(&rx_tid->ab->base_lock);
+}
+
+int ath11k_peer_rx_frag_setup(struct ath11k *ar, const u8 *peer_mac, int vdev_id)
+{
+ struct ath11k_base *ab = ar->ab;
+ struct crypto_shash *tfm;
+ struct ath11k_peer *peer;
+ struct dp_rx_tid *rx_tid;
+ int i;
+
+ tfm = crypto_alloc_shash("michael_mic", 0, 0);
+ if (IS_ERR(tfm)) {
+ ath11k_warn(ab, "failed to allocate michael_mic shash: %ld\n",
+ PTR_ERR(tfm));
+ return PTR_ERR(tfm);
+ }
+
+ spin_lock_bh(&ab->base_lock);
+
+ peer = ath11k_peer_find(ab, vdev_id, peer_mac);
+ if (!peer) {
+ ath11k_warn(ab, "failed to find the peer to set up fragment info\n");
+ spin_unlock_bh(&ab->base_lock);
+ crypto_free_shash(tfm);
+ return -ENOENT;
+ }
+
+ for (i = 0; i <= IEEE80211_NUM_TIDS; i++) {
+ rx_tid = &peer->rx_tid[i];
+ rx_tid->ab = ab;
+ timer_setup(&rx_tid->frag_timer, ath11k_dp_rx_frag_timer, 0);
+ skb_queue_head_init(&rx_tid->rx_frags);
+ }
+
+ peer->tfm_mmic = tfm;
+ peer->dp_setup_done = true;
+ spin_unlock_bh(&ab->base_lock);
+
+ return 0;
+}
+
+static int ath11k_dp_rx_h_michael_mic(struct crypto_shash *tfm, u8 *key,
+ struct ieee80211_hdr *hdr, u8 *data,
+ size_t data_len, u8 *mic)
+{
+ SHASH_DESC_ON_STACK(desc, tfm);
+ u8 mic_hdr[16] = {0};
+ u8 tid = 0;
+ int ret;
+
+ if (!tfm)
+ return -EINVAL;
+
+ desc->tfm = tfm;
+
+ ret = crypto_shash_setkey(tfm, key, 8);
+ if (ret)
+ goto out;
+
+ ret = crypto_shash_init(desc);
+ if (ret)
+ goto out;
+
+	/* Michael MIC pseudo-header: DA (6 bytes), SA (6 bytes),
+	 * priority and three reserved zero bytes
+	 */
+ memcpy(mic_hdr, ieee80211_get_DA(hdr), ETH_ALEN);
+ memcpy(mic_hdr + ETH_ALEN, ieee80211_get_SA(hdr), ETH_ALEN);
+ if (ieee80211_is_data_qos(hdr->frame_control))
+ tid = ieee80211_get_tid(hdr);
+ mic_hdr[12] = tid;
+
+ ret = crypto_shash_update(desc, mic_hdr, 16);
+ if (ret)
+ goto out;
+ ret = crypto_shash_update(desc, data, data_len);
+ if (ret)
+ goto out;
+ ret = crypto_shash_final(desc, mic);
+out:
+ shash_desc_zero(desc);
+ return ret;
+}
+
+static int ath11k_dp_rx_h_verify_tkip_mic(struct ath11k *ar, struct ath11k_peer *peer,
+ struct sk_buff *msdu)
+{
+ struct hal_rx_desc *rx_desc = (struct hal_rx_desc *)msdu->data;
+ struct ieee80211_rx_status *rxs = IEEE80211_SKB_RXCB(msdu);
+ struct ieee80211_key_conf *key_conf;
+ struct ieee80211_hdr *hdr;
+ u8 mic[IEEE80211_CCMP_MIC_LEN];
+ int head_len, tail_len, ret;
+ size_t data_len;
+ u32 hdr_len, hal_rx_desc_sz = ar->ab->hw_params.hal_desc_sz;
+ u8 *key, *data;
+ u8 key_idx;
+
+ if (ath11k_dp_rx_h_mpdu_start_enctype(ar->ab, rx_desc) !=
+ HAL_ENCRYPT_TYPE_TKIP_MIC)
+ return 0;
+
+ hdr = (struct ieee80211_hdr *)(msdu->data + hal_rx_desc_sz);
+ hdr_len = ieee80211_hdrlen(hdr->frame_control);
+ head_len = hdr_len + hal_rx_desc_sz + IEEE80211_TKIP_IV_LEN;
+ tail_len = IEEE80211_CCMP_MIC_LEN + IEEE80211_TKIP_ICV_LEN + FCS_LEN;
+
+ if (!is_multicast_ether_addr(hdr->addr1))
+ key_idx = peer->ucast_keyidx;
+ else
+ key_idx = peer->mcast_keyidx;
+
+ key_conf = peer->keys[key_idx];
+
+ data = msdu->data + head_len;
+ data_len = msdu->len - head_len - tail_len;
+ key = &key_conf->key[NL80211_TKIP_DATA_OFFSET_RX_MIC_KEY];
+
+ ret = ath11k_dp_rx_h_michael_mic(peer->tfm_mmic, key, hdr, data, data_len, mic);
+ if (ret || memcmp(mic, data + data_len, IEEE80211_CCMP_MIC_LEN))
+ goto mic_fail;
+
+ return 0;
+
+mic_fail:
+ (ATH11K_SKB_RXCB(msdu))->is_first_msdu = true;
+ (ATH11K_SKB_RXCB(msdu))->is_last_msdu = true;
+
+ rxs->flag |= RX_FLAG_MMIC_ERROR | RX_FLAG_MMIC_STRIPPED |
+ RX_FLAG_IV_STRIPPED | RX_FLAG_DECRYPTED;
+ skb_pull(msdu, hal_rx_desc_sz);
+
+ ath11k_dp_rx_h_ppdu(ar, rx_desc, rxs);
+ ath11k_dp_rx_h_undecap(ar, msdu, rx_desc,
+ HAL_ENCRYPT_TYPE_TKIP_MIC, rxs, true);
+ ieee80211_rx(ar->hw, msdu);
+ return -EINVAL;
+}
+
+static void ath11k_dp_rx_h_undecap_frag(struct ath11k *ar, struct sk_buff *msdu,
+ enum hal_encrypt_type enctype, u32 flags)
+{
+ struct ieee80211_hdr *hdr;
+ size_t hdr_len;
+ size_t crypto_len;
+ u32 hal_rx_desc_sz = ar->ab->hw_params.hal_desc_sz;
+
+ if (!flags)
+ return;
+
+ hdr = (struct ieee80211_hdr *)(msdu->data + hal_rx_desc_sz);
+
+ if (flags & RX_FLAG_MIC_STRIPPED)
+ skb_trim(msdu, msdu->len -
+ ath11k_dp_rx_crypto_mic_len(ar, enctype));
+
+ if (flags & RX_FLAG_ICV_STRIPPED)
+ skb_trim(msdu, msdu->len -
+ ath11k_dp_rx_crypto_icv_len(ar, enctype));
+
+ if (flags & RX_FLAG_IV_STRIPPED) {
+ hdr_len = ieee80211_hdrlen(hdr->frame_control);
+ crypto_len = ath11k_dp_rx_crypto_param_len(ar, enctype);
+
+ memmove((void *)msdu->data + hal_rx_desc_sz + crypto_len,
+ (void *)msdu->data + hal_rx_desc_sz, hdr_len);
+ skb_pull(msdu, crypto_len);
+ }
+}
+
+static int ath11k_dp_rx_h_defrag(struct ath11k *ar,
+ struct ath11k_peer *peer,
+ struct dp_rx_tid *rx_tid,
+ struct sk_buff **defrag_skb)
+{
+ struct hal_rx_desc *rx_desc;
+ struct sk_buff *skb, *first_frag, *last_frag;
+ struct ieee80211_hdr *hdr;
+ struct rx_attention *rx_attention;
+ enum hal_encrypt_type enctype;
+ bool is_decrypted = false;
+ int msdu_len = 0;
+ int extra_space;
+ u32 flags, hal_rx_desc_sz = ar->ab->hw_params.hal_desc_sz;
+
+ first_frag = skb_peek(&rx_tid->rx_frags);
+ last_frag = skb_peek_tail(&rx_tid->rx_frags);
+
+ skb_queue_walk(&rx_tid->rx_frags, skb) {
+ flags = 0;
+ rx_desc = (struct hal_rx_desc *)skb->data;
+ hdr = (struct ieee80211_hdr *)(skb->data + hal_rx_desc_sz);
+
+ enctype = ath11k_dp_rx_h_mpdu_start_enctype(ar->ab, rx_desc);
+ if (enctype != HAL_ENCRYPT_TYPE_OPEN) {
+ rx_attention = ath11k_dp_rx_get_attention(ar->ab, rx_desc);
+ is_decrypted = ath11k_dp_rx_h_attn_is_decrypted(rx_attention);
+ }
+
+ if (is_decrypted) {
+ if (skb != first_frag)
+ flags |= RX_FLAG_IV_STRIPPED;
+ if (skb != last_frag)
+ flags |= RX_FLAG_ICV_STRIPPED |
+ RX_FLAG_MIC_STRIPPED;
+ }
+
+ /* RX fragments are always raw packets */
+ if (skb != last_frag)
+ skb_trim(skb, skb->len - FCS_LEN);
+ ath11k_dp_rx_h_undecap_frag(ar, skb, enctype, flags);
+
+ if (skb != first_frag)
+ skb_pull(skb, hal_rx_desc_sz +
+ ieee80211_hdrlen(hdr->frame_control));
+ msdu_len += skb->len;
+ }
+
+ extra_space = msdu_len - (DP_RX_BUFFER_SIZE + skb_tailroom(first_frag));
+ if (extra_space > 0 &&
+ (pskb_expand_head(first_frag, 0, extra_space, GFP_ATOMIC) < 0))
+ return -ENOMEM;
+
+ __skb_unlink(first_frag, &rx_tid->rx_frags);
+ while ((skb = __skb_dequeue(&rx_tid->rx_frags))) {
+ skb_put_data(first_frag, skb->data, skb->len);
+ dev_kfree_skb_any(skb);
+ }
+
+ hdr = (struct ieee80211_hdr *)(first_frag->data + hal_rx_desc_sz);
+ hdr->frame_control &= ~__cpu_to_le16(IEEE80211_FCTL_MOREFRAGS);
+ ATH11K_SKB_RXCB(first_frag)->is_frag = 1;
+
+ if (ath11k_dp_rx_h_verify_tkip_mic(ar, peer, first_frag))
+ first_frag = NULL;
+
+ *defrag_skb = first_frag;
+ return 0;
+}
+
+static int ath11k_dp_rx_h_defrag_reo_reinject(struct ath11k *ar, struct dp_rx_tid *rx_tid,
+ struct sk_buff *defrag_skb)
+{
+ struct ath11k_base *ab = ar->ab;
+ struct ath11k_pdev_dp *dp = &ar->dp;
+ struct dp_rxdma_ring *rx_refill_ring = &dp->rx_refill_buf_ring;
+ struct hal_rx_desc *rx_desc = (struct hal_rx_desc *)defrag_skb->data;
+ struct hal_reo_entrance_ring *reo_ent_ring;
+ struct hal_reo_dest_ring *reo_dest_ring;
+ struct dp_link_desc_bank *link_desc_banks;
+ struct hal_rx_msdu_link *msdu_link;
+ struct hal_rx_msdu_details *msdu0;
+ struct hal_srng *srng;
+ dma_addr_t paddr;
+ u32 desc_bank, msdu_info, mpdu_info;
+ u32 dst_idx, cookie, hal_rx_desc_sz;
+ int ret, buf_id;
+
+ hal_rx_desc_sz = ab->hw_params.hal_desc_sz;
+ link_desc_banks = ab->dp.link_desc_banks;
+ reo_dest_ring = rx_tid->dst_ring_desc;
+
+ ath11k_hal_rx_reo_ent_paddr_get(ab, reo_dest_ring, &paddr, &desc_bank);
+ msdu_link = (struct hal_rx_msdu_link *)(link_desc_banks[desc_bank].vaddr +
+ (paddr - link_desc_banks[desc_bank].paddr));
+ msdu0 = &msdu_link->msdu_link[0];
+ dst_idx = FIELD_GET(RX_MSDU_DESC_INFO0_REO_DEST_IND, msdu0->rx_msdu_info.info0);
+ memset(msdu0, 0, sizeof(*msdu0));
+
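+	/* Rewrite the first MSDU descriptor so the reinjected frame is seen
+	 * as a single, complete MSDU covering the whole defragmented MPDU.
+	 */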
+ msdu_info = FIELD_PREP(RX_MSDU_DESC_INFO0_FIRST_MSDU_IN_MPDU, 1) |
+ FIELD_PREP(RX_MSDU_DESC_INFO0_LAST_MSDU_IN_MPDU, 1) |
+ FIELD_PREP(RX_MSDU_DESC_INFO0_MSDU_CONTINUATION, 0) |
+ FIELD_PREP(RX_MSDU_DESC_INFO0_MSDU_LENGTH,
+ defrag_skb->len - hal_rx_desc_sz) |
+ FIELD_PREP(RX_MSDU_DESC_INFO0_REO_DEST_IND, dst_idx) |
+ FIELD_PREP(RX_MSDU_DESC_INFO0_VALID_SA, 1) |
+ FIELD_PREP(RX_MSDU_DESC_INFO0_VALID_DA, 1);
+ msdu0->rx_msdu_info.info0 = msdu_info;
+
+ /* change msdu len in hal rx desc */
+ ath11k_dp_rxdesc_set_msdu_len(ab, rx_desc, defrag_skb->len - hal_rx_desc_sz);
+
+ paddr = dma_map_single(ab->dev, defrag_skb->data,
+ defrag_skb->len + skb_tailroom(defrag_skb),
+ DMA_TO_DEVICE);
+ if (dma_mapping_error(ab->dev, paddr))
+ return -ENOMEM;
+
+ spin_lock_bh(&rx_refill_ring->idr_lock);
+ buf_id = idr_alloc(&rx_refill_ring->bufs_idr, defrag_skb, 0,
+ rx_refill_ring->bufs_max * 3, GFP_ATOMIC);
+ spin_unlock_bh(&rx_refill_ring->idr_lock);
+ if (buf_id < 0) {
+ ret = -ENOMEM;
+ goto err_unmap_dma;
+ }
+
+ ATH11K_SKB_RXCB(defrag_skb)->paddr = paddr;
+ cookie = FIELD_PREP(DP_RXDMA_BUF_COOKIE_PDEV_ID, dp->mac_id) |
+ FIELD_PREP(DP_RXDMA_BUF_COOKIE_BUF_ID, buf_id);
+
+ ath11k_hal_rx_buf_addr_info_set(msdu0, paddr, cookie,
+ ab->hw_params.hal_params->rx_buf_rbm);
+
+ /* Fill mpdu details into reo entrance ring */
+ srng = &ab->hal.srng_list[ab->dp.reo_reinject_ring.ring_id];
+
+ spin_lock_bh(&srng->lock);
+ ath11k_hal_srng_access_begin(ab, srng);
+
+ reo_ent_ring = (struct hal_reo_entrance_ring *)
+ ath11k_hal_srng_src_get_next_entry(ab, srng);
+ if (!reo_ent_ring) {
+ ath11k_hal_srng_access_end(ab, srng);
+ spin_unlock_bh(&srng->lock);
+ ret = -ENOSPC;
+ goto err_free_idr;
+ }
+ memset(reo_ent_ring, 0, sizeof(*reo_ent_ring));
+
+ ath11k_hal_rx_reo_ent_paddr_get(ab, reo_dest_ring, &paddr, &desc_bank);
+ ath11k_hal_rx_buf_addr_info_set(reo_ent_ring, paddr, desc_bank,
+ HAL_RX_BUF_RBM_WBM_IDLE_DESC_LIST);
+
+ mpdu_info = FIELD_PREP(RX_MPDU_DESC_INFO0_MSDU_COUNT, 1) |
+ FIELD_PREP(RX_MPDU_DESC_INFO0_SEQ_NUM, rx_tid->cur_sn) |
+ FIELD_PREP(RX_MPDU_DESC_INFO0_FRAG_FLAG, 0) |
+ FIELD_PREP(RX_MPDU_DESC_INFO0_VALID_SA, 1) |
+ FIELD_PREP(RX_MPDU_DESC_INFO0_VALID_DA, 1) |
+ FIELD_PREP(RX_MPDU_DESC_INFO0_RAW_MPDU, 1) |
+ FIELD_PREP(RX_MPDU_DESC_INFO0_VALID_PN, 1);
+
+ reo_ent_ring->rx_mpdu_info.info0 = mpdu_info;
+ reo_ent_ring->rx_mpdu_info.meta_data = reo_dest_ring->rx_mpdu_info.meta_data;
+ reo_ent_ring->queue_addr_lo = reo_dest_ring->queue_addr_lo;
+ reo_ent_ring->info0 = FIELD_PREP(HAL_REO_ENTR_RING_INFO0_QUEUE_ADDR_HI,
+ FIELD_GET(HAL_REO_DEST_RING_INFO0_QUEUE_ADDR_HI,
+ reo_dest_ring->info0)) |
+ FIELD_PREP(HAL_REO_ENTR_RING_INFO0_DEST_IND, dst_idx);
+ ath11k_hal_srng_access_end(ab, srng);
+ spin_unlock_bh(&srng->lock);
+
+ return 0;
+
+err_free_idr:
+ spin_lock_bh(&rx_refill_ring->idr_lock);
+ idr_remove(&rx_refill_ring->bufs_idr, buf_id);
+ spin_unlock_bh(&rx_refill_ring->idr_lock);
+err_unmap_dma:
+ dma_unmap_single(ab->dev, paddr, defrag_skb->len + skb_tailroom(defrag_skb),
+ DMA_TO_DEVICE);
+ return ret;
+}
+
+static int ath11k_dp_rx_h_cmp_frags(struct ath11k *ar,
+ struct sk_buff *a, struct sk_buff *b)
+{
+ int frag1, frag2;
+
+ frag1 = ath11k_dp_rx_h_mpdu_start_frag_no(ar->ab, a);
+ frag2 = ath11k_dp_rx_h_mpdu_start_frag_no(ar->ab, b);
+
+ return frag1 - frag2;
+}
+
+static void ath11k_dp_rx_h_sort_frags(struct ath11k *ar,
+ struct sk_buff_head *frag_list,
+ struct sk_buff *cur_frag)
+{
+ struct sk_buff *skb;
+ int cmp;
+
+ skb_queue_walk(frag_list, skb) {
+ cmp = ath11k_dp_rx_h_cmp_frags(ar, skb, cur_frag);
+ if (cmp < 0)
+ continue;
+ __skb_queue_before(frag_list, skb, cur_frag);
+ return;
+ }
+ __skb_queue_tail(frag_list, cur_frag);
+}
+
+static u64 ath11k_dp_rx_h_get_pn(struct ath11k *ar, struct sk_buff *skb)
+{
+ struct ieee80211_hdr *hdr;
+ u64 pn = 0;
+ u8 *ehdr;
+ u32 hal_rx_desc_sz = ar->ab->hw_params.hal_desc_sz;
+
+ hdr = (struct ieee80211_hdr *)(skb->data + hal_rx_desc_sz);
+ ehdr = skb->data + hal_rx_desc_sz + ieee80211_hdrlen(hdr->frame_control);
+
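+	/* CCMP/GCMP header layout: PN0/PN1 in bytes 0-1, a reserved byte
+	 * and key id/ExtIV in bytes 2-3, PN2..PN5 in bytes 4-7; assemble
+	 * the 48-bit PN from the six PN bytes.
+	 */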
+ pn = ehdr[0];
+ pn |= (u64)ehdr[1] << 8;
+ pn |= (u64)ehdr[4] << 16;
+ pn |= (u64)ehdr[5] << 24;
+ pn |= (u64)ehdr[6] << 32;
+ pn |= (u64)ehdr[7] << 40;
+
+ return pn;
+}
+
+static bool
+ath11k_dp_rx_h_defrag_validate_incr_pn(struct ath11k *ar, struct dp_rx_tid *rx_tid)
+{
+ enum hal_encrypt_type encrypt_type;
+ struct sk_buff *first_frag, *skb;
+ struct hal_rx_desc *desc;
+ u64 last_pn;
+ u64 cur_pn;
+
+ first_frag = skb_peek(&rx_tid->rx_frags);
+ desc = (struct hal_rx_desc *)first_frag->data;
+
+ encrypt_type = ath11k_dp_rx_h_mpdu_start_enctype(ar->ab, desc);
+ if (encrypt_type != HAL_ENCRYPT_TYPE_CCMP_128 &&
+ encrypt_type != HAL_ENCRYPT_TYPE_CCMP_256 &&
+ encrypt_type != HAL_ENCRYPT_TYPE_GCMP_128 &&
+ encrypt_type != HAL_ENCRYPT_TYPE_AES_GCMP_256)
+ return true;
+
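+	/* Fragments of a protected MPDU must carry strictly consecutive
+	 * PNs; any gap or reordering is treated as a replayed or injected
+	 * fragment and the whole sequence is rejected.
+	 */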
+ last_pn = ath11k_dp_rx_h_get_pn(ar, first_frag);
+ skb_queue_walk(&rx_tid->rx_frags, skb) {
+ if (skb == first_frag)
+ continue;
+
+ cur_pn = ath11k_dp_rx_h_get_pn(ar, skb);
+ if (cur_pn != last_pn + 1)
+ return false;
+ last_pn = cur_pn;
+ }
+ return true;
+}
+
+static int ath11k_dp_rx_frag_h_mpdu(struct ath11k *ar,
+ struct sk_buff *msdu,
+ u32 *ring_desc)
+{
+ struct ath11k_base *ab = ar->ab;
+ struct hal_rx_desc *rx_desc;
+ struct ath11k_peer *peer;
+ struct dp_rx_tid *rx_tid;
+ struct sk_buff *defrag_skb = NULL;
+ u32 peer_id;
+ u16 seqno, frag_no;
+ u8 tid;
+ int ret = 0;
+ bool more_frags;
+ bool is_mcbc;
+
+ rx_desc = (struct hal_rx_desc *)msdu->data;
+ peer_id = ath11k_dp_rx_h_mpdu_start_peer_id(ar->ab, rx_desc);
+ tid = ath11k_dp_rx_h_mpdu_start_tid(ar->ab, rx_desc);
+ seqno = ath11k_dp_rx_h_mpdu_start_seq_no(ar->ab, rx_desc);
+ frag_no = ath11k_dp_rx_h_mpdu_start_frag_no(ar->ab, msdu);
+ more_frags = ath11k_dp_rx_h_mpdu_start_more_frags(ar->ab, msdu);
+ is_mcbc = ath11k_dp_rx_h_attn_is_mcbc(ar->ab, rx_desc);
+
+ /* Multicast/Broadcast fragments are not expected */
+ if (is_mcbc)
+ return -EINVAL;
+
+ if (!ath11k_dp_rx_h_mpdu_start_seq_ctrl_valid(ar->ab, rx_desc) ||
+ !ath11k_dp_rx_h_mpdu_start_fc_valid(ar->ab, rx_desc) ||
+ tid > IEEE80211_NUM_TIDS)
+ return -EINVAL;
+
+	/* An unfragmented packet arrived in the REO exception
+	 * ring; this shouldn't happen, as such packets normally
+	 * come through the reo2sw srngs.
+	 */
+ if (WARN_ON_ONCE(!frag_no && !more_frags))
+ return -EINVAL;
+
+ spin_lock_bh(&ab->base_lock);
+ peer = ath11k_peer_find_by_id(ab, peer_id);
+ if (!peer) {
+ ath11k_warn(ab, "failed to find the peer to de-fragment received fragment peer_id %d\n",
+ peer_id);
+ ret = -ENOENT;
+ goto out_unlock;
+ }
+ if (!peer->dp_setup_done) {
+ ath11k_warn(ab, "The peer %pM [%d] has uninitialized datapath\n",
+ peer->addr, peer_id);
+ ret = -ENOENT;
+ goto out_unlock;
+ }
+
+ rx_tid = &peer->rx_tid[tid];
+
+ if ((!skb_queue_empty(&rx_tid->rx_frags) && seqno != rx_tid->cur_sn) ||
+ skb_queue_empty(&rx_tid->rx_frags)) {
+ /* Flush stored fragments and start a new sequence */
+ ath11k_dp_rx_frags_cleanup(rx_tid, true);
+ rx_tid->cur_sn = seqno;
+ }
+
+ if (rx_tid->rx_frag_bitmap & BIT(frag_no)) {
+ /* Fragment already present */
+ ret = -EINVAL;
+ goto out_unlock;
+ }
+
+ if (!rx_tid->rx_frag_bitmap || (frag_no > __fls(rx_tid->rx_frag_bitmap)))
+ __skb_queue_tail(&rx_tid->rx_frags, msdu);
+ else
+ ath11k_dp_rx_h_sort_frags(ar, &rx_tid->rx_frags, msdu);
+
+ rx_tid->rx_frag_bitmap |= BIT(frag_no);
+ if (!more_frags)
+ rx_tid->last_frag_no = frag_no;
+
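+	/* Save the REO destination descriptor of the first fragment; it is
+	 * needed later to reinject the defragmented MPDU into the REO. Link
+	 * descriptors of later fragments can be returned right away.
+	 */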
+ if (frag_no == 0) {
+ rx_tid->dst_ring_desc = kmemdup(ring_desc,
+ sizeof(*rx_tid->dst_ring_desc),
+ GFP_ATOMIC);
+ if (!rx_tid->dst_ring_desc) {
+ ret = -ENOMEM;
+ goto out_unlock;
+ }
+ } else {
+ ath11k_dp_rx_link_desc_return(ab, ring_desc,
+ HAL_WBM_REL_BM_ACT_PUT_IN_IDLE);
+ }
+
+ if (!rx_tid->last_frag_no ||
+ rx_tid->rx_frag_bitmap != GENMASK(rx_tid->last_frag_no, 0)) {
+ mod_timer(&rx_tid->frag_timer, jiffies +
+ ATH11K_DP_RX_FRAGMENT_TIMEOUT_MS);
+ goto out_unlock;
+ }
+
+ spin_unlock_bh(&ab->base_lock);
+ del_timer_sync(&rx_tid->frag_timer);
+ spin_lock_bh(&ab->base_lock);
+
+ peer = ath11k_peer_find_by_id(ab, peer_id);
+ if (!peer)
+ goto err_frags_cleanup;
+
+ if (!ath11k_dp_rx_h_defrag_validate_incr_pn(ar, rx_tid))
+ goto err_frags_cleanup;
+
+ if (ath11k_dp_rx_h_defrag(ar, peer, rx_tid, &defrag_skb))
+ goto err_frags_cleanup;
+
+ if (!defrag_skb)
+ goto err_frags_cleanup;
+
+ if (ath11k_dp_rx_h_defrag_reo_reinject(ar, rx_tid, defrag_skb))
+ goto err_frags_cleanup;
+
+ ath11k_dp_rx_frags_cleanup(rx_tid, false);
+ goto out_unlock;
+
+err_frags_cleanup:
+ dev_kfree_skb_any(defrag_skb);
+ ath11k_dp_rx_frags_cleanup(rx_tid, true);
+out_unlock:
+ spin_unlock_bh(&ab->base_lock);
+ return ret;
+}
+
+static int
+ath11k_dp_process_rx_err_buf(struct ath11k *ar, u32 *ring_desc, int buf_id, bool drop)
+{
+ struct ath11k_pdev_dp *dp = &ar->dp;
+ struct dp_rxdma_ring *rx_ring = &dp->rx_refill_buf_ring;
+ struct sk_buff *msdu;
+ struct ath11k_skb_rxcb *rxcb;
+ struct hal_rx_desc *rx_desc;
+ u8 *hdr_status;
+ u16 msdu_len;
+ u32 hal_rx_desc_sz = ar->ab->hw_params.hal_desc_sz;
+
+ spin_lock_bh(&rx_ring->idr_lock);
+ msdu = idr_find(&rx_ring->bufs_idr, buf_id);
+ if (!msdu) {
+ ath11k_warn(ar->ab, "rx err buf with invalid buf_id %d\n",
+ buf_id);
+ spin_unlock_bh(&rx_ring->idr_lock);
+ return -EINVAL;
+ }
+
+ idr_remove(&rx_ring->bufs_idr, buf_id);
+ spin_unlock_bh(&rx_ring->idr_lock);
+
+ rxcb = ATH11K_SKB_RXCB(msdu);
+ dma_unmap_single(ar->ab->dev, rxcb->paddr,
+ msdu->len + skb_tailroom(msdu),
+ DMA_FROM_DEVICE);
+
+ if (drop) {
+ dev_kfree_skb_any(msdu);
+ return 0;
+ }
+
+ rcu_read_lock();
+ if (!rcu_dereference(ar->ab->pdevs_active[ar->pdev_idx])) {
+ dev_kfree_skb_any(msdu);
+ goto exit;
+ }
+
+ if (test_bit(ATH11K_CAC_RUNNING, &ar->dev_flags)) {
+ dev_kfree_skb_any(msdu);
+ goto exit;
+ }
+
+ rx_desc = (struct hal_rx_desc *)msdu->data;
+ msdu_len = ath11k_dp_rx_h_msdu_start_msdu_len(ar->ab, rx_desc);
+ if ((msdu_len + hal_rx_desc_sz) > DP_RX_BUFFER_SIZE) {
+ hdr_status = ath11k_dp_rx_h_80211_hdr(ar->ab, rx_desc);
+ ath11k_warn(ar->ab, "invalid msdu leng %u", msdu_len);
+ ath11k_dbg_dump(ar->ab, ATH11K_DBG_DATA, NULL, "", hdr_status,
+ sizeof(struct ieee80211_hdr));
+ ath11k_dbg_dump(ar->ab, ATH11K_DBG_DATA, NULL, "", rx_desc,
+ sizeof(struct hal_rx_desc));
+ dev_kfree_skb_any(msdu);
+ goto exit;
+ }
+
+ skb_put(msdu, hal_rx_desc_sz + msdu_len);
+
+ if (ath11k_dp_rx_frag_h_mpdu(ar, msdu, ring_desc)) {
+ dev_kfree_skb_any(msdu);
+ ath11k_dp_rx_link_desc_return(ar->ab, ring_desc,
+ HAL_WBM_REL_BM_ACT_PUT_IN_IDLE);
+ }
+exit:
+ rcu_read_unlock();
+ return 0;
+}
+
+int ath11k_dp_process_rx_err(struct ath11k_base *ab, struct napi_struct *napi,
+ int budget)
+{
+ u32 msdu_cookies[HAL_NUM_RX_MSDUS_PER_LINK_DESC];
+ struct dp_link_desc_bank *link_desc_banks;
+ enum hal_rx_buf_return_buf_manager rbm;
+ int tot_n_bufs_reaped, quota, ret, i;
+ int n_bufs_reaped[MAX_RADIOS] = {0};
+ struct dp_rxdma_ring *rx_ring;
+ struct dp_srng *reo_except;
+ u32 desc_bank, num_msdus;
+ struct hal_srng *srng;
+ struct ath11k_dp *dp;
+ void *link_desc_va;
+ int buf_id, mac_id;
+ struct ath11k *ar;
+ dma_addr_t paddr;
+ u32 *desc;
+ bool is_frag;
+ u8 drop = 0;
+
+ tot_n_bufs_reaped = 0;
+ quota = budget;
+
+ dp = &ab->dp;
+ reo_except = &dp->reo_except_ring;
+ link_desc_banks = dp->link_desc_banks;
+
+ srng = &ab->hal.srng_list[reo_except->ring_id];
+
+ spin_lock_bh(&srng->lock);
+
+ ath11k_hal_srng_access_begin(ab, srng);
+
+ while (budget &&
+ (desc = ath11k_hal_srng_dst_get_next_entry(ab, srng))) {
+ struct hal_reo_dest_ring *reo_desc = (struct hal_reo_dest_ring *)desc;
+
+ ab->soc_stats.err_ring_pkts++;
+ ret = ath11k_hal_desc_reo_parse_err(ab, desc, &paddr,
+ &desc_bank);
+ if (ret) {
+ ath11k_warn(ab, "failed to parse error reo desc %d\n",
+ ret);
+ continue;
+ }
+ link_desc_va = link_desc_banks[desc_bank].vaddr +
+ (paddr - link_desc_banks[desc_bank].paddr);
+ ath11k_hal_rx_msdu_link_info_get(link_desc_va, &num_msdus, msdu_cookies,
+ &rbm);
+ if (rbm != HAL_RX_BUF_RBM_WBM_IDLE_DESC_LIST &&
+ rbm != HAL_RX_BUF_RBM_SW3_BM) {
+ ab->soc_stats.invalid_rbm++;
+ ath11k_warn(ab, "invalid return buffer manager %d\n", rbm);
+ ath11k_dp_rx_link_desc_return(ab, desc,
+ HAL_WBM_REL_BM_ACT_REL_MSDU);
+ continue;
+ }
+
+ is_frag = !!(reo_desc->rx_mpdu_info.info0 & RX_MPDU_DESC_INFO0_FRAG_FLAG);
+
+		/* Below, process only rx fragments with one msdu per link
+		 * desc, and drop msdus indicated due to error reasons.
+		 */
+ if (!is_frag || num_msdus > 1) {
+ drop = 1;
+ /* Return the link desc back to wbm idle list */
+ ath11k_dp_rx_link_desc_return(ab, desc,
+ HAL_WBM_REL_BM_ACT_PUT_IN_IDLE);
+ }
+
+ for (i = 0; i < num_msdus; i++) {
+ buf_id = FIELD_GET(DP_RXDMA_BUF_COOKIE_BUF_ID,
+ msdu_cookies[i]);
+
+ mac_id = FIELD_GET(DP_RXDMA_BUF_COOKIE_PDEV_ID,
+ msdu_cookies[i]);
+
+ ar = ab->pdevs[mac_id].ar;
+
+ if (!ath11k_dp_process_rx_err_buf(ar, desc, buf_id, drop)) {
+ n_bufs_reaped[mac_id]++;
+ tot_n_bufs_reaped++;
+ }
+ }
+
+ if (tot_n_bufs_reaped >= quota) {
+ tot_n_bufs_reaped = quota;
+ goto exit;
+ }
+
+ budget = quota - tot_n_bufs_reaped;
+ }
+
+exit:
+ ath11k_hal_srng_access_end(ab, srng);
+
+ spin_unlock_bh(&srng->lock);
+
+ for (i = 0; i < ab->num_radios; i++) {
+ if (!n_bufs_reaped[i])
+ continue;
+
+ ar = ab->pdevs[i].ar;
+ rx_ring = &ar->dp.rx_refill_buf_ring;
+
+ ath11k_dp_rxbufs_replenish(ab, i, rx_ring, n_bufs_reaped[i],
+ ab->hw_params.hal_params->rx_buf_rbm);
+ }
+
+ return tot_n_bufs_reaped;
+}
+
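+/* An MSDU that does not fit one rx buffer is spread over several buffers,
+ * all flagged with the DESC_ADDR_ZERO error; drop the continuation buffers
+ * belonging to the oversized MSDU from the list.
+ */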
+static void ath11k_dp_rx_null_q_desc_sg_drop(struct ath11k *ar,
+ int msdu_len,
+ struct sk_buff_head *msdu_list)
+{
+ struct sk_buff *skb, *tmp;
+ struct ath11k_skb_rxcb *rxcb;
+ int n_buffs;
+
+ n_buffs = DIV_ROUND_UP(msdu_len,
+ (DP_RX_BUFFER_SIZE - ar->ab->hw_params.hal_desc_sz));
+
+ skb_queue_walk_safe(msdu_list, skb, tmp) {
+ rxcb = ATH11K_SKB_RXCB(skb);
+ if (rxcb->err_rel_src == HAL_WBM_REL_SRC_MODULE_REO &&
+ rxcb->err_code == HAL_REO_DEST_RING_ERROR_CODE_DESC_ADDR_ZERO) {
+ if (!n_buffs)
+ break;
+ __skb_unlink(skb, msdu_list);
+ dev_kfree_skb_any(skb);
+ n_buffs--;
+ }
+ }
+}
+
+static int ath11k_dp_rx_h_null_q_desc(struct ath11k *ar, struct sk_buff *msdu,
+ struct ieee80211_rx_status *status,
+ struct sk_buff_head *msdu_list)
+{
+ u16 msdu_len;
+ struct hal_rx_desc *desc = (struct hal_rx_desc *)msdu->data;
+ struct rx_attention *rx_attention;
+ u8 l3pad_bytes;
+ struct ath11k_skb_rxcb *rxcb = ATH11K_SKB_RXCB(msdu);
+ u32 hal_rx_desc_sz = ar->ab->hw_params.hal_desc_sz;
+
+ msdu_len = ath11k_dp_rx_h_msdu_start_msdu_len(ar->ab, desc);
+
+ if (!rxcb->is_frag && ((msdu_len + hal_rx_desc_sz) > DP_RX_BUFFER_SIZE)) {
+		/* The first buffer will be freed by the caller, so deduct its length */
+ msdu_len = msdu_len - (DP_RX_BUFFER_SIZE - hal_rx_desc_sz);
+ ath11k_dp_rx_null_q_desc_sg_drop(ar, msdu_len, msdu_list);
+ return -EINVAL;
+ }
+
+ rx_attention = ath11k_dp_rx_get_attention(ar->ab, desc);
+ if (!ath11k_dp_rx_h_attn_msdu_done(rx_attention)) {
+ ath11k_warn(ar->ab,
+ "msdu_done bit not set in null_q_des processing\n");
+ __skb_queue_purge(msdu_list);
+ return -EIO;
+ }
+
+	/* Handle NULL queue descriptor violations arising out of a missing
+	 * REO queue for a given peer or TID. This typically happens when a
+	 * packet is received on a QoS-enabled TID before the ADDBA
+	 * negotiation that sets up the TID queue has completed. It may also
+	 * happen for MC/BC frames that are not routed to the non-QoS TID
+	 * queue in the absence of any other default TID queue.
+	 * This error can show up in both the REO destination and the WBM
+	 * release ring.
+	 */
+
+ rxcb->is_first_msdu = ath11k_dp_rx_h_msdu_end_first_msdu(ar->ab, desc);
+ rxcb->is_last_msdu = ath11k_dp_rx_h_msdu_end_last_msdu(ar->ab, desc);
+
+ if (rxcb->is_frag) {
+ skb_pull(msdu, hal_rx_desc_sz);
+ } else {
+ l3pad_bytes = ath11k_dp_rx_h_msdu_end_l3pad(ar->ab, desc);
+
+ if ((hal_rx_desc_sz + l3pad_bytes + msdu_len) > DP_RX_BUFFER_SIZE)
+ return -EINVAL;
+
+ skb_put(msdu, hal_rx_desc_sz + l3pad_bytes + msdu_len);
+ skb_pull(msdu, hal_rx_desc_sz + l3pad_bytes);
+ }
+ ath11k_dp_rx_h_ppdu(ar, desc, status);
+
+ ath11k_dp_rx_h_mpdu(ar, msdu, desc, status);
+
+ rxcb->tid = ath11k_dp_rx_h_mpdu_start_tid(ar->ab, desc);
+
+	/* Note that the caller still has access to the msdu and completes
+	 * rx with mac80211, so there is no need to clean up amsdu_list here.
+	 */
+
+ return 0;
+}
+
+static bool ath11k_dp_rx_h_reo_err(struct ath11k *ar, struct sk_buff *msdu,
+ struct ieee80211_rx_status *status,
+ struct sk_buff_head *msdu_list)
+{
+ struct ath11k_skb_rxcb *rxcb = ATH11K_SKB_RXCB(msdu);
+ bool drop = false;
+
+ ar->ab->soc_stats.reo_error[rxcb->err_code]++;
+
+ switch (rxcb->err_code) {
+ case HAL_REO_DEST_RING_ERROR_CODE_DESC_ADDR_ZERO:
+ if (ath11k_dp_rx_h_null_q_desc(ar, msdu, status, msdu_list))
+ drop = true;
+ break;
+ case HAL_REO_DEST_RING_ERROR_CODE_PN_CHECK_FAILED:
+ /* TODO: Do not drop PN failed packets in the driver;
+ * instead, it is good to drop such packets in mac80211
+ * after incrementing the replay counters.
+ */
+ fallthrough;
+ default:
+ /* TODO: Review other errors and process them to mac80211
+ * as appropriate.
+ */
+ drop = true;
+ break;
+ }
+
+ return drop;
+}
+
+static void ath11k_dp_rx_h_tkip_mic_err(struct ath11k *ar, struct sk_buff *msdu,
+ struct ieee80211_rx_status *status)
+{
+ u16 msdu_len;
+ struct hal_rx_desc *desc = (struct hal_rx_desc *)msdu->data;
+ u8 l3pad_bytes;
+ struct ath11k_skb_rxcb *rxcb = ATH11K_SKB_RXCB(msdu);
+ u32 hal_rx_desc_sz = ar->ab->hw_params.hal_desc_sz;
+
+ rxcb->is_first_msdu = ath11k_dp_rx_h_msdu_end_first_msdu(ar->ab, desc);
+ rxcb->is_last_msdu = ath11k_dp_rx_h_msdu_end_last_msdu(ar->ab, desc);
+
+ l3pad_bytes = ath11k_dp_rx_h_msdu_end_l3pad(ar->ab, desc);
+ msdu_len = ath11k_dp_rx_h_msdu_start_msdu_len(ar->ab, desc);
+ skb_put(msdu, hal_rx_desc_sz + l3pad_bytes + msdu_len);
+ skb_pull(msdu, hal_rx_desc_sz + l3pad_bytes);
+
+ ath11k_dp_rx_h_ppdu(ar, desc, status);
+
+ status->flag |= (RX_FLAG_MMIC_STRIPPED | RX_FLAG_MMIC_ERROR |
+ RX_FLAG_DECRYPTED);
+
+ ath11k_dp_rx_h_undecap(ar, msdu, desc,
+ HAL_ENCRYPT_TYPE_TKIP_MIC, status, false);
+}
+
+static bool ath11k_dp_rx_h_rxdma_err(struct ath11k *ar, struct sk_buff *msdu,
+ struct ieee80211_rx_status *status)
+{
+ struct ath11k_skb_rxcb *rxcb = ATH11K_SKB_RXCB(msdu);
+ bool drop = false;
+
+ ar->ab->soc_stats.rxdma_error[rxcb->err_code]++;
+
+ switch (rxcb->err_code) {
+ case HAL_REO_ENTR_RING_RXDMA_ECODE_TKIP_MIC_ERR:
+ ath11k_dp_rx_h_tkip_mic_err(ar, msdu, status);
+ break;
+ default:
+ /* TODO: Review other rxdma error code to check if anything is
+ * worth reporting to mac80211
+ */
+ drop = true;
+ break;
+ }
+
+ return drop;
+}
+
+static void ath11k_dp_rx_wbm_err(struct ath11k *ar,
+ struct napi_struct *napi,
+ struct sk_buff *msdu,
+ struct sk_buff_head *msdu_list)
+{
+ struct ath11k_skb_rxcb *rxcb = ATH11K_SKB_RXCB(msdu);
+ struct ieee80211_rx_status rxs = {0};
+ bool drop = true;
+
+ switch (rxcb->err_rel_src) {
+ case HAL_WBM_REL_SRC_MODULE_REO:
+ drop = ath11k_dp_rx_h_reo_err(ar, msdu, &rxs, msdu_list);
+ break;
+ case HAL_WBM_REL_SRC_MODULE_RXDMA:
+ drop = ath11k_dp_rx_h_rxdma_err(ar, msdu, &rxs);
+ break;
+ default:
+ /* msdu will get freed */
+ break;
+ }
+
+ if (drop) {
+ dev_kfree_skb_any(msdu);
+ return;
+ }
+
+ ath11k_dp_rx_deliver_msdu(ar, napi, msdu, &rxs);
+}
+
+int ath11k_dp_rx_process_wbm_err(struct ath11k_base *ab,
+ struct napi_struct *napi, int budget)
+{
+ struct ath11k *ar;
+ struct ath11k_dp *dp = &ab->dp;
+ struct dp_rxdma_ring *rx_ring;
+ struct hal_rx_wbm_rel_info err_info;
+ struct hal_srng *srng;
+ struct sk_buff *msdu;
+ struct sk_buff_head msdu_list[MAX_RADIOS];
+ struct ath11k_skb_rxcb *rxcb;
+ u32 *rx_desc;
+ int buf_id, mac_id;
+ int num_buffs_reaped[MAX_RADIOS] = {0};
+ int total_num_buffs_reaped = 0;
+ int ret, i;
+
+ for (i = 0; i < ab->num_radios; i++)
+ __skb_queue_head_init(&msdu_list[i]);
+
+ srng = &ab->hal.srng_list[dp->rx_rel_ring.ring_id];
+
+ spin_lock_bh(&srng->lock);
+
+ ath11k_hal_srng_access_begin(ab, srng);
+
+ while (budget) {
+ rx_desc = ath11k_hal_srng_dst_get_next_entry(ab, srng);
+ if (!rx_desc)
+ break;
+
+ ret = ath11k_hal_wbm_desc_parse_err(ab, rx_desc, &err_info);
+ if (ret) {
+ ath11k_warn(ab,
+ "failed to parse rx error in wbm_rel ring desc %d\n",
+ ret);
+ continue;
+ }
+
+ buf_id = FIELD_GET(DP_RXDMA_BUF_COOKIE_BUF_ID, err_info.cookie);
+ mac_id = FIELD_GET(DP_RXDMA_BUF_COOKIE_PDEV_ID, err_info.cookie);
+
+ ar = ab->pdevs[mac_id].ar;
+ rx_ring = &ar->dp.rx_refill_buf_ring;
+
+ spin_lock_bh(&rx_ring->idr_lock);
+ msdu = idr_find(&rx_ring->bufs_idr, buf_id);
+ if (!msdu) {
+ ath11k_warn(ab, "frame rx with invalid buf_id %d pdev %d\n",
+ buf_id, mac_id);
+ spin_unlock_bh(&rx_ring->idr_lock);
+ continue;
+ }
+
+ idr_remove(&rx_ring->bufs_idr, buf_id);
+ spin_unlock_bh(&rx_ring->idr_lock);
+
+ rxcb = ATH11K_SKB_RXCB(msdu);
+ dma_unmap_single(ab->dev, rxcb->paddr,
+ msdu->len + skb_tailroom(msdu),
+ DMA_FROM_DEVICE);
+
+ num_buffs_reaped[mac_id]++;
+ total_num_buffs_reaped++;
+ budget--;
+
+ if (err_info.push_reason !=
+ HAL_REO_DEST_RING_PUSH_REASON_ERR_DETECTED) {
+ dev_kfree_skb_any(msdu);
+ continue;
+ }
+
+ rxcb->err_rel_src = err_info.err_rel_src;
+ rxcb->err_code = err_info.err_code;
+ rxcb->rx_desc = (struct hal_rx_desc *)msdu->data;
+ __skb_queue_tail(&msdu_list[mac_id], msdu);
+ }
+
+ ath11k_hal_srng_access_end(ab, srng);
+
+ spin_unlock_bh(&srng->lock);
+
+ if (!total_num_buffs_reaped)
+ goto done;
+
+ for (i = 0; i < ab->num_radios; i++) {
+ if (!num_buffs_reaped[i])
+ continue;
+
+ ar = ab->pdevs[i].ar;
+ rx_ring = &ar->dp.rx_refill_buf_ring;
+
+ ath11k_dp_rxbufs_replenish(ab, i, rx_ring, num_buffs_reaped[i],
+ ab->hw_params.hal_params->rx_buf_rbm);
+ }
+
+ rcu_read_lock();
+ for (i = 0; i < ab->num_radios; i++) {
+ if (!rcu_dereference(ab->pdevs_active[i])) {
+ __skb_queue_purge(&msdu_list[i]);
+ continue;
+ }
+
+ ar = ab->pdevs[i].ar;
+
+ if (test_bit(ATH11K_CAC_RUNNING, &ar->dev_flags)) {
+ __skb_queue_purge(&msdu_list[i]);
+ continue;
+ }
+
+ while ((msdu = __skb_dequeue(&msdu_list[i])) != NULL)
+ ath11k_dp_rx_wbm_err(ar, napi, msdu, &msdu_list[i]);
+ }
+ rcu_read_unlock();
+done:
+ return total_num_buffs_reaped;
+}
+
+int ath11k_dp_process_rxdma_err(struct ath11k_base *ab, int mac_id, int budget)
+{
+ struct ath11k *ar;
+ struct dp_srng *err_ring;
+ struct dp_rxdma_ring *rx_ring;
+ struct dp_link_desc_bank *link_desc_banks = ab->dp.link_desc_banks;
+ struct hal_srng *srng;
+ u32 msdu_cookies[HAL_NUM_RX_MSDUS_PER_LINK_DESC];
+ enum hal_rx_buf_return_buf_manager rbm;
+ enum hal_reo_entr_rxdma_ecode rxdma_err_code;
+ struct ath11k_skb_rxcb *rxcb;
+ struct sk_buff *skb;
+ struct hal_reo_entrance_ring *entr_ring;
+ void *desc;
+ int num_buf_freed = 0;
+ int quota = budget;
+ dma_addr_t paddr;
+ u32 desc_bank;
+ void *link_desc_va;
+ int num_msdus;
+ int i;
+ int buf_id;
+
+ ar = ab->pdevs[ath11k_hw_mac_id_to_pdev_id(&ab->hw_params, mac_id)].ar;
+ err_ring = &ar->dp.rxdma_err_dst_ring[ath11k_hw_mac_id_to_srng_id(&ab->hw_params,
+ mac_id)];
+ rx_ring = &ar->dp.rx_refill_buf_ring;
+
+ srng = &ab->hal.srng_list[err_ring->ring_id];
+
+ spin_lock_bh(&srng->lock);
+
+ ath11k_hal_srng_access_begin(ab, srng);
+
+ while (quota-- &&
+ (desc = ath11k_hal_srng_dst_get_next_entry(ab, srng))) {
+ ath11k_hal_rx_reo_ent_paddr_get(ab, desc, &paddr, &desc_bank);
+
+ entr_ring = (struct hal_reo_entrance_ring *)desc;
+ rxdma_err_code =
+ FIELD_GET(HAL_REO_ENTR_RING_INFO1_RXDMA_ERROR_CODE,
+ entr_ring->info1);
+ ab->soc_stats.rxdma_error[rxdma_err_code]++;
+
+ link_desc_va = link_desc_banks[desc_bank].vaddr +
+ (paddr - link_desc_banks[desc_bank].paddr);
+ ath11k_hal_rx_msdu_link_info_get(link_desc_va, &num_msdus,
+ msdu_cookies, &rbm);
+
+ for (i = 0; i < num_msdus; i++) {
+ buf_id = FIELD_GET(DP_RXDMA_BUF_COOKIE_BUF_ID,
+ msdu_cookies[i]);
+
+ spin_lock_bh(&rx_ring->idr_lock);
+ skb = idr_find(&rx_ring->bufs_idr, buf_id);
+ if (!skb) {
+ ath11k_warn(ab, "rxdma error with invalid buf_id %d\n",
+ buf_id);
+ spin_unlock_bh(&rx_ring->idr_lock);
+ continue;
+ }
+
+ idr_remove(&rx_ring->bufs_idr, buf_id);
+ spin_unlock_bh(&rx_ring->idr_lock);
+
+ rxcb = ATH11K_SKB_RXCB(skb);
+ dma_unmap_single(ab->dev, rxcb->paddr,
+ skb->len + skb_tailroom(skb),
+ DMA_FROM_DEVICE);
+ dev_kfree_skb_any(skb);
+
+ num_buf_freed++;
+ }
+
+ ath11k_dp_rx_link_desc_return(ab, desc,
+ HAL_WBM_REL_BM_ACT_PUT_IN_IDLE);
+ }
+
+ ath11k_hal_srng_access_end(ab, srng);
+
+ spin_unlock_bh(&srng->lock);
+
+ if (num_buf_freed)
+ ath11k_dp_rxbufs_replenish(ab, mac_id, rx_ring, num_buf_freed,
+ ab->hw_params.hal_params->rx_buf_rbm);
+
+ return budget - quota;
+}
+
+void ath11k_dp_process_reo_status(struct ath11k_base *ab)
+{
+ struct ath11k_dp *dp = &ab->dp;
+ struct hal_srng *srng;
+ struct dp_reo_cmd *cmd, *tmp;
+ bool found = false;
+ u32 *reo_desc;
+ u16 tag;
+ struct hal_reo_status reo_status;
+
+ srng = &ab->hal.srng_list[dp->reo_status_ring.ring_id];
+
+ memset(&reo_status, 0, sizeof(reo_status));
+
+ spin_lock_bh(&srng->lock);
+
+ ath11k_hal_srng_access_begin(ab, srng);
+
+ while ((reo_desc = ath11k_hal_srng_dst_get_next_entry(ab, srng))) {
+ tag = FIELD_GET(HAL_SRNG_TLV_HDR_TAG, *reo_desc);
+
+ switch (tag) {
+ case HAL_REO_GET_QUEUE_STATS_STATUS:
+ ath11k_hal_reo_status_queue_stats(ab, reo_desc,
+ &reo_status);
+ break;
+ case HAL_REO_FLUSH_QUEUE_STATUS:
+ ath11k_hal_reo_flush_queue_status(ab, reo_desc,
+ &reo_status);
+ break;
+ case HAL_REO_FLUSH_CACHE_STATUS:
+ ath11k_hal_reo_flush_cache_status(ab, reo_desc,
+ &reo_status);
+ break;
+ case HAL_REO_UNBLOCK_CACHE_STATUS:
+ ath11k_hal_reo_unblk_cache_status(ab, reo_desc,
+ &reo_status);
+ break;
+ case HAL_REO_FLUSH_TIMEOUT_LIST_STATUS:
+ ath11k_hal_reo_flush_timeout_list_status(ab, reo_desc,
+ &reo_status);
+ break;
+ case HAL_REO_DESCRIPTOR_THRESHOLD_REACHED_STATUS:
+ ath11k_hal_reo_desc_thresh_reached_status(ab, reo_desc,
+ &reo_status);
+ break;
+ case HAL_REO_UPDATE_RX_REO_QUEUE_STATUS:
+ ath11k_hal_reo_update_rx_reo_queue_status(ab, reo_desc,
+ &reo_status);
+ break;
+ default:
+ ath11k_warn(ab, "Unknown reo status type %d\n", tag);
+ continue;
+ }
+
+ spin_lock_bh(&dp->reo_cmd_lock);
+ list_for_each_entry_safe(cmd, tmp, &dp->reo_cmd_list, list) {
+ if (reo_status.uniform_hdr.cmd_num == cmd->cmd_num) {
+ found = true;
+ list_del(&cmd->list);
+ break;
+ }
+ }
+ spin_unlock_bh(&dp->reo_cmd_lock);
+
+ if (found) {
+ cmd->handler(dp, (void *)&cmd->data,
+ reo_status.uniform_hdr.cmd_status);
+ kfree(cmd);
+ }
+
+ found = false;
+ }
+
+ ath11k_hal_srng_access_end(ab, srng);
+
+ spin_unlock_bh(&srng->lock);
+}
+
+void ath11k_dp_rx_pdev_free(struct ath11k_base *ab, int mac_id)
+{
+ struct ath11k *ar = ab->pdevs[mac_id].ar;
+
+ ath11k_dp_rx_pdev_srng_free(ar);
+ ath11k_dp_rxdma_pdev_buf_free(ar);
+}
+
+int ath11k_dp_rx_pdev_alloc(struct ath11k_base *ab, int mac_id)
+{
+ struct ath11k *ar = ab->pdevs[mac_id].ar;
+ struct ath11k_pdev_dp *dp = &ar->dp;
+ u32 ring_id;
+ int i;
+ int ret;
+
+ ret = ath11k_dp_rx_pdev_srng_alloc(ar);
+ if (ret) {
+ ath11k_warn(ab, "failed to setup rx srngs\n");
+ return ret;
+ }
+
+ ret = ath11k_dp_rxdma_pdev_buf_setup(ar);
+ if (ret) {
+ ath11k_warn(ab, "failed to setup rxdma ring\n");
+ return ret;
+ }
+
+ ring_id = dp->rx_refill_buf_ring.refill_buf_ring.ring_id;
+ ret = ath11k_dp_tx_htt_srng_setup(ab, ring_id, mac_id, HAL_RXDMA_BUF);
+ if (ret) {
+ ath11k_warn(ab, "failed to configure rx_refill_buf_ring %d\n",
+ ret);
+ return ret;
+ }
+
+ if (ab->hw_params.rx_mac_buf_ring) {
+ for (i = 0; i < ab->hw_params.num_rxmda_per_pdev; i++) {
+ ring_id = dp->rx_mac_buf_ring[i].ring_id;
+ ret = ath11k_dp_tx_htt_srng_setup(ab, ring_id,
+ mac_id + i, HAL_RXDMA_BUF);
+ if (ret) {
+ ath11k_warn(ab, "failed to configure rx_mac_buf_ring%d %d\n",
+ i, ret);
+ return ret;
+ }
+ }
+ }
+
+ for (i = 0; i < ab->hw_params.num_rxmda_per_pdev; i++) {
+ ring_id = dp->rxdma_err_dst_ring[i].ring_id;
+ ret = ath11k_dp_tx_htt_srng_setup(ab, ring_id,
+ mac_id + i, HAL_RXDMA_DST);
+ if (ret) {
+ ath11k_warn(ab, "failed to configure rxdma_err_dest_ring%d %d\n",
+ i, ret);
+ return ret;
+ }
+ }
+
+ if (!ab->hw_params.rxdma1_enable)
+ goto config_refill_ring;
+
+ ring_id = dp->rxdma_mon_buf_ring.refill_buf_ring.ring_id;
+ ret = ath11k_dp_tx_htt_srng_setup(ab, ring_id,
+ mac_id, HAL_RXDMA_MONITOR_BUF);
+ if (ret) {
+ ath11k_warn(ab, "failed to configure rxdma_mon_buf_ring %d\n",
+ ret);
+ return ret;
+ }
+ ret = ath11k_dp_tx_htt_srng_setup(ab,
+ dp->rxdma_mon_dst_ring.ring_id,
+ mac_id, HAL_RXDMA_MONITOR_DST);
+ if (ret) {
+ ath11k_warn(ab, "failed to configure rxdma_mon_dst_ring %d\n",
+ ret);
+ return ret;
+ }
+ ret = ath11k_dp_tx_htt_srng_setup(ab,
+ dp->rxdma_mon_desc_ring.ring_id,
+ mac_id, HAL_RXDMA_MONITOR_DESC);
+ if (ret) {
+ ath11k_warn(ab, "failed to configure rxdma_mon_dst_ring %d\n",
+ ret);
+ return ret;
+ }
+
+config_refill_ring:
+ for (i = 0; i < ab->hw_params.num_rxmda_per_pdev; i++) {
+ ring_id = dp->rx_mon_status_refill_ring[i].refill_buf_ring.ring_id;
+ ret = ath11k_dp_tx_htt_srng_setup(ab, ring_id, mac_id + i,
+ HAL_RXDMA_MONITOR_STATUS);
+ if (ret) {
+ ath11k_warn(ab,
+ "failed to configure mon_status_refill_ring%d %d\n",
+ i, ret);
+ return ret;
+ }
+ }
+
+ return 0;
+}
+
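+/* Split the remaining MSDU length into per-buffer fragments: each monitor
+ * buffer holds at most DP_RX_BUFFER_SIZE minus the HAL rx descriptor.
+ */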
+static void ath11k_dp_mon_set_frag_len(u32 *total_len, u32 *frag_len)
+{
+ if (*total_len >= (DP_RX_BUFFER_SIZE - sizeof(struct hal_rx_desc))) {
+ *frag_len = DP_RX_BUFFER_SIZE - sizeof(struct hal_rx_desc);
+ *total_len -= *frag_len;
+ } else {
+ *frag_len = *total_len;
+ *total_len = 0;
+ }
+}
+
+static
+int ath11k_dp_rx_monitor_link_desc_return(struct ath11k *ar,
+ void *p_last_buf_addr_info,
+ u8 mac_id)
+{
+ struct ath11k_pdev_dp *dp = &ar->dp;
+ struct dp_srng *dp_srng;
+ void *hal_srng;
+ void *src_srng_desc;
+ int ret = 0;
+
+ if (ar->ab->hw_params.rxdma1_enable) {
+ dp_srng = &dp->rxdma_mon_desc_ring;
+ hal_srng = &ar->ab->hal.srng_list[dp_srng->ring_id];
+ } else {
+ dp_srng = &ar->ab->dp.wbm_desc_rel_ring;
+ hal_srng = &ar->ab->hal.srng_list[dp_srng->ring_id];
+ }
+
+ ath11k_hal_srng_access_begin(ar->ab, hal_srng);
+
+ src_srng_desc = ath11k_hal_srng_src_get_next_entry(ar->ab, hal_srng);
+
+ if (src_srng_desc) {
+ struct ath11k_buffer_addr *src_desc =
+ (struct ath11k_buffer_addr *)src_srng_desc;
+
+ *src_desc = *((struct ath11k_buffer_addr *)p_last_buf_addr_info);
+ } else {
+ ath11k_dbg(ar->ab, ATH11K_DBG_DATA,
+ "Monitor Link Desc Ring %d Full", mac_id);
+ ret = -ENOMEM;
+ }
+
+ ath11k_hal_srng_access_end(ar->ab, hal_srng);
+ return ret;
+}
+
+static
+void ath11k_dp_rx_mon_next_link_desc_get(void *rx_msdu_link_desc,
+ dma_addr_t *paddr, u32 *sw_cookie,
+ u8 *rbm,
+ void **pp_buf_addr_info)
+{
+ struct hal_rx_msdu_link *msdu_link =
+ (struct hal_rx_msdu_link *)rx_msdu_link_desc;
+ struct ath11k_buffer_addr *buf_addr_info;
+
+ buf_addr_info = (struct ath11k_buffer_addr *)&msdu_link->buf_addr_info;
+
+ ath11k_hal_rx_buf_addr_info_get(buf_addr_info, paddr, sw_cookie, rbm);
+
+ *pp_buf_addr_info = (void *)buf_addr_info;
+}
+
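+/* Force the skb to exactly len bytes: trim if it is longer, otherwise grow
+ * the tailroom (reallocating if needed) and extend the data area.
+ */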
+static int ath11k_dp_pkt_set_pktlen(struct sk_buff *skb, u32 len)
+{
+ if (skb->len > len) {
+ skb_trim(skb, len);
+ } else {
+ if (skb_tailroom(skb) < len - skb->len) {
+ if ((pskb_expand_head(skb, 0,
+ len - skb->len - skb_tailroom(skb),
+ GFP_ATOMIC))) {
+ dev_kfree_skb_any(skb);
+ return -ENOMEM;
+ }
+ }
+ skb_put(skb, (len - skb->len));
+ }
+ return 0;
+}
+
+static void ath11k_hal_rx_msdu_list_get(struct ath11k *ar,
+ void *msdu_link_desc,
+ struct hal_rx_msdu_list *msdu_list,
+ u16 *num_msdus)
+{
+ struct hal_rx_msdu_details *msdu_details = NULL;
+ struct rx_msdu_desc *msdu_desc_info = NULL;
+ struct hal_rx_msdu_link *msdu_link = NULL;
+ int i;
+ u32 last = FIELD_PREP(RX_MSDU_DESC_INFO0_LAST_MSDU_IN_MPDU, 1);
+ u32 first = FIELD_PREP(RX_MSDU_DESC_INFO0_FIRST_MSDU_IN_MPDU, 1);
+ u8 tmp = 0;
+
+ msdu_link = (struct hal_rx_msdu_link *)msdu_link_desc;
+ msdu_details = &msdu_link->msdu_link[0];
+
+ for (i = 0; i < HAL_RX_NUM_MSDU_DESC; i++) {
+ if (FIELD_GET(BUFFER_ADDR_INFO0_ADDR,
+ msdu_details[i].buf_addr_info.info0) == 0) {
+ msdu_desc_info = &msdu_details[i - 1].rx_msdu_info;
+ msdu_desc_info->info0 |= last;
+ break;
+ }
+ msdu_desc_info = &msdu_details[i].rx_msdu_info;
+
+ if (!i)
+ msdu_desc_info->info0 |= first;
+ else if (i == (HAL_RX_NUM_MSDU_DESC - 1))
+ msdu_desc_info->info0 |= last;
+ msdu_list->msdu_info[i].msdu_flags = msdu_desc_info->info0;
+ msdu_list->msdu_info[i].msdu_len =
+ HAL_RX_MSDU_PKT_LENGTH_GET(msdu_desc_info->info0);
+ msdu_list->sw_cookie[i] =
+ FIELD_GET(BUFFER_ADDR_INFO1_SW_COOKIE,
+ msdu_details[i].buf_addr_info.info1);
+ tmp = FIELD_GET(BUFFER_ADDR_INFO1_RET_BUF_MGR,
+ msdu_details[i].buf_addr_info.info1);
+ msdu_list->rbm[i] = tmp;
+ }
+ *num_msdus = i;
+}
+
+static u32 ath11k_dp_rx_mon_comp_ppduid(u32 msdu_ppdu_id, u32 *ppdu_id,
+ u32 *rx_bufs_used)
+{
+ u32 ret = 0;
+
+ if ((*ppdu_id < msdu_ppdu_id) &&
+ ((msdu_ppdu_id - *ppdu_id) < DP_NOT_PPDU_ID_WRAP_AROUND)) {
+ *ppdu_id = msdu_ppdu_id;
+ ret = msdu_ppdu_id;
+ } else if ((*ppdu_id > msdu_ppdu_id) &&
+ ((*ppdu_id - msdu_ppdu_id) > DP_NOT_PPDU_ID_WRAP_AROUND)) {
+		/* mon_dst is behind mon_status: skip this dst_ring entry
+		 * and free it
+		 */
+ *rx_bufs_used += 1;
+ *ppdu_id = msdu_ppdu_id;
+ ret = msdu_ppdu_id;
+ }
+ return ret;
+}
+
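+/* A set MSDU_CONTINUATION flag means the MSDU spills into the next buffer:
+ * accumulate its total length and carve out per-buffer fragment lengths;
+ * the final (or only) buffer completes the MSDU and decrements msdu_cnt.
+ */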
+static void ath11k_dp_mon_get_buf_len(struct hal_rx_msdu_desc_info *info,
+ bool *is_frag, u32 *total_len,
+ u32 *frag_len, u32 *msdu_cnt)
+{
+ if (info->msdu_flags & RX_MSDU_DESC_INFO0_MSDU_CONTINUATION) {
+ if (!*is_frag) {
+ *total_len = info->msdu_len;
+ *is_frag = true;
+ }
+ ath11k_dp_mon_set_frag_len(total_len,
+ frag_len);
+ } else {
+ if (*is_frag) {
+ ath11k_dp_mon_set_frag_len(total_len,
+ frag_len);
+ } else {
+ *frag_len = info->msdu_len;
+ }
+ *is_frag = false;
+ *msdu_cnt -= 1;
+ }
+}
+
+static u32
+ath11k_dp_rx_mon_mpdu_pop(struct ath11k *ar, int mac_id,
+ void *ring_entry, struct sk_buff **head_msdu,
+ struct sk_buff **tail_msdu, u32 *npackets,
+ u32 *ppdu_id)
+{
+ struct ath11k_pdev_dp *dp = &ar->dp;
+ struct ath11k_mon_data *pmon = (struct ath11k_mon_data *)&dp->mon_data;
+ struct dp_rxdma_ring *rx_ring = &dp->rxdma_mon_buf_ring;
+ struct sk_buff *msdu = NULL, *last = NULL;
+ struct hal_rx_msdu_list msdu_list;
+ void *p_buf_addr_info, *p_last_buf_addr_info;
+ struct hal_rx_desc *rx_desc;
+ void *rx_msdu_link_desc;
+ dma_addr_t paddr;
+ u16 num_msdus = 0;
+ u32 rx_buf_size, rx_pkt_offset, sw_cookie;
+ u32 rx_bufs_used = 0, i = 0;
+ u32 msdu_ppdu_id = 0, msdu_cnt = 0;
+ u32 total_len = 0, frag_len = 0;
+ bool is_frag, is_first_msdu;
+ bool drop_mpdu = false;
+ struct ath11k_skb_rxcb *rxcb;
+ struct hal_reo_entrance_ring *ent_desc =
+ (struct hal_reo_entrance_ring *)ring_entry;
+ int buf_id;
+ u32 rx_link_buf_info[2];
+ u8 rbm;
+
+ if (!ar->ab->hw_params.rxdma1_enable)
+ rx_ring = &dp->rx_refill_buf_ring;
+
+ ath11k_hal_rx_reo_ent_buf_paddr_get(ring_entry, &paddr,
+ &sw_cookie,
+ &p_last_buf_addr_info, &rbm,
+ &msdu_cnt);
+
+ if (FIELD_GET(HAL_REO_ENTR_RING_INFO1_RXDMA_PUSH_REASON,
+ ent_desc->info1) ==
+ HAL_REO_DEST_RING_PUSH_REASON_ERR_DETECTED) {
+ u8 rxdma_err =
+ FIELD_GET(HAL_REO_ENTR_RING_INFO1_RXDMA_ERROR_CODE,
+ ent_desc->info1);
+ if (rxdma_err == HAL_REO_ENTR_RING_RXDMA_ECODE_FLUSH_REQUEST_ERR ||
+ rxdma_err == HAL_REO_ENTR_RING_RXDMA_ECODE_MPDU_LEN_ERR ||
+ rxdma_err == HAL_REO_ENTR_RING_RXDMA_ECODE_OVERFLOW_ERR) {
+ drop_mpdu = true;
+ pmon->rx_mon_stats.dest_mpdu_drop++;
+ }
+ }
+
+ is_frag = false;
+ is_first_msdu = true;
+
+ do {
+ if (pmon->mon_last_linkdesc_paddr == paddr) {
+ pmon->rx_mon_stats.dup_mon_linkdesc_cnt++;
+ return rx_bufs_used;
+ }
+
+ if (ar->ab->hw_params.rxdma1_enable)
+ rx_msdu_link_desc =
+ (void *)pmon->link_desc_banks[sw_cookie].vaddr +
+ (paddr - pmon->link_desc_banks[sw_cookie].paddr);
+ else
+ rx_msdu_link_desc =
+ (void *)ar->ab->dp.link_desc_banks[sw_cookie].vaddr +
+ (paddr - ar->ab->dp.link_desc_banks[sw_cookie].paddr);
+
+ ath11k_hal_rx_msdu_list_get(ar, rx_msdu_link_desc, &msdu_list,
+ &num_msdus);
+
+ for (i = 0; i < num_msdus; i++) {
+ u32 l2_hdr_offset;
+
+ if (pmon->mon_last_buf_cookie == msdu_list.sw_cookie[i]) {
+ ath11k_dbg(ar->ab, ATH11K_DBG_DATA,
+ "i %d last_cookie %d is same\n",
+ i, pmon->mon_last_buf_cookie);
+ drop_mpdu = true;
+ pmon->rx_mon_stats.dup_mon_buf_cnt++;
+ continue;
+ }
+ buf_id = FIELD_GET(DP_RXDMA_BUF_COOKIE_BUF_ID,
+ msdu_list.sw_cookie[i]);
+
+ spin_lock_bh(&rx_ring->idr_lock);
+ msdu = idr_find(&rx_ring->bufs_idr, buf_id);
+ spin_unlock_bh(&rx_ring->idr_lock);
+ if (!msdu) {
+ ath11k_dbg(ar->ab, ATH11K_DBG_DATA,
+ "msdu_pop: invalid buf_id %d\n", buf_id);
+ break;
+ }
+ rxcb = ATH11K_SKB_RXCB(msdu);
+ if (!rxcb->unmapped) {
+ dma_unmap_single(ar->ab->dev, rxcb->paddr,
+ msdu->len +
+ skb_tailroom(msdu),
+ DMA_FROM_DEVICE);
+ rxcb->unmapped = 1;
+ }
+ if (drop_mpdu) {
+ ath11k_dbg(ar->ab, ATH11K_DBG_DATA,
+ "i %d drop msdu %p *ppdu_id %x\n",
+ i, msdu, *ppdu_id);
+ dev_kfree_skb_any(msdu);
+ msdu = NULL;
+ goto next_msdu;
+ }
+
+ rx_desc = (struct hal_rx_desc *)msdu->data;
+
+ rx_pkt_offset = sizeof(struct hal_rx_desc);
+ l2_hdr_offset = ath11k_dp_rx_h_msdu_end_l3pad(ar->ab, rx_desc);
+
+ if (is_first_msdu) {
+ if (!ath11k_dp_rxdesc_mpdu_valid(ar->ab, rx_desc)) {
+ drop_mpdu = true;
+ dev_kfree_skb_any(msdu);
+ msdu = NULL;
+ pmon->mon_last_linkdesc_paddr = paddr;
+ goto next_msdu;
+ }
+
+ msdu_ppdu_id =
+ ath11k_dp_rxdesc_get_ppduid(ar->ab, rx_desc);
+
+ if (ath11k_dp_rx_mon_comp_ppduid(msdu_ppdu_id,
+ ppdu_id,
+ &rx_bufs_used)) {
+ if (rx_bufs_used) {
+ drop_mpdu = true;
+ dev_kfree_skb_any(msdu);
+ msdu = NULL;
+ goto next_msdu;
+ }
+ return rx_bufs_used;
+ }
+ pmon->mon_last_linkdesc_paddr = paddr;
+ is_first_msdu = false;
+ }
+ ath11k_dp_mon_get_buf_len(&msdu_list.msdu_info[i],
+ &is_frag, &total_len,
+ &frag_len, &msdu_cnt);
+ rx_buf_size = rx_pkt_offset + l2_hdr_offset + frag_len;
+
+ ath11k_dp_pkt_set_pktlen(msdu, rx_buf_size);
+
+ if (!(*head_msdu))
+ *head_msdu = msdu;
+ else if (last)
+ last->next = msdu;
+
+ last = msdu;
+next_msdu:
+ pmon->mon_last_buf_cookie = msdu_list.sw_cookie[i];
+ rx_bufs_used++;
+ spin_lock_bh(&rx_ring->idr_lock);
+ idr_remove(&rx_ring->bufs_idr, buf_id);
+ spin_unlock_bh(&rx_ring->idr_lock);
+ }
+
+ ath11k_hal_rx_buf_addr_info_set(rx_link_buf_info, paddr, sw_cookie, rbm);
+
+ ath11k_dp_rx_mon_next_link_desc_get(rx_msdu_link_desc, &paddr,
+ &sw_cookie, &rbm,
+ &p_buf_addr_info);
+
+ if (ar->ab->hw_params.rxdma1_enable) {
+ if (ath11k_dp_rx_monitor_link_desc_return(ar,
+ p_last_buf_addr_info,
+ dp->mac_id))
+ ath11k_dbg(ar->ab, ATH11K_DBG_DATA,
+ "dp_rx_monitor_link_desc_return failed");
+ } else {
+ ath11k_dp_rx_link_desc_return(ar->ab, rx_link_buf_info,
+ HAL_WBM_REL_BM_ACT_PUT_IN_IDLE);
+ }
+
+ p_last_buf_addr_info = p_buf_addr_info;
+
+ } while (paddr && msdu_cnt);
+
+ if (last)
+ last->next = NULL;
+
+ *tail_msdu = msdu;
+
+ if (msdu_cnt == 0)
+ *npackets = 1;
+
+ return rx_bufs_used;
+}
+
+static void ath11k_dp_rx_msdus_set_payload(struct ath11k *ar, struct sk_buff *msdu)
+{
+ u32 rx_pkt_offset, l2_hdr_offset;
+
+ rx_pkt_offset = ar->ab->hw_params.hal_desc_sz;
+ l2_hdr_offset = ath11k_dp_rx_h_msdu_end_l3pad(ar->ab,
+ (struct hal_rx_desc *)msdu->data);
+ skb_pull(msdu, rx_pkt_offset + l2_hdr_offset);
+}
+
+static struct sk_buff *
+ath11k_dp_rx_mon_merg_msdus(struct ath11k *ar,
+ u32 mac_id, struct sk_buff *head_msdu,
+ struct sk_buff *last_msdu,
+ struct ieee80211_rx_status *rxs, bool *fcs_err)
+{
+ struct ath11k_base *ab = ar->ab;
+ struct sk_buff *msdu, *prev_buf;
+ struct hal_rx_desc *rx_desc;
+ char *hdr_desc;
+ u8 *dest, decap_format;
+ struct ieee80211_hdr_3addr *wh;
+ struct rx_attention *rx_attention;
+ u32 err_bitmap;
+
+ if (!head_msdu)
+ goto err_merge_fail;
+
+ rx_desc = (struct hal_rx_desc *)head_msdu->data;
+ rx_attention = ath11k_dp_rx_get_attention(ab, rx_desc);
+ err_bitmap = ath11k_dp_rx_h_attn_mpdu_err(rx_attention);
+
+ if (err_bitmap & DP_RX_MPDU_ERR_FCS)
+ *fcs_err = true;
+
+ if (ath11k_dp_rxdesc_get_mpdulen_err(rx_attention))
+ return NULL;
+
+ decap_format = ath11k_dp_rx_h_msdu_start_decap_type(ab, rx_desc);
+
+ ath11k_dp_rx_h_ppdu(ar, rx_desc, rxs);
+
+ if (decap_format == DP_RX_DECAP_TYPE_RAW) {
+ ath11k_dp_rx_msdus_set_payload(ar, head_msdu);
+
+ prev_buf = head_msdu;
+ msdu = head_msdu->next;
+
+ while (msdu) {
+ ath11k_dp_rx_msdus_set_payload(ar, msdu);
+
+ prev_buf = msdu;
+ msdu = msdu->next;
+ }
+
+ prev_buf->next = NULL;
+
+ skb_trim(prev_buf, prev_buf->len - HAL_RX_FCS_LEN);
+ } else if (decap_format == DP_RX_DECAP_TYPE_NATIVE_WIFI) {
+ u8 qos_pkt = 0;
+
+ rx_desc = (struct hal_rx_desc *)head_msdu->data;
+ hdr_desc = ath11k_dp_rxdesc_get_80211hdr(ab, rx_desc);
+
+ /* Base size */
+ wh = (struct ieee80211_hdr_3addr *)hdr_desc;
+
+ if (ieee80211_is_data_qos(wh->frame_control))
+ qos_pkt = 1;
+
+ msdu = head_msdu;
+
+ while (msdu) {
+ ath11k_dp_rx_msdus_set_payload(ar, msdu);
+ if (qos_pkt) {
+ dest = skb_push(msdu, sizeof(__le16));
+ if (!dest)
+ goto err_merge_fail;
+ memcpy(dest, hdr_desc, sizeof(struct ieee80211_qos_hdr));
+ }
+ prev_buf = msdu;
+ msdu = msdu->next;
+ }
+ dest = skb_put(prev_buf, HAL_RX_FCS_LEN);
+ if (!dest)
+ goto err_merge_fail;
+
+ ath11k_dbg(ab, ATH11K_DBG_DATA,
+ "mpdu_buf %p mpdu_buf->len %u",
+ prev_buf, prev_buf->len);
+ } else {
+ ath11k_dbg(ab, ATH11K_DBG_DATA,
+ "decap format %d is not supported!\n",
+ decap_format);
+ goto err_merge_fail;
+ }
+
+ return head_msdu;
+
+err_merge_fail:
+ return NULL;
+}
+
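+/* Lay out he_data1..he_data6 in little-endian order matching
+ * struct ieee80211_radiotap_he, which mac80211 consumes when
+ * RX_FLAG_RADIOTAP_HE is set.
+ */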
+static void
+ath11k_dp_rx_update_radiotap_he(struct hal_rx_mon_ppdu_info *rx_status,
+ u8 *rtap_buf)
+{
+ u32 rtap_len = 0;
+
+ put_unaligned_le16(rx_status->he_data1, &rtap_buf[rtap_len]);
+ rtap_len += 2;
+
+ put_unaligned_le16(rx_status->he_data2, &rtap_buf[rtap_len]);
+ rtap_len += 2;
+
+ put_unaligned_le16(rx_status->he_data3, &rtap_buf[rtap_len]);
+ rtap_len += 2;
+
+ put_unaligned_le16(rx_status->he_data4, &rtap_buf[rtap_len]);
+ rtap_len += 2;
+
+ put_unaligned_le16(rx_status->he_data5, &rtap_buf[rtap_len]);
+ rtap_len += 2;
+
+ put_unaligned_le16(rx_status->he_data6, &rtap_buf[rtap_len]);
+}
+
+static void
+ath11k_dp_rx_update_radiotap_he_mu(struct hal_rx_mon_ppdu_info *rx_status,
+ u8 *rtap_buf)
+{
+ u32 rtap_len = 0;
+
+ put_unaligned_le16(rx_status->he_flags1, &rtap_buf[rtap_len]);
+ rtap_len += 2;
+
+ put_unaligned_le16(rx_status->he_flags2, &rtap_buf[rtap_len]);
+ rtap_len += 2;
+
+ rtap_buf[rtap_len] = rx_status->he_RU[0];
+ rtap_len += 1;
+
+ rtap_buf[rtap_len] = rx_status->he_RU[1];
+ rtap_len += 1;
+
+ rtap_buf[rtap_len] = rx_status->he_RU[2];
+ rtap_len += 1;
+
+ rtap_buf[rtap_len] = rx_status->he_RU[3];
+}
+
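+/* Derive the mac80211 rx status (encoding, rate, signal, mactime) from
+ * the parsed ppdu info. For HE ppdus the radiotap HE/HE-MU payload that
+ * mac80211 expects is pushed to the head of the skb.
+ */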
+static void ath11k_update_radiotap(struct ath11k *ar,
+ struct hal_rx_mon_ppdu_info *ppduinfo,
+ struct sk_buff *mon_skb,
+ struct ieee80211_rx_status *rxs)
+{
+ struct ieee80211_supported_band *sband;
+ u8 *ptr = NULL;
+
+ rxs->flag |= RX_FLAG_MACTIME_START;
+ rxs->signal = ppduinfo->rssi_comb + ATH11K_DEFAULT_NOISE_FLOOR;
+
+ if (ppduinfo->nss)
+ rxs->nss = ppduinfo->nss;
+
+ if (ppduinfo->he_mu_flags) {
+ rxs->flag |= RX_FLAG_RADIOTAP_HE_MU;
+ rxs->encoding = RX_ENC_HE;
+ ptr = skb_push(mon_skb, sizeof(struct ieee80211_radiotap_he_mu));
+ ath11k_dp_rx_update_radiotap_he_mu(ppduinfo, ptr);
+ } else if (ppduinfo->he_flags) {
+ rxs->flag |= RX_FLAG_RADIOTAP_HE;
+ rxs->encoding = RX_ENC_HE;
+ ptr = skb_push(mon_skb, sizeof(struct ieee80211_radiotap_he));
+ ath11k_dp_rx_update_radiotap_he(ppduinfo, ptr);
+ rxs->rate_idx = ppduinfo->rate;
+ } else if (ppduinfo->vht_flags) {
+ rxs->encoding = RX_ENC_VHT;
+ rxs->rate_idx = ppduinfo->rate;
+ } else if (ppduinfo->ht_flags) {
+ rxs->encoding = RX_ENC_HT;
+ rxs->rate_idx = ppduinfo->rate;
+ } else {
+ rxs->encoding = RX_ENC_LEGACY;
+ sband = &ar->mac.sbands[rxs->band];
+ rxs->rate_idx = ath11k_mac_hw_rate_to_idx(sband, ppduinfo->rate,
+ ppduinfo->cck_flag);
+ }
+
+ rxs->mactime = ppduinfo->tsft;
+}
+
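+/* Deliver one monitor mpdu to mac80211. Every msdu except the last is
+ * flagged RX_FLAG_AMSDU_MORE so mac80211 treats the chain as subframes
+ * of a single a-msdu.
+ */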
+static int ath11k_dp_rx_mon_deliver(struct ath11k *ar, u32 mac_id,
+ struct sk_buff *head_msdu,
+ struct hal_rx_mon_ppdu_info *ppduinfo,
+ struct sk_buff *tail_msdu,
+ struct napi_struct *napi)
+{
+ struct ath11k_pdev_dp *dp = &ar->dp;
+ struct sk_buff *mon_skb, *skb_next, *header;
+ struct ieee80211_rx_status *rxs = &dp->rx_status;
+ bool fcs_err = false;
+
+ mon_skb = ath11k_dp_rx_mon_merg_msdus(ar, mac_id, head_msdu,
+ tail_msdu, rxs, &fcs_err);
+
+ if (!mon_skb)
+ goto mon_deliver_fail;
+
+ header = mon_skb;
+
+ rxs->flag = 0;
+
+ if (fcs_err)
+ rxs->flag = RX_FLAG_FAILED_FCS_CRC;
+
+ do {
+ skb_next = mon_skb->next;
+ if (!skb_next)
+ rxs->flag &= ~RX_FLAG_AMSDU_MORE;
+ else
+ rxs->flag |= RX_FLAG_AMSDU_MORE;
+
+ if (mon_skb == header) {
+ header = NULL;
+ rxs->flag &= ~RX_FLAG_ALLOW_SAME_PN;
+ } else {
+ rxs->flag |= RX_FLAG_ALLOW_SAME_PN;
+ }
+ rxs->flag |= RX_FLAG_ONLY_MONITOR;
+ ath11k_update_radiotap(ar, ppduinfo, mon_skb, rxs);
+
+ ath11k_dp_rx_deliver_msdu(ar, napi, mon_skb, rxs);
+ mon_skb = skb_next;
+ } while (mon_skb);
+ rxs->flag = 0;
+
+ return 0;
+
+mon_deliver_fail:
+ mon_skb = head_msdu;
+ while (mon_skb) {
+ skb_next = mon_skb->next;
+ dev_kfree_skb_any(mon_skb);
+ mon_skb = skb_next;
+ }
+ return -EINVAL;
+}
+
+/* The destination ring processing is considered stuck if the destination
+ * ring does not move while the status ring moves 16 PPDUs. As a
+ * workaround, the destination ring processing skips such a destination
+ * ring PPDU.
+ */
+#define MON_DEST_RING_STUCK_MAX_CNT 16
+
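+/* Reap the monitor destination ring and deliver every completed mpdu of
+ * the current ppdu. Reaping stops once the destination ring reports a
+ * ppdu id that no longer matches the one parsed from the status ring.
+ */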
+static void ath11k_dp_rx_mon_dest_process(struct ath11k *ar, int mac_id,
+ u32 quota, struct napi_struct *napi)
+{
+ struct ath11k_pdev_dp *dp = &ar->dp;
+ struct ath11k_mon_data *pmon = (struct ath11k_mon_data *)&dp->mon_data;
+ const struct ath11k_hw_hal_params *hal_params;
+ void *ring_entry;
+ void *mon_dst_srng;
+ u32 ppdu_id;
+ u32 rx_bufs_used;
+ u32 ring_id;
+ struct ath11k_pdev_mon_stats *rx_mon_stats;
+ u32 npackets = 0;
+ u32 mpdu_rx_bufs_used;
+
+ if (ar->ab->hw_params.rxdma1_enable)
+ ring_id = dp->rxdma_mon_dst_ring.ring_id;
+ else
+ ring_id = dp->rxdma_err_dst_ring[mac_id].ring_id;
+
+ mon_dst_srng = &ar->ab->hal.srng_list[ring_id];
+
+ if (!mon_dst_srng) {
+ ath11k_warn(ar->ab,
+ "HAL Monitor Destination Ring Init Failed -- %p",
+ mon_dst_srng);
+ return;
+ }
+
+ spin_lock_bh(&pmon->mon_lock);
+
+ ath11k_hal_srng_access_begin(ar->ab, mon_dst_srng);
+
+ ppdu_id = pmon->mon_ppdu_info.ppdu_id;
+ rx_bufs_used = 0;
+ rx_mon_stats = &pmon->rx_mon_stats;
+
+ while ((ring_entry = ath11k_hal_srng_dst_peek(ar->ab, mon_dst_srng))) {
+ struct sk_buff *head_msdu, *tail_msdu;
+
+ head_msdu = NULL;
+ tail_msdu = NULL;
+
+ mpdu_rx_bufs_used = ath11k_dp_rx_mon_mpdu_pop(ar, mac_id, ring_entry,
+ &head_msdu,
+ &tail_msdu,
+ &npackets, &ppdu_id);
+
+ rx_bufs_used += mpdu_rx_bufs_used;
+
+ if (mpdu_rx_bufs_used) {
+ dp->mon_dest_ring_stuck_cnt = 0;
+ } else {
+ dp->mon_dest_ring_stuck_cnt++;
+ rx_mon_stats->dest_mon_not_reaped++;
+ }
+
+ if (dp->mon_dest_ring_stuck_cnt > MON_DEST_RING_STUCK_MAX_CNT) {
+ rx_mon_stats->dest_mon_stuck++;
+ ath11k_dbg(ar->ab, ATH11K_DBG_DATA,
+ "status ring ppdu_id=%d dest ring ppdu_id=%d mon_dest_ring_stuck_cnt=%d dest_mon_not_reaped=%u dest_mon_stuck=%u\n",
+ pmon->mon_ppdu_info.ppdu_id, ppdu_id,
+ dp->mon_dest_ring_stuck_cnt,
+ rx_mon_stats->dest_mon_not_reaped,
+ rx_mon_stats->dest_mon_stuck);
+ pmon->mon_ppdu_info.ppdu_id = ppdu_id;
+ continue;
+ }
+
+ if (ppdu_id != pmon->mon_ppdu_info.ppdu_id) {
+ pmon->mon_ppdu_status = DP_PPDU_STATUS_START;
+ ath11k_dbg(ar->ab, ATH11K_DBG_DATA,
+ "dest_rx: new ppdu_id %x != status ppdu_id %x dest_mon_not_reaped = %u dest_mon_stuck = %u\n",
+ ppdu_id, pmon->mon_ppdu_info.ppdu_id,
+ rx_mon_stats->dest_mon_not_reaped,
+ rx_mon_stats->dest_mon_stuck);
+ break;
+ }
+ if (head_msdu && tail_msdu) {
+ ath11k_dp_rx_mon_deliver(ar, dp->mac_id, head_msdu,
+ &pmon->mon_ppdu_info,
+ tail_msdu, napi);
+ rx_mon_stats->dest_mpdu_done++;
+ }
+
+ ring_entry = ath11k_hal_srng_dst_get_next_entry(ar->ab,
+ mon_dst_srng);
+ }
+ ath11k_hal_srng_access_end(ar->ab, mon_dst_srng);
+
+ spin_unlock_bh(&pmon->mon_lock);
+
+ if (rx_bufs_used) {
+ rx_mon_stats->dest_ppdu_done++;
+ hal_params = ar->ab->hw_params.hal_params;
+
+ if (ar->ab->hw_params.rxdma1_enable)
+ ath11k_dp_rxbufs_replenish(ar->ab, dp->mac_id,
+ &dp->rxdma_mon_buf_ring,
+ rx_bufs_used,
+ hal_params->rx_buf_rbm);
+ else
+ ath11k_dp_rxbufs_replenish(ar->ab, dp->mac_id,
+ &dp->rx_refill_buf_ring,
+ rx_bufs_used,
+ hal_params->rx_buf_rbm);
+ }
+}
+
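+/* Reap the monitor status ring and parse each status buffer into ppdu
+ * info. On ppdu completion the destination ring is processed, and rx
+ * statistics are updated for the ppdu's peer.
+ */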
+int ath11k_dp_rx_process_mon_status(struct ath11k_base *ab, int mac_id,
+ struct napi_struct *napi, int budget)
+{
+ struct ath11k *ar = ath11k_ab_to_ar(ab, mac_id);
+ enum hal_rx_mon_status hal_status;
+ struct sk_buff *skb;
+ struct sk_buff_head skb_list;
+ struct ath11k_peer *peer;
+ struct ath11k_sta *arsta;
+ int num_buffs_reaped = 0;
+ u32 rx_buf_sz;
+ u16 log_type;
+ struct ath11k_mon_data *pmon = (struct ath11k_mon_data *)&ar->dp.mon_data;
+ struct ath11k_pdev_mon_stats *rx_mon_stats = &pmon->rx_mon_stats;
+ struct hal_rx_mon_ppdu_info *ppdu_info = &pmon->mon_ppdu_info;
+
+ __skb_queue_head_init(&skb_list);
+
+ num_buffs_reaped = ath11k_dp_rx_reap_mon_status_ring(ab, mac_id, &budget,
+ &skb_list);
+ if (!num_buffs_reaped)
+ goto exit;
+
+ memset(ppdu_info, 0, sizeof(*ppdu_info));
+ ppdu_info->peer_id = HAL_INVALID_PEERID;
+
+ while ((skb = __skb_dequeue(&skb_list))) {
+ if (ath11k_debugfs_is_pktlog_lite_mode_enabled(ar)) {
+ log_type = ATH11K_PKTLOG_TYPE_LITE_RX;
+ rx_buf_sz = DP_RX_BUFFER_SIZE_LITE;
+ } else if (ath11k_debugfs_is_pktlog_rx_stats_enabled(ar)) {
+ log_type = ATH11K_PKTLOG_TYPE_RX_STATBUF;
+ rx_buf_sz = DP_RX_BUFFER_SIZE;
+ } else {
+ log_type = ATH11K_PKTLOG_TYPE_INVALID;
+ rx_buf_sz = 0;
+ }
+
+ if (log_type != ATH11K_PKTLOG_TYPE_INVALID)
+ trace_ath11k_htt_rxdesc(ar, skb->data, log_type, rx_buf_sz);
+
+ memset(ppdu_info, 0, sizeof(*ppdu_info));
+ ppdu_info->peer_id = HAL_INVALID_PEERID;
+ hal_status = ath11k_hal_rx_parse_mon_status(ab, ppdu_info, skb);
+
+ if (test_bit(ATH11K_FLAG_MONITOR_STARTED, &ar->monitor_flags) &&
+ pmon->mon_ppdu_status == DP_PPDU_STATUS_START &&
+ hal_status == HAL_TLV_STATUS_PPDU_DONE) {
+ rx_mon_stats->status_ppdu_done++;
+ pmon->mon_ppdu_status = DP_PPDU_STATUS_DONE;
+ ath11k_dp_rx_mon_dest_process(ar, mac_id, budget, napi);
+ pmon->mon_ppdu_status = DP_PPDU_STATUS_START;
+ }
+
+ if (ppdu_info->peer_id == HAL_INVALID_PEERID ||
+ hal_status != HAL_RX_MON_STATUS_PPDU_DONE) {
+ dev_kfree_skb_any(skb);
+ continue;
+ }
+
+ rcu_read_lock();
+ spin_lock_bh(&ab->base_lock);
+ peer = ath11k_peer_find_by_id(ab, ppdu_info->peer_id);
+
+ if (!peer || !peer->sta) {
+ ath11k_dbg(ab, ATH11K_DBG_DATA,
+ "failed to find the peer with peer_id %d\n",
+ ppdu_info->peer_id);
+ goto next_skb;
+ }
+
+ arsta = (struct ath11k_sta *)peer->sta->drv_priv;
+ ath11k_dp_rx_update_peer_stats(arsta, ppdu_info);
+
+ if (ath11k_debugfs_is_pktlog_peer_valid(ar, peer->addr))
+ trace_ath11k_htt_rxdesc(ar, skb->data, log_type, rx_buf_sz);
+
+next_skb:
+ spin_unlock_bh(&ab->base_lock);
+ rcu_read_unlock();
+
+ dev_kfree_skb_any(skb);
+ memset(ppdu_info, 0, sizeof(*ppdu_info));
+ ppdu_info->peer_id = HAL_INVALID_PEERID;
+ }
+exit:
+ return num_buffs_reaped;
+}
+
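+/* Full monitor mode variant of mpdu_pop: walk the msdu link descriptors
+ * referenced by a sw monitor ring entry and build the head/tail msdu
+ * chain, returning the number of rx buffers consumed. An end-of-ppdu
+ * entry consumes no buffers and is only reflected in sw_mon_entries.
+ */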
+static u32
+ath11k_dp_rx_full_mon_mpdu_pop(struct ath11k *ar,
+ void *ring_entry, struct sk_buff **head_msdu,
+ struct sk_buff **tail_msdu,
+ struct hal_sw_mon_ring_entries *sw_mon_entries)
+{
+ struct ath11k_pdev_dp *dp = &ar->dp;
+ struct ath11k_mon_data *pmon = &dp->mon_data;
+ struct dp_rxdma_ring *rx_ring = &dp->rxdma_mon_buf_ring;
+ struct sk_buff *msdu = NULL, *last = NULL;
+ struct hal_sw_monitor_ring *sw_desc = ring_entry;
+ struct hal_rx_msdu_list msdu_list;
+ struct hal_rx_desc *rx_desc;
+ struct ath11k_skb_rxcb *rxcb;
+ void *rx_msdu_link_desc;
+ void *p_buf_addr_info, *p_last_buf_addr_info;
+ int buf_id, i = 0;
+ u32 rx_buf_size, rx_pkt_offset, l2_hdr_offset;
+ u32 rx_bufs_used = 0, msdu_cnt = 0;
+ u32 total_len = 0, frag_len = 0, sw_cookie;
+ u16 num_msdus = 0;
+ u8 rxdma_err, rbm;
+ bool is_frag, is_first_msdu;
+ bool drop_mpdu = false;
+
+ ath11k_hal_rx_sw_mon_ring_buf_paddr_get(ring_entry, sw_mon_entries);
+
+ sw_cookie = sw_mon_entries->mon_dst_sw_cookie;
+ sw_mon_entries->end_of_ppdu = false;
+ sw_mon_entries->drop_ppdu = false;
+ p_last_buf_addr_info = sw_mon_entries->dst_buf_addr_info;
+ msdu_cnt = sw_mon_entries->msdu_cnt;
+
+ sw_mon_entries->end_of_ppdu =
+ FIELD_GET(HAL_SW_MON_RING_INFO0_END_OF_PPDU, sw_desc->info0);
+ if (sw_mon_entries->end_of_ppdu)
+ return rx_bufs_used;
+
+ if (FIELD_GET(HAL_SW_MON_RING_INFO0_RXDMA_PUSH_REASON,
+ sw_desc->info0) ==
+ HAL_REO_DEST_RING_PUSH_REASON_ERR_DETECTED) {
+ rxdma_err =
+ FIELD_GET(HAL_SW_MON_RING_INFO0_RXDMA_ERROR_CODE,
+ sw_desc->info0);
+ if (rxdma_err == HAL_REO_ENTR_RING_RXDMA_ECODE_FLUSH_REQUEST_ERR ||
+ rxdma_err == HAL_REO_ENTR_RING_RXDMA_ECODE_MPDU_LEN_ERR ||
+ rxdma_err == HAL_REO_ENTR_RING_RXDMA_ECODE_OVERFLOW_ERR) {
+ pmon->rx_mon_stats.dest_mpdu_drop++;
+ drop_mpdu = true;
+ }
+ }
+
+ is_frag = false;
+ is_first_msdu = true;
+
+ do {
+ rx_msdu_link_desc =
+ (u8 *)pmon->link_desc_banks[sw_cookie].vaddr +
+ (sw_mon_entries->mon_dst_paddr -
+ pmon->link_desc_banks[sw_cookie].paddr);
+
+ ath11k_hal_rx_msdu_list_get(ar, rx_msdu_link_desc, &msdu_list,
+ &num_msdus);
+
+ for (i = 0; i < num_msdus; i++) {
+ buf_id = FIELD_GET(DP_RXDMA_BUF_COOKIE_BUF_ID,
+ msdu_list.sw_cookie[i]);
+
+ spin_lock_bh(&rx_ring->idr_lock);
+ msdu = idr_find(&rx_ring->bufs_idr, buf_id);
+ if (!msdu) {
+ ath11k_dbg(ar->ab, ATH11K_DBG_DATA,
+ "full mon msdu_pop: invalid buf_id %d\n",
+ buf_id);
+ spin_unlock_bh(&rx_ring->idr_lock);
+ break;
+ }
+ idr_remove(&rx_ring->bufs_idr, buf_id);
+ spin_unlock_bh(&rx_ring->idr_lock);
+
+ rxcb = ATH11K_SKB_RXCB(msdu);
+ if (!rxcb->unmapped) {
+ dma_unmap_single(ar->ab->dev, rxcb->paddr,
+ msdu->len +
+ skb_tailroom(msdu),
+ DMA_FROM_DEVICE);
+ rxcb->unmapped = 1;
+ }
+ if (drop_mpdu) {
+ ath11k_dbg(ar->ab, ATH11K_DBG_DATA,
+ "full mon: i %d drop msdu %p *ppdu_id %x\n",
+ i, msdu, sw_mon_entries->ppdu_id);
+ dev_kfree_skb_any(msdu);
+ msdu_cnt--;
+ goto next_msdu;
+ }
+
+ rx_desc = (struct hal_rx_desc *)msdu->data;
+
+ rx_pkt_offset = sizeof(struct hal_rx_desc);
+ l2_hdr_offset = ath11k_dp_rx_h_msdu_end_l3pad(ar->ab, rx_desc);
+
+ if (is_first_msdu) {
+ if (!ath11k_dp_rxdesc_mpdu_valid(ar->ab, rx_desc)) {
+ drop_mpdu = true;
+ dev_kfree_skb_any(msdu);
+ msdu = NULL;
+ goto next_msdu;
+ }
+ is_first_msdu = false;
+ }
+
+ ath11k_dp_mon_get_buf_len(&msdu_list.msdu_info[i],
+ &is_frag, &total_len,
+ &frag_len, &msdu_cnt);
+
+ rx_buf_size = rx_pkt_offset + l2_hdr_offset + frag_len;
+
+ ath11k_dp_pkt_set_pktlen(msdu, rx_buf_size);
+
+ if (!(*head_msdu))
+ *head_msdu = msdu;
+ else if (last)
+ last->next = msdu;
+
+ last = msdu;
+next_msdu:
+ rx_bufs_used++;
+ }
+
+ ath11k_dp_rx_mon_next_link_desc_get(rx_msdu_link_desc,
+ &sw_mon_entries->mon_dst_paddr,
+ &sw_mon_entries->mon_dst_sw_cookie,
+ &rbm,
+ &p_buf_addr_info);
+
+ if (ath11k_dp_rx_monitor_link_desc_return(ar,
+ p_last_buf_addr_info,
+ dp->mac_id))
+ ath11k_dbg(ar->ab, ATH11K_DBG_DATA,
+ "full mon: dp_rx_monitor_link_desc_return failed\n");
+
+ p_last_buf_addr_info = p_buf_addr_info;
+
+ } while (sw_mon_entries->mon_dst_paddr && msdu_cnt);
+
+ if (last)
+ last->next = NULL;
+
+ *tail_msdu = msdu;
+
+ return rx_bufs_used;
+}
+
+static int ath11k_dp_rx_full_mon_prepare_mpdu(struct ath11k_dp *dp,
+ struct dp_full_mon_mpdu *mon_mpdu,
+ struct sk_buff *head,
+ struct sk_buff *tail)
+{
+ mon_mpdu = kzalloc(sizeof(*mon_mpdu), GFP_ATOMIC);
+ if (!mon_mpdu)
+ return -ENOMEM;
+
+ list_add_tail(&mon_mpdu->list, &dp->dp_full_mon_mpdu_list);
+ mon_mpdu->head = head;
+ mon_mpdu->tail = tail;
+
+ return 0;
+}
+
+static void ath11k_dp_rx_full_mon_drop_ppdu(struct ath11k_dp *dp,
+ struct dp_full_mon_mpdu *mon_mpdu)
+{
+ struct dp_full_mon_mpdu *tmp;
+ struct sk_buff *tmp_msdu, *skb_next;
+
+ if (list_empty(&dp->dp_full_mon_mpdu_list))
+ return;
+
+ list_for_each_entry_safe(mon_mpdu, tmp, &dp->dp_full_mon_mpdu_list, list) {
+ list_del(&mon_mpdu->list);
+
+ tmp_msdu = mon_mpdu->head;
+ while (tmp_msdu) {
+ skb_next = tmp_msdu->next;
+ dev_kfree_skb_any(tmp_msdu);
+ tmp_msdu = skb_next;
+ }
+
+ kfree(mon_mpdu);
+ }
+}
+
+static int ath11k_dp_rx_full_mon_deliver_ppdu(struct ath11k *ar,
+ int mac_id,
+ struct ath11k_mon_data *pmon,
+ struct napi_struct *napi)
+{
+ struct ath11k_pdev_mon_stats *rx_mon_stats;
+ struct dp_full_mon_mpdu *tmp;
+ struct dp_full_mon_mpdu *mon_mpdu = pmon->mon_mpdu;
+ struct sk_buff *head_msdu, *tail_msdu;
+ struct ath11k_base *ab = ar->ab;
+ struct ath11k_dp *dp = &ab->dp;
+	int ret = 0;
+
+ rx_mon_stats = &pmon->rx_mon_stats;
+
+ list_for_each_entry_safe(mon_mpdu, tmp, &dp->dp_full_mon_mpdu_list, list) {
+ list_del(&mon_mpdu->list);
+ head_msdu = mon_mpdu->head;
+ tail_msdu = mon_mpdu->tail;
+ if (head_msdu && tail_msdu) {
+ ret = ath11k_dp_rx_mon_deliver(ar, mac_id, head_msdu,
+ &pmon->mon_ppdu_info,
+ tail_msdu, napi);
+ rx_mon_stats->dest_mpdu_done++;
+ ath11k_dbg(ar->ab, ATH11K_DBG_DATA, "full mon: deliver ppdu\n");
+ }
+ kfree(mon_mpdu);
+ }
+
+ return ret;
+}
+
+static int
+ath11k_dp_rx_process_full_mon_status_ring(struct ath11k_base *ab, int mac_id,
+ struct napi_struct *napi, int budget)
+{
+ struct ath11k *ar = ab->pdevs[mac_id].ar;
+ struct ath11k_pdev_dp *dp = &ar->dp;
+ struct ath11k_mon_data *pmon = &dp->mon_data;
+ struct hal_sw_mon_ring_entries *sw_mon_entries;
+ int quota = 0, work = 0, count;
+
+ sw_mon_entries = &pmon->sw_mon_entries;
+
+ while (pmon->hold_mon_dst_ring) {
+ quota = ath11k_dp_rx_process_mon_status(ab, mac_id,
+ napi, 1);
+ if (pmon->buf_state == DP_MON_STATUS_MATCH) {
+ count = sw_mon_entries->status_buf_count;
+ if (count > 1) {
+ quota += ath11k_dp_rx_process_mon_status(ab, mac_id,
+ napi, count);
+ }
+
+ ath11k_dp_rx_full_mon_deliver_ppdu(ar, dp->mac_id,
+ pmon, napi);
+ pmon->hold_mon_dst_ring = false;
+ } else if (!pmon->mon_status_paddr ||
+ pmon->buf_state == DP_MON_STATUS_LEAD) {
+ sw_mon_entries->drop_ppdu = true;
+ pmon->hold_mon_dst_ring = false;
+ }
+
+ if (!quota)
+ break;
+
+ work += quota;
+ }
+
+ if (sw_mon_entries->drop_ppdu)
+ ath11k_dp_rx_full_mon_drop_ppdu(&ab->dp, pmon->mon_mpdu);
+
+ return work;
+}
+
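+/* Full monitor mode NAPI handler: mpdus reaped from the destination
+ * ring are queued on dp_full_mon_mpdu_list until an end-of-ppdu entry
+ * is seen; the status ring is then reaped until the matching status
+ * buffer is found and the whole ppdu is delivered.
+ */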
+static int ath11k_dp_full_mon_process_rx(struct ath11k_base *ab, int mac_id,
+ struct napi_struct *napi, int budget)
+{
+ struct ath11k *ar = ab->pdevs[mac_id].ar;
+ struct ath11k_pdev_dp *dp = &ar->dp;
+ struct ath11k_mon_data *pmon = &dp->mon_data;
+ struct hal_sw_mon_ring_entries *sw_mon_entries;
+ struct ath11k_pdev_mon_stats *rx_mon_stats;
+ struct sk_buff *head_msdu, *tail_msdu;
+ void *mon_dst_srng = &ar->ab->hal.srng_list[dp->rxdma_mon_dst_ring.ring_id];
+ void *ring_entry;
+ u32 rx_bufs_used = 0, mpdu_rx_bufs_used;
+ int quota = 0, ret;
+ bool break_dst_ring = false;
+
+ spin_lock_bh(&pmon->mon_lock);
+
+ sw_mon_entries = &pmon->sw_mon_entries;
+ rx_mon_stats = &pmon->rx_mon_stats;
+
+ if (pmon->hold_mon_dst_ring) {
+ spin_unlock_bh(&pmon->mon_lock);
+ goto reap_status_ring;
+ }
+
+ ath11k_hal_srng_access_begin(ar->ab, mon_dst_srng);
+ while ((ring_entry = ath11k_hal_srng_dst_peek(ar->ab, mon_dst_srng))) {
+ head_msdu = NULL;
+ tail_msdu = NULL;
+
+ mpdu_rx_bufs_used = ath11k_dp_rx_full_mon_mpdu_pop(ar, ring_entry,
+ &head_msdu,
+ &tail_msdu,
+ sw_mon_entries);
+ rx_bufs_used += mpdu_rx_bufs_used;
+
+ if (!sw_mon_entries->end_of_ppdu) {
+ if (head_msdu) {
+ ret = ath11k_dp_rx_full_mon_prepare_mpdu(&ab->dp,
+ pmon->mon_mpdu,
+ head_msdu,
+ tail_msdu);
+ if (ret)
+ break_dst_ring = true;
+ }
+
+ goto next_entry;
+ } else {
+ if (!sw_mon_entries->ppdu_id &&
+ !sw_mon_entries->mon_status_paddr) {
+ break_dst_ring = true;
+ goto next_entry;
+ }
+ }
+
+ rx_mon_stats->dest_ppdu_done++;
+ pmon->mon_ppdu_status = DP_PPDU_STATUS_START;
+ pmon->buf_state = DP_MON_STATUS_LAG;
+ pmon->mon_status_paddr = sw_mon_entries->mon_status_paddr;
+ pmon->hold_mon_dst_ring = true;
+next_entry:
+ ring_entry = ath11k_hal_srng_dst_get_next_entry(ar->ab,
+ mon_dst_srng);
+ if (break_dst_ring)
+ break;
+ }
+
+ ath11k_hal_srng_access_end(ar->ab, mon_dst_srng);
+ spin_unlock_bh(&pmon->mon_lock);
+
+ if (rx_bufs_used) {
+ ath11k_dp_rxbufs_replenish(ar->ab, dp->mac_id,
+ &dp->rxdma_mon_buf_ring,
+ rx_bufs_used,
+ HAL_RX_BUF_RBM_SW3_BM);
+ }
+
+reap_status_ring:
+ quota = ath11k_dp_rx_process_full_mon_status_ring(ab, mac_id,
+ napi, budget);
+
+ return quota;
+}
+
+int ath11k_dp_rx_process_mon_rings(struct ath11k_base *ab, int mac_id,
+ struct napi_struct *napi, int budget)
+{
+ struct ath11k *ar = ath11k_ab_to_ar(ab, mac_id);
+ int ret = 0;
+
+ if (test_bit(ATH11K_FLAG_MONITOR_STARTED, &ar->monitor_flags) &&
+ ab->hw_params.full_monitor_mode)
+ ret = ath11k_dp_full_mon_process_rx(ab, mac_id, napi, budget);
+ else
+ ret = ath11k_dp_rx_process_mon_status(ab, mac_id, napi, budget);
+
+ return ret;
+}
+
+static int ath11k_dp_rx_pdev_mon_status_attach(struct ath11k *ar)
+{
+ struct ath11k_pdev_dp *dp = &ar->dp;
+ struct ath11k_mon_data *pmon = (struct ath11k_mon_data *)&dp->mon_data;
+
+ skb_queue_head_init(&pmon->rx_status_q);
+
+ pmon->mon_ppdu_status = DP_PPDU_STATUS_START;
+
+ memset(&pmon->rx_mon_stats, 0,
+ sizeof(pmon->rx_mon_stats));
+ return 0;
+}
+
+int ath11k_dp_rx_pdev_mon_attach(struct ath11k *ar)
+{
+ struct ath11k_pdev_dp *dp = &ar->dp;
+ struct ath11k_mon_data *pmon = &dp->mon_data;
+ struct hal_srng *mon_desc_srng = NULL;
+ struct dp_srng *dp_srng;
+ int ret = 0;
+ u32 n_link_desc = 0;
+
+ ret = ath11k_dp_rx_pdev_mon_status_attach(ar);
+ if (ret) {
+ ath11k_warn(ar->ab, "pdev_mon_status_attach() failed");
+ return ret;
+ }
+
+	/* if rxdma1_enable is false, there is no need to set up
+	 * the rxdma_mon_desc_ring.
+	 */
+ if (!ar->ab->hw_params.rxdma1_enable)
+ return 0;
+
+ dp_srng = &dp->rxdma_mon_desc_ring;
+ n_link_desc = dp_srng->size /
+ ath11k_hal_srng_get_entrysize(ar->ab, HAL_RXDMA_MONITOR_DESC);
+ mon_desc_srng =
+ &ar->ab->hal.srng_list[dp->rxdma_mon_desc_ring.ring_id];
+
+ ret = ath11k_dp_link_desc_setup(ar->ab, pmon->link_desc_banks,
+ HAL_RXDMA_MONITOR_DESC, mon_desc_srng,
+ n_link_desc);
+ if (ret) {
+ ath11k_warn(ar->ab, "mon_link_desc_pool_setup() failed");
+ return ret;
+ }
+ pmon->mon_last_linkdesc_paddr = 0;
+ pmon->mon_last_buf_cookie = DP_RX_DESC_COOKIE_MAX + 1;
+ spin_lock_init(&pmon->mon_lock);
+
+ return 0;
+}
+
+static int ath11k_dp_mon_link_free(struct ath11k *ar)
+{
+ struct ath11k_pdev_dp *dp = &ar->dp;
+ struct ath11k_mon_data *pmon = &dp->mon_data;
+
+ ath11k_dp_link_desc_cleanup(ar->ab, pmon->link_desc_banks,
+ HAL_RXDMA_MONITOR_DESC,
+ &dp->rxdma_mon_desc_ring);
+ return 0;
+}
+
+int ath11k_dp_rx_pdev_mon_detach(struct ath11k *ar)
+{
+ ath11k_dp_mon_link_free(ar);
+ return 0;
+}
+
+int ath11k_dp_rx_pktlog_start(struct ath11k_base *ab)
+{
+ /* start reap timer */
+ mod_timer(&ab->mon_reap_timer,
+ jiffies + msecs_to_jiffies(ATH11K_MON_TIMER_INTERVAL));
+
+ return 0;
+}
+
+int ath11k_dp_rx_pktlog_stop(struct ath11k_base *ab, bool stop_timer)
+{
+ int ret;
+
+ if (stop_timer)
+ del_timer_sync(&ab->mon_reap_timer);
+
+ /* reap all the monitor related rings */
+ ret = ath11k_dp_purge_mon_ring(ab);
+ if (ret) {
+ ath11k_warn(ab, "failed to purge dp mon ring: %d\n", ret);
+ return ret;
+ }
+
+ return 0;
+}
diff --git a/drivers/net/wireless/ath/ath11k/dp_rx.h b/drivers/net/wireless/ath/ath11k/dp_rx.h
new file mode 100644
index 0000000000..623da3bf9d
--- /dev/null
+++ b/drivers/net/wireless/ath/ath11k/dp_rx.h
@@ -0,0 +1,98 @@
+/* SPDX-License-Identifier: BSD-3-Clause-Clear */
+/*
+ * Copyright (c) 2018-2019 The Linux Foundation. All rights reserved.
+ */
+#ifndef ATH11K_DP_RX_H
+#define ATH11K_DP_RX_H
+
+#include "core.h"
+#include "rx_desc.h"
+#include "debug.h"
+
+#define DP_MAX_NWIFI_HDR_LEN 30
+
+#define DP_RX_MPDU_ERR_FCS BIT(0)
+#define DP_RX_MPDU_ERR_DECRYPT BIT(1)
+#define DP_RX_MPDU_ERR_TKIP_MIC BIT(2)
+#define DP_RX_MPDU_ERR_AMSDU_ERR BIT(3)
+#define DP_RX_MPDU_ERR_OVERFLOW BIT(4)
+#define DP_RX_MPDU_ERR_MSDU_LEN BIT(5)
+#define DP_RX_MPDU_ERR_MPDU_LEN BIT(6)
+#define DP_RX_MPDU_ERR_UNENCRYPTED_FRAME BIT(7)
+
+enum dp_rx_decap_type {
+ DP_RX_DECAP_TYPE_RAW,
+ DP_RX_DECAP_TYPE_NATIVE_WIFI,
+ DP_RX_DECAP_TYPE_ETHERNET2_DIX,
+ DP_RX_DECAP_TYPE_8023,
+};
+
+struct ath11k_dp_amsdu_subframe_hdr {
+ u8 dst[ETH_ALEN];
+ u8 src[ETH_ALEN];
+ __be16 len;
+} __packed;
+
+struct ath11k_dp_rfc1042_hdr {
+ u8 llc_dsap;
+ u8 llc_ssap;
+ u8 llc_ctrl;
+ u8 snap_oui[3];
+ __be16 snap_type;
+} __packed;
+
+int ath11k_dp_rx_ampdu_start(struct ath11k *ar,
+ struct ieee80211_ampdu_params *params);
+int ath11k_dp_rx_ampdu_stop(struct ath11k *ar,
+ struct ieee80211_ampdu_params *params);
+int ath11k_dp_peer_rx_pn_replay_config(struct ath11k_vif *arvif,
+ const u8 *peer_addr,
+ enum set_key_cmd key_cmd,
+ struct ieee80211_key_conf *key);
+void ath11k_peer_frags_flush(struct ath11k *ar, struct ath11k_peer *peer);
+void ath11k_peer_rx_tid_cleanup(struct ath11k *ar, struct ath11k_peer *peer);
+void ath11k_peer_rx_tid_delete(struct ath11k *ar,
+ struct ath11k_peer *peer, u8 tid);
+int ath11k_peer_rx_tid_setup(struct ath11k *ar, const u8 *peer_mac, int vdev_id,
+ u8 tid, u32 ba_win_sz, u16 ssn,
+ enum hal_pn_type pn_type);
+void ath11k_dp_htt_htc_t2h_msg_handler(struct ath11k_base *ab,
+ struct sk_buff *skb);
+int ath11k_dp_pdev_reo_setup(struct ath11k_base *ab);
+void ath11k_dp_pdev_reo_cleanup(struct ath11k_base *ab);
+int ath11k_dp_rx_pdev_alloc(struct ath11k_base *ab, int pdev_idx);
+void ath11k_dp_rx_pdev_free(struct ath11k_base *ab, int pdev_idx);
+void ath11k_dp_reo_cmd_list_cleanup(struct ath11k_base *ab);
+void ath11k_dp_process_reo_status(struct ath11k_base *ab);
+int ath11k_dp_process_rxdma_err(struct ath11k_base *ab, int mac_id, int budget);
+int ath11k_dp_rx_process_wbm_err(struct ath11k_base *ab,
+ struct napi_struct *napi, int budget);
+int ath11k_dp_process_rx_err(struct ath11k_base *ab, struct napi_struct *napi,
+ int budget);
+int ath11k_dp_process_rx(struct ath11k_base *ab, int mac_id,
+ struct napi_struct *napi,
+ int budget);
+int ath11k_dp_rxbufs_replenish(struct ath11k_base *ab, int mac_id,
+ struct dp_rxdma_ring *rx_ring,
+ int req_entries,
+ enum hal_rx_buf_return_buf_manager mgr);
+int ath11k_dp_htt_tlv_iter(struct ath11k_base *ab, const void *ptr, size_t len,
+ int (*iter)(struct ath11k_base *ar, u16 tag, u16 len,
+ const void *ptr, void *data),
+ void *data);
+int ath11k_dp_rx_process_mon_rings(struct ath11k_base *ab, int mac_id,
+ struct napi_struct *napi, int budget);
+int ath11k_dp_rx_process_mon_status(struct ath11k_base *ab, int mac_id,
+ struct napi_struct *napi, int budget);
+int ath11k_dp_rx_mon_status_bufs_replenish(struct ath11k_base *ab, int mac_id,
+ struct dp_rxdma_ring *rx_ring,
+ int req_entries,
+ enum hal_rx_buf_return_buf_manager mgr);
+int ath11k_dp_rx_pdev_mon_detach(struct ath11k *ar);
+int ath11k_dp_rx_pdev_mon_attach(struct ath11k *ar);
+int ath11k_peer_rx_frag_setup(struct ath11k *ar, const u8 *peer_mac, int vdev_id);
+
+int ath11k_dp_rx_pktlog_start(struct ath11k_base *ab);
+int ath11k_dp_rx_pktlog_stop(struct ath11k_base *ab, bool stop_timer);
+
+#endif /* ATH11K_DP_RX_H */
diff --git a/drivers/net/wireless/ath/ath11k/dp_tx.c b/drivers/net/wireless/ath/ath11k/dp_tx.c
new file mode 100644
index 0000000000..0dda76f7a4
--- /dev/null
+++ b/drivers/net/wireless/ath/ath11k/dp_tx.c
@@ -0,0 +1,1297 @@
+// SPDX-License-Identifier: BSD-3-Clause-Clear
+/*
+ * Copyright (c) 2018-2019 The Linux Foundation. All rights reserved.
+ * Copyright (c) 2022 Qualcomm Innovation Center, Inc. All rights reserved.
+ */
+
+#include "core.h"
+#include "dp_tx.h"
+#include "debug.h"
+#include "debugfs_sta.h"
+#include "hw.h"
+#include "peer.h"
+#include "mac.h"
+
+static enum hal_tcl_encap_type
+ath11k_dp_tx_get_encap_type(struct ath11k_vif *arvif, struct sk_buff *skb)
+{
+ struct ieee80211_tx_info *tx_info = IEEE80211_SKB_CB(skb);
+ struct ath11k_base *ab = arvif->ar->ab;
+
+ if (test_bit(ATH11K_FLAG_RAW_MODE, &ab->dev_flags))
+ return HAL_TCL_ENCAP_TYPE_RAW;
+
+ if (tx_info->flags & IEEE80211_TX_CTL_HW_80211_ENCAP)
+ return HAL_TCL_ENCAP_TYPE_ETHERNET;
+
+ return HAL_TCL_ENCAP_TYPE_NATIVE_WIFI;
+}
+
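+/* Convert an 802.11 QoS data frame to native wifi encap by stripping
+ * the QoS control field and clearing the QoS bit in frame_control.
+ */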
+static void ath11k_dp_tx_encap_nwifi(struct sk_buff *skb)
+{
+ struct ieee80211_hdr *hdr = (void *)skb->data;
+ u8 *qos_ctl;
+
+ if (!ieee80211_is_data_qos(hdr->frame_control))
+ return;
+
+ qos_ctl = ieee80211_get_qos_ctl(hdr);
+ memmove(skb->data + IEEE80211_QOS_CTL_LEN,
+ skb->data, (void *)qos_ctl - (void *)skb->data);
+ skb_pull(skb, IEEE80211_QOS_CTL_LEN);
+
+ hdr = (void *)skb->data;
+ hdr->frame_control &= ~__cpu_to_le16(IEEE80211_STYPE_QOS_DATA);
+}
+
+static u8 ath11k_dp_tx_get_tid(struct sk_buff *skb)
+{
+ struct ieee80211_hdr *hdr = (void *)skb->data;
+ struct ath11k_skb_cb *cb = ATH11K_SKB_CB(skb);
+
+ if (cb->flags & ATH11K_SKB_HW_80211_ENCAP)
+ return skb->priority & IEEE80211_QOS_CTL_TID_MASK;
+ else if (!ieee80211_is_data_qos(hdr->frame_control))
+ return HAL_DESC_REO_NON_QOS_TID;
+ else
+ return skb->priority & IEEE80211_QOS_CTL_TID_MASK;
+}
+
+enum hal_encrypt_type ath11k_dp_tx_get_encrypt_type(u32 cipher)
+{
+ switch (cipher) {
+ case WLAN_CIPHER_SUITE_WEP40:
+ return HAL_ENCRYPT_TYPE_WEP_40;
+ case WLAN_CIPHER_SUITE_WEP104:
+ return HAL_ENCRYPT_TYPE_WEP_104;
+ case WLAN_CIPHER_SUITE_TKIP:
+ return HAL_ENCRYPT_TYPE_TKIP_MIC;
+ case WLAN_CIPHER_SUITE_CCMP:
+ return HAL_ENCRYPT_TYPE_CCMP_128;
+ case WLAN_CIPHER_SUITE_CCMP_256:
+ return HAL_ENCRYPT_TYPE_CCMP_256;
+ case WLAN_CIPHER_SUITE_GCMP:
+ return HAL_ENCRYPT_TYPE_GCMP_128;
+ case WLAN_CIPHER_SUITE_GCMP_256:
+ return HAL_ENCRYPT_TYPE_AES_GCMP_256;
+ default:
+ return HAL_ENCRYPT_TYPE_OPEN;
+ }
+}
+
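+/* Map one msdu onto a TCL data ring and hand it over to the hardware.
+ * If the selected ring has no free descriptor, the next ring is tried
+ * until every ring has been visited once (tracked via ring_map).
+ */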
+int ath11k_dp_tx(struct ath11k *ar, struct ath11k_vif *arvif,
+ struct ath11k_sta *arsta, struct sk_buff *skb)
+{
+ struct ath11k_base *ab = ar->ab;
+ struct ath11k_dp *dp = &ab->dp;
+ struct hal_tx_info ti = {0};
+ struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
+ struct ath11k_skb_cb *skb_cb = ATH11K_SKB_CB(skb);
+ struct hal_srng *tcl_ring;
+ struct ieee80211_hdr *hdr = (void *)skb->data;
+ struct dp_tx_ring *tx_ring;
+ void *hal_tcl_desc;
+ u8 pool_id;
+ u8 hal_ring_id;
+ int ret;
+ u32 ring_selector = 0;
+ u8 ring_map = 0;
+ bool tcl_ring_retry;
+
+ if (unlikely(test_bit(ATH11K_FLAG_CRASH_FLUSH, &ar->ab->dev_flags)))
+ return -ESHUTDOWN;
+
+ if (unlikely(!(info->flags & IEEE80211_TX_CTL_HW_80211_ENCAP) &&
+ !ieee80211_is_data(hdr->frame_control)))
+ return -ENOTSUPP;
+
+ pool_id = skb_get_queue_mapping(skb) & (ATH11K_HW_MAX_QUEUES - 1);
+
+ ring_selector = ab->hw_params.hw_ops->get_ring_selector(skb);
+
+tcl_ring_sel:
+ tcl_ring_retry = false;
+
+ ti.ring_id = ring_selector % ab->hw_params.max_tx_ring;
+ ti.rbm_id = ab->hw_params.hal_params->tcl2wbm_rbm_map[ti.ring_id].rbm_id;
+
+ ring_map |= BIT(ti.ring_id);
+
+ tx_ring = &dp->tx_ring[ti.ring_id];
+
+ spin_lock_bh(&tx_ring->tx_idr_lock);
+ ret = idr_alloc(&tx_ring->txbuf_idr, skb, 0,
+ DP_TX_IDR_SIZE - 1, GFP_ATOMIC);
+ spin_unlock_bh(&tx_ring->tx_idr_lock);
+
+ if (unlikely(ret < 0)) {
+ if (ring_map == (BIT(ab->hw_params.max_tx_ring) - 1) ||
+ !ab->hw_params.tcl_ring_retry) {
+ atomic_inc(&ab->soc_stats.tx_err.misc_fail);
+ return -ENOSPC;
+ }
+
+ /* Check if the next ring is available */
+ ring_selector++;
+ goto tcl_ring_sel;
+ }
+
+ ti.desc_id = FIELD_PREP(DP_TX_DESC_ID_MAC_ID, ar->pdev_idx) |
+ FIELD_PREP(DP_TX_DESC_ID_MSDU_ID, ret) |
+ FIELD_PREP(DP_TX_DESC_ID_POOL_ID, pool_id);
+ ti.encap_type = ath11k_dp_tx_get_encap_type(arvif, skb);
+
+ if (ieee80211_has_a4(hdr->frame_control) &&
+ is_multicast_ether_addr(hdr->addr3) && arsta &&
+ arsta->use_4addr_set) {
+ ti.meta_data_flags = arsta->tcl_metadata;
+ ti.flags0 |= FIELD_PREP(HAL_TCL_DATA_CMD_INFO1_TO_FW, 1);
+ } else {
+ ti.meta_data_flags = arvif->tcl_metadata;
+ }
+
+ if (unlikely(ti.encap_type == HAL_TCL_ENCAP_TYPE_RAW)) {
+ if (skb_cb->flags & ATH11K_SKB_CIPHER_SET) {
+ ti.encrypt_type =
+ ath11k_dp_tx_get_encrypt_type(skb_cb->cipher);
+
+ if (ieee80211_has_protected(hdr->frame_control))
+ skb_put(skb, IEEE80211_CCMP_MIC_LEN);
+ } else {
+ ti.encrypt_type = HAL_ENCRYPT_TYPE_OPEN;
+ }
+ }
+
+ ti.addr_search_flags = arvif->hal_addr_search_flags;
+ ti.search_type = arvif->search_type;
+ ti.type = HAL_TCL_DESC_TYPE_BUFFER;
+ ti.pkt_offset = 0;
+ ti.lmac_id = ar->lmac_id;
+ ti.bss_ast_hash = arvif->ast_hash;
+ ti.bss_ast_idx = arvif->ast_idx;
+ ti.dscp_tid_tbl_idx = 0;
+
+ if (likely(skb->ip_summed == CHECKSUM_PARTIAL &&
+ ti.encap_type != HAL_TCL_ENCAP_TYPE_RAW)) {
+ ti.flags0 |= FIELD_PREP(HAL_TCL_DATA_CMD_INFO1_IP4_CKSUM_EN, 1) |
+ FIELD_PREP(HAL_TCL_DATA_CMD_INFO1_UDP4_CKSUM_EN, 1) |
+ FIELD_PREP(HAL_TCL_DATA_CMD_INFO1_UDP6_CKSUM_EN, 1) |
+ FIELD_PREP(HAL_TCL_DATA_CMD_INFO1_TCP4_CKSUM_EN, 1) |
+ FIELD_PREP(HAL_TCL_DATA_CMD_INFO1_TCP6_CKSUM_EN, 1);
+ }
+
+ if (ieee80211_vif_is_mesh(arvif->vif))
+ ti.enable_mesh = true;
+
+ ti.flags1 |= FIELD_PREP(HAL_TCL_DATA_CMD_INFO2_TID_OVERWRITE, 1);
+
+ ti.tid = ath11k_dp_tx_get_tid(skb);
+
+ switch (ti.encap_type) {
+ case HAL_TCL_ENCAP_TYPE_NATIVE_WIFI:
+ ath11k_dp_tx_encap_nwifi(skb);
+ break;
+ case HAL_TCL_ENCAP_TYPE_RAW:
+ if (!test_bit(ATH11K_FLAG_RAW_MODE, &ab->dev_flags)) {
+ ret = -EINVAL;
+ goto fail_remove_idr;
+ }
+ break;
+ case HAL_TCL_ENCAP_TYPE_ETHERNET:
+ /* no need to encap */
+ break;
+ case HAL_TCL_ENCAP_TYPE_802_3:
+ default:
+ /* TODO: Take care of other encap modes as well */
+ ret = -EINVAL;
+ atomic_inc(&ab->soc_stats.tx_err.misc_fail);
+ goto fail_remove_idr;
+ }
+
+ ti.paddr = dma_map_single(ab->dev, skb->data, skb->len, DMA_TO_DEVICE);
+ if (unlikely(dma_mapping_error(ab->dev, ti.paddr))) {
+ atomic_inc(&ab->soc_stats.tx_err.misc_fail);
+ ath11k_warn(ab, "failed to DMA map data Tx buffer\n");
+ ret = -ENOMEM;
+ goto fail_remove_idr;
+ }
+
+ ti.data_len = skb->len;
+ skb_cb->paddr = ti.paddr;
+ skb_cb->vif = arvif->vif;
+ skb_cb->ar = ar;
+
+ hal_ring_id = tx_ring->tcl_data_ring.ring_id;
+ tcl_ring = &ab->hal.srng_list[hal_ring_id];
+
+ spin_lock_bh(&tcl_ring->lock);
+
+ ath11k_hal_srng_access_begin(ab, tcl_ring);
+
+ hal_tcl_desc = (void *)ath11k_hal_srng_src_get_next_entry(ab, tcl_ring);
+ if (unlikely(!hal_tcl_desc)) {
+		/* NOTE: It is highly unlikely we run out of tcl_ring
+		 * descriptors, since they are enqueued directly onto the
+		 * hw queue.
+		 */
+ ath11k_hal_srng_access_end(ab, tcl_ring);
+ ab->soc_stats.tx_err.desc_na[ti.ring_id]++;
+ spin_unlock_bh(&tcl_ring->lock);
+ ret = -ENOMEM;
+
+		/* When a tcl ring is full, checking another ring for
+		 * available descriptors is cheaper than checking this ring
+		 * up front for every packet tx.
+		 * Restart ring selection if some rings have not been
+		 * checked yet.
+		 */
+		if (unlikely(ring_map != (BIT(ab->hw_params.max_tx_ring) - 1)) &&
+ ab->hw_params.tcl_ring_retry && ab->hw_params.max_tx_ring > 1) {
+ tcl_ring_retry = true;
+ ring_selector++;
+ }
+
+ goto fail_unmap_dma;
+ }
+
+ ath11k_hal_tx_cmd_desc_setup(ab, hal_tcl_desc +
+ sizeof(struct hal_tlv_hdr), &ti);
+
+ ath11k_hal_srng_access_end(ab, tcl_ring);
+
+ ath11k_dp_shadow_start_timer(ab, tcl_ring, &dp->tx_ring_timer[ti.ring_id]);
+
+ spin_unlock_bh(&tcl_ring->lock);
+
+ ath11k_dbg_dump(ab, ATH11K_DBG_DP_TX, NULL, "dp tx msdu: ",
+ skb->data, skb->len);
+
+ atomic_inc(&ar->dp.num_tx_pending);
+
+ return 0;
+
+fail_unmap_dma:
+ dma_unmap_single(ab->dev, ti.paddr, ti.data_len, DMA_TO_DEVICE);
+
+fail_remove_idr:
+ spin_lock_bh(&tx_ring->tx_idr_lock);
+ idr_remove(&tx_ring->txbuf_idr,
+ FIELD_GET(DP_TX_DESC_ID_MSDU_ID, ti.desc_id));
+ spin_unlock_bh(&tx_ring->tx_idr_lock);
+
+ if (tcl_ring_retry)
+ goto tcl_ring_sel;
+
+ return ret;
+}
+
+static void ath11k_dp_tx_free_txbuf(struct ath11k_base *ab, u8 mac_id,
+ int msdu_id,
+ struct dp_tx_ring *tx_ring)
+{
+ struct ath11k *ar;
+ struct sk_buff *msdu;
+ struct ath11k_skb_cb *skb_cb;
+
+ spin_lock(&tx_ring->tx_idr_lock);
+ msdu = idr_remove(&tx_ring->txbuf_idr, msdu_id);
+ spin_unlock(&tx_ring->tx_idr_lock);
+
+ if (unlikely(!msdu)) {
+ ath11k_warn(ab, "tx completion for unknown msdu_id %d\n",
+ msdu_id);
+ return;
+ }
+
+ skb_cb = ATH11K_SKB_CB(msdu);
+
+ dma_unmap_single(ab->dev, skb_cb->paddr, msdu->len, DMA_TO_DEVICE);
+ dev_kfree_skb_any(msdu);
+
+ ar = ab->pdevs[mac_id].ar;
+ if (atomic_dec_and_test(&ar->dp.num_tx_pending))
+ wake_up(&ar->dp.tx_empty_waitq);
+}
+
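+/* Complete an msdu whose tx status was reported by the fw through the
+ * HTT WBM path rather than by the TQM: unmap the buffer, fill in the
+ * ack status and report the frame to mac80211.
+ */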
+static void
+ath11k_dp_tx_htt_tx_complete_buf(struct ath11k_base *ab,
+ struct dp_tx_ring *tx_ring,
+ struct ath11k_dp_htt_wbm_tx_status *ts)
+{
+ struct ieee80211_tx_status status = { 0 };
+ struct sk_buff *msdu;
+ struct ieee80211_tx_info *info;
+ struct ath11k_skb_cb *skb_cb;
+ struct ath11k *ar;
+ struct ath11k_peer *peer;
+
+ spin_lock(&tx_ring->tx_idr_lock);
+ msdu = idr_remove(&tx_ring->txbuf_idr, ts->msdu_id);
+ spin_unlock(&tx_ring->tx_idr_lock);
+
+ if (unlikely(!msdu)) {
+ ath11k_warn(ab, "htt tx completion for unknown msdu_id %d\n",
+ ts->msdu_id);
+ return;
+ }
+
+ skb_cb = ATH11K_SKB_CB(msdu);
+ info = IEEE80211_SKB_CB(msdu);
+
+ ar = skb_cb->ar;
+
+ if (atomic_dec_and_test(&ar->dp.num_tx_pending))
+ wake_up(&ar->dp.tx_empty_waitq);
+
+ dma_unmap_single(ab->dev, skb_cb->paddr, msdu->len, DMA_TO_DEVICE);
+
+ if (!skb_cb->vif) {
+ ieee80211_free_txskb(ar->hw, msdu);
+ return;
+ }
+
+ memset(&info->status, 0, sizeof(info->status));
+
+ if (ts->acked) {
+ if (!(info->flags & IEEE80211_TX_CTL_NO_ACK)) {
+ info->flags |= IEEE80211_TX_STAT_ACK;
+ info->status.ack_signal = ATH11K_DEFAULT_NOISE_FLOOR +
+ ts->ack_rssi;
+ info->status.flags |=
+ IEEE80211_TX_STATUS_ACK_SIGNAL_VALID;
+ } else {
+ info->flags |= IEEE80211_TX_STAT_NOACK_TRANSMITTED;
+ }
+ }
+
+ spin_lock_bh(&ab->base_lock);
+ peer = ath11k_peer_find_by_id(ab, ts->peer_id);
+ if (!peer || !peer->sta) {
+ ath11k_dbg(ab, ATH11K_DBG_DATA,
+ "dp_tx: failed to find the peer with peer_id %d\n",
+ ts->peer_id);
+ spin_unlock_bh(&ab->base_lock);
+ ieee80211_free_txskb(ar->hw, msdu);
+ return;
+ }
+ spin_unlock_bh(&ab->base_lock);
+
+ status.sta = peer->sta;
+ status.info = info;
+ status.skb = msdu;
+
+ ieee80211_tx_status_ext(ar->hw, &status);
+}
+
+static void
+ath11k_dp_tx_process_htt_tx_complete(struct ath11k_base *ab,
+ void *desc, u8 mac_id,
+ u32 msdu_id, struct dp_tx_ring *tx_ring)
+{
+ struct htt_tx_wbm_completion *status_desc;
+ struct ath11k_dp_htt_wbm_tx_status ts = {0};
+ enum hal_wbm_htt_tx_comp_status wbm_status;
+
+ status_desc = desc + HTT_TX_WBM_COMP_STATUS_OFFSET;
+
+ wbm_status = FIELD_GET(HTT_TX_WBM_COMP_INFO0_STATUS,
+ status_desc->info0);
+ switch (wbm_status) {
+ case HAL_WBM_REL_HTT_TX_COMP_STATUS_OK:
+ case HAL_WBM_REL_HTT_TX_COMP_STATUS_DROP:
+ case HAL_WBM_REL_HTT_TX_COMP_STATUS_TTL:
+ ts.acked = (wbm_status == HAL_WBM_REL_HTT_TX_COMP_STATUS_OK);
+ ts.msdu_id = msdu_id;
+ ts.ack_rssi = FIELD_GET(HTT_TX_WBM_COMP_INFO1_ACK_RSSI,
+ status_desc->info1);
+
+ if (FIELD_GET(HTT_TX_WBM_COMP_INFO2_VALID, status_desc->info2))
+ ts.peer_id = FIELD_GET(HTT_TX_WBM_COMP_INFO2_SW_PEER_ID,
+ status_desc->info2);
+ else
+ ts.peer_id = HTT_INVALID_PEER_ID;
+
+ ath11k_dp_tx_htt_tx_complete_buf(ab, tx_ring, &ts);
+
+ break;
+ case HAL_WBM_REL_HTT_TX_COMP_STATUS_REINJ:
+ case HAL_WBM_REL_HTT_TX_COMP_STATUS_INSPECT:
+ ath11k_dp_tx_free_txbuf(ab, mac_id, msdu_id, tx_ring);
+ break;
+ case HAL_WBM_REL_HTT_TX_COMP_STATUS_MEC_NOTIFY:
+ /* This event is to be handled only when the driver decides to
+ * use WDS offload functionality.
+ */
+ break;
+ default:
+ ath11k_warn(ab, "Unknown htt tx status %d\n", wbm_status);
+ break;
+ }
+}
+
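+/* Accumulate per-peer retry and failure counters from the transmit try
+ * count reported in the tx status.
+ */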
+static void ath11k_dp_tx_cache_peer_stats(struct ath11k *ar,
+ struct sk_buff *msdu,
+ struct hal_tx_status *ts)
+{
+ struct ath11k_per_peer_tx_stats *peer_stats = &ar->cached_stats;
+
+ if (ts->try_cnt > 1) {
+ peer_stats->retry_pkts += ts->try_cnt - 1;
+ peer_stats->retry_bytes += (ts->try_cnt - 1) * msdu->len;
+
+ if (ts->status != HAL_WBM_TQM_REL_REASON_FRAME_ACKED) {
+ peer_stats->failed_pkts += 1;
+ peer_stats->failed_bytes += msdu->len;
+ }
+ }
+}
+
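+/* Translate the hal rate_stats word of a tx completion into the
+ * station's txrate info (legacy/HT/VHT/HE rate, GI, bandwidth and RU
+ * allocation).
+ */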
+void ath11k_dp_tx_update_txcompl(struct ath11k *ar, struct hal_tx_status *ts)
+{
+ struct ath11k_base *ab = ar->ab;
+ struct ath11k_per_peer_tx_stats *peer_stats = &ar->cached_stats;
+ enum hal_tx_rate_stats_pkt_type pkt_type;
+ enum hal_tx_rate_stats_sgi sgi;
+ enum hal_tx_rate_stats_bw bw;
+ struct ath11k_peer *peer;
+ struct ath11k_sta *arsta;
+ struct ieee80211_sta *sta;
+ u16 rate, ru_tones;
+ u8 mcs, rate_idx = 0, ofdma;
+ int ret;
+
+ spin_lock_bh(&ab->base_lock);
+ peer = ath11k_peer_find_by_id(ab, ts->peer_id);
+ if (!peer || !peer->sta) {
+ ath11k_dbg(ab, ATH11K_DBG_DP_TX,
+ "failed to find the peer by id %u\n", ts->peer_id);
+ goto err_out;
+ }
+
+ sta = peer->sta;
+ arsta = (struct ath11k_sta *)sta->drv_priv;
+
+ memset(&arsta->txrate, 0, sizeof(arsta->txrate));
+ pkt_type = FIELD_GET(HAL_TX_RATE_STATS_INFO0_PKT_TYPE,
+ ts->rate_stats);
+ mcs = FIELD_GET(HAL_TX_RATE_STATS_INFO0_MCS,
+ ts->rate_stats);
+ sgi = FIELD_GET(HAL_TX_RATE_STATS_INFO0_SGI,
+ ts->rate_stats);
+ bw = FIELD_GET(HAL_TX_RATE_STATS_INFO0_BW, ts->rate_stats);
+ ru_tones = FIELD_GET(HAL_TX_RATE_STATS_INFO0_TONES_IN_RU, ts->rate_stats);
+ ofdma = FIELD_GET(HAL_TX_RATE_STATS_INFO0_OFDMA_TX, ts->rate_stats);
+
+	/* Prefer the real NSS value from arsta->last_txrate.nss; if it is
+	 * invalid, fall back to the NSS value negotiated during assoc.
+	 */
+ if (arsta->last_txrate.nss)
+ arsta->txrate.nss = arsta->last_txrate.nss;
+ else
+ arsta->txrate.nss = arsta->peer_nss;
+
+ if (pkt_type == HAL_TX_RATE_STATS_PKT_TYPE_11A ||
+ pkt_type == HAL_TX_RATE_STATS_PKT_TYPE_11B) {
+ ret = ath11k_mac_hw_ratecode_to_legacy_rate(mcs,
+ pkt_type,
+ &rate_idx,
+ &rate);
+ if (ret < 0)
+ goto err_out;
+ arsta->txrate.legacy = rate;
+ } else if (pkt_type == HAL_TX_RATE_STATS_PKT_TYPE_11N) {
+ if (mcs > 7) {
+ ath11k_warn(ab, "Invalid HT mcs index %d\n", mcs);
+ goto err_out;
+ }
+
+ if (arsta->txrate.nss != 0)
+ arsta->txrate.mcs = mcs + 8 * (arsta->txrate.nss - 1);
+ arsta->txrate.flags = RATE_INFO_FLAGS_MCS;
+ if (sgi)
+ arsta->txrate.flags |= RATE_INFO_FLAGS_SHORT_GI;
+ } else if (pkt_type == HAL_TX_RATE_STATS_PKT_TYPE_11AC) {
+ if (mcs > 9) {
+ ath11k_warn(ab, "Invalid VHT mcs index %d\n", mcs);
+ goto err_out;
+ }
+
+ arsta->txrate.mcs = mcs;
+ arsta->txrate.flags = RATE_INFO_FLAGS_VHT_MCS;
+ if (sgi)
+ arsta->txrate.flags |= RATE_INFO_FLAGS_SHORT_GI;
+ } else if (pkt_type == HAL_TX_RATE_STATS_PKT_TYPE_11AX) {
+ if (mcs > 11) {
+ ath11k_warn(ab, "Invalid HE mcs index %d\n", mcs);
+ goto err_out;
+ }
+
+ arsta->txrate.mcs = mcs;
+ arsta->txrate.flags = RATE_INFO_FLAGS_HE_MCS;
+ arsta->txrate.he_gi = ath11k_mac_he_gi_to_nl80211_he_gi(sgi);
+ }
+
+ arsta->txrate.bw = ath11k_mac_bw_to_mac80211_bw(bw);
+ if (ofdma && pkt_type == HAL_TX_RATE_STATS_PKT_TYPE_11AX) {
+ arsta->txrate.bw = RATE_INFO_BW_HE_RU;
+ arsta->txrate.he_ru_alloc =
+ ath11k_mac_he_ru_tones_to_nl80211_he_ru_alloc(ru_tones);
+ }
+
+ if (ath11k_debugfs_is_extd_tx_stats_enabled(ar))
+ ath11k_debugfs_sta_add_tx_stats(arsta, peer_stats, rate_idx);
+
+err_out:
+ spin_unlock_bh(&ab->base_lock);
+}
+
+static void ath11k_dp_tx_complete_msdu(struct ath11k *ar,
+ struct sk_buff *msdu,
+ struct hal_tx_status *ts)
+{
+ struct ieee80211_tx_status status = { 0 };
+ struct ieee80211_rate_status status_rate = { 0 };
+ struct ath11k_base *ab = ar->ab;
+ struct ieee80211_tx_info *info;
+ struct ath11k_skb_cb *skb_cb;
+ struct ath11k_peer *peer;
+ struct ath11k_sta *arsta;
+ struct rate_info rate;
+
+ if (WARN_ON_ONCE(ts->buf_rel_source != HAL_WBM_REL_SRC_MODULE_TQM)) {
+ /* Must not happen */
+ return;
+ }
+
+ skb_cb = ATH11K_SKB_CB(msdu);
+
+ dma_unmap_single(ab->dev, skb_cb->paddr, msdu->len, DMA_TO_DEVICE);
+
+ if (unlikely(!rcu_access_pointer(ab->pdevs_active[ar->pdev_idx]))) {
+ ieee80211_free_txskb(ar->hw, msdu);
+ return;
+ }
+
+ if (unlikely(!skb_cb->vif)) {
+ ieee80211_free_txskb(ar->hw, msdu);
+ return;
+ }
+
+ info = IEEE80211_SKB_CB(msdu);
+ memset(&info->status, 0, sizeof(info->status));
+
+	/* skip tx rate update from ieee80211_status */
+ info->status.rates[0].idx = -1;
+
+ if (ts->status == HAL_WBM_TQM_REL_REASON_FRAME_ACKED &&
+ !(info->flags & IEEE80211_TX_CTL_NO_ACK)) {
+ info->flags |= IEEE80211_TX_STAT_ACK;
+ info->status.ack_signal = ATH11K_DEFAULT_NOISE_FLOOR +
+ ts->ack_rssi;
+ info->status.flags |= IEEE80211_TX_STATUS_ACK_SIGNAL_VALID;
+ }
+
+ if (ts->status == HAL_WBM_TQM_REL_REASON_CMD_REMOVE_TX &&
+ (info->flags & IEEE80211_TX_CTL_NO_ACK))
+ info->flags |= IEEE80211_TX_STAT_NOACK_TRANSMITTED;
+
+ if (unlikely(ath11k_debugfs_is_extd_tx_stats_enabled(ar)) ||
+ ab->hw_params.single_pdev_only) {
+ if (ts->flags & HAL_TX_STATUS_FLAGS_FIRST_MSDU) {
+ if (ar->last_ppdu_id == 0) {
+ ar->last_ppdu_id = ts->ppdu_id;
+ } else if (ar->last_ppdu_id == ts->ppdu_id ||
+ ar->cached_ppdu_id == ar->last_ppdu_id) {
+ ar->cached_ppdu_id = ar->last_ppdu_id;
+ ar->cached_stats.is_ampdu = true;
+ ath11k_dp_tx_update_txcompl(ar, ts);
+ memset(&ar->cached_stats, 0,
+ sizeof(struct ath11k_per_peer_tx_stats));
+ } else {
+ ar->cached_stats.is_ampdu = false;
+ ath11k_dp_tx_update_txcompl(ar, ts);
+ memset(&ar->cached_stats, 0,
+ sizeof(struct ath11k_per_peer_tx_stats));
+ }
+ ar->last_ppdu_id = ts->ppdu_id;
+ }
+
+ ath11k_dp_tx_cache_peer_stats(ar, msdu, ts);
+ }
+
+ spin_lock_bh(&ab->base_lock);
+ peer = ath11k_peer_find_by_id(ab, ts->peer_id);
+ if (!peer || !peer->sta) {
+ ath11k_dbg(ab, ATH11K_DBG_DATA,
+ "dp_tx: failed to find the peer with peer_id %d\n",
+ ts->peer_id);
+ spin_unlock_bh(&ab->base_lock);
+ ieee80211_free_txskb(ar->hw, msdu);
+ return;
+ }
+ arsta = (struct ath11k_sta *)peer->sta->drv_priv;
+ status.sta = peer->sta;
+ status.skb = msdu;
+ status.info = info;
+ rate = arsta->last_txrate;
+
+ status_rate.rate_idx = rate;
+ status_rate.try_count = 1;
+
+ status.rates = &status_rate;
+ status.n_rates = 1;
+
+ spin_unlock_bh(&ab->base_lock);
+
+ ieee80211_tx_status_ext(ar->hw, &status);
+}
+
+static inline void ath11k_dp_tx_status_parse(struct ath11k_base *ab,
+ struct hal_wbm_release_ring *desc,
+ struct hal_tx_status *ts)
+{
+ ts->buf_rel_source =
+ FIELD_GET(HAL_WBM_RELEASE_INFO0_REL_SRC_MODULE, desc->info0);
+ if (unlikely(ts->buf_rel_source != HAL_WBM_REL_SRC_MODULE_FW &&
+ ts->buf_rel_source != HAL_WBM_REL_SRC_MODULE_TQM))
+ return;
+
+ if (unlikely(ts->buf_rel_source == HAL_WBM_REL_SRC_MODULE_FW))
+ return;
+
+ ts->status = FIELD_GET(HAL_WBM_RELEASE_INFO0_TQM_RELEASE_REASON,
+ desc->info0);
+ ts->ppdu_id = FIELD_GET(HAL_WBM_RELEASE_INFO1_TQM_STATUS_NUMBER,
+ desc->info1);
+ ts->try_cnt = FIELD_GET(HAL_WBM_RELEASE_INFO1_TRANSMIT_COUNT,
+ desc->info1);
+ ts->ack_rssi = FIELD_GET(HAL_WBM_RELEASE_INFO2_ACK_FRAME_RSSI,
+ desc->info2);
+ if (desc->info2 & HAL_WBM_RELEASE_INFO2_FIRST_MSDU)
+ ts->flags |= HAL_TX_STATUS_FLAGS_FIRST_MSDU;
+ ts->peer_id = FIELD_GET(HAL_WBM_RELEASE_INFO3_PEER_ID, desc->info3);
+ ts->tid = FIELD_GET(HAL_WBM_RELEASE_INFO3_TID, desc->info3);
+ if (desc->rate_stats.info0 & HAL_TX_RATE_STATS_INFO0_VALID)
+ ts->rate_stats = desc->rate_stats.info0;
+ else
+ ts->rate_stats = 0;
+}
+
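+/* Tx completion handler. Completions are first copied from the hal ring
+ * into the per-ring tx_status FIFO under the srng lock and then parsed
+ * and completed outside of it, keeping the lock hold time short.
+ */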
+void ath11k_dp_tx_completion_handler(struct ath11k_base *ab, int ring_id)
+{
+ struct ath11k *ar;
+ struct ath11k_dp *dp = &ab->dp;
+ int hal_ring_id = dp->tx_ring[ring_id].tcl_comp_ring.ring_id;
+ struct hal_srng *status_ring = &ab->hal.srng_list[hal_ring_id];
+ struct sk_buff *msdu;
+ struct hal_tx_status ts = { 0 };
+ struct dp_tx_ring *tx_ring = &dp->tx_ring[ring_id];
+ u32 *desc;
+ u32 msdu_id;
+ u8 mac_id;
+
+ spin_lock_bh(&status_ring->lock);
+
+ ath11k_hal_srng_access_begin(ab, status_ring);
+
+ while ((ATH11K_TX_COMPL_NEXT(tx_ring->tx_status_head) !=
+ tx_ring->tx_status_tail) &&
+ (desc = ath11k_hal_srng_dst_get_next_entry(ab, status_ring))) {
+ memcpy(&tx_ring->tx_status[tx_ring->tx_status_head],
+ desc, sizeof(struct hal_wbm_release_ring));
+ tx_ring->tx_status_head =
+ ATH11K_TX_COMPL_NEXT(tx_ring->tx_status_head);
+ }
+
+ if (unlikely((ath11k_hal_srng_dst_peek(ab, status_ring) != NULL) &&
+ (ATH11K_TX_COMPL_NEXT(tx_ring->tx_status_head) ==
+ tx_ring->tx_status_tail))) {
+ /* TODO: Process pending tx_status messages when kfifo_is_full() */
+ ath11k_warn(ab, "Unable to process some of the tx_status ring desc because status_fifo is full\n");
+ }
+
+ ath11k_hal_srng_access_end(ab, status_ring);
+
+ spin_unlock_bh(&status_ring->lock);
+
+ while (ATH11K_TX_COMPL_NEXT(tx_ring->tx_status_tail) != tx_ring->tx_status_head) {
+ struct hal_wbm_release_ring *tx_status;
+ u32 desc_id;
+
+ tx_ring->tx_status_tail =
+ ATH11K_TX_COMPL_NEXT(tx_ring->tx_status_tail);
+ tx_status = &tx_ring->tx_status[tx_ring->tx_status_tail];
+ ath11k_dp_tx_status_parse(ab, tx_status, &ts);
+
+ desc_id = FIELD_GET(BUFFER_ADDR_INFO1_SW_COOKIE,
+ tx_status->buf_addr_info.info1);
+ mac_id = FIELD_GET(DP_TX_DESC_ID_MAC_ID, desc_id);
+ msdu_id = FIELD_GET(DP_TX_DESC_ID_MSDU_ID, desc_id);
+
+ if (unlikely(ts.buf_rel_source == HAL_WBM_REL_SRC_MODULE_FW)) {
+ ath11k_dp_tx_process_htt_tx_complete(ab,
+ (void *)tx_status,
+ mac_id, msdu_id,
+ tx_ring);
+ continue;
+ }
+
+ spin_lock(&tx_ring->tx_idr_lock);
+ msdu = idr_remove(&tx_ring->txbuf_idr, msdu_id);
+ if (unlikely(!msdu)) {
+ ath11k_warn(ab, "tx completion for unknown msdu_id %d\n",
+ msdu_id);
+ spin_unlock(&tx_ring->tx_idr_lock);
+ continue;
+ }
+
+ spin_unlock(&tx_ring->tx_idr_lock);
+
+ ar = ab->pdevs[mac_id].ar;
+
+ if (atomic_dec_and_test(&ar->dp.num_tx_pending))
+ wake_up(&ar->dp.tx_empty_waitq);
+
+ ath11k_dp_tx_complete_msdu(ar, msdu, &ts);
+ }
+}
+
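+/* Issue a REO command. When a completion callback is supplied, the
+ * command is tracked on reo_cmd_list so the callback can be invoked
+ * once the command status arrives.
+ */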
+int ath11k_dp_tx_send_reo_cmd(struct ath11k_base *ab, struct dp_rx_tid *rx_tid,
+ enum hal_reo_cmd_type type,
+ struct ath11k_hal_reo_cmd *cmd,
+ void (*cb)(struct ath11k_dp *, void *,
+ enum hal_reo_cmd_status))
+{
+ struct ath11k_dp *dp = &ab->dp;
+ struct dp_reo_cmd *dp_cmd;
+ struct hal_srng *cmd_ring;
+ int cmd_num;
+
+ if (test_bit(ATH11K_FLAG_CRASH_FLUSH, &ab->dev_flags))
+ return -ESHUTDOWN;
+
+ cmd_ring = &ab->hal.srng_list[dp->reo_cmd_ring.ring_id];
+ cmd_num = ath11k_hal_reo_cmd_send(ab, cmd_ring, type, cmd);
+
+	/* cmd_num starts from 1; on failure, return the error code */
+ if (cmd_num < 0)
+ return cmd_num;
+
+	/* reo cmd ring descriptors have cmd_num starting from 1 */
+ if (cmd_num == 0)
+ return -EINVAL;
+
+ if (!cb)
+ return 0;
+
+	/* Can this be optimized so that the pending command list is kept
+	 * only for the tid delete command, freeing the resource on the
+	 * command status indication?
+	 */
+ dp_cmd = kzalloc(sizeof(*dp_cmd), GFP_ATOMIC);
+
+ if (!dp_cmd)
+ return -ENOMEM;
+
+ memcpy(&dp_cmd->data, rx_tid, sizeof(struct dp_rx_tid));
+ dp_cmd->cmd_num = cmd_num;
+ dp_cmd->handler = cb;
+
+ spin_lock_bh(&dp->reo_cmd_lock);
+ list_add_tail(&dp_cmd->list, &dp->reo_cmd_list);
+ spin_unlock_bh(&dp->reo_cmd_lock);
+
+ return 0;
+}
+
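+/* Map a hal ring type and ring id onto the HTT ring type and ring id
+ * the fw expects in srng setup and rx filter configuration messages.
+ */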
+static int
+ath11k_dp_tx_get_ring_id_type(struct ath11k_base *ab,
+ int mac_id, u32 ring_id,
+ enum hal_ring_type ring_type,
+ enum htt_srng_ring_type *htt_ring_type,
+ enum htt_srng_ring_id *htt_ring_id)
+{
+ int lmac_ring_id_offset = 0;
+ int ret = 0;
+
+ switch (ring_type) {
+ case HAL_RXDMA_BUF:
+ lmac_ring_id_offset = mac_id * HAL_SRNG_RINGS_PER_LMAC;
+
+		/* for QCA6390, the host provides rx buffers to the fw and
+		 * the fw fills the rxbuf ring for each rxdma
+		 */
+ if (!ab->hw_params.rx_mac_buf_ring) {
+ if (!(ring_id == (HAL_SRNG_RING_ID_WMAC1_SW2RXDMA0_BUF +
+ lmac_ring_id_offset) ||
+ ring_id == (HAL_SRNG_RING_ID_WMAC1_SW2RXDMA1_BUF +
+ lmac_ring_id_offset))) {
+ ret = -EINVAL;
+ }
+ *htt_ring_id = HTT_RXDMA_HOST_BUF_RING;
+ *htt_ring_type = HTT_SW_TO_HW_RING;
+ } else {
+ if (ring_id == HAL_SRNG_RING_ID_WMAC1_SW2RXDMA0_BUF) {
+ *htt_ring_id = HTT_HOST1_TO_FW_RXBUF_RING;
+ *htt_ring_type = HTT_SW_TO_SW_RING;
+ } else {
+ *htt_ring_id = HTT_RXDMA_HOST_BUF_RING;
+ *htt_ring_type = HTT_SW_TO_HW_RING;
+ }
+ }
+ break;
+ case HAL_RXDMA_DST:
+ *htt_ring_id = HTT_RXDMA_NON_MONITOR_DEST_RING;
+ *htt_ring_type = HTT_HW_TO_SW_RING;
+ break;
+ case HAL_RXDMA_MONITOR_BUF:
+ *htt_ring_id = HTT_RXDMA_MONITOR_BUF_RING;
+ *htt_ring_type = HTT_SW_TO_HW_RING;
+ break;
+ case HAL_RXDMA_MONITOR_STATUS:
+ *htt_ring_id = HTT_RXDMA_MONITOR_STATUS_RING;
+ *htt_ring_type = HTT_SW_TO_HW_RING;
+ break;
+ case HAL_RXDMA_MONITOR_DST:
+ *htt_ring_id = HTT_RXDMA_MONITOR_DEST_RING;
+ *htt_ring_type = HTT_HW_TO_SW_RING;
+ break;
+ case HAL_RXDMA_MONITOR_DESC:
+ *htt_ring_id = HTT_RXDMA_MONITOR_DESC_RING;
+ *htt_ring_type = HTT_SW_TO_HW_RING;
+ break;
+ default:
+ ath11k_warn(ab, "Unsupported ring type in DP :%d\n", ring_type);
+ ret = -EINVAL;
+ }
+ return ret;
+}
+
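+/* Describe a host srng to the fw via an HTT_H2T_MSG_TYPE_SRING_SETUP
+ * message carrying the ring's base address, entry size, head/tail
+ * pointer addresses and MSI parameters.
+ */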
+int ath11k_dp_tx_htt_srng_setup(struct ath11k_base *ab, u32 ring_id,
+ int mac_id, enum hal_ring_type ring_type)
+{
+ struct htt_srng_setup_cmd *cmd;
+ struct hal_srng *srng = &ab->hal.srng_list[ring_id];
+ struct hal_srng_params params;
+ struct sk_buff *skb;
+ u32 ring_entry_sz;
+ int len = sizeof(*cmd);
+ dma_addr_t hp_addr, tp_addr;
+ enum htt_srng_ring_type htt_ring_type;
+ enum htt_srng_ring_id htt_ring_id;
+ int ret;
+
+ skb = ath11k_htc_alloc_skb(ab, len);
+ if (!skb)
+ return -ENOMEM;
+
+ memset(&params, 0, sizeof(params));
+ ath11k_hal_srng_get_params(ab, srng, &params);
+
+ hp_addr = ath11k_hal_srng_get_hp_addr(ab, srng);
+ tp_addr = ath11k_hal_srng_get_tp_addr(ab, srng);
+
+ ret = ath11k_dp_tx_get_ring_id_type(ab, mac_id, ring_id,
+ ring_type, &htt_ring_type,
+ &htt_ring_id);
+ if (ret)
+ goto err_free;
+
+ skb_put(skb, len);
+ cmd = (struct htt_srng_setup_cmd *)skb->data;
+ cmd->info0 = FIELD_PREP(HTT_SRNG_SETUP_CMD_INFO0_MSG_TYPE,
+ HTT_H2T_MSG_TYPE_SRING_SETUP);
+ if (htt_ring_type == HTT_SW_TO_HW_RING ||
+ htt_ring_type == HTT_HW_TO_SW_RING)
+ cmd->info0 |= FIELD_PREP(HTT_SRNG_SETUP_CMD_INFO0_PDEV_ID,
+ DP_SW2HW_MACID(mac_id));
+ else
+ cmd->info0 |= FIELD_PREP(HTT_SRNG_SETUP_CMD_INFO0_PDEV_ID,
+ mac_id);
+ cmd->info0 |= FIELD_PREP(HTT_SRNG_SETUP_CMD_INFO0_RING_TYPE,
+ htt_ring_type);
+ cmd->info0 |= FIELD_PREP(HTT_SRNG_SETUP_CMD_INFO0_RING_ID, htt_ring_id);
+
+ cmd->ring_base_addr_lo = params.ring_base_paddr &
+ HAL_ADDR_LSB_REG_MASK;
+
+ cmd->ring_base_addr_hi = (u64)params.ring_base_paddr >>
+ HAL_ADDR_MSB_REG_SHIFT;
+
+ ret = ath11k_hal_srng_get_entrysize(ab, ring_type);
+ if (ret < 0)
+ goto err_free;
+
+ ring_entry_sz = ret;
+
+ ring_entry_sz >>= 2;
+ cmd->info1 = FIELD_PREP(HTT_SRNG_SETUP_CMD_INFO1_RING_ENTRY_SIZE,
+ ring_entry_sz);
+ cmd->info1 |= FIELD_PREP(HTT_SRNG_SETUP_CMD_INFO1_RING_SIZE,
+ params.num_entries * ring_entry_sz);
+ cmd->info1 |= FIELD_PREP(HTT_SRNG_SETUP_CMD_INFO1_RING_FLAGS_MSI_SWAP,
+ !!(params.flags & HAL_SRNG_FLAGS_MSI_SWAP));
+ cmd->info1 |= FIELD_PREP(
+ HTT_SRNG_SETUP_CMD_INFO1_RING_FLAGS_TLV_SWAP,
+ !!(params.flags & HAL_SRNG_FLAGS_DATA_TLV_SWAP));
+ cmd->info1 |= FIELD_PREP(
+ HTT_SRNG_SETUP_CMD_INFO1_RING_FLAGS_HOST_FW_SWAP,
+ !!(params.flags & HAL_SRNG_FLAGS_RING_PTR_SWAP));
+ if (htt_ring_type == HTT_SW_TO_HW_RING)
+ cmd->info1 |= HTT_SRNG_SETUP_CMD_INFO1_RING_LOOP_CNT_DIS;
+
+ cmd->ring_head_off32_remote_addr_lo = hp_addr & HAL_ADDR_LSB_REG_MASK;
+ cmd->ring_head_off32_remote_addr_hi = (u64)hp_addr >>
+ HAL_ADDR_MSB_REG_SHIFT;
+
+ cmd->ring_tail_off32_remote_addr_lo = tp_addr & HAL_ADDR_LSB_REG_MASK;
+ cmd->ring_tail_off32_remote_addr_hi = (u64)tp_addr >>
+ HAL_ADDR_MSB_REG_SHIFT;
+
+ cmd->ring_msi_addr_lo = lower_32_bits(params.msi_addr);
+ cmd->ring_msi_addr_hi = upper_32_bits(params.msi_addr);
+ cmd->msi_data = params.msi_data;
+
+ cmd->intr_info = FIELD_PREP(
+ HTT_SRNG_SETUP_CMD_INTR_INFO_BATCH_COUNTER_THRESH,
+ params.intr_batch_cntr_thres_entries * ring_entry_sz);
+ cmd->intr_info |= FIELD_PREP(
+ HTT_SRNG_SETUP_CMD_INTR_INFO_INTR_TIMER_THRESH,
+ params.intr_timer_thres_us >> 3);
+
+ cmd->info2 = 0;
+ if (params.flags & HAL_SRNG_FLAGS_LOW_THRESH_INTR_EN) {
+ cmd->info2 = FIELD_PREP(
+ HTT_SRNG_SETUP_CMD_INFO2_INTR_LOW_THRESH,
+ params.low_threshold);
+ }
+
+ ath11k_dbg(ab, ATH11K_DBG_DP_TX,
+ "htt srng setup msi_addr_lo 0x%x msi_addr_hi 0x%x msi_data 0x%x ring_id %d ring_type %d intr_info 0x%x flags 0x%x\n",
+ cmd->ring_msi_addr_lo, cmd->ring_msi_addr_hi,
+ cmd->msi_data, ring_id, ring_type, cmd->intr_info, cmd->info2);
+
+ ret = ath11k_htc_send(&ab->htc, ab->dp.eid, skb);
+ if (ret)
+ goto err_free;
+
+ return 0;
+
+err_free:
+ dev_kfree_skb_any(skb);
+
+ return ret;
+}
+
+#define HTT_TARGET_VERSION_TIMEOUT_HZ (3 * HZ)
+
+int ath11k_dp_tx_htt_h2t_ver_req_msg(struct ath11k_base *ab)
+{
+ struct ath11k_dp *dp = &ab->dp;
+ struct sk_buff *skb;
+ struct htt_ver_req_cmd *cmd;
+ int len = sizeof(*cmd);
+ int ret;
+
+ init_completion(&dp->htt_tgt_version_received);
+
+ skb = ath11k_htc_alloc_skb(ab, len);
+ if (!skb)
+ return -ENOMEM;
+
+ skb_put(skb, len);
+ cmd = (struct htt_ver_req_cmd *)skb->data;
+ cmd->ver_reg_info = FIELD_PREP(HTT_VER_REQ_INFO_MSG_ID,
+ HTT_H2T_MSG_TYPE_VERSION_REQ);
+
+ ret = ath11k_htc_send(&ab->htc, dp->eid, skb);
+ if (ret) {
+ dev_kfree_skb_any(skb);
+ return ret;
+ }
+
+ ret = wait_for_completion_timeout(&dp->htt_tgt_version_received,
+ HTT_TARGET_VERSION_TIMEOUT_HZ);
+ if (ret == 0) {
+ ath11k_warn(ab, "htt target version request timed out\n");
+ return -ETIMEDOUT;
+ }
+
+ if (dp->htt_tgt_ver_major != HTT_TARGET_VERSION_MAJOR) {
+ ath11k_err(ab, "unsupported htt major version %d supported version is %d\n",
+ dp->htt_tgt_ver_major, HTT_TARGET_VERSION_MAJOR);
+ return -ENOTSUPP;
+ }
+
+ return 0;
+}
+
+int ath11k_dp_tx_htt_h2t_ppdu_stats_req(struct ath11k *ar, u32 mask)
+{
+ struct ath11k_base *ab = ar->ab;
+ struct ath11k_dp *dp = &ab->dp;
+ struct sk_buff *skb;
+ struct htt_ppdu_stats_cfg_cmd *cmd;
+ int len = sizeof(*cmd);
+ u8 pdev_mask;
+ int ret;
+ int i;
+
+ for (i = 0; i < ab->hw_params.num_rxmda_per_pdev; i++) {
+ skb = ath11k_htc_alloc_skb(ab, len);
+ if (!skb)
+ return -ENOMEM;
+
+ skb_put(skb, len);
+ cmd = (struct htt_ppdu_stats_cfg_cmd *)skb->data;
+ cmd->msg = FIELD_PREP(HTT_PPDU_STATS_CFG_MSG_TYPE,
+ HTT_H2T_MSG_TYPE_PPDU_STATS_CFG);
+
+ pdev_mask = 1 << (ar->pdev_idx + i);
+ cmd->msg |= FIELD_PREP(HTT_PPDU_STATS_CFG_PDEV_ID, pdev_mask);
+ cmd->msg |= FIELD_PREP(HTT_PPDU_STATS_CFG_TLV_TYPE_BITMASK, mask);
+
+ ret = ath11k_htc_send(&ab->htc, dp->eid, skb);
+ if (ret) {
+ dev_kfree_skb_any(skb);
+ return ret;
+ }
+ }
+
+ return 0;
+}
+
+int ath11k_dp_tx_htt_rx_filter_setup(struct ath11k_base *ab, u32 ring_id,
+ int mac_id, enum hal_ring_type ring_type,
+ int rx_buf_size,
+ struct htt_rx_ring_tlv_filter *tlv_filter)
+{
+ struct htt_rx_ring_selection_cfg_cmd *cmd;
+ struct hal_srng *srng = &ab->hal.srng_list[ring_id];
+ struct hal_srng_params params;
+ struct sk_buff *skb;
+ int len = sizeof(*cmd);
+ enum htt_srng_ring_type htt_ring_type;
+ enum htt_srng_ring_id htt_ring_id;
+ int ret;
+
+ skb = ath11k_htc_alloc_skb(ab, len);
+ if (!skb)
+ return -ENOMEM;
+
+ memset(&params, 0, sizeof(params));
+ ath11k_hal_srng_get_params(ab, srng, &params);
+
+ ret = ath11k_dp_tx_get_ring_id_type(ab, mac_id, ring_id,
+ ring_type, &htt_ring_type,
+ &htt_ring_id);
+ if (ret)
+ goto err_free;
+
+ skb_put(skb, len);
+ cmd = (struct htt_rx_ring_selection_cfg_cmd *)skb->data;
+ cmd->info0 = FIELD_PREP(HTT_RX_RING_SELECTION_CFG_CMD_INFO0_MSG_TYPE,
+ HTT_H2T_MSG_TYPE_RX_RING_SELECTION_CFG);
+ if (htt_ring_type == HTT_SW_TO_HW_RING ||
+ htt_ring_type == HTT_HW_TO_SW_RING)
+ cmd->info0 |=
+ FIELD_PREP(HTT_RX_RING_SELECTION_CFG_CMD_INFO0_PDEV_ID,
+ DP_SW2HW_MACID(mac_id));
+ else
+ cmd->info0 |=
+ FIELD_PREP(HTT_RX_RING_SELECTION_CFG_CMD_INFO0_PDEV_ID,
+ mac_id);
+ cmd->info0 |= FIELD_PREP(HTT_RX_RING_SELECTION_CFG_CMD_INFO0_RING_ID,
+ htt_ring_id);
+ cmd->info0 |= FIELD_PREP(HTT_RX_RING_SELECTION_CFG_CMD_INFO0_SS,
+ !!(params.flags & HAL_SRNG_FLAGS_MSI_SWAP));
+ cmd->info0 |= FIELD_PREP(HTT_RX_RING_SELECTION_CFG_CMD_INFO0_PS,
+ !!(params.flags & HAL_SRNG_FLAGS_DATA_TLV_SWAP));
+
+ cmd->info1 = FIELD_PREP(HTT_RX_RING_SELECTION_CFG_CMD_INFO1_BUF_SIZE,
+ rx_buf_size);
+ cmd->pkt_type_en_flags0 = tlv_filter->pkt_filter_flags0;
+ cmd->pkt_type_en_flags1 = tlv_filter->pkt_filter_flags1;
+ cmd->pkt_type_en_flags2 = tlv_filter->pkt_filter_flags2;
+ cmd->pkt_type_en_flags3 = tlv_filter->pkt_filter_flags3;
+ cmd->rx_filter_tlv = tlv_filter->rx_filter;
+
+ ret = ath11k_htc_send(&ab->htc, ab->dp.eid, skb);
+ if (ret)
+ goto err_free;
+
+ return 0;
+
+err_free:
+ dev_kfree_skb_any(skb);
+
+ return ret;
+}
+
+int
+ath11k_dp_tx_htt_h2t_ext_stats_req(struct ath11k *ar, u8 type,
+ struct htt_ext_stats_cfg_params *cfg_params,
+ u64 cookie)
+{
+ struct ath11k_base *ab = ar->ab;
+ struct ath11k_dp *dp = &ab->dp;
+ struct sk_buff *skb;
+ struct htt_ext_stats_cfg_cmd *cmd;
+ u32 pdev_id;
+ int len = sizeof(*cmd);
+ int ret;
+
+ skb = ath11k_htc_alloc_skb(ab, len);
+ if (!skb)
+ return -ENOMEM;
+
+ skb_put(skb, len);
+
+ cmd = (struct htt_ext_stats_cfg_cmd *)skb->data;
+ memset(cmd, 0, sizeof(*cmd));
+ cmd->hdr.msg_type = HTT_H2T_MSG_TYPE_EXT_STATS_CFG;
+
+ if (ab->hw_params.single_pdev_only)
+ pdev_id = ath11k_mac_get_target_pdev_id(ar);
+ else
+ pdev_id = ar->pdev->pdev_id;
+
+ cmd->hdr.pdev_mask = 1 << pdev_id;
+
+ cmd->hdr.stats_type = type;
+ cmd->cfg_param0 = cfg_params->cfg0;
+ cmd->cfg_param1 = cfg_params->cfg1;
+ cmd->cfg_param2 = cfg_params->cfg2;
+ cmd->cfg_param3 = cfg_params->cfg3;
+ cmd->cookie_lsb = lower_32_bits(cookie);
+ cmd->cookie_msb = upper_32_bits(cookie);
+
+ ret = ath11k_htc_send(&ab->htc, dp->eid, skb);
+ if (ret) {
+ ath11k_warn(ab, "failed to send htt type stats request: %d",
+ ret);
+ dev_kfree_skb_any(skb);
+ return ret;
+ }
+
+ return 0;
+}
+
+int ath11k_dp_tx_htt_monitor_mode_ring_config(struct ath11k *ar, bool reset)
+{
+ struct ath11k_pdev_dp *dp = &ar->dp;
+ struct ath11k_base *ab = ar->ab;
+ struct htt_rx_ring_tlv_filter tlv_filter = {0};
+ int ret = 0, ring_id = 0, i;
+
+ if (ab->hw_params.full_monitor_mode) {
+ ret = ath11k_dp_tx_htt_rx_full_mon_setup(ab,
+ dp->mac_id, !reset);
+ if (ret < 0) {
+ ath11k_err(ab, "failed to setup full monitor %d\n", ret);
+ return ret;
+ }
+ }
+
+ ring_id = dp->rxdma_mon_buf_ring.refill_buf_ring.ring_id;
+
+ if (!reset) {
+ tlv_filter.rx_filter = HTT_RX_MON_FILTER_TLV_FLAGS_MON_BUF_RING;
+ tlv_filter.pkt_filter_flags0 =
+ HTT_RX_MON_FP_MGMT_FILTER_FLAGS0 |
+ HTT_RX_MON_MO_MGMT_FILTER_FLAGS0;
+ tlv_filter.pkt_filter_flags1 =
+ HTT_RX_MON_FP_MGMT_FILTER_FLAGS1 |
+ HTT_RX_MON_MO_MGMT_FILTER_FLAGS1;
+ tlv_filter.pkt_filter_flags2 =
+ HTT_RX_MON_FP_CTRL_FILTER_FLASG2 |
+ HTT_RX_MON_MO_CTRL_FILTER_FLASG2;
+ tlv_filter.pkt_filter_flags3 =
+ HTT_RX_MON_FP_CTRL_FILTER_FLASG3 |
+ HTT_RX_MON_MO_CTRL_FILTER_FLASG3 |
+ HTT_RX_MON_FP_DATA_FILTER_FLASG3 |
+ HTT_RX_MON_MO_DATA_FILTER_FLASG3;
+ }
+
+ if (ab->hw_params.rxdma1_enable) {
+ ret = ath11k_dp_tx_htt_rx_filter_setup(ar->ab, ring_id, dp->mac_id,
+ HAL_RXDMA_MONITOR_BUF,
+ DP_RXDMA_REFILL_RING_SIZE,
+ &tlv_filter);
+ } else if (!reset) {
+ /* set in monitor mode only */
+ for (i = 0; i < ab->hw_params.num_rxmda_per_pdev; i++) {
+ ring_id = dp->rx_mac_buf_ring[i].ring_id;
+ ret = ath11k_dp_tx_htt_rx_filter_setup(ar->ab, ring_id,
+ dp->mac_id + i,
+ HAL_RXDMA_BUF,
+ 1024,
+ &tlv_filter);
+ }
+ }
+
+ if (ret)
+ return ret;
+
+ for (i = 0; i < ab->hw_params.num_rxmda_per_pdev; i++) {
+ ring_id = dp->rx_mon_status_refill_ring[i].refill_buf_ring.ring_id;
+ if (!reset) {
+ tlv_filter.rx_filter =
+ HTT_RX_MON_FILTER_TLV_FLAGS_MON_STATUS_RING;
+ } else {
+ tlv_filter = ath11k_mac_mon_status_filter_default;
+
+ if (ath11k_debugfs_is_extd_rx_stats_enabled(ar))
+ tlv_filter.rx_filter = ath11k_debugfs_rx_filter(ar);
+ }
+
+ ret = ath11k_dp_tx_htt_rx_filter_setup(ab, ring_id,
+ dp->mac_id + i,
+ HAL_RXDMA_MONITOR_STATUS,
+ DP_RXDMA_REFILL_RING_SIZE,
+ &tlv_filter);
+ }
+
+ if (!ar->ab->hw_params.rxdma1_enable)
+ mod_timer(&ar->ab->mon_reap_timer, jiffies +
+ msecs_to_jiffies(ATH11K_MON_TIMER_INTERVAL));
+
+ return ret;
+}
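+
+/* Usage sketch (illustrative): reset == false programs the monitor
+ * filters, reset == true restores the default status ring filter:
+ *
+ *	ath11k_dp_tx_htt_monitor_mode_ring_config(ar, false);  // enter
+ *	...
+ *	ath11k_dp_tx_htt_monitor_mode_ring_config(ar, true);   // leave
+ */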
+
+int ath11k_dp_tx_htt_rx_full_mon_setup(struct ath11k_base *ab, int mac_id,
+ bool config)
+{
+ struct htt_rx_full_monitor_mode_cfg_cmd *cmd;
+ struct sk_buff *skb;
+ int ret, len = sizeof(*cmd);
+
+ skb = ath11k_htc_alloc_skb(ab, len);
+ if (!skb)
+ return -ENOMEM;
+
+ skb_put(skb, len);
+ cmd = (struct htt_rx_full_monitor_mode_cfg_cmd *)skb->data;
+ memset(cmd, 0, sizeof(*cmd));
+ cmd->info0 = FIELD_PREP(HTT_RX_FULL_MON_MODE_CFG_CMD_INFO0_MSG_TYPE,
+ HTT_H2T_MSG_TYPE_RX_FULL_MONITOR_MODE);
+
+ cmd->info0 |= FIELD_PREP(HTT_RX_FULL_MON_MODE_CFG_CMD_INFO0_PDEV_ID, mac_id);
+
+ cmd->cfg = HTT_RX_FULL_MON_MODE_CFG_CMD_CFG_ENABLE |
+ FIELD_PREP(HTT_RX_FULL_MON_MODE_CFG_CMD_CFG_RELEASE_RING,
+ HTT_RX_MON_RING_SW);
+ if (config) {
+ cmd->cfg |= HTT_RX_FULL_MON_MODE_CFG_CMD_CFG_ZERO_MPDUS_END |
+ HTT_RX_FULL_MON_MODE_CFG_CMD_CFG_NON_ZERO_MPDUS_END;
+ }
+
+ ret = ath11k_htc_send(&ab->htc, ab->dp.eid, skb);
+ if (ret)
+ goto err_free;
+
+ return 0;
+
+err_free:
+ dev_kfree_skb_any(skb);
+
+ return ret;
+}
diff --git a/drivers/net/wireless/ath/ath11k/dp_tx.h b/drivers/net/wireless/ath/ath11k/dp_tx.h
new file mode 100644
index 0000000000..68a21ea9b9
--- /dev/null
+++ b/drivers/net/wireless/ath/ath11k/dp_tx.h
@@ -0,0 +1,44 @@
+/* SPDX-License-Identifier: BSD-3-Clause-Clear */
+/*
+ * Copyright (c) 2018-2019 The Linux Foundation. All rights reserved.
+ */
+
+#ifndef ATH11K_DP_TX_H
+#define ATH11K_DP_TX_H
+
+#include "core.h"
+#include "hal_tx.h"
+
+struct ath11k_dp_htt_wbm_tx_status {
+ u32 msdu_id;
+ bool acked;
+ int ack_rssi;
+ u16 peer_id;
+};
+
+void ath11k_dp_tx_update_txcompl(struct ath11k *ar, struct hal_tx_status *ts);
+int ath11k_dp_tx_htt_h2t_ver_req_msg(struct ath11k_base *ab);
+int ath11k_dp_tx(struct ath11k *ar, struct ath11k_vif *arvif,
+ struct ath11k_sta *arsta, struct sk_buff *skb);
+void ath11k_dp_tx_completion_handler(struct ath11k_base *ab, int ring_id);
+int ath11k_dp_tx_send_reo_cmd(struct ath11k_base *ab, struct dp_rx_tid *rx_tid,
+ enum hal_reo_cmd_type type,
+ struct ath11k_hal_reo_cmd *cmd,
+ void (*func)(struct ath11k_dp *, void *,
+ enum hal_reo_cmd_status));
+
+int ath11k_dp_tx_htt_h2t_ppdu_stats_req(struct ath11k *ar, u32 mask);
+int
+ath11k_dp_tx_htt_h2t_ext_stats_req(struct ath11k *ar, u8 type,
+ struct htt_ext_stats_cfg_params *cfg_params,
+ u64 cookie);
+int ath11k_dp_tx_htt_monitor_mode_ring_config(struct ath11k *ar, bool reset);
+
+int ath11k_dp_tx_htt_rx_filter_setup(struct ath11k_base *ab, u32 ring_id,
+ int mac_id, enum hal_ring_type ring_type,
+ int rx_buf_size,
+ struct htt_rx_ring_tlv_filter *tlv_filter);
+
+int ath11k_dp_tx_htt_rx_full_mon_setup(struct ath11k_base *ab, int mac_id,
+ bool config);
+#endif
diff --git a/drivers/net/wireless/ath/ath11k/hal.c b/drivers/net/wireless/ath/ath11k/hal.c
new file mode 100644
index 0000000000..0a99aa7ddb
--- /dev/null
+++ b/drivers/net/wireless/ath/ath11k/hal.c
@@ -0,0 +1,1380 @@
+// SPDX-License-Identifier: BSD-3-Clause-Clear
+/*
+ * Copyright (c) 2018-2019 The Linux Foundation. All rights reserved.
+ * Copyright (c) 2022, Qualcomm Innovation Center, Inc. All rights reserved.
+ */
+#include <linux/dma-mapping.h>
+#include "hal_tx.h"
+#include "debug.h"
+#include "hal_desc.h"
+#include "hif.h"
+
+static const struct hal_srng_config hw_srng_config_template[] = {
+ /* TODO: max_rings can be populated by querying HW capabilities */
+ { /* REO_DST */
+ .start_ring_id = HAL_SRNG_RING_ID_REO2SW1,
+ .max_rings = 4,
+ .entry_size = sizeof(struct hal_reo_dest_ring) >> 2,
+ .lmac_ring = false,
+ .ring_dir = HAL_SRNG_DIR_DST,
+ .max_size = HAL_REO_REO2SW1_RING_BASE_MSB_RING_SIZE,
+ },
+ { /* REO_EXCEPTION */
+ /* Designating the REO2TCL ring as the exception ring. This ring is
+ * similar to the other REO2SW rings though it is named REO2TCL.
+ * Any of the REO2SW rings can be used as the exception ring.
+ */
+ .start_ring_id = HAL_SRNG_RING_ID_REO2TCL,
+ .max_rings = 1,
+ .entry_size = sizeof(struct hal_reo_dest_ring) >> 2,
+ .lmac_ring = false,
+ .ring_dir = HAL_SRNG_DIR_DST,
+ .max_size = HAL_REO_REO2TCL_RING_BASE_MSB_RING_SIZE,
+ },
+ { /* REO_REINJECT */
+ .start_ring_id = HAL_SRNG_RING_ID_SW2REO,
+ .max_rings = 1,
+ .entry_size = sizeof(struct hal_reo_entrance_ring) >> 2,
+ .lmac_ring = false,
+ .ring_dir = HAL_SRNG_DIR_SRC,
+ .max_size = HAL_REO_SW2REO_RING_BASE_MSB_RING_SIZE,
+ },
+ { /* REO_CMD */
+ .start_ring_id = HAL_SRNG_RING_ID_REO_CMD,
+ .max_rings = 1,
+ .entry_size = (sizeof(struct hal_tlv_hdr) +
+ sizeof(struct hal_reo_get_queue_stats)) >> 2,
+ .lmac_ring = false,
+ .ring_dir = HAL_SRNG_DIR_SRC,
+ .max_size = HAL_REO_CMD_RING_BASE_MSB_RING_SIZE,
+ },
+ { /* REO_STATUS */
+ .start_ring_id = HAL_SRNG_RING_ID_REO_STATUS,
+ .max_rings = 1,
+ .entry_size = (sizeof(struct hal_tlv_hdr) +
+ sizeof(struct hal_reo_get_queue_stats_status)) >> 2,
+ .lmac_ring = false,
+ .ring_dir = HAL_SRNG_DIR_DST,
+ .max_size = HAL_REO_STATUS_RING_BASE_MSB_RING_SIZE,
+ },
+ { /* TCL_DATA */
+ .start_ring_id = HAL_SRNG_RING_ID_SW2TCL1,
+ .max_rings = 3,
+ .entry_size = (sizeof(struct hal_tlv_hdr) +
+ sizeof(struct hal_tcl_data_cmd)) >> 2,
+ .lmac_ring = false,
+ .ring_dir = HAL_SRNG_DIR_SRC,
+ .max_size = HAL_SW2TCL1_RING_BASE_MSB_RING_SIZE,
+ },
+ { /* TCL_CMD */
+ .start_ring_id = HAL_SRNG_RING_ID_SW2TCL_CMD,
+ .max_rings = 1,
+ .entry_size = (sizeof(struct hal_tlv_hdr) +
+ sizeof(struct hal_tcl_gse_cmd)) >> 2,
+ .lmac_ring = false,
+ .ring_dir = HAL_SRNG_DIR_SRC,
+ .max_size = HAL_SW2TCL1_CMD_RING_BASE_MSB_RING_SIZE,
+ },
+ { /* TCL_STATUS */
+ .start_ring_id = HAL_SRNG_RING_ID_TCL_STATUS,
+ .max_rings = 1,
+ .entry_size = (sizeof(struct hal_tlv_hdr) +
+ sizeof(struct hal_tcl_status_ring)) >> 2,
+ .lmac_ring = false,
+ .ring_dir = HAL_SRNG_DIR_DST,
+ .max_size = HAL_TCL_STATUS_RING_BASE_MSB_RING_SIZE,
+ },
+ { /* CE_SRC */
+ .start_ring_id = HAL_SRNG_RING_ID_CE0_SRC,
+ .max_rings = 12,
+ .entry_size = sizeof(struct hal_ce_srng_src_desc) >> 2,
+ .lmac_ring = false,
+ .ring_dir = HAL_SRNG_DIR_SRC,
+ .max_size = HAL_CE_SRC_RING_BASE_MSB_RING_SIZE,
+ },
+ { /* CE_DST */
+ .start_ring_id = HAL_SRNG_RING_ID_CE0_DST,
+ .max_rings = 12,
+ .entry_size = sizeof(struct hal_ce_srng_dest_desc) >> 2,
+ .lmac_ring = false,
+ .ring_dir = HAL_SRNG_DIR_SRC,
+ .max_size = HAL_CE_DST_RING_BASE_MSB_RING_SIZE,
+ },
+ { /* CE_DST_STATUS */
+ .start_ring_id = HAL_SRNG_RING_ID_CE0_DST_STATUS,
+ .max_rings = 12,
+ .entry_size = sizeof(struct hal_ce_srng_dst_status_desc) >> 2,
+ .lmac_ring = false,
+ .ring_dir = HAL_SRNG_DIR_DST,
+ .max_size = HAL_CE_DST_STATUS_RING_BASE_MSB_RING_SIZE,
+ },
+ { /* WBM_IDLE_LINK */
+ .start_ring_id = HAL_SRNG_RING_ID_WBM_IDLE_LINK,
+ .max_rings = 1,
+ .entry_size = sizeof(struct hal_wbm_link_desc) >> 2,
+ .lmac_ring = false,
+ .ring_dir = HAL_SRNG_DIR_SRC,
+ .max_size = HAL_WBM_IDLE_LINK_RING_BASE_MSB_RING_SIZE,
+ },
+ { /* SW2WBM_RELEASE */
+ .start_ring_id = HAL_SRNG_RING_ID_WBM_SW_RELEASE,
+ .max_rings = 1,
+ .entry_size = sizeof(struct hal_wbm_release_ring) >> 2,
+ .lmac_ring = false,
+ .ring_dir = HAL_SRNG_DIR_SRC,
+ .max_size = HAL_SW2WBM_RELEASE_RING_BASE_MSB_RING_SIZE,
+ },
+ { /* WBM2SW_RELEASE */
+ .start_ring_id = HAL_SRNG_RING_ID_WBM2SW0_RELEASE,
+ .max_rings = 5,
+ .entry_size = sizeof(struct hal_wbm_release_ring) >> 2,
+ .lmac_ring = false,
+ .ring_dir = HAL_SRNG_DIR_DST,
+ .max_size = HAL_WBM2SW_RELEASE_RING_BASE_MSB_RING_SIZE,
+ },
+ { /* RXDMA_BUF */
+ .start_ring_id = HAL_SRNG_RING_ID_WMAC1_SW2RXDMA0_BUF,
+ .max_rings = 2,
+ .entry_size = sizeof(struct hal_wbm_buffer_ring) >> 2,
+ .lmac_ring = true,
+ .ring_dir = HAL_SRNG_DIR_SRC,
+ .max_size = HAL_RXDMA_RING_MAX_SIZE,
+ },
+ { /* RXDMA_DST */
+ .start_ring_id = HAL_SRNG_RING_ID_WMAC1_RXDMA2SW0,
+ .max_rings = 1,
+ .entry_size = sizeof(struct hal_reo_entrance_ring) >> 2,
+ .lmac_ring = true,
+ .ring_dir = HAL_SRNG_DIR_DST,
+ .max_size = HAL_RXDMA_RING_MAX_SIZE,
+ },
+ { /* RXDMA_MONITOR_BUF */
+ .start_ring_id = HAL_SRNG_RING_ID_WMAC1_SW2RXDMA2_BUF,
+ .max_rings = 1,
+ .entry_size = sizeof(struct hal_wbm_buffer_ring) >> 2,
+ .lmac_ring = true,
+ .ring_dir = HAL_SRNG_DIR_SRC,
+ .max_size = HAL_RXDMA_RING_MAX_SIZE,
+ },
+ { /* RXDMA_MONITOR_STATUS */
+ .start_ring_id = HAL_SRNG_RING_ID_WMAC1_SW2RXDMA1_STATBUF,
+ .max_rings = 1,
+ .entry_size = sizeof(struct hal_wbm_buffer_ring) >> 2,
+ .lmac_ring = true,
+ .ring_dir = HAL_SRNG_DIR_SRC,
+ .max_size = HAL_RXDMA_RING_MAX_SIZE,
+ },
+ { /* RXDMA_MONITOR_DST */
+ .start_ring_id = HAL_SRNG_RING_ID_WMAC1_RXDMA2SW1,
+ .max_rings = 1,
+ .entry_size = sizeof(struct hal_reo_entrance_ring) >> 2,
+ .lmac_ring = true,
+ .ring_dir = HAL_SRNG_DIR_DST,
+ .max_size = HAL_RXDMA_RING_MAX_SIZE,
+ },
+ { /* RXDMA_MONITOR_DESC */
+ .start_ring_id = HAL_SRNG_RING_ID_WMAC1_SW2RXDMA1_DESC,
+ .max_rings = 1,
+ .entry_size = sizeof(struct hal_wbm_buffer_ring) >> 2,
+ .lmac_ring = true,
+ .ring_dir = HAL_SRNG_DIR_SRC,
+ .max_size = HAL_RXDMA_RING_MAX_SIZE,
+ },
+ { /* RXDMA DIR BUF */
+ .start_ring_id = HAL_SRNG_RING_ID_RXDMA_DIR_BUF,
+ .max_rings = 1,
+ .entry_size = 8 >> 2, /* TODO: Define the struct */
+ .lmac_ring = true,
+ .ring_dir = HAL_SRNG_DIR_SRC,
+ .max_size = HAL_RXDMA_RING_MAX_SIZE,
+ },
+};
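+
+/* Note on units in the template above: .entry_size is in 32-bit words
+ * (hence the >> 2 on the descriptor struct sizes) and .max_size is a
+ * word count as well; e.g. a 32-byte descriptor yields entry_size 8,
+ * so a ring of N such entries spans N * 8 words of ring memory.
+ */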
+
+static int ath11k_hal_alloc_cont_rdp(struct ath11k_base *ab)
+{
+ struct ath11k_hal *hal = &ab->hal;
+ size_t size;
+
+ size = sizeof(u32) * HAL_SRNG_RING_ID_MAX;
+ hal->rdp.vaddr = dma_alloc_coherent(ab->dev, size, &hal->rdp.paddr,
+ GFP_KERNEL);
+ if (!hal->rdp.vaddr)
+ return -ENOMEM;
+
+ return 0;
+}
+
+static void ath11k_hal_free_cont_rdp(struct ath11k_base *ab)
+{
+ struct ath11k_hal *hal = &ab->hal;
+ size_t size;
+
+ if (!hal->rdp.vaddr)
+ return;
+
+ size = sizeof(u32) * HAL_SRNG_RING_ID_MAX;
+ dma_free_coherent(ab->dev, size,
+ hal->rdp.vaddr, hal->rdp.paddr);
+ hal->rdp.vaddr = NULL;
+}
+
+static int ath11k_hal_alloc_cont_wrp(struct ath11k_base *ab)
+{
+ struct ath11k_hal *hal = &ab->hal;
+ size_t size;
+
+ size = sizeof(u32) * HAL_SRNG_NUM_LMAC_RINGS;
+ hal->wrp.vaddr = dma_alloc_coherent(ab->dev, size, &hal->wrp.paddr,
+ GFP_KERNEL);
+ if (!hal->wrp.vaddr)
+ return -ENOMEM;
+
+ return 0;
+}
+
+static void ath11k_hal_free_cont_wrp(struct ath11k_base *ab)
+{
+ struct ath11k_hal *hal = &ab->hal;
+ size_t size;
+
+ if (!hal->wrp.vaddr)
+ return;
+
+ size = sizeof(u32) * HAL_SRNG_NUM_LMAC_RINGS;
+ dma_free_coherent(ab->dev, size,
+ hal->wrp.vaddr, hal->wrp.paddr);
+ hal->wrp.vaddr = NULL;
+}
+
+static void ath11k_hal_ce_dst_setup(struct ath11k_base *ab,
+ struct hal_srng *srng, int ring_num)
+{
+ struct hal_srng_config *srng_config = &ab->hal.srng_config[HAL_CE_DST];
+ u32 addr;
+ u32 val;
+
+ addr = HAL_CE_DST_RING_CTRL +
+ srng_config->reg_start[HAL_SRNG_REG_GRP_R0] +
+ ring_num * srng_config->reg_size[HAL_SRNG_REG_GRP_R0];
+
+ val = ath11k_hif_read32(ab, addr);
+ val &= ~HAL_CE_DST_R0_DEST_CTRL_MAX_LEN;
+ val |= FIELD_PREP(HAL_CE_DST_R0_DEST_CTRL_MAX_LEN,
+ srng->u.dst_ring.max_buffer_length);
+ ath11k_hif_write32(ab, addr, val);
+}
+
+static void ath11k_hal_srng_dst_hw_init(struct ath11k_base *ab,
+ struct hal_srng *srng)
+{
+ struct ath11k_hal *hal = &ab->hal;
+ u32 val;
+ u64 hp_addr;
+ u32 reg_base;
+
+ reg_base = srng->hwreg_base[HAL_SRNG_REG_GRP_R0];
+
+ if (srng->flags & HAL_SRNG_FLAGS_MSI_INTR) {
+ ath11k_hif_write32(ab, reg_base +
+ HAL_REO1_RING_MSI1_BASE_LSB_OFFSET(ab),
+ srng->msi_addr);
+
+ val = FIELD_PREP(HAL_REO1_RING_MSI1_BASE_MSB_ADDR,
+ ((u64)srng->msi_addr >>
+ HAL_ADDR_MSB_REG_SHIFT)) |
+ HAL_REO1_RING_MSI1_BASE_MSB_MSI1_ENABLE;
+ ath11k_hif_write32(ab, reg_base +
+ HAL_REO1_RING_MSI1_BASE_MSB_OFFSET(ab), val);
+
+ ath11k_hif_write32(ab,
+ reg_base + HAL_REO1_RING_MSI1_DATA_OFFSET(ab),
+ srng->msi_data);
+ }
+
+ ath11k_hif_write32(ab, reg_base, srng->ring_base_paddr);
+
+ val = FIELD_PREP(HAL_REO1_RING_BASE_MSB_RING_BASE_ADDR_MSB,
+ ((u64)srng->ring_base_paddr >>
+ HAL_ADDR_MSB_REG_SHIFT)) |
+ FIELD_PREP(HAL_REO1_RING_BASE_MSB_RING_SIZE,
+ (srng->entry_size * srng->num_entries));
+ ath11k_hif_write32(ab, reg_base + HAL_REO1_RING_BASE_MSB_OFFSET(ab), val);
+
+ val = FIELD_PREP(HAL_REO1_RING_ID_RING_ID, srng->ring_id) |
+ FIELD_PREP(HAL_REO1_RING_ID_ENTRY_SIZE, srng->entry_size);
+ ath11k_hif_write32(ab, reg_base + HAL_REO1_RING_ID_OFFSET(ab), val);
+
+ /* interrupt setup */
+ val = FIELD_PREP(HAL_REO1_RING_PRDR_INT_SETUP_INTR_TMR_THOLD,
+ (srng->intr_timer_thres_us >> 3));
+
+ val |= FIELD_PREP(HAL_REO1_RING_PRDR_INT_SETUP_BATCH_COUNTER_THOLD,
+ (srng->intr_batch_cntr_thres_entries *
+ srng->entry_size));
+
+ ath11k_hif_write32(ab,
+ reg_base + HAL_REO1_RING_PRODUCER_INT_SETUP_OFFSET(ab),
+ val);
+
+ hp_addr = hal->rdp.paddr +
+ ((unsigned long)srng->u.dst_ring.hp_addr -
+ (unsigned long)hal->rdp.vaddr);
+ ath11k_hif_write32(ab, reg_base + HAL_REO1_RING_HP_ADDR_LSB_OFFSET(ab),
+ hp_addr & HAL_ADDR_LSB_REG_MASK);
+ ath11k_hif_write32(ab, reg_base + HAL_REO1_RING_HP_ADDR_MSB_OFFSET(ab),
+ hp_addr >> HAL_ADDR_MSB_REG_SHIFT);
+
+ /* Initialize head and tail pointers to indicate ring is empty */
+ reg_base = srng->hwreg_base[HAL_SRNG_REG_GRP_R2];
+ ath11k_hif_write32(ab, reg_base, 0);
+ ath11k_hif_write32(ab, reg_base + HAL_REO1_RING_TP_OFFSET(ab), 0);
+ *srng->u.dst_ring.hp_addr = 0;
+
+ reg_base = srng->hwreg_base[HAL_SRNG_REG_GRP_R0];
+ val = 0;
+ if (srng->flags & HAL_SRNG_FLAGS_DATA_TLV_SWAP)
+ val |= HAL_REO1_RING_MISC_DATA_TLV_SWAP;
+ if (srng->flags & HAL_SRNG_FLAGS_RING_PTR_SWAP)
+ val |= HAL_REO1_RING_MISC_HOST_FW_SWAP;
+ if (srng->flags & HAL_SRNG_FLAGS_MSI_SWAP)
+ val |= HAL_REO1_RING_MISC_MSI_SWAP;
+ val |= HAL_REO1_RING_MISC_SRNG_ENABLE;
+
+ ath11k_hif_write32(ab, reg_base + HAL_REO1_RING_MISC_OFFSET(ab), val);
+}
+
+static void ath11k_hal_srng_src_hw_init(struct ath11k_base *ab,
+ struct hal_srng *srng)
+{
+ struct ath11k_hal *hal = &ab->hal;
+ u32 val;
+ u64 tp_addr;
+ u32 reg_base;
+
+ reg_base = srng->hwreg_base[HAL_SRNG_REG_GRP_R0];
+
+ if (srng->flags & HAL_SRNG_FLAGS_MSI_INTR) {
+ ath11k_hif_write32(ab, reg_base +
+ HAL_TCL1_RING_MSI1_BASE_LSB_OFFSET(ab),
+ srng->msi_addr);
+
+ val = FIELD_PREP(HAL_TCL1_RING_MSI1_BASE_MSB_ADDR,
+ ((u64)srng->msi_addr >>
+ HAL_ADDR_MSB_REG_SHIFT)) |
+ HAL_TCL1_RING_MSI1_BASE_MSB_MSI1_ENABLE;
+ ath11k_hif_write32(ab, reg_base +
+ HAL_TCL1_RING_MSI1_BASE_MSB_OFFSET(ab),
+ val);
+
+ ath11k_hif_write32(ab, reg_base +
+ HAL_TCL1_RING_MSI1_DATA_OFFSET(ab),
+ srng->msi_data);
+ }
+
+ ath11k_hif_write32(ab, reg_base, srng->ring_base_paddr);
+
+ val = FIELD_PREP(HAL_TCL1_RING_BASE_MSB_RING_BASE_ADDR_MSB,
+ ((u64)srng->ring_base_paddr >>
+ HAL_ADDR_MSB_REG_SHIFT)) |
+ FIELD_PREP(HAL_TCL1_RING_BASE_MSB_RING_SIZE,
+ (srng->entry_size * srng->num_entries));
+ ath11k_hif_write32(ab, reg_base + HAL_TCL1_RING_BASE_MSB_OFFSET(ab), val);
+
+ val = FIELD_PREP(HAL_REO1_RING_ID_ENTRY_SIZE, srng->entry_size);
+ ath11k_hif_write32(ab, reg_base + HAL_TCL1_RING_ID_OFFSET(ab), val);
+
+ if (srng->ring_id == HAL_SRNG_RING_ID_WBM_IDLE_LINK) {
+ ath11k_hif_write32(ab, reg_base, (u32)srng->ring_base_paddr);
+ val = FIELD_PREP(HAL_TCL1_RING_BASE_MSB_RING_BASE_ADDR_MSB,
+ ((u64)srng->ring_base_paddr >>
+ HAL_ADDR_MSB_REG_SHIFT)) |
+ FIELD_PREP(HAL_TCL1_RING_BASE_MSB_RING_SIZE,
+ (srng->entry_size * srng->num_entries));
+ ath11k_hif_write32(ab, reg_base + HAL_TCL1_RING_BASE_MSB_OFFSET(ab), val);
+ }
+
+ /* interrupt setup */
+ /* NOTE: IPQ8074 v2 requires the interrupt timer threshold in units
+ * of 8 usecs instead of 1 usec (as required by v1).
+ */
+ val = FIELD_PREP(HAL_TCL1_RING_CONSR_INT_SETUP_IX0_INTR_TMR_THOLD,
+ srng->intr_timer_thres_us);
+
+ val |= FIELD_PREP(HAL_TCL1_RING_CONSR_INT_SETUP_IX0_BATCH_COUNTER_THOLD,
+ (srng->intr_batch_cntr_thres_entries *
+ srng->entry_size));
+
+ ath11k_hif_write32(ab,
+ reg_base + HAL_TCL1_RING_CONSR_INT_SETUP_IX0_OFFSET(ab),
+ val);
+
+ val = 0;
+ if (srng->flags & HAL_SRNG_FLAGS_LOW_THRESH_INTR_EN) {
+ val |= FIELD_PREP(HAL_TCL1_RING_CONSR_INT_SETUP_IX1_LOW_THOLD,
+ srng->u.src_ring.low_threshold);
+ }
+ ath11k_hif_write32(ab,
+ reg_base + HAL_TCL1_RING_CONSR_INT_SETUP_IX1_OFFSET(ab),
+ val);
+
+ if (srng->ring_id != HAL_SRNG_RING_ID_WBM_IDLE_LINK) {
+ tp_addr = hal->rdp.paddr +
+ ((unsigned long)srng->u.src_ring.tp_addr -
+ (unsigned long)hal->rdp.vaddr);
+ ath11k_hif_write32(ab,
+ reg_base + HAL_TCL1_RING_TP_ADDR_LSB_OFFSET(ab),
+ tp_addr & HAL_ADDR_LSB_REG_MASK);
+ ath11k_hif_write32(ab,
+ reg_base + HAL_TCL1_RING_TP_ADDR_MSB_OFFSET(ab),
+ tp_addr >> HAL_ADDR_MSB_REG_SHIFT);
+ }
+
+ /* Initialize head and tail pointers to indicate ring is empty */
+ reg_base = srng->hwreg_base[HAL_SRNG_REG_GRP_R2];
+ ath11k_hif_write32(ab, reg_base, 0);
+ ath11k_hif_write32(ab, reg_base + HAL_TCL1_RING_TP_OFFSET, 0);
+ *srng->u.src_ring.tp_addr = 0;
+
+ reg_base = srng->hwreg_base[HAL_SRNG_REG_GRP_R0];
+ val = 0;
+ if (srng->flags & HAL_SRNG_FLAGS_DATA_TLV_SWAP)
+ val |= HAL_TCL1_RING_MISC_DATA_TLV_SWAP;
+ if (srng->flags & HAL_SRNG_FLAGS_RING_PTR_SWAP)
+ val |= HAL_TCL1_RING_MISC_HOST_FW_SWAP;
+ if (srng->flags & HAL_SRNG_FLAGS_MSI_SWAP)
+ val |= HAL_TCL1_RING_MISC_MSI_SWAP;
+
+ /* Loop count is not used for SRC rings */
+ val |= HAL_TCL1_RING_MISC_MSI_LOOPCNT_DISABLE;
+
+ val |= HAL_TCL1_RING_MISC_SRNG_ENABLE;
+
+ ath11k_hif_write32(ab, reg_base + HAL_TCL1_RING_MISC_OFFSET(ab), val);
+}
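+
+/* Note the unit asymmetry between the two init paths: the REO (dst)
+ * init converts intr_timer_thres_us to 8 usec units with a >> 3, while
+ * this TCL (src) path writes the microsecond value as-is, per the
+ * IPQ8074 v2 NOTE above.
+ */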
+
+static void ath11k_hal_srng_hw_init(struct ath11k_base *ab,
+ struct hal_srng *srng)
+{
+ if (srng->ring_dir == HAL_SRNG_DIR_SRC)
+ ath11k_hal_srng_src_hw_init(ab, srng);
+ else
+ ath11k_hal_srng_dst_hw_init(ab, srng);
+}
+
+static int ath11k_hal_srng_get_ring_id(struct ath11k_base *ab,
+ enum hal_ring_type type,
+ int ring_num, int mac_id)
+{
+ struct hal_srng_config *srng_config = &ab->hal.srng_config[type];
+ int ring_id;
+
+ if (ring_num >= srng_config->max_rings) {
+ ath11k_warn(ab, "invalid ring number :%d\n", ring_num);
+ return -EINVAL;
+ }
+
+ ring_id = srng_config->start_ring_id + ring_num;
+ if (srng_config->lmac_ring)
+ ring_id += mac_id * HAL_SRNG_RINGS_PER_LMAC;
+
+ if (WARN_ON(ring_id >= HAL_SRNG_RING_ID_MAX))
+ return -EINVAL;
+
+ return ring_id;
+}
+
+int ath11k_hal_srng_get_entrysize(struct ath11k_base *ab, u32 ring_type)
+{
+ struct hal_srng_config *srng_config;
+
+ if (WARN_ON(ring_type >= HAL_MAX_RING_TYPES))
+ return -EINVAL;
+
+ srng_config = &ab->hal.srng_config[ring_type];
+
+ return (srng_config->entry_size << 2);
+}
+
+int ath11k_hal_srng_get_max_entries(struct ath11k_base *ab, u32 ring_type)
+{
+ struct hal_srng_config *srng_config;
+
+ if (WARN_ON(ring_type >= HAL_MAX_RING_TYPES))
+ return -EINVAL;
+
+ srng_config = &ab->hal.srng_config[ring_type];
+
+ return (srng_config->max_size / srng_config->entry_size);
+}
+
+void ath11k_hal_srng_get_params(struct ath11k_base *ab, struct hal_srng *srng,
+ struct hal_srng_params *params)
+{
+ params->ring_base_paddr = srng->ring_base_paddr;
+ params->ring_base_vaddr = srng->ring_base_vaddr;
+ params->num_entries = srng->num_entries;
+ params->intr_timer_thres_us = srng->intr_timer_thres_us;
+ params->intr_batch_cntr_thres_entries =
+ srng->intr_batch_cntr_thres_entries;
+ params->low_threshold = srng->u.src_ring.low_threshold;
+ params->msi_addr = srng->msi_addr;
+ params->msi_data = srng->msi_data;
+ params->flags = srng->flags;
+}
+
+dma_addr_t ath11k_hal_srng_get_hp_addr(struct ath11k_base *ab,
+ struct hal_srng *srng)
+{
+ if (!(srng->flags & HAL_SRNG_FLAGS_LMAC_RING))
+ return 0;
+
+ if (srng->ring_dir == HAL_SRNG_DIR_SRC)
+ return ab->hal.wrp.paddr +
+ ((unsigned long)srng->u.src_ring.hp_addr -
+ (unsigned long)ab->hal.wrp.vaddr);
+ else
+ return ab->hal.rdp.paddr +
+ ((unsigned long)srng->u.dst_ring.hp_addr -
+ (unsigned long)ab->hal.rdp.vaddr);
+}
+
+dma_addr_t ath11k_hal_srng_get_tp_addr(struct ath11k_base *ab,
+ struct hal_srng *srng)
+{
+ if (!(srng->flags & HAL_SRNG_FLAGS_LMAC_RING))
+ return 0;
+
+ if (srng->ring_dir == HAL_SRNG_DIR_SRC)
+ return ab->hal.rdp.paddr +
+ ((unsigned long)srng->u.src_ring.tp_addr -
+ (unsigned long)ab->hal.rdp.vaddr);
+ else
+ return ab->hal.wrp.paddr +
+ ((unsigned long)srng->u.dst_ring.tp_addr -
+ (unsigned long)ab->hal.wrp.vaddr);
+}
+
+u32 ath11k_hal_ce_get_desc_size(enum hal_ce_desc type)
+{
+ switch (type) {
+ case HAL_CE_DESC_SRC:
+ return sizeof(struct hal_ce_srng_src_desc);
+ case HAL_CE_DESC_DST:
+ return sizeof(struct hal_ce_srng_dest_desc);
+ case HAL_CE_DESC_DST_STATUS:
+ return sizeof(struct hal_ce_srng_dst_status_desc);
+ }
+
+ return 0;
+}
+
+void ath11k_hal_ce_src_set_desc(void *buf, dma_addr_t paddr, u32 len, u32 id,
+ u8 byte_swap_data)
+{
+ struct hal_ce_srng_src_desc *desc = (struct hal_ce_srng_src_desc *)buf;
+
+ desc->buffer_addr_low = paddr & HAL_ADDR_LSB_REG_MASK;
+ desc->buffer_addr_info =
+ FIELD_PREP(HAL_CE_SRC_DESC_ADDR_INFO_ADDR_HI,
+ ((u64)paddr >> HAL_ADDR_MSB_REG_SHIFT)) |
+ FIELD_PREP(HAL_CE_SRC_DESC_ADDR_INFO_BYTE_SWAP,
+ byte_swap_data) |
+ FIELD_PREP(HAL_CE_SRC_DESC_ADDR_INFO_GATHER, 0) |
+ FIELD_PREP(HAL_CE_SRC_DESC_ADDR_INFO_LEN, len);
+ desc->meta_info = FIELD_PREP(HAL_CE_SRC_DESC_META_INFO_DATA, id);
+}
+
+void ath11k_hal_ce_dst_set_desc(void *buf, dma_addr_t paddr)
+{
+ struct hal_ce_srng_dest_desc *desc =
+ (struct hal_ce_srng_dest_desc *)buf;
+
+ desc->buffer_addr_low = paddr & HAL_ADDR_LSB_REG_MASK;
+ desc->buffer_addr_info =
+ FIELD_PREP(HAL_CE_DEST_DESC_ADDR_INFO_ADDR_HI,
+ ((u64)paddr >> HAL_ADDR_MSB_REG_SHIFT));
+}
+
+u32 ath11k_hal_ce_dst_status_get_length(void *buf)
+{
+ struct hal_ce_srng_dst_status_desc *desc =
+ (struct hal_ce_srng_dst_status_desc *)buf;
+ u32 len;
+
+ len = FIELD_GET(HAL_CE_DST_STATUS_DESC_FLAGS_LEN, desc->flags);
+ desc->flags &= ~HAL_CE_DST_STATUS_DESC_FLAGS_LEN;
+
+ return len;
+}
+
+void ath11k_hal_set_link_desc_addr(struct hal_wbm_link_desc *desc, u32 cookie,
+ dma_addr_t paddr)
+{
+ desc->buf_addr_info.info0 = FIELD_PREP(BUFFER_ADDR_INFO0_ADDR,
+ (paddr & HAL_ADDR_LSB_REG_MASK));
+ desc->buf_addr_info.info1 = FIELD_PREP(BUFFER_ADDR_INFO1_ADDR,
+ ((u64)paddr >> HAL_ADDR_MSB_REG_SHIFT)) |
+ FIELD_PREP(BUFFER_ADDR_INFO1_RET_BUF_MGR, 1) |
+ FIELD_PREP(BUFFER_ADDR_INFO1_SW_COOKIE, cookie);
+}
+
+u32 *ath11k_hal_srng_dst_peek(struct ath11k_base *ab, struct hal_srng *srng)
+{
+ lockdep_assert_held(&srng->lock);
+
+ if (srng->u.dst_ring.tp != srng->u.dst_ring.cached_hp)
+ return (srng->ring_base_vaddr + srng->u.dst_ring.tp);
+
+ return NULL;
+}
+
+static void ath11k_hal_srng_prefetch_desc(struct ath11k_base *ab,
+ struct hal_srng *srng)
+{
+ u32 *desc;
+
+ /* prefetch only if desc is available */
+ desc = ath11k_hal_srng_dst_peek(ab, srng);
+ if (likely(desc)) {
+ dma_sync_single_for_cpu(ab->dev, virt_to_phys(desc),
+ (srng->entry_size * sizeof(u32)),
+ DMA_FROM_DEVICE);
+ prefetch(desc);
+ }
+}
+
+u32 *ath11k_hal_srng_dst_get_next_entry(struct ath11k_base *ab,
+ struct hal_srng *srng)
+{
+ u32 *desc;
+
+ lockdep_assert_held(&srng->lock);
+
+ if (srng->u.dst_ring.tp == srng->u.dst_ring.cached_hp)
+ return NULL;
+
+ desc = srng->ring_base_vaddr + srng->u.dst_ring.tp;
+
+ srng->u.dst_ring.tp += srng->entry_size;
+
+ /* wrap around to the start of the ring */
+ if (srng->u.dst_ring.tp == srng->ring_size)
+ srng->u.dst_ring.tp = 0;
+
+ /* Try to prefetch the next descriptor in the ring */
+ if (srng->flags & HAL_SRNG_FLAGS_CACHED)
+ ath11k_hal_srng_prefetch_desc(ab, srng);
+
+ return desc;
+}
+
+int ath11k_hal_srng_dst_num_free(struct ath11k_base *ab, struct hal_srng *srng,
+ bool sync_hw_ptr)
+{
+ u32 tp, hp;
+
+ lockdep_assert_held(&srng->lock);
+
+ tp = srng->u.dst_ring.tp;
+
+ if (sync_hw_ptr) {
+ hp = *srng->u.dst_ring.hp_addr;
+ srng->u.dst_ring.cached_hp = hp;
+ } else {
+ hp = srng->u.dst_ring.cached_hp;
+ }
+
+ if (hp >= tp)
+ return (hp - tp) / srng->entry_size;
+ else
+ return (srng->ring_size - tp + hp) / srng->entry_size;
+}
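+
+/* Worked example (illustrative): hp and tp are offsets in 32-bit words,
+ * so with entry_size 2 and ring_size 16 (8 entries), tp == 12 and
+ * hp == 4 takes the wrapped branch: (16 - 12 + 4) / 2 == 4 entries
+ * pending.
+ */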
+
+/* Returns number of available entries in src ring */
+int ath11k_hal_srng_src_num_free(struct ath11k_base *ab, struct hal_srng *srng,
+ bool sync_hw_ptr)
+{
+ u32 tp, hp;
+
+ lockdep_assert_held(&srng->lock);
+
+ hp = srng->u.src_ring.hp;
+
+ if (sync_hw_ptr) {
+ tp = *srng->u.src_ring.tp_addr;
+ srng->u.src_ring.cached_tp = tp;
+ } else {
+ tp = srng->u.src_ring.cached_tp;
+ }
+
+ if (tp > hp)
+ return ((tp - hp) / srng->entry_size) - 1;
+ else
+ return ((srng->ring_size - hp + tp) / srng->entry_size) - 1;
+}
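+
+/* The "- 1" above keeps one entry permanently unused so that a full
+ * ring (hp one entry behind tp) can be told apart from an empty ring
+ * (hp == tp); a src ring of N entries therefore holds at most N - 1.
+ */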
+
+u32 *ath11k_hal_srng_src_get_next_entry(struct ath11k_base *ab,
+ struct hal_srng *srng)
+{
+ u32 *desc;
+ u32 next_hp;
+
+ lockdep_assert_held(&srng->lock);
+
+ /* TODO: Using % is expensive, but we have to do this since the size
+ * of some SRNG rings is not a power of 2 (due to descriptor sizes).
+ * Consider defining a separate function for rings whose size is a
+ * power of 2 (TCL2SW, REO2SW, SW2RXDMA and CE rings) so that the
+ * overhead of % can be avoided by masking (with &).
+ */
+ next_hp = (srng->u.src_ring.hp + srng->entry_size) % srng->ring_size;
+
+ if (next_hp == srng->u.src_ring.cached_tp)
+ return NULL;
+
+ desc = srng->ring_base_vaddr + srng->u.src_ring.hp;
+ srng->u.src_ring.hp = next_hp;
+
+ /* TODO: Reap functionality is not used by all rings. If a particular
+ * ring does not use it, reap_hp need not be updated with next_hp.
+ * Make sure a separate function is used for such rings before
+ * optimizing away the reap_hp update below.
+ */
+ srng->u.src_ring.reap_hp = next_hp;
+
+ return desc;
+}
+
+u32 *ath11k_hal_srng_src_reap_next(struct ath11k_base *ab,
+ struct hal_srng *srng)
+{
+ u32 *desc;
+ u32 next_reap_hp;
+
+ lockdep_assert_held(&srng->lock);
+
+ next_reap_hp = (srng->u.src_ring.reap_hp + srng->entry_size) %
+ srng->ring_size;
+
+ if (next_reap_hp == srng->u.src_ring.cached_tp)
+ return NULL;
+
+ desc = srng->ring_base_vaddr + next_reap_hp;
+ srng->u.src_ring.reap_hp = next_reap_hp;
+
+ return desc;
+}
+
+u32 *ath11k_hal_srng_src_get_next_reaped(struct ath11k_base *ab,
+ struct hal_srng *srng)
+{
+ u32 *desc;
+
+ lockdep_assert_held(&srng->lock);
+
+ if (srng->u.src_ring.hp == srng->u.src_ring.reap_hp)
+ return NULL;
+
+ desc = srng->ring_base_vaddr + srng->u.src_ring.hp;
+ srng->u.src_ring.hp = (srng->u.src_ring.hp + srng->entry_size) %
+ srng->ring_size;
+
+ return desc;
+}
+
+u32 *ath11k_hal_srng_src_peek(struct ath11k_base *ab, struct hal_srng *srng)
+{
+ lockdep_assert_held(&srng->lock);
+
+ if (((srng->u.src_ring.hp + srng->entry_size) % srng->ring_size) ==
+ srng->u.src_ring.cached_tp)
+ return NULL;
+
+ return srng->ring_base_vaddr + srng->u.src_ring.hp;
+}
+
+void ath11k_hal_srng_access_begin(struct ath11k_base *ab, struct hal_srng *srng)
+{
+ lockdep_assert_held(&srng->lock);
+
+ if (srng->ring_dir == HAL_SRNG_DIR_SRC) {
+ srng->u.src_ring.cached_tp =
+ *(volatile u32 *)srng->u.src_ring.tp_addr;
+ } else {
+ srng->u.dst_ring.cached_hp = *srng->u.dst_ring.hp_addr;
+
+ /* Try to prefetch the next descriptor in the ring */
+ if (srng->flags & HAL_SRNG_FLAGS_CACHED)
+ ath11k_hal_srng_prefetch_desc(ab, srng);
+ }
+}
+
+/* Update cached ring head/tail pointers to HW. ath11k_hal_srng_access_begin()
+ * should have been called before this.
+ */
+void ath11k_hal_srng_access_end(struct ath11k_base *ab, struct hal_srng *srng)
+{
+ lockdep_assert_held(&srng->lock);
+
+ /* TODO: See if we need a write memory barrier here */
+ if (srng->flags & HAL_SRNG_FLAGS_LMAC_RING) {
+ /* For LMAC rings, ring pointer updates are done through FW and
+ * hence written to a shared memory location that is read by FW
+ */
+ if (srng->ring_dir == HAL_SRNG_DIR_SRC) {
+ srng->u.src_ring.last_tp =
+ *(volatile u32 *)srng->u.src_ring.tp_addr;
+ *srng->u.src_ring.hp_addr = srng->u.src_ring.hp;
+ } else {
+ srng->u.dst_ring.last_hp = *srng->u.dst_ring.hp_addr;
+ *srng->u.dst_ring.tp_addr = srng->u.dst_ring.tp;
+ }
+ } else {
+ if (srng->ring_dir == HAL_SRNG_DIR_SRC) {
+ srng->u.src_ring.last_tp =
+ *(volatile u32 *)srng->u.src_ring.tp_addr;
+ ath11k_hif_write32(ab,
+ (unsigned long)srng->u.src_ring.hp_addr -
+ (unsigned long)ab->mem,
+ srng->u.src_ring.hp);
+ } else {
+ srng->u.dst_ring.last_hp = *srng->u.dst_ring.hp_addr;
+ ath11k_hif_write32(ab,
+ (unsigned long)srng->u.dst_ring.tp_addr -
+ (unsigned long)ab->mem,
+ srng->u.dst_ring.tp);
+ }
+ }
+
+ srng->timestamp = jiffies;
+}
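+
+/* Typical access pattern (a sketch of how callers are expected to use
+ * the begin/end pair; process() is a placeholder for real descriptor
+ * handling):
+ *
+ *	spin_lock_bh(&srng->lock);
+ *	ath11k_hal_srng_access_begin(ab, srng);
+ *	while ((desc = ath11k_hal_srng_dst_get_next_entry(ab, srng)))
+ *		process(desc);
+ *	ath11k_hal_srng_access_end(ab, srng);
+ *	spin_unlock_bh(&srng->lock);
+ */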
+
+void ath11k_hal_setup_link_idle_list(struct ath11k_base *ab,
+ struct hal_wbm_idle_scatter_list *sbuf,
+ u32 nsbufs, u32 tot_link_desc,
+ u32 end_offset)
+{
+ struct ath11k_buffer_addr *link_addr;
+ int i;
+ u32 reg_scatter_buf_sz = HAL_WBM_IDLE_SCATTER_BUF_SIZE / 64;
+
+ link_addr = (void *)sbuf[0].vaddr + HAL_WBM_IDLE_SCATTER_BUF_SIZE;
+
+ for (i = 1; i < nsbufs; i++) {
+ link_addr->info0 = sbuf[i].paddr & HAL_ADDR_LSB_REG_MASK;
+ link_addr->info1 = FIELD_PREP(
+ HAL_WBM_SCATTERED_DESC_MSB_BASE_ADDR_39_32,
+ (u64)sbuf[i].paddr >> HAL_ADDR_MSB_REG_SHIFT) |
+ FIELD_PREP(
+ HAL_WBM_SCATTERED_DESC_MSB_BASE_ADDR_MATCH_TAG,
+ BASE_ADDR_MATCH_TAG_VAL);
+
+ link_addr = (void *)sbuf[i].vaddr +
+ HAL_WBM_IDLE_SCATTER_BUF_SIZE;
+ }
+
+ ath11k_hif_write32(ab,
+ HAL_SEQ_WCSS_UMAC_WBM_REG + HAL_WBM_R0_IDLE_LIST_CONTROL_ADDR,
+ FIELD_PREP(HAL_WBM_SCATTER_BUFFER_SIZE, reg_scatter_buf_sz) |
+ FIELD_PREP(HAL_WBM_LINK_DESC_IDLE_LIST_MODE, 0x1));
+ ath11k_hif_write32(ab,
+ HAL_SEQ_WCSS_UMAC_WBM_REG + HAL_WBM_R0_IDLE_LIST_SIZE_ADDR,
+ FIELD_PREP(HAL_WBM_SCATTER_RING_SIZE_OF_IDLE_LINK_DESC_LIST,
+ reg_scatter_buf_sz * nsbufs));
+ ath11k_hif_write32(ab,
+ HAL_SEQ_WCSS_UMAC_WBM_REG +
+ HAL_WBM_SCATTERED_RING_BASE_LSB,
+ FIELD_PREP(BUFFER_ADDR_INFO0_ADDR,
+ sbuf[0].paddr & HAL_ADDR_LSB_REG_MASK));
+ ath11k_hif_write32(ab,
+ HAL_SEQ_WCSS_UMAC_WBM_REG +
+ HAL_WBM_SCATTERED_RING_BASE_MSB,
+ FIELD_PREP(
+ HAL_WBM_SCATTERED_DESC_MSB_BASE_ADDR_39_32,
+ (u64)sbuf[0].paddr >> HAL_ADDR_MSB_REG_SHIFT) |
+ FIELD_PREP(
+ HAL_WBM_SCATTERED_DESC_MSB_BASE_ADDR_MATCH_TAG,
+ BASE_ADDR_MATCH_TAG_VAL));
+
+ /* Setup head and tail pointers for the idle list */
+ ath11k_hif_write32(ab,
+ HAL_SEQ_WCSS_UMAC_WBM_REG +
+ HAL_WBM_SCATTERED_DESC_PTR_HEAD_INFO_IX0,
+ FIELD_PREP(BUFFER_ADDR_INFO0_ADDR,
+ sbuf[nsbufs - 1].paddr));
+ ath11k_hif_write32(ab,
+ HAL_SEQ_WCSS_UMAC_WBM_REG +
+ HAL_WBM_SCATTERED_DESC_PTR_HEAD_INFO_IX1,
+ FIELD_PREP(
+ HAL_WBM_SCATTERED_DESC_MSB_BASE_ADDR_39_32,
+ ((u64)sbuf[nsbufs - 1].paddr >>
+ HAL_ADDR_MSB_REG_SHIFT)) |
+ FIELD_PREP(HAL_WBM_SCATTERED_DESC_HEAD_P_OFFSET_IX1,
+ (end_offset >> 2)));
+ ath11k_hif_write32(ab,
+ HAL_SEQ_WCSS_UMAC_WBM_REG +
+ HAL_WBM_SCATTERED_DESC_PTR_HEAD_INFO_IX0,
+ FIELD_PREP(BUFFER_ADDR_INFO0_ADDR,
+ sbuf[0].paddr));
+
+ ath11k_hif_write32(ab,
+ HAL_SEQ_WCSS_UMAC_WBM_REG +
+ HAL_WBM_SCATTERED_DESC_PTR_TAIL_INFO_IX0,
+ FIELD_PREP(BUFFER_ADDR_INFO0_ADDR,
+ sbuf[0].paddr));
+ ath11k_hif_write32(ab,
+ HAL_SEQ_WCSS_UMAC_WBM_REG +
+ HAL_WBM_SCATTERED_DESC_PTR_TAIL_INFO_IX1,
+ FIELD_PREP(
+ HAL_WBM_SCATTERED_DESC_MSB_BASE_ADDR_39_32,
+ ((u64)sbuf[0].paddr >> HAL_ADDR_MSB_REG_SHIFT)) |
+ FIELD_PREP(HAL_WBM_SCATTERED_DESC_TAIL_P_OFFSET_IX1,
+ 0));
+ ath11k_hif_write32(ab,
+ HAL_SEQ_WCSS_UMAC_WBM_REG +
+ HAL_WBM_SCATTERED_DESC_PTR_HP_ADDR,
+ 2 * tot_link_desc);
+
+ /* Enable the SRNG */
+ ath11k_hif_write32(ab,
+ HAL_SEQ_WCSS_UMAC_WBM_REG +
+ HAL_WBM_IDLE_LINK_RING_MISC_ADDR(ab), 0x40);
+}
+
+int ath11k_hal_srng_setup(struct ath11k_base *ab, enum hal_ring_type type,
+ int ring_num, int mac_id,
+ struct hal_srng_params *params)
+{
+ struct ath11k_hal *hal = &ab->hal;
+ struct hal_srng_config *srng_config = &ab->hal.srng_config[type];
+ struct hal_srng *srng;
+ int ring_id;
+ u32 lmac_idx;
+ int i;
+ u32 reg_base;
+
+ ring_id = ath11k_hal_srng_get_ring_id(ab, type, ring_num, mac_id);
+ if (ring_id < 0)
+ return ring_id;
+
+ srng = &hal->srng_list[ring_id];
+
+ srng->ring_id = ring_id;
+ srng->ring_dir = srng_config->ring_dir;
+ srng->ring_base_paddr = params->ring_base_paddr;
+ srng->ring_base_vaddr = params->ring_base_vaddr;
+ srng->entry_size = srng_config->entry_size;
+ srng->num_entries = params->num_entries;
+ srng->ring_size = srng->entry_size * srng->num_entries;
+ srng->intr_batch_cntr_thres_entries =
+ params->intr_batch_cntr_thres_entries;
+ srng->intr_timer_thres_us = params->intr_timer_thres_us;
+ srng->flags = params->flags;
+ srng->msi_addr = params->msi_addr;
+ srng->msi_data = params->msi_data;
+ srng->initialized = 1;
+ spin_lock_init(&srng->lock);
+ lockdep_set_class(&srng->lock, hal->srng_key + ring_id);
+
+ for (i = 0; i < HAL_SRNG_NUM_REG_GRP; i++) {
+ srng->hwreg_base[i] = srng_config->reg_start[i] +
+ (ring_num * srng_config->reg_size[i]);
+ }
+
+ memset(srng->ring_base_vaddr, 0,
+ (srng->entry_size * srng->num_entries) << 2);
+
+ /* TODO: Add comments on these swap configurations */
+ if (IS_ENABLED(CONFIG_CPU_BIG_ENDIAN))
+ srng->flags |= HAL_SRNG_FLAGS_MSI_SWAP | HAL_SRNG_FLAGS_DATA_TLV_SWAP |
+ HAL_SRNG_FLAGS_RING_PTR_SWAP;
+
+ reg_base = srng->hwreg_base[HAL_SRNG_REG_GRP_R2];
+
+ if (srng->ring_dir == HAL_SRNG_DIR_SRC) {
+ srng->u.src_ring.hp = 0;
+ srng->u.src_ring.cached_tp = 0;
+ srng->u.src_ring.reap_hp = srng->ring_size - srng->entry_size;
+ srng->u.src_ring.tp_addr = (void *)(hal->rdp.vaddr + ring_id);
+ srng->u.src_ring.low_threshold = params->low_threshold *
+ srng->entry_size;
+ if (srng_config->lmac_ring) {
+ lmac_idx = ring_id - HAL_SRNG_RING_ID_LMAC1_ID_START;
+ srng->u.src_ring.hp_addr = (void *)(hal->wrp.vaddr +
+ lmac_idx);
+ srng->flags |= HAL_SRNG_FLAGS_LMAC_RING;
+ } else {
+ if (!ab->hw_params.supports_shadow_regs)
+ srng->u.src_ring.hp_addr =
+ (u32 *)((unsigned long)ab->mem + reg_base);
+ else
+ ath11k_dbg(ab, ATH11K_DBG_HAL,
+ "type %d ring_num %d reg_base 0x%x shadow 0x%lx\n",
+ type, ring_num,
+ reg_base,
+ (unsigned long)srng->u.src_ring.hp_addr -
+ (unsigned long)ab->mem);
+ }
+ } else {
+ /* During initialization the loop count in all the descriptors
+ * is set to zero; HW sets it to 1 on completing the descriptor
+ * update in the first loop and increments it by 1 on each
+ * subsequent loop (the loop count wraps around after reaching
+ * 0xffff). The 'loop_cnt' in the SW ring state is the expected
+ * loop count in descriptors updated by HW (to be processed
+ * by SW).
+ */
+ srng->u.dst_ring.loop_cnt = 1;
+ srng->u.dst_ring.tp = 0;
+ srng->u.dst_ring.cached_hp = 0;
+ srng->u.dst_ring.hp_addr = (void *)(hal->rdp.vaddr + ring_id);
+ if (srng_config->lmac_ring) {
+ /* For LMAC rings, tail pointer updates will be done
+ * through FW by writing to a shared memory location
+ */
+ lmac_idx = ring_id - HAL_SRNG_RING_ID_LMAC1_ID_START;
+ srng->u.dst_ring.tp_addr = (void *)(hal->wrp.vaddr +
+ lmac_idx);
+ srng->flags |= HAL_SRNG_FLAGS_LMAC_RING;
+ } else {
+ if (!ab->hw_params.supports_shadow_regs)
+ srng->u.dst_ring.tp_addr =
+ (u32 *)((unsigned long)ab->mem + reg_base +
+ (HAL_REO1_RING_TP(ab) - HAL_REO1_RING_HP(ab)));
+ else
+ ath11k_dbg(ab, ATH11K_DBG_HAL,
+ "type %d ring_num %d target_reg 0x%x shadow 0x%lx\n",
+ type, ring_num,
+ reg_base + (HAL_REO1_RING_TP(ab) -
+ HAL_REO1_RING_HP(ab)),
+ (unsigned long)srng->u.dst_ring.tp_addr -
+ (unsigned long)ab->mem);
+ }
+ }
+
+ if (srng_config->lmac_ring)
+ return ring_id;
+
+ ath11k_hal_srng_hw_init(ab, srng);
+
+ if (type == HAL_CE_DST) {
+ srng->u.dst_ring.max_buffer_length = params->max_buffer_len;
+ ath11k_hal_ce_dst_setup(ab, srng, ring_num);
+ }
+
+ return ring_id;
+}
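+
+/* Setup sketch (illustrative; ring_vaddr, ring_paddr and num_entries
+ * are placeholders - in the driver the DP layer fills the params from
+ * DMA-coherent ring memory before calling in here):
+ *
+ *	struct hal_srng_params params = { 0 };
+ *
+ *	params.ring_base_vaddr = ring_vaddr;
+ *	params.ring_base_paddr = ring_paddr;
+ *	params.num_entries = num_entries;
+ *	ring_id = ath11k_hal_srng_setup(ab, HAL_REO_DST, 0, 0, &params);
+ *	if (ring_id < 0)
+ *		return ring_id;
+ */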
+
+static void ath11k_hal_srng_update_hp_tp_addr(struct ath11k_base *ab,
+ int shadow_cfg_idx,
+ enum hal_ring_type ring_type,
+ int ring_num)
+{
+ struct hal_srng *srng;
+ struct ath11k_hal *hal = &ab->hal;
+ int ring_id;
+ struct hal_srng_config *srng_config = &hal->srng_config[ring_type];
+
+ ring_id = ath11k_hal_srng_get_ring_id(ab, ring_type, ring_num, 0);
+ if (ring_id < 0)
+ return;
+
+ srng = &hal->srng_list[ring_id];
+
+ if (srng_config->ring_dir == HAL_SRNG_DIR_DST)
+ srng->u.dst_ring.tp_addr = (u32 *)(HAL_SHADOW_REG(ab, shadow_cfg_idx) +
+ (unsigned long)ab->mem);
+ else
+ srng->u.src_ring.hp_addr = (u32 *)(HAL_SHADOW_REG(ab, shadow_cfg_idx) +
+ (unsigned long)ab->mem);
+}
+
+int ath11k_hal_srng_update_shadow_config(struct ath11k_base *ab,
+ enum hal_ring_type ring_type,
+ int ring_num)
+{
+ struct ath11k_hal *hal = &ab->hal;
+ struct hal_srng_config *srng_config = &hal->srng_config[ring_type];
+ int shadow_cfg_idx = hal->num_shadow_reg_configured;
+ u32 target_reg;
+
+ if (shadow_cfg_idx >= HAL_SHADOW_NUM_REGS)
+ return -EINVAL;
+
+ hal->num_shadow_reg_configured++;
+
+ target_reg = srng_config->reg_start[HAL_HP_OFFSET_IN_REG_START];
+ target_reg += srng_config->reg_size[HAL_HP_OFFSET_IN_REG_START] *
+ ring_num;
+
+ /* For destination ring, shadow the TP */
+ if (srng_config->ring_dir == HAL_SRNG_DIR_DST)
+ target_reg += HAL_OFFSET_FROM_HP_TO_TP;
+
+ hal->shadow_reg_addr[shadow_cfg_idx] = target_reg;
+
+ /* update hp/tp addr in the hal structure */
+ ath11k_hal_srng_update_hp_tp_addr(ab, shadow_cfg_idx, ring_type,
+ ring_num);
+
+ ath11k_dbg(ab, ATH11K_DBG_HAL,
+ "update shadow config target_reg 0x%x shadow reg 0x%x shadow_idx %d ring_type %d ring_num %d\n",
+ target_reg,
+ HAL_SHADOW_REG(ab, shadow_cfg_idx),
+ shadow_cfg_idx,
+ ring_type, ring_num);
+
+ return 0;
+}
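+
+/* Worked example (illustrative): with shadow_cfg_idx == 3 the ring's
+ * HP/TP register is accessed via HAL_SHADOW_REG(ab, 3), i.e. the shadow
+ * base address plus 4 * 3 bytes; the host writes the cheap shadow
+ * register and HW propagates the value to the real target_reg saved
+ * above.
+ */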
+
+void ath11k_hal_srng_shadow_config(struct ath11k_base *ab)
+{
+ struct ath11k_hal *hal = &ab->hal;
+ int ring_type, ring_num;
+
+ /* update all the non-CE srngs. */
+ for (ring_type = 0; ring_type < HAL_MAX_RING_TYPES; ring_type++) {
+ struct hal_srng_config *srng_config = &hal->srng_config[ring_type];
+
+ if (ring_type == HAL_CE_SRC ||
+ ring_type == HAL_CE_DST ||
+ ring_type == HAL_CE_DST_STATUS)
+ continue;
+
+ if (srng_config->lmac_ring)
+ continue;
+
+ for (ring_num = 0; ring_num < srng_config->max_rings; ring_num++)
+ ath11k_hal_srng_update_shadow_config(ab, ring_type, ring_num);
+ }
+}
+
+void ath11k_hal_srng_get_shadow_config(struct ath11k_base *ab,
+ u32 **cfg, u32 *len)
+{
+ struct ath11k_hal *hal = &ab->hal;
+
+ *len = hal->num_shadow_reg_configured;
+ *cfg = hal->shadow_reg_addr;
+}
+
+void ath11k_hal_srng_shadow_update_hp_tp(struct ath11k_base *ab,
+ struct hal_srng *srng)
+{
+ lockdep_assert_held(&srng->lock);
+
+ /* Check whether the ring is empty; update the shadow
+ * HP only when the ring isn't empty.
+ */
+ if (srng->ring_dir == HAL_SRNG_DIR_SRC &&
+ *srng->u.src_ring.tp_addr != srng->u.src_ring.hp)
+ ath11k_hal_srng_access_end(ab, srng);
+}
+
+static int ath11k_hal_srng_create_config(struct ath11k_base *ab)
+{
+ struct ath11k_hal *hal = &ab->hal;
+ struct hal_srng_config *s;
+
+ hal->srng_config = kmemdup(hw_srng_config_template,
+ sizeof(hw_srng_config_template),
+ GFP_KERNEL);
+ if (!hal->srng_config)
+ return -ENOMEM;
+
+ s = &hal->srng_config[HAL_REO_DST];
+ s->reg_start[0] = HAL_SEQ_WCSS_UMAC_REO_REG + HAL_REO1_RING_BASE_LSB(ab);
+ s->reg_start[1] = HAL_SEQ_WCSS_UMAC_REO_REG + HAL_REO1_RING_HP(ab);
+ s->reg_size[0] = HAL_REO2_RING_BASE_LSB(ab) - HAL_REO1_RING_BASE_LSB(ab);
+ s->reg_size[1] = HAL_REO2_RING_HP(ab) - HAL_REO1_RING_HP(ab);
+
+ s = &hal->srng_config[HAL_REO_EXCEPTION];
+ s->reg_start[0] = HAL_SEQ_WCSS_UMAC_REO_REG + HAL_REO_TCL_RING_BASE_LSB(ab);
+ s->reg_start[1] = HAL_SEQ_WCSS_UMAC_REO_REG + HAL_REO_TCL_RING_HP(ab);
+
+ s = &hal->srng_config[HAL_REO_REINJECT];
+ s->reg_start[0] = HAL_SEQ_WCSS_UMAC_REO_REG + HAL_SW2REO_RING_BASE_LSB(ab);
+ s->reg_start[1] = HAL_SEQ_WCSS_UMAC_REO_REG + HAL_SW2REO_RING_HP(ab);
+
+ s = &hal->srng_config[HAL_REO_CMD];
+ s->reg_start[0] = HAL_SEQ_WCSS_UMAC_REO_REG + HAL_REO_CMD_RING_BASE_LSB(ab);
+ s->reg_start[1] = HAL_SEQ_WCSS_UMAC_REO_REG + HAL_REO_CMD_HP(ab);
+
+ s = &hal->srng_config[HAL_REO_STATUS];
+ s->reg_start[0] = HAL_SEQ_WCSS_UMAC_REO_REG + HAL_REO_STATUS_RING_BASE_LSB(ab);
+ s->reg_start[1] = HAL_SEQ_WCSS_UMAC_REO_REG + HAL_REO_STATUS_HP(ab);
+
+ s = &hal->srng_config[HAL_TCL_DATA];
+ s->reg_start[0] = HAL_SEQ_WCSS_UMAC_TCL_REG + HAL_TCL1_RING_BASE_LSB(ab);
+ s->reg_start[1] = HAL_SEQ_WCSS_UMAC_TCL_REG + HAL_TCL1_RING_HP;
+ s->reg_size[0] = HAL_TCL2_RING_BASE_LSB(ab) - HAL_TCL1_RING_BASE_LSB(ab);
+ s->reg_size[1] = HAL_TCL2_RING_HP - HAL_TCL1_RING_HP;
+
+ s = &hal->srng_config[HAL_TCL_CMD];
+ s->reg_start[0] = HAL_SEQ_WCSS_UMAC_TCL_REG + HAL_TCL_RING_BASE_LSB(ab);
+ s->reg_start[1] = HAL_SEQ_WCSS_UMAC_TCL_REG + HAL_TCL_RING_HP;
+
+ s = &hal->srng_config[HAL_TCL_STATUS];
+ s->reg_start[0] = HAL_SEQ_WCSS_UMAC_TCL_REG + HAL_TCL_STATUS_RING_BASE_LSB(ab);
+ s->reg_start[1] = HAL_SEQ_WCSS_UMAC_TCL_REG + HAL_TCL_STATUS_RING_HP;
+
+ s = &hal->srng_config[HAL_CE_SRC];
+ s->reg_start[0] = HAL_SEQ_WCSS_UMAC_CE0_SRC_REG(ab) + HAL_CE_DST_RING_BASE_LSB +
+ ATH11K_CE_OFFSET(ab);
+ s->reg_start[1] = HAL_SEQ_WCSS_UMAC_CE0_SRC_REG(ab) + HAL_CE_DST_RING_HP +
+ ATH11K_CE_OFFSET(ab);
+ s->reg_size[0] = HAL_SEQ_WCSS_UMAC_CE1_SRC_REG(ab) -
+ HAL_SEQ_WCSS_UMAC_CE0_SRC_REG(ab);
+ s->reg_size[1] = HAL_SEQ_WCSS_UMAC_CE1_SRC_REG(ab) -
+ HAL_SEQ_WCSS_UMAC_CE0_SRC_REG(ab);
+
+ s = &hal->srng_config[HAL_CE_DST];
+ s->reg_start[0] = HAL_SEQ_WCSS_UMAC_CE0_DST_REG(ab) + HAL_CE_DST_RING_BASE_LSB +
+ ATH11K_CE_OFFSET(ab);
+ s->reg_start[1] = HAL_SEQ_WCSS_UMAC_CE0_DST_REG(ab) + HAL_CE_DST_RING_HP +
+ ATH11K_CE_OFFSET(ab);
+ s->reg_size[0] = HAL_SEQ_WCSS_UMAC_CE1_DST_REG(ab) -
+ HAL_SEQ_WCSS_UMAC_CE0_DST_REG(ab);
+ s->reg_size[1] = HAL_SEQ_WCSS_UMAC_CE1_DST_REG(ab) -
+ HAL_SEQ_WCSS_UMAC_CE0_DST_REG(ab);
+
+ s = &hal->srng_config[HAL_CE_DST_STATUS];
+ s->reg_start[0] = HAL_SEQ_WCSS_UMAC_CE0_DST_REG(ab) +
+ HAL_CE_DST_STATUS_RING_BASE_LSB + ATH11K_CE_OFFSET(ab);
+ s->reg_start[1] = HAL_SEQ_WCSS_UMAC_CE0_DST_REG(ab) + HAL_CE_DST_STATUS_RING_HP +
+ ATH11K_CE_OFFSET(ab);
+ s->reg_size[0] = HAL_SEQ_WCSS_UMAC_CE1_DST_REG(ab) -
+ HAL_SEQ_WCSS_UMAC_CE0_DST_REG(ab);
+ s->reg_size[1] = HAL_SEQ_WCSS_UMAC_CE1_DST_REG(ab) -
+ HAL_SEQ_WCSS_UMAC_CE0_DST_REG(ab);
+
+ s = &hal->srng_config[HAL_WBM_IDLE_LINK];
+ s->reg_start[0] = HAL_SEQ_WCSS_UMAC_WBM_REG + HAL_WBM_IDLE_LINK_RING_BASE_LSB(ab);
+ s->reg_start[1] = HAL_SEQ_WCSS_UMAC_WBM_REG + HAL_WBM_IDLE_LINK_RING_HP;
+
+ s = &hal->srng_config[HAL_SW2WBM_RELEASE];
+ s->reg_start[0] = HAL_SEQ_WCSS_UMAC_WBM_REG + HAL_WBM_RELEASE_RING_BASE_LSB(ab);
+ s->reg_start[1] = HAL_SEQ_WCSS_UMAC_WBM_REG + HAL_WBM_RELEASE_RING_HP;
+
+ s = &hal->srng_config[HAL_WBM2SW_RELEASE];
+ s->reg_start[0] = HAL_SEQ_WCSS_UMAC_WBM_REG + HAL_WBM0_RELEASE_RING_BASE_LSB(ab);
+ s->reg_start[1] = HAL_SEQ_WCSS_UMAC_WBM_REG + HAL_WBM0_RELEASE_RING_HP;
+ s->reg_size[0] = HAL_WBM1_RELEASE_RING_BASE_LSB(ab) -
+ HAL_WBM0_RELEASE_RING_BASE_LSB(ab);
+ s->reg_size[1] = HAL_WBM1_RELEASE_RING_HP - HAL_WBM0_RELEASE_RING_HP;
+
+ return 0;
+}
+
+static void ath11k_hal_register_srng_key(struct ath11k_base *ab)
+{
+ struct ath11k_hal *hal = &ab->hal;
+ u32 ring_id;
+
+ for (ring_id = 0; ring_id < HAL_SRNG_RING_ID_MAX; ring_id++)
+ lockdep_register_key(hal->srng_key + ring_id);
+}
+
+static void ath11k_hal_unregister_srng_key(struct ath11k_base *ab)
+{
+ struct ath11k_hal *hal = &ab->hal;
+ u32 ring_id;
+
+ for (ring_id = 0; ring_id < HAL_SRNG_RING_ID_MAX; ring_id++)
+ lockdep_unregister_key(hal->srng_key + ring_id);
+}
+
+int ath11k_hal_srng_init(struct ath11k_base *ab)
+{
+ struct ath11k_hal *hal = &ab->hal;
+ int ret;
+
+ memset(hal, 0, sizeof(*hal));
+
+ ret = ath11k_hal_srng_create_config(ab);
+ if (ret)
+ goto err_hal;
+
+ ret = ath11k_hal_alloc_cont_rdp(ab);
+ if (ret)
+ goto err_hal;
+
+ ret = ath11k_hal_alloc_cont_wrp(ab);
+ if (ret)
+ goto err_free_cont_rdp;
+
+ ath11k_hal_register_srng_key(ab);
+
+ return 0;
+
+err_free_cont_rdp:
+ ath11k_hal_free_cont_rdp(ab);
+
+err_hal:
+ return ret;
+}
+EXPORT_SYMBOL(ath11k_hal_srng_init);
+
+void ath11k_hal_srng_deinit(struct ath11k_base *ab)
+{
+ struct ath11k_hal *hal = &ab->hal;
+
+ ath11k_hal_unregister_srng_key(ab);
+ ath11k_hal_free_cont_rdp(ab);
+ ath11k_hal_free_cont_wrp(ab);
+ kfree(hal->srng_config);
+}
+EXPORT_SYMBOL(ath11k_hal_srng_deinit);
+
+void ath11k_hal_dump_srng_stats(struct ath11k_base *ab)
+{
+ struct hal_srng *srng;
+ struct ath11k_ext_irq_grp *irq_grp;
+ struct ath11k_ce_pipe *ce_pipe;
+ int i;
+
+ ath11k_err(ab, "Last interrupt received for each CE:\n");
+ for (i = 0; i < ab->hw_params.ce_count; i++) {
+ ce_pipe = &ab->ce.ce_pipe[i];
+
+ if (ath11k_ce_get_attr_flags(ab, i) & CE_ATTR_DIS_INTR)
+ continue;
+
+ ath11k_err(ab, "CE_id %d pipe_num %d %ums before\n",
+ i, ce_pipe->pipe_num,
+ jiffies_to_msecs(jiffies - ce_pipe->timestamp));
+ }
+
+ ath11k_err(ab, "\nLast interrupt received for each group:\n");
+ for (i = 0; i < ATH11K_EXT_IRQ_GRP_NUM_MAX; i++) {
+ irq_grp = &ab->ext_irq_grp[i];
+ ath11k_err(ab, "group_id %d %ums before\n",
+ irq_grp->grp_id,
+ jiffies_to_msecs(jiffies - irq_grp->timestamp));
+ }
+
+ for (i = 0; i < HAL_SRNG_RING_ID_MAX; i++) {
+ srng = &ab->hal.srng_list[i];
+
+ if (!srng->initialized)
+ continue;
+
+ if (srng->ring_dir == HAL_SRNG_DIR_SRC)
+ ath11k_err(ab,
+ "src srng id %u hp %u, reap_hp %u, cur tp %u, cached tp %u last tp %u napi processed before %ums\n",
+ srng->ring_id, srng->u.src_ring.hp,
+ srng->u.src_ring.reap_hp,
+ *srng->u.src_ring.tp_addr, srng->u.src_ring.cached_tp,
+ srng->u.src_ring.last_tp,
+ jiffies_to_msecs(jiffies - srng->timestamp));
+ else if (srng->ring_dir == HAL_SRNG_DIR_DST)
+ ath11k_err(ab,
+ "dst srng id %u tp %u, cur hp %u, cached hp %u last hp %u napi processed before %ums\n",
+ srng->ring_id, srng->u.dst_ring.tp,
+ *srng->u.dst_ring.hp_addr,
+ srng->u.dst_ring.cached_hp,
+ srng->u.dst_ring.last_hp,
+ jiffies_to_msecs(jiffies - srng->timestamp));
+ }
+}
diff --git a/drivers/net/wireless/ath/ath11k/hal.h b/drivers/net/wireless/ath/ath11k/hal.h
new file mode 100644
index 0000000000..1942d41d6d
--- /dev/null
+++ b/drivers/net/wireless/ath/ath11k/hal.h
@@ -0,0 +1,974 @@
+/* SPDX-License-Identifier: BSD-3-Clause-Clear */
+/*
+ * Copyright (c) 2018-2019 The Linux Foundation. All rights reserved.
+ * Copyright (c) 2022, Qualcomm Innovation Center, Inc. All rights reserved.
+ */
+
+#ifndef ATH11K_HAL_H
+#define ATH11K_HAL_H
+
+#include "hal_desc.h"
+#include "rx_desc.h"
+
+struct ath11k_base;
+
+#define HAL_LINK_DESC_SIZE (32 << 2)
+#define HAL_LINK_DESC_ALIGN 128
+#define HAL_NUM_MPDUS_PER_LINK_DESC 6
+#define HAL_NUM_TX_MSDUS_PER_LINK_DESC 7
+#define HAL_NUM_RX_MSDUS_PER_LINK_DESC 6
+#define HAL_NUM_MPDU_LINKS_PER_QUEUE_DESC 12
+#define HAL_MAX_AVAIL_BLK_RES 3
+
+#define HAL_RING_BASE_ALIGN 8
+
+#define HAL_WBM_IDLE_SCATTER_BUF_SIZE_MAX 32704
+/* TODO: Check with hw team on the supported scatter buf size */
+#define HAL_WBM_IDLE_SCATTER_NEXT_PTR_SIZE 8
+#define HAL_WBM_IDLE_SCATTER_BUF_SIZE (HAL_WBM_IDLE_SCATTER_BUF_SIZE_MAX - \
+ HAL_WBM_IDLE_SCATTER_NEXT_PTR_SIZE)
+
+#define HAL_DSCP_TID_MAP_TBL_NUM_ENTRIES_MAX 48
+#define HAL_DSCP_TID_TBL_SIZE 24
+
+/* calculate the address of shadow register x relative to BAR0 */
+#define HAL_SHADOW_BASE_ADDR(ab) ab->hw_params.regs->hal_shadow_base_addr
+#define HAL_SHADOW_NUM_REGS 36
+#define HAL_HP_OFFSET_IN_REG_START 1
+#define HAL_OFFSET_FROM_HP_TO_TP 4
+
+#define HAL_SHADOW_REG(ab, x) (HAL_SHADOW_BASE_ADDR(ab) + (4 * (x)))
+
+/* WCSS Relative address */
+#define HAL_SEQ_WCSS_UMAC_OFFSET 0x00a00000
+#define HAL_SEQ_WCSS_UMAC_REO_REG 0x00a38000
+#define HAL_SEQ_WCSS_UMAC_TCL_REG 0x00a44000
+#define HAL_SEQ_WCSS_UMAC_CE0_SRC_REG(x) \
+ (ab->hw_params.regs->hal_seq_wcss_umac_ce0_src_reg)
+#define HAL_SEQ_WCSS_UMAC_CE0_DST_REG(x) \
+ (ab->hw_params.regs->hal_seq_wcss_umac_ce0_dst_reg)
+#define HAL_SEQ_WCSS_UMAC_CE1_SRC_REG(x) \
+ (ab->hw_params.regs->hal_seq_wcss_umac_ce1_src_reg)
+#define HAL_SEQ_WCSS_UMAC_CE1_DST_REG(x) \
+ (ab->hw_params.regs->hal_seq_wcss_umac_ce1_dst_reg)
+#define HAL_SEQ_WCSS_UMAC_WBM_REG 0x00a34000
+
+#define HAL_CE_WFSS_CE_REG_BASE 0x01b80000
+#define HAL_WLAON_REG_BASE 0x01f80000
+
+/* SW2TCL(x) R0 ring configuration address */
+#define HAL_TCL1_RING_CMN_CTRL_REG 0x00000014
+#define HAL_TCL1_RING_DSCP_TID_MAP 0x0000002c
+#define HAL_TCL1_RING_BASE_LSB(ab) ab->hw_params.regs->hal_tcl1_ring_base_lsb
+#define HAL_TCL1_RING_BASE_MSB(ab) ab->hw_params.regs->hal_tcl1_ring_base_msb
+#define HAL_TCL1_RING_ID(ab) ab->hw_params.regs->hal_tcl1_ring_id
+#define HAL_TCL1_RING_MISC(ab) ab->hw_params.regs->hal_tcl1_ring_misc
+#define HAL_TCL1_RING_TP_ADDR_LSB(ab) \
+ ab->hw_params.regs->hal_tcl1_ring_tp_addr_lsb
+#define HAL_TCL1_RING_TP_ADDR_MSB(ab) \
+ ab->hw_params.regs->hal_tcl1_ring_tp_addr_msb
+#define HAL_TCL1_RING_CONSUMER_INT_SETUP_IX0(ab) \
+ ab->hw_params.regs->hal_tcl1_ring_consumer_int_setup_ix0
+#define HAL_TCL1_RING_CONSUMER_INT_SETUP_IX1(ab) \
+ ab->hw_params.regs->hal_tcl1_ring_consumer_int_setup_ix1
+#define HAL_TCL1_RING_MSI1_BASE_LSB(ab) \
+ ab->hw_params.regs->hal_tcl1_ring_msi1_base_lsb
+#define HAL_TCL1_RING_MSI1_BASE_MSB(ab) \
+ ab->hw_params.regs->hal_tcl1_ring_msi1_base_msb
+#define HAL_TCL1_RING_MSI1_DATA(ab) \
+ ab->hw_params.regs->hal_tcl1_ring_msi1_data
+#define HAL_TCL2_RING_BASE_LSB(ab) ab->hw_params.regs->hal_tcl2_ring_base_lsb
+#define HAL_TCL_RING_BASE_LSB(ab) ab->hw_params.regs->hal_tcl_ring_base_lsb
+
+#define HAL_TCL1_RING_MSI1_BASE_LSB_OFFSET(ab) \
+ (HAL_TCL1_RING_MSI1_BASE_LSB(ab) - HAL_TCL1_RING_BASE_LSB(ab))
+#define HAL_TCL1_RING_MSI1_BASE_MSB_OFFSET(ab) \
+ (HAL_TCL1_RING_MSI1_BASE_MSB(ab) - HAL_TCL1_RING_BASE_LSB(ab))
+#define HAL_TCL1_RING_MSI1_DATA_OFFSET(ab) \
+ (HAL_TCL1_RING_MSI1_DATA(ab) - HAL_TCL1_RING_BASE_LSB(ab))
+#define HAL_TCL1_RING_BASE_MSB_OFFSET(ab) \
+ (HAL_TCL1_RING_BASE_MSB(ab) - HAL_TCL1_RING_BASE_LSB(ab))
+#define HAL_TCL1_RING_ID_OFFSET(ab) \
+ (HAL_TCL1_RING_ID(ab) - HAL_TCL1_RING_BASE_LSB(ab))
+#define HAL_TCL1_RING_CONSR_INT_SETUP_IX0_OFFSET(ab) \
+ (HAL_TCL1_RING_CONSUMER_INT_SETUP_IX0(ab) - HAL_TCL1_RING_BASE_LSB(ab))
+#define HAL_TCL1_RING_CONSR_INT_SETUP_IX1_OFFSET(ab) \
+ (HAL_TCL1_RING_CONSUMER_INT_SETUP_IX1(ab) - HAL_TCL1_RING_BASE_LSB(ab))
+#define HAL_TCL1_RING_TP_ADDR_LSB_OFFSET(ab) \
+ (HAL_TCL1_RING_TP_ADDR_LSB(ab) - HAL_TCL1_RING_BASE_LSB(ab))
+#define HAL_TCL1_RING_TP_ADDR_MSB_OFFSET(ab) \
+ (HAL_TCL1_RING_TP_ADDR_MSB(ab) - HAL_TCL1_RING_BASE_LSB(ab))
+#define HAL_TCL1_RING_MISC_OFFSET(ab) \
+ (HAL_TCL1_RING_MISC(ab) - HAL_TCL1_RING_BASE_LSB(ab))
+
+/* SW2TCL(x) R2 ring pointers (head/tail) address */
+#define HAL_TCL1_RING_HP 0x00002000
+#define HAL_TCL1_RING_TP 0x00002004
+#define HAL_TCL2_RING_HP 0x00002008
+#define HAL_TCL_RING_HP 0x00002018
+
+#define HAL_TCL1_RING_TP_OFFSET \
+ (HAL_TCL1_RING_TP - HAL_TCL1_RING_HP)
+
+/* TCL STATUS ring address */
+#define HAL_TCL_STATUS_RING_BASE_LSB(ab) \
+ ab->hw_params.regs->hal_tcl_status_ring_base_lsb
+#define HAL_TCL_STATUS_RING_HP 0x00002030
+
+/* REO2SW(x) R0 ring configuration address */
+#define HAL_REO1_GEN_ENABLE 0x00000000
+#define HAL_REO1_DEST_RING_CTRL_IX_0 0x00000004
+#define HAL_REO1_DEST_RING_CTRL_IX_1 0x00000008
+#define HAL_REO1_DEST_RING_CTRL_IX_2 0x0000000c
+#define HAL_REO1_DEST_RING_CTRL_IX_3 0x00000010
+#define HAL_REO1_MISC_CTL(ab) ab->hw_params.regs->hal_reo1_misc_ctl
+#define HAL_REO1_RING_BASE_LSB(ab) ab->hw_params.regs->hal_reo1_ring_base_lsb
+#define HAL_REO1_RING_BASE_MSB(ab) ab->hw_params.regs->hal_reo1_ring_base_msb
+#define HAL_REO1_RING_ID(ab) ab->hw_params.regs->hal_reo1_ring_id
+#define HAL_REO1_RING_MISC(ab) ab->hw_params.regs->hal_reo1_ring_misc
+#define HAL_REO1_RING_HP_ADDR_LSB(ab) \
+ ab->hw_params.regs->hal_reo1_ring_hp_addr_lsb
+#define HAL_REO1_RING_HP_ADDR_MSB(ab) \
+ ab->hw_params.regs->hal_reo1_ring_hp_addr_msb
+#define HAL_REO1_RING_PRODUCER_INT_SETUP(ab) \
+ ab->hw_params.regs->hal_reo1_ring_producer_int_setup
+#define HAL_REO1_RING_MSI1_BASE_LSB(ab) \
+ ab->hw_params.regs->hal_reo1_ring_msi1_base_lsb
+#define HAL_REO1_RING_MSI1_BASE_MSB(ab) \
+ ab->hw_params.regs->hal_reo1_ring_msi1_base_msb
+#define HAL_REO1_RING_MSI1_DATA(ab) \
+ ab->hw_params.regs->hal_reo1_ring_msi1_data
+#define HAL_REO2_RING_BASE_LSB(ab) ab->hw_params.regs->hal_reo2_ring_base_lsb
+#define HAL_REO1_AGING_THRESH_IX_0(ab) \
+ ab->hw_params.regs->hal_reo1_aging_thresh_ix_0
+#define HAL_REO1_AGING_THRESH_IX_1(ab) \
+ ab->hw_params.regs->hal_reo1_aging_thresh_ix_1
+#define HAL_REO1_AGING_THRESH_IX_2(ab) \
+ ab->hw_params.regs->hal_reo1_aging_thresh_ix_2
+#define HAL_REO1_AGING_THRESH_IX_3(ab) \
+ ab->hw_params.regs->hal_reo1_aging_thresh_ix_3
+
+#define HAL_REO1_RING_MSI1_BASE_LSB_OFFSET(ab) \
+ (HAL_REO1_RING_MSI1_BASE_LSB(ab) - HAL_REO1_RING_BASE_LSB(ab))
+#define HAL_REO1_RING_MSI1_BASE_MSB_OFFSET(ab) \
+ (HAL_REO1_RING_MSI1_BASE_MSB(ab) - HAL_REO1_RING_BASE_LSB(ab))
+#define HAL_REO1_RING_MSI1_DATA_OFFSET(ab) \
+ (HAL_REO1_RING_MSI1_DATA(ab) - HAL_REO1_RING_BASE_LSB(ab))
+#define HAL_REO1_RING_BASE_MSB_OFFSET(ab) \
+ (HAL_REO1_RING_BASE_MSB(ab) - HAL_REO1_RING_BASE_LSB(ab))
+#define HAL_REO1_RING_ID_OFFSET(ab) (HAL_REO1_RING_ID(ab) - HAL_REO1_RING_BASE_LSB(ab))
+#define HAL_REO1_RING_PRODUCER_INT_SETUP_OFFSET(ab) \
+ (HAL_REO1_RING_PRODUCER_INT_SETUP(ab) - HAL_REO1_RING_BASE_LSB(ab))
+#define HAL_REO1_RING_HP_ADDR_LSB_OFFSET(ab) \
+ (HAL_REO1_RING_HP_ADDR_LSB(ab) - HAL_REO1_RING_BASE_LSB(ab))
+#define HAL_REO1_RING_HP_ADDR_MSB_OFFSET(ab) \
+ (HAL_REO1_RING_HP_ADDR_MSB(ab) - HAL_REO1_RING_BASE_LSB(ab))
+#define HAL_REO1_RING_MISC_OFFSET(ab) \
+ (HAL_REO1_RING_MISC(ab) - HAL_REO1_RING_BASE_LSB(ab))
+
+/* REO2SW(x) R2 ring pointers (head/tail) address */
+#define HAL_REO1_RING_HP(ab) ab->hw_params.regs->hal_reo1_ring_hp
+#define HAL_REO1_RING_TP(ab) ab->hw_params.regs->hal_reo1_ring_tp
+#define HAL_REO2_RING_HP(ab) ab->hw_params.regs->hal_reo2_ring_hp
+
+#define HAL_REO1_RING_TP_OFFSET(ab) (HAL_REO1_RING_TP(ab) - HAL_REO1_RING_HP(ab))
+
+/* REO2TCL R0 ring configuration address */
+#define HAL_REO_TCL_RING_BASE_LSB(ab) \
+ ab->hw_params.regs->hal_reo_tcl_ring_base_lsb
+
+/* REO2TCL R2 ring pointer (head/tail) address */
+#define HAL_REO_TCL_RING_HP(ab) ab->hw_params.regs->hal_reo_tcl_ring_hp
+
+/* REO CMD R0 address */
+#define HAL_REO_CMD_RING_BASE_LSB(ab) \
+ ab->hw_params.regs->hal_reo_cmd_ring_base_lsb
+
+/* REO CMD R2 address */
+#define HAL_REO_CMD_HP(ab) ab->hw_params.regs->hal_reo_cmd_ring_hp
+
+/* SW2REO R0 address */
+#define HAL_SW2REO_RING_BASE_LSB(ab) \
+ ab->hw_params.regs->hal_sw2reo_ring_base_lsb
+
+/* SW2REO R2 address */
+#define HAL_SW2REO_RING_HP(ab) ab->hw_params.regs->hal_sw2reo_ring_hp
+
+/* CE ring R0 address */
+#define HAL_CE_DST_RING_BASE_LSB 0x00000000
+#define HAL_CE_DST_STATUS_RING_BASE_LSB 0x00000058
+#define HAL_CE_DST_RING_CTRL 0x000000b0
+
+/* CE ring R2 address */
+#define HAL_CE_DST_RING_HP 0x00000400
+#define HAL_CE_DST_STATUS_RING_HP 0x00000408
+
+/* REO status address */
+#define HAL_REO_STATUS_RING_BASE_LSB(ab) \
+ ab->hw_params.regs->hal_reo_status_ring_base_lsb
+#define HAL_REO_STATUS_HP(ab) ab->hw_params.regs->hal_reo_status_hp
+
+/* WBM Idle R0 address */
+#define HAL_WBM_IDLE_LINK_RING_BASE_LSB(ab) \
+ (ab->hw_params.regs->hal_wbm_idle_link_ring_base_lsb)
+#define HAL_WBM_IDLE_LINK_RING_MISC_ADDR(ab) \
+ (ab->hw_params.regs->hal_wbm_idle_link_ring_misc)
+#define HAL_WBM_R0_IDLE_LIST_CONTROL_ADDR 0x00000048
+#define HAL_WBM_R0_IDLE_LIST_SIZE_ADDR 0x0000004c
+#define HAL_WBM_SCATTERED_RING_BASE_LSB 0x00000058
+#define HAL_WBM_SCATTERED_RING_BASE_MSB 0x0000005c
+#define HAL_WBM_SCATTERED_DESC_PTR_HEAD_INFO_IX0 0x00000068
+#define HAL_WBM_SCATTERED_DESC_PTR_HEAD_INFO_IX1 0x0000006c
+#define HAL_WBM_SCATTERED_DESC_PTR_TAIL_INFO_IX0 0x00000078
+#define HAL_WBM_SCATTERED_DESC_PTR_TAIL_INFO_IX1 0x0000007c
+#define HAL_WBM_SCATTERED_DESC_PTR_HP_ADDR 0x00000084
+
+/* WBM Idle R2 address */
+#define HAL_WBM_IDLE_LINK_RING_HP 0x000030b0
+
+/* SW2WBM R0 release address */
+#define HAL_WBM_RELEASE_RING_BASE_LSB(ab) \
+ (ab->hw_params.regs->hal_wbm_release_ring_base_lsb)
+
+/* SW2WBM R2 release address */
+#define HAL_WBM_RELEASE_RING_HP 0x00003018
+
+/* WBM2SW R0 release address */
+#define HAL_WBM0_RELEASE_RING_BASE_LSB(ab) \
+ (ab->hw_params.regs->hal_wbm0_release_ring_base_lsb)
+#define HAL_WBM1_RELEASE_RING_BASE_LSB(ab) \
+ (ab->hw_params.regs->hal_wbm1_release_ring_base_lsb)
+
+/* WBM2SW R2 release address */
+#define HAL_WBM0_RELEASE_RING_HP 0x000030c0
+#define HAL_WBM1_RELEASE_RING_HP 0x000030c8
+
+/* TCL ring field mask and offset */
+#define HAL_TCL1_RING_BASE_MSB_RING_SIZE GENMASK(27, 8)
+#define HAL_TCL1_RING_BASE_MSB_RING_BASE_ADDR_MSB GENMASK(7, 0)
+#define HAL_TCL1_RING_ID_ENTRY_SIZE GENMASK(7, 0)
+#define HAL_TCL1_RING_MISC_MSI_LOOPCNT_DISABLE BIT(1)
+#define HAL_TCL1_RING_MISC_MSI_SWAP BIT(3)
+#define HAL_TCL1_RING_MISC_HOST_FW_SWAP BIT(4)
+#define HAL_TCL1_RING_MISC_DATA_TLV_SWAP BIT(5)
+#define HAL_TCL1_RING_MISC_SRNG_ENABLE BIT(6)
+#define HAL_TCL1_RING_CONSR_INT_SETUP_IX0_INTR_TMR_THOLD GENMASK(31, 16)
+#define HAL_TCL1_RING_CONSR_INT_SETUP_IX0_BATCH_COUNTER_THOLD GENMASK(14, 0)
+#define HAL_TCL1_RING_CONSR_INT_SETUP_IX1_LOW_THOLD GENMASK(15, 0)
+#define HAL_TCL1_RING_MSI1_BASE_MSB_MSI1_ENABLE BIT(8)
+#define HAL_TCL1_RING_MSI1_BASE_MSB_ADDR GENMASK(7, 0)
+#define HAL_TCL1_RING_CMN_CTRL_DSCP_TID_MAP_PROG_EN BIT(17)
+#define HAL_TCL1_RING_FIELD_DSCP_TID_MAP GENMASK(31, 0)
+#define HAL_TCL1_RING_FIELD_DSCP_TID_MAP0 GENMASK(2, 0)
+#define HAL_TCL1_RING_FIELD_DSCP_TID_MAP1 GENMASK(5, 3)
+#define HAL_TCL1_RING_FIELD_DSCP_TID_MAP2 GENMASK(8, 6)
+#define HAL_TCL1_RING_FIELD_DSCP_TID_MAP3 GENMASK(11, 9)
+#define HAL_TCL1_RING_FIELD_DSCP_TID_MAP4 GENMASK(14, 12)
+#define HAL_TCL1_RING_FIELD_DSCP_TID_MAP5 GENMASK(17, 15)
+#define HAL_TCL1_RING_FIELD_DSCP_TID_MAP6 GENMASK(20, 18)
+#define HAL_TCL1_RING_FIELD_DSCP_TID_MAP7 GENMASK(23, 21)
+
+/* REO ring field mask and offset */
+#define HAL_REO1_RING_BASE_MSB_RING_SIZE GENMASK(27, 8)
+#define HAL_REO1_RING_BASE_MSB_RING_BASE_ADDR_MSB GENMASK(7, 0)
+#define HAL_REO1_RING_ID_RING_ID GENMASK(15, 8)
+#define HAL_REO1_RING_ID_ENTRY_SIZE GENMASK(7, 0)
+#define HAL_REO1_RING_MISC_MSI_SWAP BIT(3)
+#define HAL_REO1_RING_MISC_HOST_FW_SWAP BIT(4)
+#define HAL_REO1_RING_MISC_DATA_TLV_SWAP BIT(5)
+#define HAL_REO1_RING_MISC_SRNG_ENABLE BIT(6)
+#define HAL_REO1_RING_PRDR_INT_SETUP_INTR_TMR_THOLD GENMASK(31, 16)
+#define HAL_REO1_RING_PRDR_INT_SETUP_BATCH_COUNTER_THOLD GENMASK(14, 0)
+#define HAL_REO1_RING_MSI1_BASE_MSB_MSI1_ENABLE BIT(8)
+#define HAL_REO1_RING_MSI1_BASE_MSB_ADDR GENMASK(7, 0)
+#define HAL_REO1_GEN_ENABLE_FRAG_DST_RING GENMASK(25, 23)
+#define HAL_REO1_GEN_ENABLE_AGING_LIST_ENABLE BIT(2)
+#define HAL_REO1_GEN_ENABLE_AGING_FLUSH_ENABLE BIT(3)
+#define HAL_REO1_MISC_CTL_FRAGMENT_DST_RING GENMASK(20, 17)
+
+/* CE ring bit field mask and shift */
+#define HAL_CE_DST_R0_DEST_CTRL_MAX_LEN GENMASK(15, 0)
+
+#define HAL_ADDR_LSB_REG_MASK 0xffffffff
+
+#define HAL_ADDR_MSB_REG_SHIFT 32
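+
+/* Illustrative sketch, not part of the driver: the mask and shift above
+ * are conventionally used to split a DMA address into the value written
+ * to an address LSB register and the remaining upper bits for the MSB
+ * register. The helper name is hypothetical.
+ */
+static inline void ath11k_example_split_dma_addr(dma_addr_t paddr,
+						 u32 *lsb, u32 *msb)
+{
+ *lsb = paddr & HAL_ADDR_LSB_REG_MASK;
+ /* upper_32_bits() applies the HAL_ADDR_MSB_REG_SHIFT safely even
+  * when dma_addr_t is only 32 bits wide
+  */
+ *msb = upper_32_bits(paddr);
+}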
+
+/* WBM ring bit field mask and shift */
+#define HAL_WBM_LINK_DESC_IDLE_LIST_MODE BIT(1)
+#define HAL_WBM_SCATTER_BUFFER_SIZE GENMASK(10, 2)
+#define HAL_WBM_SCATTER_RING_SIZE_OF_IDLE_LINK_DESC_LIST GENMASK(31, 16)
+#define HAL_WBM_SCATTERED_DESC_MSB_BASE_ADDR_39_32 GENMASK(7, 0)
+#define HAL_WBM_SCATTERED_DESC_MSB_BASE_ADDR_MATCH_TAG GENMASK(31, 8)
+
+#define HAL_WBM_SCATTERED_DESC_HEAD_P_OFFSET_IX1 GENMASK(20, 8)
+#define HAL_WBM_SCATTERED_DESC_TAIL_P_OFFSET_IX1 GENMASK(20, 8)
+
+#define BASE_ADDR_MATCH_TAG_VAL 0x5
+
+#define HAL_REO_REO2SW1_RING_BASE_MSB_RING_SIZE 0x000fffff
+#define HAL_REO_REO2TCL_RING_BASE_MSB_RING_SIZE 0x000fffff
+#define HAL_REO_SW2REO_RING_BASE_MSB_RING_SIZE 0x0000ffff
+#define HAL_REO_CMD_RING_BASE_MSB_RING_SIZE 0x0000ffff
+#define HAL_REO_STATUS_RING_BASE_MSB_RING_SIZE 0x0000ffff
+#define HAL_SW2TCL1_RING_BASE_MSB_RING_SIZE 0x000fffff
+#define HAL_SW2TCL1_CMD_RING_BASE_MSB_RING_SIZE 0x000fffff
+#define HAL_TCL_STATUS_RING_BASE_MSB_RING_SIZE 0x0000ffff
+#define HAL_CE_SRC_RING_BASE_MSB_RING_SIZE 0x0000ffff
+#define HAL_CE_DST_RING_BASE_MSB_RING_SIZE 0x0000ffff
+#define HAL_CE_DST_STATUS_RING_BASE_MSB_RING_SIZE 0x0000ffff
+#define HAL_WBM_IDLE_LINK_RING_BASE_MSB_RING_SIZE 0x0000ffff
+#define HAL_SW2WBM_RELEASE_RING_BASE_MSB_RING_SIZE 0x0000ffff
+#define HAL_WBM2SW_RELEASE_RING_BASE_MSB_RING_SIZE 0x000fffff
+#define HAL_RXDMA_RING_MAX_SIZE 0x0000ffff
+
+/* IPQ5018 ce registers */
+#define HAL_IPQ5018_CE_WFSS_REG_BASE 0x08400000
+#define HAL_IPQ5018_CE_SIZE 0x200000
+
+/* Add any other errors here and return them in
+ * ath11k_hal_rx_desc_get_err().
+ */
+
+enum hal_srng_ring_id {
+ HAL_SRNG_RING_ID_REO2SW1 = 0,
+ HAL_SRNG_RING_ID_REO2SW2,
+ HAL_SRNG_RING_ID_REO2SW3,
+ HAL_SRNG_RING_ID_REO2SW4,
+ HAL_SRNG_RING_ID_REO2TCL,
+ HAL_SRNG_RING_ID_SW2REO,
+
+ HAL_SRNG_RING_ID_REO_CMD = 8,
+ HAL_SRNG_RING_ID_REO_STATUS,
+
+ HAL_SRNG_RING_ID_SW2TCL1 = 16,
+ HAL_SRNG_RING_ID_SW2TCL2,
+ HAL_SRNG_RING_ID_SW2TCL3,
+ HAL_SRNG_RING_ID_SW2TCL4,
+
+ HAL_SRNG_RING_ID_SW2TCL_CMD = 24,
+ HAL_SRNG_RING_ID_TCL_STATUS,
+
+ HAL_SRNG_RING_ID_CE0_SRC = 32,
+ HAL_SRNG_RING_ID_CE1_SRC,
+ HAL_SRNG_RING_ID_CE2_SRC,
+ HAL_SRNG_RING_ID_CE3_SRC,
+ HAL_SRNG_RING_ID_CE4_SRC,
+ HAL_SRNG_RING_ID_CE5_SRC,
+ HAL_SRNG_RING_ID_CE6_SRC,
+ HAL_SRNG_RING_ID_CE7_SRC,
+ HAL_SRNG_RING_ID_CE8_SRC,
+ HAL_SRNG_RING_ID_CE9_SRC,
+ HAL_SRNG_RING_ID_CE10_SRC,
+ HAL_SRNG_RING_ID_CE11_SRC,
+
+ HAL_SRNG_RING_ID_CE0_DST = 56,
+ HAL_SRNG_RING_ID_CE1_DST,
+ HAL_SRNG_RING_ID_CE2_DST,
+ HAL_SRNG_RING_ID_CE3_DST,
+ HAL_SRNG_RING_ID_CE4_DST,
+ HAL_SRNG_RING_ID_CE5_DST,
+ HAL_SRNG_RING_ID_CE6_DST,
+ HAL_SRNG_RING_ID_CE7_DST,
+ HAL_SRNG_RING_ID_CE8_DST,
+ HAL_SRNG_RING_ID_CE9_DST,
+ HAL_SRNG_RING_ID_CE10_DST,
+ HAL_SRNG_RING_ID_CE11_DST,
+
+ HAL_SRNG_RING_ID_CE0_DST_STATUS = 80,
+ HAL_SRNG_RING_ID_CE1_DST_STATUS,
+ HAL_SRNG_RING_ID_CE2_DST_STATUS,
+ HAL_SRNG_RING_ID_CE3_DST_STATUS,
+ HAL_SRNG_RING_ID_CE4_DST_STATUS,
+ HAL_SRNG_RING_ID_CE5_DST_STATUS,
+ HAL_SRNG_RING_ID_CE6_DST_STATUS,
+ HAL_SRNG_RING_ID_CE7_DST_STATUS,
+ HAL_SRNG_RING_ID_CE8_DST_STATUS,
+ HAL_SRNG_RING_ID_CE9_DST_STATUS,
+ HAL_SRNG_RING_ID_CE10_DST_STATUS,
+ HAL_SRNG_RING_ID_CE11_DST_STATUS,
+
+ HAL_SRNG_RING_ID_WBM_IDLE_LINK = 104,
+ HAL_SRNG_RING_ID_WBM_SW_RELEASE,
+ HAL_SRNG_RING_ID_WBM2SW0_RELEASE,
+ HAL_SRNG_RING_ID_WBM2SW1_RELEASE,
+ HAL_SRNG_RING_ID_WBM2SW2_RELEASE,
+ HAL_SRNG_RING_ID_WBM2SW3_RELEASE,
+ HAL_SRNG_RING_ID_WBM2SW4_RELEASE,
+
+ HAL_SRNG_RING_ID_UMAC_ID_END = 127,
+ HAL_SRNG_RING_ID_LMAC1_ID_START,
+
+ HAL_SRNG_RING_ID_WMAC1_SW2RXDMA0_BUF = HAL_SRNG_RING_ID_LMAC1_ID_START,
+ HAL_SRNG_RING_ID_WMAC1_SW2RXDMA1_BUF,
+ HAL_SRNG_RING_ID_WMAC1_SW2RXDMA2_BUF,
+ HAL_SRNG_RING_ID_WMAC1_SW2RXDMA0_STATBUF,
+ HAL_SRNG_RING_ID_WMAC1_SW2RXDMA1_STATBUF,
+ HAL_SRNG_RING_ID_WMAC1_RXDMA2SW0,
+ HAL_SRNG_RING_ID_WMAC1_RXDMA2SW1,
+ HAL_SRNG_RING_ID_WMAC1_SW2RXDMA1_DESC,
+ HAL_SRNG_RING_ID_RXDMA_DIR_BUF,
+
+ HAL_SRNG_RING_ID_LMAC1_ID_END = 143
+};
+
+/* SRNG registers are split into two groups R0 and R2 */
+#define HAL_SRNG_REG_GRP_R0 0
+#define HAL_SRNG_REG_GRP_R2 1
+#define HAL_SRNG_NUM_REG_GRP 2
+
+#define HAL_SRNG_NUM_LMACS 3
+#define HAL_SRNG_REO_EXCEPTION HAL_SRNG_RING_ID_REO2SW1
+#define HAL_SRNG_RINGS_PER_LMAC (HAL_SRNG_RING_ID_LMAC1_ID_END - \
+ HAL_SRNG_RING_ID_LMAC1_ID_START)
+#define HAL_SRNG_NUM_LMAC_RINGS (HAL_SRNG_NUM_LMACS * HAL_SRNG_RINGS_PER_LMAC)
+#define HAL_SRNG_RING_ID_MAX (HAL_SRNG_RING_ID_UMAC_ID_END + \
+ HAL_SRNG_NUM_LMAC_RINGS)
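+
+/* Illustrative sketch, not part of the driver: LMAC rings occupy one
+ * contiguous block of HAL_SRNG_RINGS_PER_LMAC IDs per LMAC after the
+ * UMAC IDs, so an LMAC ring ID for a given mac can be derived from its
+ * LMAC1 base ID. Hypothetical helper, assuming this layout:
+ */
+static inline int ath11k_example_lmac_ring_id(int lmac1_ring_id, int mac_id)
+{
+ /* e.g. lmac1_ring_id == HAL_SRNG_RING_ID_WMAC1_SW2RXDMA0_BUF */
+ return lmac1_ring_id + mac_id * HAL_SRNG_RINGS_PER_LMAC;
+}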
+
+enum hal_ring_type {
+ HAL_REO_DST,
+ HAL_REO_EXCEPTION,
+ HAL_REO_REINJECT,
+ HAL_REO_CMD,
+ HAL_REO_STATUS,
+ HAL_TCL_DATA,
+ HAL_TCL_CMD,
+ HAL_TCL_STATUS,
+ HAL_CE_SRC,
+ HAL_CE_DST,
+ HAL_CE_DST_STATUS,
+ HAL_WBM_IDLE_LINK,
+ HAL_SW2WBM_RELEASE,
+ HAL_WBM2SW_RELEASE,
+ HAL_RXDMA_BUF,
+ HAL_RXDMA_DST,
+ HAL_RXDMA_MONITOR_BUF,
+ HAL_RXDMA_MONITOR_STATUS,
+ HAL_RXDMA_MONITOR_DST,
+ HAL_RXDMA_MONITOR_DESC,
+ HAL_RXDMA_DIR_BUF,
+ HAL_MAX_RING_TYPES,
+};
+
+#define HAL_RX_MAX_BA_WINDOW 256
+
+#define HAL_DEFAULT_REO_TIMEOUT_USEC (40 * 1000)
+
+/**
+ * enum hal_reo_cmd_type: Enum for REO command type
+ * @HAL_REO_CMD_GET_QUEUE_STATS: Get REO queue status/stats
+ * @HAL_REO_CMD_FLUSH_QUEUE: Flush all frames in REO queue
+ * @HAL_REO_CMD_FLUSH_CACHE: Flush descriptor entries in the cache
+ * @HAL_REO_CMD_UNBLOCK_CACHE: Unblock a descriptor's address that was blocked
+ * earlier with a 'REO_FLUSH_CACHE' command
+ * @HAL_REO_CMD_FLUSH_TIMEOUT_LIST: Flush buffers/descriptors from timeout list
+ * @HAL_REO_CMD_UPDATE_RX_QUEUE: Update REO queue settings
+ */
+enum hal_reo_cmd_type {
+ HAL_REO_CMD_GET_QUEUE_STATS = 0,
+ HAL_REO_CMD_FLUSH_QUEUE = 1,
+ HAL_REO_CMD_FLUSH_CACHE = 2,
+ HAL_REO_CMD_UNBLOCK_CACHE = 3,
+ HAL_REO_CMD_FLUSH_TIMEOUT_LIST = 4,
+ HAL_REO_CMD_UPDATE_RX_QUEUE = 5,
+};
+
+/**
+ * enum hal_reo_cmd_status: Enum for execution status of REO command
+ * @HAL_REO_CMD_SUCCESS: Command has successfully executed
+ * @HAL_REO_CMD_BLOCKED: Command could not be executed as the queue
+ * or cache was blocked
+ * @HAL_REO_CMD_FAILED: Command execution failed, could be due to
+ * invalid queue desc
+ * @HAL_REO_CMD_RESOURCE_BLOCKED: Command could not be executed because
+ * one or more descriptors were blocked
+ * @HAL_REO_CMD_DRAIN:
+ */
+enum hal_reo_cmd_status {
+ HAL_REO_CMD_SUCCESS = 0,
+ HAL_REO_CMD_BLOCKED = 1,
+ HAL_REO_CMD_FAILED = 2,
+ HAL_REO_CMD_RESOURCE_BLOCKED = 3,
+ HAL_REO_CMD_DRAIN = 0xff,
+};
+
+struct hal_wbm_idle_scatter_list {
+ dma_addr_t paddr;
+ struct hal_wbm_link_desc *vaddr;
+};
+
+struct hal_srng_params {
+ dma_addr_t ring_base_paddr;
+ u32 *ring_base_vaddr;
+ int num_entries;
+ u32 intr_batch_cntr_thres_entries;
+ u32 intr_timer_thres_us;
+ u32 flags;
+ u32 max_buffer_len;
+ u32 low_threshold;
+ dma_addr_t msi_addr;
+ u32 msi_data;
+
+ /* Add more params as needed */
+};
+
+enum hal_srng_dir {
+ HAL_SRNG_DIR_SRC,
+ HAL_SRNG_DIR_DST
+};
+
+/* srng flags */
+#define HAL_SRNG_FLAGS_MSI_SWAP 0x00000008
+#define HAL_SRNG_FLAGS_RING_PTR_SWAP 0x00000010
+#define HAL_SRNG_FLAGS_DATA_TLV_SWAP 0x00000020
+#define HAL_SRNG_FLAGS_LOW_THRESH_INTR_EN 0x00010000
+#define HAL_SRNG_FLAGS_MSI_INTR 0x00020000
+#define HAL_SRNG_FLAGS_CACHED 0x20000000
+#define HAL_SRNG_FLAGS_LMAC_RING 0x80000000
+#define HAL_SRNG_FLAGS_REMAP_CE_RING 0x10000000
+
+#define HAL_SRNG_TLV_HDR_TAG GENMASK(9, 1)
+#define HAL_SRNG_TLV_HDR_LEN GENMASK(25, 10)
+
+/* Common SRNG ring structure for source and destination rings */
+struct hal_srng {
+ /* Unique SRNG ring ID */
+ u8 ring_id;
+
+ /* Ring initialization done */
+ u8 initialized;
+
+ /* Interrupt/MSI value assigned to this ring */
+ int irq;
+
+ /* Physical base address of the ring */
+ dma_addr_t ring_base_paddr;
+
+ /* Virtual base address of the ring */
+ u32 *ring_base_vaddr;
+
+ /* Number of entries in ring */
+ u32 num_entries;
+
+ /* Ring size */
+ u32 ring_size;
+
+ /* Ring size mask */
+ u32 ring_size_mask;
+
+ /* Size of ring entry */
+ u32 entry_size;
+
+ /* Interrupt timer threshold - in microseconds */
+ u32 intr_timer_thres_us;
+
+ /* Interrupt batch counter threshold - in number of ring entries */
+ u32 intr_batch_cntr_thres_entries;
+
+ /* MSI Address */
+ dma_addr_t msi_addr;
+
+ /* MSI data */
+ u32 msi_data;
+
+ /* Misc flags */
+ u32 flags;
+
+ /* Lock for serializing ring index updates */
+ spinlock_t lock;
+
+ /* Start offset of SRNG register groups for this ring
+ * TBD: See if this is required - register address can be derived
+ * from ring ID
+ */
+ u32 hwreg_base[HAL_SRNG_NUM_REG_GRP];
+
+ u64 timestamp;
+
+ /* Source or Destination ring */
+ enum hal_srng_dir ring_dir;
+
+ union {
+ struct {
+ /* SW tail pointer */
+ u32 tp;
+
+ /* Shadow head pointer location to be updated by HW */
+ volatile u32 *hp_addr;
+
+ /* Cached head pointer */
+ u32 cached_hp;
+
+ /* Tail pointer location to be updated by SW - This
+ * will be a register address and need not be
+ * accessed through SW structure
+ */
+ u32 *tp_addr;
+
+ /* Current SW loop cnt */
+ u32 loop_cnt;
+
+ /* max transfer size */
+ u16 max_buffer_length;
+
+ /* head pointer at access end */
+ u32 last_hp;
+ } dst_ring;
+
+ struct {
+ /* SW head pointer */
+ u32 hp;
+
+ /* SW reap head pointer */
+ u32 reap_hp;
+
+ /* Shadow tail pointer location to be updated by HW */
+ u32 *tp_addr;
+
+ /* Cached tail pointer */
+ u32 cached_tp;
+
+ /* Head pointer location to be updated by SW - This
+ * will be a register address and need not be accessed
+ * through SW structure
+ */
+ u32 *hp_addr;
+
+ /* Low threshold - in number of ring entries */
+ u32 low_threshold;
+
+ /* tail pointer at access end */
+ u32 last_tp;
+ } src_ring;
+ } u;
+};
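+
+/* Illustrative sketch, not part of the driver: how the source-ring
+ * pointers above yield the number of free entries, mirroring what
+ * ath11k_hal_srng_src_num_free() (declared below) computes. The helper
+ * is hypothetical and ignores locking and HW pointer sync.
+ */
+static inline int ath11k_example_src_num_free(struct hal_srng *srng)
+{
+ u32 tp = srng->u.src_ring.cached_tp;
+ u32 hp = srng->u.src_ring.hp;
+
+ /* one entry is kept unused to distinguish a full ring from empty */
+ if (tp > hp)
+  return ((tp - hp) / srng->entry_size) - 1;
+ return ((srng->ring_size - hp + tp) / srng->entry_size) - 1;
+}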
+
+/* Interrupt mitigation - Batch threshold in terms of number of frames */
+#define HAL_SRNG_INT_BATCH_THRESHOLD_TX 256
+#define HAL_SRNG_INT_BATCH_THRESHOLD_RX 128
+#define HAL_SRNG_INT_BATCH_THRESHOLD_OTHER 1
+
+/* Interrupt mitigation - timer threshold in us */
+#define HAL_SRNG_INT_TIMER_THRESHOLD_TX 1000
+#define HAL_SRNG_INT_TIMER_THRESHOLD_RX 500
+#define HAL_SRNG_INT_TIMER_THRESHOLD_OTHER 256
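+
+/* Illustrative sketch, not part of the driver: a hal_srng_params is
+ * typically zeroed, given its ring memory, and given the interrupt
+ * mitigation defaults above before being passed to
+ * ath11k_hal_srng_setup() (declared later in this file). The helper
+ * name is hypothetical and all values are placeholders.
+ */
+static inline void ath11k_example_fill_srng_params(struct hal_srng_params *p,
+						   dma_addr_t paddr,
+						   u32 *vaddr, int num_entries)
+{
+ memset(p, 0, sizeof(*p));
+ p->ring_base_paddr = paddr;
+ p->ring_base_vaddr = vaddr;
+ p->num_entries = num_entries;
+ p->intr_batch_cntr_thres_entries = HAL_SRNG_INT_BATCH_THRESHOLD_OTHER;
+ p->intr_timer_thres_us = HAL_SRNG_INT_TIMER_THRESHOLD_OTHER;
+}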
+
+/* HW SRNG configuration table */
+struct hal_srng_config {
+ int start_ring_id;
+ u16 max_rings;
+ u16 entry_size;
+ u32 reg_start[HAL_SRNG_NUM_REG_GRP];
+ u16 reg_size[HAL_SRNG_NUM_REG_GRP];
+ u8 lmac_ring;
+ enum hal_srng_dir ring_dir;
+ u32 max_size;
+};
+
+/**
+ * enum hal_rx_buf_return_buf_manager
+ *
+ * @HAL_RX_BUF_RBM_WBM_IDLE_BUF_LIST: Buffer returned to WBM idle buffer list
+ * @HAL_RX_BUF_RBM_WBM_IDLE_DESC_LIST: Descriptor returned to WBM idle
+ * descriptor list.
+ * @HAL_RX_BUF_RBM_FW_BM: Buffer returned to FW
+ * @HAL_RX_BUF_RBM_SW0_BM: For Tx completion -- returned to host
+ * @HAL_RX_BUF_RBM_SW1_BM: For Tx completion -- returned to host
+ * @HAL_RX_BUF_RBM_SW2_BM: For Tx completion -- returned to host
+ * @HAL_RX_BUF_RBM_SW3_BM: For Rx release -- returned to host
+ * @HAL_RX_BUF_RBM_SW4_BM: For Tx completion -- returned to host
+ */
+
+enum hal_rx_buf_return_buf_manager {
+ HAL_RX_BUF_RBM_WBM_IDLE_BUF_LIST,
+ HAL_RX_BUF_RBM_WBM_IDLE_DESC_LIST,
+ HAL_RX_BUF_RBM_FW_BM,
+ HAL_RX_BUF_RBM_SW0_BM,
+ HAL_RX_BUF_RBM_SW1_BM,
+ HAL_RX_BUF_RBM_SW2_BM,
+ HAL_RX_BUF_RBM_SW3_BM,
+ HAL_RX_BUF_RBM_SW4_BM,
+};
+
+#define HAL_SRNG_DESC_LOOP_CNT 0xf0000000
+
+#define HAL_REO_CMD_FLG_NEED_STATUS BIT(0)
+#define HAL_REO_CMD_FLG_STATS_CLEAR BIT(1)
+#define HAL_REO_CMD_FLG_FLUSH_BLOCK_LATER BIT(2)
+#define HAL_REO_CMD_FLG_FLUSH_RELEASE_BLOCKING BIT(3)
+#define HAL_REO_CMD_FLG_FLUSH_NO_INVAL BIT(4)
+#define HAL_REO_CMD_FLG_FLUSH_FWD_ALL_MPDUS BIT(5)
+#define HAL_REO_CMD_FLG_FLUSH_ALL BIT(6)
+#define HAL_REO_CMD_FLG_UNBLK_RESOURCE BIT(7)
+#define HAL_REO_CMD_FLG_UNBLK_CACHE BIT(8)
+
+/* Must match the HAL_REO_UPD_RX_QUEUE_INFO0_UPD_* fields */
+#define HAL_REO_CMD_UPD0_RX_QUEUE_NUM BIT(8)
+#define HAL_REO_CMD_UPD0_VLD BIT(9)
+#define HAL_REO_CMD_UPD0_ALDC BIT(10)
+#define HAL_REO_CMD_UPD0_DIS_DUP_DETECTION BIT(11)
+#define HAL_REO_CMD_UPD0_SOFT_REORDER_EN BIT(12)
+#define HAL_REO_CMD_UPD0_AC BIT(13)
+#define HAL_REO_CMD_UPD0_BAR BIT(14)
+#define HAL_REO_CMD_UPD0_RETRY BIT(15)
+#define HAL_REO_CMD_UPD0_CHECK_2K_MODE BIT(16)
+#define HAL_REO_CMD_UPD0_OOR_MODE BIT(17)
+#define HAL_REO_CMD_UPD0_BA_WINDOW_SIZE BIT(18)
+#define HAL_REO_CMD_UPD0_PN_CHECK BIT(19)
+#define HAL_REO_CMD_UPD0_EVEN_PN BIT(20)
+#define HAL_REO_CMD_UPD0_UNEVEN_PN BIT(21)
+#define HAL_REO_CMD_UPD0_PN_HANDLE_ENABLE BIT(22)
+#define HAL_REO_CMD_UPD0_PN_SIZE BIT(23)
+#define HAL_REO_CMD_UPD0_IGNORE_AMPDU_FLG BIT(24)
+#define HAL_REO_CMD_UPD0_SVLD BIT(25)
+#define HAL_REO_CMD_UPD0_SSN BIT(26)
+#define HAL_REO_CMD_UPD0_SEQ_2K_ERR BIT(27)
+#define HAL_REO_CMD_UPD0_PN_ERR BIT(28)
+#define HAL_REO_CMD_UPD0_PN_VALID BIT(29)
+#define HAL_REO_CMD_UPD0_PN BIT(30)
+
+/* Must match the HAL_REO_UPD_RX_QUEUE_INFO1_* fields */
+#define HAL_REO_CMD_UPD1_VLD BIT(16)
+#define HAL_REO_CMD_UPD1_ALDC GENMASK(18, 17)
+#define HAL_REO_CMD_UPD1_DIS_DUP_DETECTION BIT(19)
+#define HAL_REO_CMD_UPD1_SOFT_REORDER_EN BIT(20)
+#define HAL_REO_CMD_UPD1_AC GENMASK(22, 21)
+#define HAL_REO_CMD_UPD1_BAR BIT(23)
+#define HAL_REO_CMD_UPD1_RETRY BIT(24)
+#define HAL_REO_CMD_UPD1_CHECK_2K_MODE BIT(25)
+#define HAL_REO_CMD_UPD1_OOR_MODE BIT(26)
+#define HAL_REO_CMD_UPD1_PN_CHECK BIT(27)
+#define HAL_REO_CMD_UPD1_EVEN_PN BIT(28)
+#define HAL_REO_CMD_UPD1_UNEVEN_PN BIT(29)
+#define HAL_REO_CMD_UPD1_PN_HANDLE_ENABLE BIT(30)
+#define HAL_REO_CMD_UPD1_IGNORE_AMPDU_FLG BIT(31)
+
+/* Must match the HAL_REO_UPD_RX_QUEUE_INFO2_* fields */
+#define HAL_REO_CMD_UPD2_SVLD BIT(10)
+#define HAL_REO_CMD_UPD2_SSN GENMASK(22, 11)
+#define HAL_REO_CMD_UPD2_SEQ_2K_ERR BIT(23)
+#define HAL_REO_CMD_UPD2_PN_ERR BIT(24)
+
+#define HAL_REO_DEST_RING_CTRL_HASH_RING_MAP GENMASK(31, 8)
+
+struct ath11k_hal_reo_cmd {
+ u32 addr_lo;
+ u32 flag;
+ u32 upd0;
+ u32 upd1;
+ u32 upd2;
+ u32 pn[4];
+ u16 rx_queue_num;
+ u16 min_rel;
+ u16 min_fwd;
+ u8 addr_hi;
+ u8 ac_list;
+ u8 blocking_idx;
+ u16 ba_window_size;
+ u8 pn_size;
+};
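+
+/* Illustrative sketch, not part of the driver: building an
+ * HAL_REO_CMD_UPDATE_RX_QUEUE command with the struct above. upd0
+ * selects which queue fields to update; upd1/upd2 carry the new values
+ * per the UPD* masks. Hypothetical helper; the window size is a
+ * placeholder.
+ */
+static inline void
+ath11k_example_reo_update_ba_win(struct ath11k_hal_reo_cmd *cmd, u16 ba_win_sz)
+{
+ memset(cmd, 0, sizeof(*cmd));
+ cmd->flag = HAL_REO_CMD_FLG_NEED_STATUS;
+ cmd->upd0 = HAL_REO_CMD_UPD0_VLD | HAL_REO_CMD_UPD0_BA_WINDOW_SIZE;
+ cmd->upd1 = HAL_REO_CMD_UPD1_VLD; /* keep the queue valid */
+ cmd->ba_window_size = ba_win_sz;
+}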
+
+enum hal_pn_type {
+ HAL_PN_TYPE_NONE,
+ HAL_PN_TYPE_WPA,
+ HAL_PN_TYPE_WAPI_EVEN,
+ HAL_PN_TYPE_WAPI_UNEVEN,
+};
+
+enum hal_ce_desc {
+ HAL_CE_DESC_SRC,
+ HAL_CE_DESC_DST,
+ HAL_CE_DESC_DST_STATUS,
+};
+
+#define HAL_HASH_ROUTING_RING_TCL 0
+#define HAL_HASH_ROUTING_RING_SW1 1
+#define HAL_HASH_ROUTING_RING_SW2 2
+#define HAL_HASH_ROUTING_RING_SW3 3
+#define HAL_HASH_ROUTING_RING_SW4 4
+#define HAL_HASH_ROUTING_RING_REL 5
+#define HAL_HASH_ROUTING_RING_FW 6
+
+struct hal_reo_status_header {
+ u16 cmd_num;
+ enum hal_reo_cmd_status cmd_status;
+ u16 cmd_exe_time;
+ u32 timestamp;
+};
+
+struct hal_reo_status_queue_stats {
+ u16 ssn;
+ u16 curr_idx;
+ u32 pn[4];
+ u32 last_rx_queue_ts;
+ u32 last_rx_dequeue_ts;
+ u32 rx_bitmap[8]; /* Bitmap from 0-255 */
+ u32 curr_mpdu_cnt;
+ u32 curr_msdu_cnt;
+ u16 fwd_due_to_bar_cnt;
+ u16 dup_cnt;
+ u32 frames_in_order_cnt;
+ u32 num_mpdu_processed_cnt;
+ u32 num_msdu_processed_cnt;
+ u32 total_num_processed_byte_cnt;
+ u32 late_rx_mpdu_cnt;
+ u32 reorder_hole_cnt;
+ u8 timeout_cnt;
+ u8 bar_rx_cnt;
+ u8 num_window_2k_jump_cnt;
+};
+
+struct hal_reo_status_flush_queue {
+ bool err_detected;
+};
+
+enum hal_reo_status_flush_cache_err_code {
+ HAL_REO_STATUS_FLUSH_CACHE_ERR_CODE_SUCCESS,
+ HAL_REO_STATUS_FLUSH_CACHE_ERR_CODE_IN_USE,
+ HAL_REO_STATUS_FLUSH_CACHE_ERR_CODE_NOT_FOUND,
+};
+
+struct hal_reo_status_flush_cache {
+ bool err_detected;
+ enum hal_reo_status_flush_cache_err_code err_code;
+ bool cache_controller_flush_status_hit;
+ u8 cache_controller_flush_status_desc_type;
+ u8 cache_controller_flush_status_client_id;
+ u8 cache_controller_flush_status_err;
+ u8 cache_controller_flush_status_cnt;
+};
+
+enum hal_reo_status_unblock_cache_type {
+ HAL_REO_STATUS_UNBLOCK_BLOCKING_RESOURCE,
+ HAL_REO_STATUS_UNBLOCK_ENTIRE_CACHE_USAGE,
+};
+
+struct hal_reo_status_unblock_cache {
+ bool err_detected;
+ enum hal_reo_status_unblock_cache_type unblock_type;
+};
+
+struct hal_reo_status_flush_timeout_list {
+ bool err_detected;
+ bool list_empty;
+ u16 release_desc_cnt;
+ u16 fwd_buf_cnt;
+};
+
+enum hal_reo_threshold_idx {
+ HAL_REO_THRESHOLD_IDX_DESC_COUNTER0,
+ HAL_REO_THRESHOLD_IDX_DESC_COUNTER1,
+ HAL_REO_THRESHOLD_IDX_DESC_COUNTER2,
+ HAL_REO_THRESHOLD_IDX_DESC_COUNTER_SUM,
+};
+
+struct hal_reo_status_desc_thresh_reached {
+ enum hal_reo_threshold_idx threshold_idx;
+ u32 link_desc_counter0;
+ u32 link_desc_counter1;
+ u32 link_desc_counter2;
+ u32 link_desc_counter_sum;
+};
+
+struct hal_reo_status {
+ struct hal_reo_status_header uniform_hdr;
+ u8 loop_cnt;
+ union {
+ struct hal_reo_status_queue_stats queue_stats;
+ struct hal_reo_status_flush_queue flush_queue;
+ struct hal_reo_status_flush_cache flush_cache;
+ struct hal_reo_status_unblock_cache unblock_cache;
+ struct hal_reo_status_flush_timeout_list timeout_list;
+ struct hal_reo_status_desc_thresh_reached desc_thresh_reached;
+ } u;
+};
+
+/* HAL context to be used to access SRNG APIs (currently used by data path
+ * and transport (CE) modules)
+ */
+struct ath11k_hal {
+ /* HAL internal state for all SRNG rings.
+ */
+ struct hal_srng srng_list[HAL_SRNG_RING_ID_MAX];
+
+ /* SRNG configuration table */
+ struct hal_srng_config *srng_config;
+
+ /* Remote pointer memory for HW/FW updates */
+ struct {
+ u32 *vaddr;
+ dma_addr_t paddr;
+ } rdp;
+
+ /* Shared memory for ring pointer updates from host to FW */
+ struct {
+ u32 *vaddr;
+ dma_addr_t paddr;
+ } wrp;
+
+ /* Available REO blocking resources bitmap */
+ u8 avail_blk_resource;
+
+ u8 current_blk_index;
+
+ /* shadow register configuration */
+ u32 shadow_reg_addr[HAL_SHADOW_NUM_REGS];
+ int num_shadow_reg_configured;
+
+ struct lock_class_key srng_key[HAL_SRNG_RING_ID_MAX];
+};
+
+u32 ath11k_hal_reo_qdesc_size(u32 ba_window_size, u8 tid);
+void ath11k_hal_reo_qdesc_setup(void *vaddr, int tid, u32 ba_window_size,
+ u32 start_seq, enum hal_pn_type type);
+void ath11k_hal_reo_init_cmd_ring(struct ath11k_base *ab,
+ struct hal_srng *srng);
+void ath11k_hal_setup_link_idle_list(struct ath11k_base *ab,
+ struct hal_wbm_idle_scatter_list *sbuf,
+ u32 nsbufs, u32 tot_link_desc,
+ u32 end_offset);
+
+dma_addr_t ath11k_hal_srng_get_tp_addr(struct ath11k_base *ab,
+ struct hal_srng *srng);
+dma_addr_t ath11k_hal_srng_get_hp_addr(struct ath11k_base *ab,
+ struct hal_srng *srng);
+void ath11k_hal_set_link_desc_addr(struct hal_wbm_link_desc *desc, u32 cookie,
+ dma_addr_t paddr);
+u32 ath11k_hal_ce_get_desc_size(enum hal_ce_desc type);
+void ath11k_hal_ce_src_set_desc(void *buf, dma_addr_t paddr, u32 len, u32 id,
+ u8 byte_swap_data);
+void ath11k_hal_ce_dst_set_desc(void *buf, dma_addr_t paddr);
+u32 ath11k_hal_ce_dst_status_get_length(void *buf);
+int ath11k_hal_srng_get_entrysize(struct ath11k_base *ab, u32 ring_type);
+int ath11k_hal_srng_get_max_entries(struct ath11k_base *ab, u32 ring_type);
+void ath11k_hal_srng_get_params(struct ath11k_base *ab, struct hal_srng *srng,
+ struct hal_srng_params *params);
+u32 *ath11k_hal_srng_dst_get_next_entry(struct ath11k_base *ab,
+ struct hal_srng *srng);
+u32 *ath11k_hal_srng_dst_peek(struct ath11k_base *ab, struct hal_srng *srng);
+int ath11k_hal_srng_dst_num_free(struct ath11k_base *ab, struct hal_srng *srng,
+ bool sync_hw_ptr);
+u32 *ath11k_hal_srng_src_peek(struct ath11k_base *ab, struct hal_srng *srng);
+u32 *ath11k_hal_srng_src_get_next_reaped(struct ath11k_base *ab,
+ struct hal_srng *srng);
+u32 *ath11k_hal_srng_src_reap_next(struct ath11k_base *ab,
+ struct hal_srng *srng);
+u32 *ath11k_hal_srng_src_get_next_entry(struct ath11k_base *ab,
+ struct hal_srng *srng);
+int ath11k_hal_srng_src_num_free(struct ath11k_base *ab, struct hal_srng *srng,
+ bool sync_hw_ptr);
+void ath11k_hal_srng_access_begin(struct ath11k_base *ab,
+ struct hal_srng *srng);
+void ath11k_hal_srng_access_end(struct ath11k_base *ab, struct hal_srng *srng);
+int ath11k_hal_srng_setup(struct ath11k_base *ab, enum hal_ring_type type,
+ int ring_num, int mac_id,
+ struct hal_srng_params *params);
+int ath11k_hal_srng_init(struct ath11k_base *ath11k);
+void ath11k_hal_srng_deinit(struct ath11k_base *ath11k);
+void ath11k_hal_dump_srng_stats(struct ath11k_base *ab);
+void ath11k_hal_srng_get_shadow_config(struct ath11k_base *ab,
+ u32 **cfg, u32 *len);
+int ath11k_hal_srng_update_shadow_config(struct ath11k_base *ab,
+ enum hal_ring_type ring_type,
+ int ring_num);
+void ath11k_hal_srng_shadow_config(struct ath11k_base *ab);
+void ath11k_hal_srng_shadow_update_hp_tp(struct ath11k_base *ab,
+ struct hal_srng *srng);
+#endif
diff --git a/drivers/net/wireless/ath/ath11k/hal_desc.h b/drivers/net/wireless/ath/ath11k/hal_desc.h
new file mode 100644
index 0000000000..d895ea878d
--- /dev/null
+++ b/drivers/net/wireless/ath/ath11k/hal_desc.h
@@ -0,0 +1,2493 @@
+/* SPDX-License-Identifier: BSD-3-Clause-Clear */
+/*
+ * Copyright (c) 2018-2019 The Linux Foundation. All rights reserved.
+ */
+#include "core.h"
+
+#ifndef ATH11K_HAL_DESC_H
+#define ATH11K_HAL_DESC_H
+
+#define BUFFER_ADDR_INFO0_ADDR GENMASK(31, 0)
+
+#define BUFFER_ADDR_INFO1_ADDR GENMASK(7, 0)
+#define BUFFER_ADDR_INFO1_RET_BUF_MGR GENMASK(10, 8)
+#define BUFFER_ADDR_INFO1_SW_COOKIE GENMASK(31, 11)
+
+struct ath11k_buffer_addr {
+ u32 info0;
+ u32 info1;
+} __packed;
+
+/* ath11k_buffer_addr
+ *
+ * info0
+ * Address (lower 32 bits) of the msdu buffer or msdu extension
+ * descriptor or Link descriptor
+ *
+ * addr
+ * Address (upper 8 bits) of the msdu buffer or msdu extension
+ * descriptor or Link descriptor
+ *
+ * return_buffer_manager (RBM)
+ * Consumer: WBM
+ * Producer: SW/FW
+ * Indicates to which buffer manager the buffer or MSDU_EXTENSION
+ * descriptor or link descriptor that is being pointed to shall be
+ * returned after the frame has been processed. It is used by WBM
+ * for routing purposes.
+ *
+ * Values are defined in enum %HAL_RX_BUF_RBM_
+ *
+ * sw_buffer_cookie
+ * Cookie field exclusively used by SW. HW ignores the contents,
+ * except that it passes the programmed value on to other
+ * descriptors together with the physical address.
+ *
+ * SW can use this field to, for example, associate the buffer's
+ * physical address with its virtual address.
+ */
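+
+/* Illustrative sketch, not part of the driver: packing a buffer address
+ * descriptor with the masks above using FIELD_PREP(). The helper name
+ * and arguments are hypothetical.
+ */
+static inline void ath11k_example_set_buf_addr(struct ath11k_buffer_addr *b,
+					       dma_addr_t paddr, u32 cookie,
+					       u8 rbm)
+{
+ b->info0 = FIELD_PREP(BUFFER_ADDR_INFO0_ADDR, lower_32_bits(paddr));
+ b->info1 = FIELD_PREP(BUFFER_ADDR_INFO1_ADDR, upper_32_bits(paddr)) |
+	    FIELD_PREP(BUFFER_ADDR_INFO1_RET_BUF_MGR, rbm) |
+	    FIELD_PREP(BUFFER_ADDR_INFO1_SW_COOKIE, cookie);
+}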
+
+enum hal_tlv_tag {
+ HAL_MACTX_CBF_START = 0 /* 0x0 */,
+ HAL_PHYRX_DATA = 1 /* 0x1 */,
+ HAL_PHYRX_CBF_DATA_RESP = 2 /* 0x2 */,
+ HAL_PHYRX_ABORT_REQUEST = 3 /* 0x3 */,
+ HAL_PHYRX_USER_ABORT_NOTIFICATION = 4 /* 0x4 */,
+ HAL_MACTX_DATA_RESP = 5 /* 0x5 */,
+ HAL_MACTX_CBF_DATA = 6 /* 0x6 */,
+ HAL_MACTX_CBF_DONE = 7 /* 0x7 */,
+ HAL_MACRX_CBF_READ_REQUEST = 8 /* 0x8 */,
+ HAL_MACRX_CBF_DATA_REQUEST = 9 /* 0x9 */,
+ HAL_MACRX_EXPECT_NDP_RECEPTION = 10 /* 0xa */,
+ HAL_MACRX_FREEZE_CAPTURE_CHANNEL = 11 /* 0xb */,
+ HAL_MACRX_NDP_TIMEOUT = 12 /* 0xc */,
+ HAL_MACRX_ABORT_ACK = 13 /* 0xd */,
+ HAL_MACRX_REQ_IMPLICIT_FB = 14 /* 0xe */,
+ HAL_MACRX_CHAIN_MASK = 15 /* 0xf */,
+ HAL_MACRX_NAP_USER = 16 /* 0x10 */,
+ HAL_MACRX_ABORT_REQUEST = 17 /* 0x11 */,
+ HAL_PHYTX_OTHER_TRANSMIT_INFO16 = 18 /* 0x12 */,
+ HAL_PHYTX_ABORT_ACK = 19 /* 0x13 */,
+ HAL_PHYTX_ABORT_REQUEST = 20 /* 0x14 */,
+ HAL_PHYTX_PKT_END = 21 /* 0x15 */,
+ HAL_PHYTX_PPDU_HEADER_INFO_REQUEST = 22 /* 0x16 */,
+ HAL_PHYTX_REQUEST_CTRL_INFO = 23 /* 0x17 */,
+ HAL_PHYTX_DATA_REQUEST = 24 /* 0x18 */,
+ HAL_PHYTX_BF_CV_LOADING_DONE = 25 /* 0x19 */,
+ HAL_PHYTX_NAP_ACK = 26 /* 0x1a */,
+ HAL_PHYTX_NAP_DONE = 27 /* 0x1b */,
+ HAL_PHYTX_OFF_ACK = 28 /* 0x1c */,
+ HAL_PHYTX_ON_ACK = 29 /* 0x1d */,
+ HAL_PHYTX_SYNTH_OFF_ACK = 30 /* 0x1e */,
+ HAL_PHYTX_DEBUG16 = 31 /* 0x1f */,
+ HAL_MACTX_ABORT_REQUEST = 32 /* 0x20 */,
+ HAL_MACTX_ABORT_ACK = 33 /* 0x21 */,
+ HAL_MACTX_PKT_END = 34 /* 0x22 */,
+ HAL_MACTX_PRE_PHY_DESC = 35 /* 0x23 */,
+ HAL_MACTX_BF_PARAMS_COMMON = 36 /* 0x24 */,
+ HAL_MACTX_BF_PARAMS_PER_USER = 37 /* 0x25 */,
+ HAL_MACTX_PREFETCH_CV = 38 /* 0x26 */,
+ HAL_MACTX_USER_DESC_COMMON = 39 /* 0x27 */,
+ HAL_MACTX_USER_DESC_PER_USER = 40 /* 0x28 */,
+ HAL_EXAMPLE_USER_TLV_16 = 41 /* 0x29 */,
+ HAL_EXAMPLE_TLV_16 = 42 /* 0x2a */,
+ HAL_MACTX_PHY_OFF = 43 /* 0x2b */,
+ HAL_MACTX_PHY_ON = 44 /* 0x2c */,
+ HAL_MACTX_SYNTH_OFF = 45 /* 0x2d */,
+ HAL_MACTX_EXPECT_CBF_COMMON = 46 /* 0x2e */,
+ HAL_MACTX_EXPECT_CBF_PER_USER = 47 /* 0x2f */,
+ HAL_MACTX_PHY_DESC = 48 /* 0x30 */,
+ HAL_MACTX_L_SIG_A = 49 /* 0x31 */,
+ HAL_MACTX_L_SIG_B = 50 /* 0x32 */,
+ HAL_MACTX_HT_SIG = 51 /* 0x33 */,
+ HAL_MACTX_VHT_SIG_A = 52 /* 0x34 */,
+ HAL_MACTX_VHT_SIG_B_SU20 = 53 /* 0x35 */,
+ HAL_MACTX_VHT_SIG_B_SU40 = 54 /* 0x36 */,
+ HAL_MACTX_VHT_SIG_B_SU80 = 55 /* 0x37 */,
+ HAL_MACTX_VHT_SIG_B_SU160 = 56 /* 0x38 */,
+ HAL_MACTX_VHT_SIG_B_MU20 = 57 /* 0x39 */,
+ HAL_MACTX_VHT_SIG_B_MU40 = 58 /* 0x3a */,
+ HAL_MACTX_VHT_SIG_B_MU80 = 59 /* 0x3b */,
+ HAL_MACTX_VHT_SIG_B_MU160 = 60 /* 0x3c */,
+ HAL_MACTX_SERVICE = 61 /* 0x3d */,
+ HAL_MACTX_HE_SIG_A_SU = 62 /* 0x3e */,
+ HAL_MACTX_HE_SIG_A_MU_DL = 63 /* 0x3f */,
+ HAL_MACTX_HE_SIG_A_MU_UL = 64 /* 0x40 */,
+ HAL_MACTX_HE_SIG_B1_MU = 65 /* 0x41 */,
+ HAL_MACTX_HE_SIG_B2_MU = 66 /* 0x42 */,
+ HAL_MACTX_HE_SIG_B2_OFDMA = 67 /* 0x43 */,
+ HAL_MACTX_DELETE_CV = 68 /* 0x44 */,
+ HAL_MACTX_MU_UPLINK_COMMON = 69 /* 0x45 */,
+ HAL_MACTX_MU_UPLINK_USER_SETUP = 70 /* 0x46 */,
+ HAL_MACTX_OTHER_TRANSMIT_INFO = 71 /* 0x47 */,
+ HAL_MACTX_PHY_NAP = 72 /* 0x48 */,
+ HAL_MACTX_DEBUG = 73 /* 0x49 */,
+ HAL_PHYRX_ABORT_ACK = 74 /* 0x4a */,
+ HAL_PHYRX_GENERATED_CBF_DETAILS = 75 /* 0x4b */,
+ HAL_PHYRX_RSSI_LEGACY = 76 /* 0x4c */,
+ HAL_PHYRX_RSSI_HT = 77 /* 0x4d */,
+ HAL_PHYRX_USER_INFO = 78 /* 0x4e */,
+ HAL_PHYRX_PKT_END = 79 /* 0x4f */,
+ HAL_PHYRX_DEBUG = 80 /* 0x50 */,
+ HAL_PHYRX_CBF_TRANSFER_DONE = 81 /* 0x51 */,
+ HAL_PHYRX_CBF_TRANSFER_ABORT = 82 /* 0x52 */,
+ HAL_PHYRX_L_SIG_A = 83 /* 0x53 */,
+ HAL_PHYRX_L_SIG_B = 84 /* 0x54 */,
+ HAL_PHYRX_HT_SIG = 85 /* 0x55 */,
+ HAL_PHYRX_VHT_SIG_A = 86 /* 0x56 */,
+ HAL_PHYRX_VHT_SIG_B_SU20 = 87 /* 0x57 */,
+ HAL_PHYRX_VHT_SIG_B_SU40 = 88 /* 0x58 */,
+ HAL_PHYRX_VHT_SIG_B_SU80 = 89 /* 0x59 */,
+ HAL_PHYRX_VHT_SIG_B_SU160 = 90 /* 0x5a */,
+ HAL_PHYRX_VHT_SIG_B_MU20 = 91 /* 0x5b */,
+ HAL_PHYRX_VHT_SIG_B_MU40 = 92 /* 0x5c */,
+ HAL_PHYRX_VHT_SIG_B_MU80 = 93 /* 0x5d */,
+ HAL_PHYRX_VHT_SIG_B_MU160 = 94 /* 0x5e */,
+ HAL_PHYRX_HE_SIG_A_SU = 95 /* 0x5f */,
+ HAL_PHYRX_HE_SIG_A_MU_DL = 96 /* 0x60 */,
+ HAL_PHYRX_HE_SIG_A_MU_UL = 97 /* 0x61 */,
+ HAL_PHYRX_HE_SIG_B1_MU = 98 /* 0x62 */,
+ HAL_PHYRX_HE_SIG_B2_MU = 99 /* 0x63 */,
+ HAL_PHYRX_HE_SIG_B2_OFDMA = 100 /* 0x64 */,
+ HAL_PHYRX_OTHER_RECEIVE_INFO = 101 /* 0x65 */,
+ HAL_PHYRX_COMMON_USER_INFO = 102 /* 0x66 */,
+ HAL_PHYRX_DATA_DONE = 103 /* 0x67 */,
+ HAL_RECEIVE_RSSI_INFO = 104 /* 0x68 */,
+ HAL_RECEIVE_USER_INFO = 105 /* 0x69 */,
+ HAL_MIMO_CONTROL_INFO = 106 /* 0x6a */,
+ HAL_RX_LOCATION_INFO = 107 /* 0x6b */,
+ HAL_COEX_TX_REQ = 108 /* 0x6c */,
+ HAL_DUMMY = 109 /* 0x6d */,
+ HAL_RX_TIMING_OFFSET_INFO = 110 /* 0x6e */,
+ HAL_EXAMPLE_TLV_32_NAME = 111 /* 0x6f */,
+ HAL_MPDU_LIMIT = 112 /* 0x70 */,
+ HAL_NA_LENGTH_END = 113 /* 0x71 */,
+ HAL_OLE_BUF_STATUS = 114 /* 0x72 */,
+ HAL_PCU_PPDU_SETUP_DONE = 115 /* 0x73 */,
+ HAL_PCU_PPDU_SETUP_END = 116 /* 0x74 */,
+ HAL_PCU_PPDU_SETUP_INIT = 117 /* 0x75 */,
+ HAL_PCU_PPDU_SETUP_START = 118 /* 0x76 */,
+ HAL_PDG_FES_SETUP = 119 /* 0x77 */,
+ HAL_PDG_RESPONSE = 120 /* 0x78 */,
+ HAL_PDG_TX_REQ = 121 /* 0x79 */,
+ HAL_SCH_WAIT_INSTR = 122 /* 0x7a */,
+ HAL_SCHEDULER_TLV = 123 /* 0x7b */,
+ HAL_TQM_FLOW_EMPTY_STATUS = 124 /* 0x7c */,
+ HAL_TQM_FLOW_NOT_EMPTY_STATUS = 125 /* 0x7d */,
+ HAL_TQM_GEN_MPDU_LENGTH_LIST = 126 /* 0x7e */,
+ HAL_TQM_GEN_MPDU_LENGTH_LIST_STATUS = 127 /* 0x7f */,
+ HAL_TQM_GEN_MPDUS = 128 /* 0x80 */,
+ HAL_TQM_GEN_MPDUS_STATUS = 129 /* 0x81 */,
+ HAL_TQM_REMOVE_MPDU = 130 /* 0x82 */,
+ HAL_TQM_REMOVE_MPDU_STATUS = 131 /* 0x83 */,
+ HAL_TQM_REMOVE_MSDU = 132 /* 0x84 */,
+ HAL_TQM_REMOVE_MSDU_STATUS = 133 /* 0x85 */,
+ HAL_TQM_UPDATE_TX_MPDU_COUNT = 134 /* 0x86 */,
+ HAL_TQM_WRITE_CMD = 135 /* 0x87 */,
+ HAL_OFDMA_TRIGGER_DETAILS = 136 /* 0x88 */,
+ HAL_TX_DATA = 137 /* 0x89 */,
+ HAL_TX_FES_SETUP = 138 /* 0x8a */,
+ HAL_RX_PACKET = 139 /* 0x8b */,
+ HAL_EXPECTED_RESPONSE = 140 /* 0x8c */,
+ HAL_TX_MPDU_END = 141 /* 0x8d */,
+ HAL_TX_MPDU_START = 142 /* 0x8e */,
+ HAL_TX_MSDU_END = 143 /* 0x8f */,
+ HAL_TX_MSDU_START = 144 /* 0x90 */,
+ HAL_TX_SW_MODE_SETUP = 145 /* 0x91 */,
+ HAL_TXPCU_BUFFER_STATUS = 146 /* 0x92 */,
+ HAL_TXPCU_USER_BUFFER_STATUS = 147 /* 0x93 */,
+ HAL_DATA_TO_TIME_CONFIG = 148 /* 0x94 */,
+ HAL_EXAMPLE_USER_TLV_32 = 149 /* 0x95 */,
+ HAL_MPDU_INFO = 150 /* 0x96 */,
+ HAL_PDG_USER_SETUP = 151 /* 0x97 */,
+ HAL_TX_11AH_SETUP = 152 /* 0x98 */,
+ HAL_REO_UPDATE_RX_REO_QUEUE_STATUS = 153 /* 0x99 */,
+ HAL_TX_PEER_ENTRY = 154 /* 0x9a */,
+ HAL_TX_RAW_OR_NATIVE_FRAME_SETUP = 155 /* 0x9b */,
+ HAL_EXAMPLE_STRUCT_NAME = 156 /* 0x9c */,
+ HAL_PCU_PPDU_SETUP_END_INFO = 157 /* 0x9d */,
+ HAL_PPDU_RATE_SETTING = 158 /* 0x9e */,
+ HAL_PROT_RATE_SETTING = 159 /* 0x9f */,
+ HAL_RX_MPDU_DETAILS = 160 /* 0xa0 */,
+ HAL_EXAMPLE_USER_TLV_42 = 161 /* 0xa1 */,
+ HAL_RX_MSDU_LINK = 162 /* 0xa2 */,
+ HAL_RX_REO_QUEUE = 163 /* 0xa3 */,
+ HAL_ADDR_SEARCH_ENTRY = 164 /* 0xa4 */,
+ HAL_SCHEDULER_CMD = 165 /* 0xa5 */,
+ HAL_TX_FLUSH = 166 /* 0xa6 */,
+ HAL_TQM_ENTRANCE_RING = 167 /* 0xa7 */,
+ HAL_TX_DATA_WORD = 168 /* 0xa8 */,
+ HAL_TX_MPDU_DETAILS = 169 /* 0xa9 */,
+ HAL_TX_MPDU_LINK = 170 /* 0xaa */,
+ HAL_TX_MPDU_LINK_PTR = 171 /* 0xab */,
+ HAL_TX_MPDU_QUEUE_HEAD = 172 /* 0xac */,
+ HAL_TX_MPDU_QUEUE_EXT = 173 /* 0xad */,
+ HAL_TX_MPDU_QUEUE_EXT_PTR = 174 /* 0xae */,
+ HAL_TX_MSDU_DETAILS = 175 /* 0xaf */,
+ HAL_TX_MSDU_EXTENSION = 176 /* 0xb0 */,
+ HAL_TX_MSDU_FLOW = 177 /* 0xb1 */,
+ HAL_TX_MSDU_LINK = 178 /* 0xb2 */,
+ HAL_TX_MSDU_LINK_ENTRY_PTR = 179 /* 0xb3 */,
+ HAL_RESPONSE_RATE_SETTING = 180 /* 0xb4 */,
+ HAL_TXPCU_BUFFER_BASICS = 181 /* 0xb5 */,
+ HAL_UNIFORM_DESCRIPTOR_HEADER = 182 /* 0xb6 */,
+ HAL_UNIFORM_TQM_CMD_HEADER = 183 /* 0xb7 */,
+ HAL_UNIFORM_TQM_STATUS_HEADER = 184 /* 0xb8 */,
+ HAL_USER_RATE_SETTING = 185 /* 0xb9 */,
+ HAL_WBM_BUFFER_RING = 186 /* 0xba */,
+ HAL_WBM_LINK_DESCRIPTOR_RING = 187 /* 0xbb */,
+ HAL_WBM_RELEASE_RING = 188 /* 0xbc */,
+ HAL_TX_FLUSH_REQ = 189 /* 0xbd */,
+ HAL_RX_MSDU_DETAILS = 190 /* 0xbe */,
+ HAL_TQM_WRITE_CMD_STATUS = 191 /* 0xbf */,
+ HAL_TQM_GET_MPDU_QUEUE_STATS = 192 /* 0xc0 */,
+ HAL_TQM_GET_MSDU_FLOW_STATS = 193 /* 0xc1 */,
+ HAL_EXAMPLE_USER_CTLV_32 = 194 /* 0xc2 */,
+ HAL_TX_FES_STATUS_START = 195 /* 0xc3 */,
+ HAL_TX_FES_STATUS_USER_PPDU = 196 /* 0xc4 */,
+ HAL_TX_FES_STATUS_USER_RESPONSE = 197 /* 0xc5 */,
+ HAL_TX_FES_STATUS_END = 198 /* 0xc6 */,
+ HAL_RX_TRIG_INFO = 199 /* 0xc7 */,
+ HAL_RXPCU_TX_SETUP_CLEAR = 200 /* 0xc8 */,
+ HAL_RX_FRAME_BITMAP_REQ = 201 /* 0xc9 */,
+ HAL_RX_FRAME_BITMAP_ACK = 202 /* 0xca */,
+ HAL_COEX_RX_STATUS = 203 /* 0xcb */,
+ HAL_RX_START_PARAM = 204 /* 0xcc */,
+ HAL_RX_PPDU_START = 205 /* 0xcd */,
+ HAL_RX_PPDU_END = 206 /* 0xce */,
+ HAL_RX_MPDU_START = 207 /* 0xcf */,
+ HAL_RX_MPDU_END = 208 /* 0xd0 */,
+ HAL_RX_MSDU_START = 209 /* 0xd1 */,
+ HAL_RX_MSDU_END = 210 /* 0xd2 */,
+ HAL_RX_ATTENTION = 211 /* 0xd3 */,
+ HAL_RECEIVED_RESPONSE_INFO = 212 /* 0xd4 */,
+ HAL_RX_PHY_SLEEP = 213 /* 0xd5 */,
+ HAL_RX_HEADER = 214 /* 0xd6 */,
+ HAL_RX_PEER_ENTRY = 215 /* 0xd7 */,
+ HAL_RX_FLUSH = 216 /* 0xd8 */,
+ HAL_RX_RESPONSE_REQUIRED_INFO = 217 /* 0xd9 */,
+ HAL_RX_FRAMELESS_BAR_DETAILS = 218 /* 0xda */,
+ HAL_TQM_GET_MPDU_QUEUE_STATS_STATUS = 219 /* 0xdb */,
+ HAL_TQM_GET_MSDU_FLOW_STATS_STATUS = 220 /* 0xdc */,
+ HAL_TX_CBF_INFO = 221 /* 0xdd */,
+ HAL_PCU_PPDU_SETUP_USER = 222 /* 0xde */,
+ HAL_RX_MPDU_PCU_START = 223 /* 0xdf */,
+ HAL_RX_PM_INFO = 224 /* 0xe0 */,
+ HAL_RX_USER_PPDU_END = 225 /* 0xe1 */,
+ HAL_RX_PRE_PPDU_START = 226 /* 0xe2 */,
+ HAL_RX_PREAMBLE = 227 /* 0xe3 */,
+ HAL_TX_FES_SETUP_COMPLETE = 228 /* 0xe4 */,
+ HAL_TX_LAST_MPDU_FETCHED = 229 /* 0xe5 */,
+ HAL_TXDMA_STOP_REQUEST = 230 /* 0xe6 */,
+ HAL_RXPCU_SETUP = 231 /* 0xe7 */,
+ HAL_RXPCU_USER_SETUP = 232 /* 0xe8 */,
+ HAL_TX_FES_STATUS_ACK_OR_BA = 233 /* 0xe9 */,
+ HAL_TQM_ACKED_MPDU = 234 /* 0xea */,
+ HAL_COEX_TX_RESP = 235 /* 0xeb */,
+ HAL_COEX_TX_STATUS = 236 /* 0xec */,
+ HAL_MACTX_COEX_PHY_CTRL = 237 /* 0xed */,
+ HAL_COEX_STATUS_BROADCAST = 238 /* 0xee */,
+ HAL_RESPONSE_START_STATUS = 239 /* 0xef */,
+ HAL_RESPONSE_END_STATUS = 240 /* 0xf0 */,
+ HAL_CRYPTO_STATUS = 241 /* 0xf1 */,
+ HAL_RECEIVED_TRIGGER_INFO = 242 /* 0xf2 */,
+ HAL_REO_ENTRANCE_RING = 243 /* 0xf3 */,
+ HAL_RX_MPDU_LINK = 244 /* 0xf4 */,
+ HAL_COEX_TX_STOP_CTRL = 245 /* 0xf5 */,
+ HAL_RX_PPDU_ACK_REPORT = 246 /* 0xf6 */,
+ HAL_RX_PPDU_NO_ACK_REPORT = 247 /* 0xf7 */,
+ HAL_SCH_COEX_STATUS = 248 /* 0xf8 */,
+ HAL_SCHEDULER_COMMAND_STATUS = 249 /* 0xf9 */,
+ HAL_SCHEDULER_RX_PPDU_NO_RESPONSE_STATUS = 250 /* 0xfa */,
+ HAL_TX_FES_STATUS_PROT = 251 /* 0xfb */,
+ HAL_TX_FES_STATUS_START_PPDU = 252 /* 0xfc */,
+ HAL_TX_FES_STATUS_START_PROT = 253 /* 0xfd */,
+ HAL_TXPCU_PHYTX_DEBUG32 = 254 /* 0xfe */,
+ HAL_TXPCU_PHYTX_OTHER_TRANSMIT_INFO32 = 255 /* 0xff */,
+ HAL_TX_MPDU_COUNT_TRANSFER_END = 256 /* 0x100 */,
+ HAL_WHO_ANCHOR_OFFSET = 257 /* 0x101 */,
+ HAL_WHO_ANCHOR_VALUE = 258 /* 0x102 */,
+ HAL_WHO_CCE_INFO = 259 /* 0x103 */,
+ HAL_WHO_COMMIT = 260 /* 0x104 */,
+ HAL_WHO_COMMIT_DONE = 261 /* 0x105 */,
+ HAL_WHO_FLUSH = 262 /* 0x106 */,
+ HAL_WHO_L2_LLC = 263 /* 0x107 */,
+ HAL_WHO_L2_PAYLOAD = 264 /* 0x108 */,
+ HAL_WHO_L3_CHECKSUM = 265 /* 0x109 */,
+ HAL_WHO_L3_INFO = 266 /* 0x10a */,
+ HAL_WHO_L4_CHECKSUM = 267 /* 0x10b */,
+ HAL_WHO_L4_INFO = 268 /* 0x10c */,
+ HAL_WHO_MSDU = 269 /* 0x10d */,
+ HAL_WHO_MSDU_MISC = 270 /* 0x10e */,
+ HAL_WHO_PACKET_DATA = 271 /* 0x10f */,
+ HAL_WHO_PACKET_HDR = 272 /* 0x110 */,
+ HAL_WHO_PPDU_END = 273 /* 0x111 */,
+ HAL_WHO_PPDU_START = 274 /* 0x112 */,
+ HAL_WHO_TSO = 275 /* 0x113 */,
+ HAL_WHO_WMAC_HEADER_PV0 = 276 /* 0x114 */,
+ HAL_WHO_WMAC_HEADER_PV1 = 277 /* 0x115 */,
+ HAL_WHO_WMAC_IV = 278 /* 0x116 */,
+ HAL_MPDU_INFO_END = 279 /* 0x117 */,
+ HAL_MPDU_INFO_BITMAP = 280 /* 0x118 */,
+ HAL_TX_QUEUE_EXTENSION = 281 /* 0x119 */,
+ HAL_RX_PEER_ENTRY_DETAILS = 282 /* 0x11a */,
+ HAL_RX_REO_QUEUE_REFERENCE = 283 /* 0x11b */,
+ HAL_RX_REO_QUEUE_EXT = 284 /* 0x11c */,
+ HAL_SCHEDULER_SELFGEN_RESPONSE_STATUS = 285 /* 0x11d */,
+ HAL_TQM_UPDATE_TX_MPDU_COUNT_STATUS = 286 /* 0x11e */,
+ HAL_TQM_ACKED_MPDU_STATUS = 287 /* 0x11f */,
+ HAL_TQM_ADD_MSDU_STATUS = 288 /* 0x120 */,
+ HAL_RX_MPDU_LINK_PTR = 289 /* 0x121 */,
+ HAL_REO_DESTINATION_RING = 290 /* 0x122 */,
+ HAL_TQM_LIST_GEN_DONE = 291 /* 0x123 */,
+ HAL_WHO_TERMINATE = 292 /* 0x124 */,
+ HAL_TX_LAST_MPDU_END = 293 /* 0x125 */,
+ HAL_TX_CV_DATA = 294 /* 0x126 */,
+ HAL_TCL_ENTRANCE_FROM_PPE_RING = 295 /* 0x127 */,
+ HAL_PPDU_TX_END = 296 /* 0x128 */,
+ HAL_PROT_TX_END = 297 /* 0x129 */,
+ HAL_PDG_RESPONSE_RATE_SETTING = 298 /* 0x12a */,
+ HAL_MPDU_INFO_GLOBAL_END = 299 /* 0x12b */,
+ HAL_TQM_SCH_INSTR_GLOBAL_END = 300 /* 0x12c */,
+ HAL_RX_PPDU_END_USER_STATS = 301 /* 0x12d */,
+ HAL_RX_PPDU_END_USER_STATS_EXT = 302 /* 0x12e */,
+ HAL_NO_ACK_REPORT = 303 /* 0x12f */,
+ HAL_ACK_REPORT = 304 /* 0x130 */,
+ HAL_UNIFORM_REO_CMD_HEADER = 305 /* 0x131 */,
+ HAL_REO_GET_QUEUE_STATS = 306 /* 0x132 */,
+ HAL_REO_FLUSH_QUEUE = 307 /* 0x133 */,
+ HAL_REO_FLUSH_CACHE = 308 /* 0x134 */,
+ HAL_REO_UNBLOCK_CACHE = 309 /* 0x135 */,
+ HAL_UNIFORM_REO_STATUS_HEADER = 310 /* 0x136 */,
+ HAL_REO_GET_QUEUE_STATS_STATUS = 311 /* 0x137 */,
+ HAL_REO_FLUSH_QUEUE_STATUS = 312 /* 0x138 */,
+ HAL_REO_FLUSH_CACHE_STATUS = 313 /* 0x139 */,
+ HAL_REO_UNBLOCK_CACHE_STATUS = 314 /* 0x13a */,
+ HAL_TQM_FLUSH_CACHE = 315 /* 0x13b */,
+ HAL_TQM_UNBLOCK_CACHE = 316 /* 0x13c */,
+ HAL_TQM_FLUSH_CACHE_STATUS = 317 /* 0x13d */,
+ HAL_TQM_UNBLOCK_CACHE_STATUS = 318 /* 0x13e */,
+ HAL_RX_PPDU_END_STATUS_DONE = 319 /* 0x13f */,
+ HAL_RX_STATUS_BUFFER_DONE = 320 /* 0x140 */,
+ HAL_BUFFER_ADDR_INFO = 321 /* 0x141 */,
+ HAL_RX_MSDU_DESC_INFO = 322 /* 0x142 */,
+ HAL_RX_MPDU_DESC_INFO = 323 /* 0x143 */,
+ HAL_TCL_DATA_CMD = 324 /* 0x144 */,
+ HAL_TCL_GSE_CMD = 325 /* 0x145 */,
+ HAL_TCL_EXIT_BASE = 326 /* 0x146 */,
+ HAL_TCL_COMPACT_EXIT_RING = 327 /* 0x147 */,
+ HAL_TCL_REGULAR_EXIT_RING = 328 /* 0x148 */,
+ HAL_TCL_EXTENDED_EXIT_RING = 329 /* 0x149 */,
+ HAL_UPLINK_COMMON_INFO = 330 /* 0x14a */,
+ HAL_UPLINK_USER_SETUP_INFO = 331 /* 0x14b */,
+ HAL_TX_DATA_SYNC = 332 /* 0x14c */,
+ HAL_PHYRX_CBF_READ_REQUEST_ACK = 333 /* 0x14d */,
+ HAL_TCL_STATUS_RING = 334 /* 0x14e */,
+ HAL_TQM_GET_MPDU_HEAD_INFO = 335 /* 0x14f */,
+ HAL_TQM_SYNC_CMD = 336 /* 0x150 */,
+ HAL_TQM_GET_MPDU_HEAD_INFO_STATUS = 337 /* 0x151 */,
+ HAL_TQM_SYNC_CMD_STATUS = 338 /* 0x152 */,
+ HAL_TQM_THRESHOLD_DROP_NOTIFICATION_STATUS = 339 /* 0x153 */,
+ HAL_TQM_DESCRIPTOR_THRESHOLD_REACHED_STATUS = 340 /* 0x154 */,
+ HAL_REO_FLUSH_TIMEOUT_LIST = 341 /* 0x155 */,
+ HAL_REO_FLUSH_TIMEOUT_LIST_STATUS = 342 /* 0x156 */,
+ HAL_REO_TO_PPE_RING = 343 /* 0x157 */,
+ HAL_RX_MPDU_INFO = 344 /* 0x158 */,
+ HAL_REO_DESCRIPTOR_THRESHOLD_REACHED_STATUS = 345 /* 0x159 */,
+ HAL_SCHEDULER_RX_SIFS_RESPONSE_TRIGGER_STATUS = 346 /* 0x15a */,
+ HAL_EXAMPLE_USER_TLV_32_NAME = 347 /* 0x15b */,
+ HAL_RX_PPDU_START_USER_INFO = 348 /* 0x15c */,
+ HAL_RX_RXPCU_CLASSIFICATION_OVERVIEW = 349 /* 0x15d */,
+ HAL_RX_RING_MASK = 350 /* 0x15e */,
+ HAL_WHO_CLASSIFY_INFO = 351 /* 0x15f */,
+ HAL_TXPT_CLASSIFY_INFO = 352 /* 0x160 */,
+ HAL_RXPT_CLASSIFY_INFO = 353 /* 0x161 */,
+ HAL_TX_FLOW_SEARCH_ENTRY = 354 /* 0x162 */,
+ HAL_RX_FLOW_SEARCH_ENTRY = 355 /* 0x163 */,
+ HAL_RECEIVED_TRIGGER_INFO_DETAILS = 356 /* 0x164 */,
+ HAL_COEX_MAC_NAP = 357 /* 0x165 */,
+ HAL_MACRX_ABORT_REQUEST_INFO = 358 /* 0x166 */,
+ HAL_MACTX_ABORT_REQUEST_INFO = 359 /* 0x167 */,
+ HAL_PHYRX_ABORT_REQUEST_INFO = 360 /* 0x168 */,
+ HAL_PHYTX_ABORT_REQUEST_INFO = 361 /* 0x169 */,
+ HAL_RXPCU_PPDU_END_INFO = 362 /* 0x16a */,
+ HAL_WHO_MESH_CONTROL = 363 /* 0x16b */,
+ HAL_L_SIG_A_INFO = 364 /* 0x16c */,
+ HAL_L_SIG_B_INFO = 365 /* 0x16d */,
+ HAL_HT_SIG_INFO = 366 /* 0x16e */,
+ HAL_VHT_SIG_A_INFO = 367 /* 0x16f */,
+ HAL_VHT_SIG_B_SU20_INFO = 368 /* 0x170 */,
+ HAL_VHT_SIG_B_SU40_INFO = 369 /* 0x171 */,
+ HAL_VHT_SIG_B_SU80_INFO = 370 /* 0x172 */,
+ HAL_VHT_SIG_B_SU160_INFO = 371 /* 0x173 */,
+ HAL_VHT_SIG_B_MU20_INFO = 372 /* 0x174 */,
+ HAL_VHT_SIG_B_MU40_INFO = 373 /* 0x175 */,
+ HAL_VHT_SIG_B_MU80_INFO = 374 /* 0x176 */,
+ HAL_VHT_SIG_B_MU160_INFO = 375 /* 0x177 */,
+ HAL_SERVICE_INFO = 376 /* 0x178 */,
+ HAL_HE_SIG_A_SU_INFO = 377 /* 0x179 */,
+ HAL_HE_SIG_A_MU_DL_INFO = 378 /* 0x17a */,
+ HAL_HE_SIG_A_MU_UL_INFO = 379 /* 0x17b */,
+ HAL_HE_SIG_B1_MU_INFO = 380 /* 0x17c */,
+ HAL_HE_SIG_B2_MU_INFO = 381 /* 0x17d */,
+ HAL_HE_SIG_B2_OFDMA_INFO = 382 /* 0x17e */,
+ HAL_PDG_SW_MODE_BW_START = 383 /* 0x17f */,
+ HAL_PDG_SW_MODE_BW_END = 384 /* 0x180 */,
+ HAL_PDG_WAIT_FOR_MAC_REQUEST = 385 /* 0x181 */,
+ HAL_PDG_WAIT_FOR_PHY_REQUEST = 386 /* 0x182 */,
+ HAL_SCHEDULER_END = 387 /* 0x183 */,
+ HAL_PEER_TABLE_ENTRY = 388 /* 0x184 */,
+ HAL_SW_PEER_INFO = 389 /* 0x185 */,
+ HAL_RXOLE_CCE_CLASSIFY_INFO = 390 /* 0x186 */,
+ HAL_TCL_CCE_CLASSIFY_INFO = 391 /* 0x187 */,
+ HAL_RXOLE_CCE_INFO = 392 /* 0x188 */,
+ HAL_TCL_CCE_INFO = 393 /* 0x189 */,
+ HAL_TCL_CCE_SUPERRULE = 394 /* 0x18a */,
+ HAL_CCE_RULE = 395 /* 0x18b */,
+ HAL_RX_PPDU_START_DROPPED = 396 /* 0x18c */,
+ HAL_RX_PPDU_END_DROPPED = 397 /* 0x18d */,
+ HAL_RX_PPDU_END_STATUS_DONE_DROPPED = 398 /* 0x18e */,
+ HAL_RX_MPDU_START_DROPPED = 399 /* 0x18f */,
+ HAL_RX_MSDU_START_DROPPED = 400 /* 0x190 */,
+ HAL_RX_MSDU_END_DROPPED = 401 /* 0x191 */,
+ HAL_RX_MPDU_END_DROPPED = 402 /* 0x192 */,
+ HAL_RX_ATTENTION_DROPPED = 403 /* 0x193 */,
+ HAL_TXPCU_USER_SETUP = 404 /* 0x194 */,
+ HAL_RXPCU_USER_SETUP_EXT = 405 /* 0x195 */,
+ HAL_CE_SRC_DESC = 406 /* 0x196 */,
+ HAL_CE_STAT_DESC = 407 /* 0x197 */,
+ HAL_RXOLE_CCE_SUPERRULE = 408 /* 0x198 */,
+ HAL_TX_RATE_STATS_INFO = 409 /* 0x199 */,
+ HAL_CMD_PART_0_END = 410 /* 0x19a */,
+ HAL_MACTX_SYNTH_ON = 411 /* 0x19b */,
+ HAL_SCH_CRITICAL_TLV_REFERENCE = 412 /* 0x19c */,
+ HAL_TQM_MPDU_GLOBAL_START = 413 /* 0x19d */,
+ HAL_EXAMPLE_TLV_32 = 414 /* 0x19e */,
+ HAL_TQM_UPDATE_TX_MSDU_FLOW = 415 /* 0x19f */,
+ HAL_TQM_UPDATE_TX_MPDU_QUEUE_HEAD = 416 /* 0x1a0 */,
+ HAL_TQM_UPDATE_TX_MSDU_FLOW_STATUS = 417 /* 0x1a1 */,
+ HAL_TQM_UPDATE_TX_MPDU_QUEUE_HEAD_STATUS = 418 /* 0x1a2 */,
+ HAL_REO_UPDATE_RX_REO_QUEUE = 419 /* 0x1a3 */,
+ HAL_CE_DST_DESC = 420 /* 0x1a4 */,
+ HAL_TLV_BASE = 511 /* 0x1ff */,
+};
+
+#define HAL_TLV_HDR_TAG GENMASK(9, 1)
+#define HAL_TLV_HDR_LEN GENMASK(25, 10)
+#define HAL_TLV_USR_ID GENMASK(31, 26)
+
+#define HAL_TLV_ALIGN 4
+
+struct hal_tlv_hdr {
+ u32 tl;
+ u8 value[];
+} __packed;
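+
+/* Illustrative sketch, not part of the driver: stepping through a TLV
+ * stream with the header masks above. Each TLV is a 32-bit 'tl' word
+ * followed by a 4-byte-aligned value. Hypothetical helper; bounds
+ * checking is omitted.
+ */
+static inline u8 *ath11k_example_tlv_next(u8 *ptr, u16 *tag, u16 *len)
+{
+ struct hal_tlv_hdr *tlv = (struct hal_tlv_hdr *)ptr;
+
+ *tag = FIELD_GET(HAL_TLV_HDR_TAG, tlv->tl);
+ *len = FIELD_GET(HAL_TLV_HDR_LEN, tlv->tl);
+
+ /* skip the header word and the value, padded to HAL_TLV_ALIGN */
+ return ptr + sizeof(*tlv) + ALIGN(*len, HAL_TLV_ALIGN);
+}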
+
+#define RX_MPDU_DESC_INFO0_MSDU_COUNT GENMASK(7, 0)
+#define RX_MPDU_DESC_INFO0_SEQ_NUM GENMASK(19, 8)
+#define RX_MPDU_DESC_INFO0_FRAG_FLAG BIT(20)
+#define RX_MPDU_DESC_INFO0_MPDU_RETRY BIT(21)
+#define RX_MPDU_DESC_INFO0_AMPDU_FLAG BIT(22)
+#define RX_MPDU_DESC_INFO0_BAR_FRAME BIT(23)
+#define RX_MPDU_DESC_INFO0_VALID_PN BIT(24)
+#define RX_MPDU_DESC_INFO0_VALID_SA BIT(25)
+#define RX_MPDU_DESC_INFO0_SA_IDX_TIMEOUT BIT(26)
+#define RX_MPDU_DESC_INFO0_VALID_DA BIT(27)
+#define RX_MPDU_DESC_INFO0_DA_MCBC BIT(28)
+#define RX_MPDU_DESC_INFO0_DA_IDX_TIMEOUT BIT(29)
+#define RX_MPDU_DESC_INFO0_RAW_MPDU BIT(30)
+
+#define RX_MPDU_DESC_META_DATA_PEER_ID GENMASK(15, 0)
+
+struct rx_mpdu_desc {
+ u32 info0; /* %RX_MPDU_DESC_INFO0_ */
+ u32 meta_data;
+} __packed;
+
+/* rx_mpdu_desc
+ * Producer: RXDMA
+ * Consumer: REO/SW/FW
+ *
+ * msdu_count
+ * The number of MSDUs within the MPDU
+ *
+ * mpdu_sequence_number
+ * The field can have two different meanings based on the setting
+ * of field 'bar_frame'. If 'bar_frame' is set, it holds the MPDU
+ * start sequence number from the BAR frame; otherwise it holds
+ * the MPDU sequence number of the received frame.
+ *
+ * fragment_flag
+ * When set, this MPDU is a fragment and REO should forward this
+ * fragment MPDU to the REO destination ring without any reorder
+ * checks, pn checks or bitmap update. This implies that REO is
+ * forwarding the pointer to the MSDU link descriptor.
+ *
+ * mpdu_retry_bit
+ * The retry bit setting from the MPDU header of the received frame
+ *
+ * ampdu_flag
+ * Indicates the MPDU was received as part of an A-MPDU.
+ *
+ * bar_frame
+ * Indicates the received frame is a BAR frame. After processing,
+ * this frame shall be pushed to SW or deleted.
+ *
+ * valid_pn
+ * When not set, REO will not perform a PN sequence number check.
+ *
+ * valid_sa
+ * Indicates OLE found a valid SA entry for all MSDUs in this MPDU.
+ *
+ * sa_idx_timeout
+ * Indicates that at least one MSDU within the MPDU had an
+ * unsuccessful MAC source address search due to the expiration of
+ * the search timer.
+ *
+ * valid_da
+ * When set, OLE found a valid DA entry for all MSDUs in this MPDU.
+ *
+ * da_mcbc
+ * Field only valid if valid_da is set. Indicates at least one of
+ * the DA addresses is a Multicast or Broadcast address.
+ *
+ * da_idx_timeout
+ * Indicates that at least one MSDU within the MPDU had an
+ * unsuccessful MAC destination address search due to the
+ * expiration of the search timer.
+ *
+ * raw_mpdu
+ * Field only valid when first_msdu_in_mpdu_flag is set. Indicates
+ * that the MSDU buffer contains a 'RAW' MPDU.
+ */
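+
+/* Illustrative sketch, not part of the driver: extracting fields from
+ * rx_mpdu_desc with the masks above. Hypothetical helpers.
+ */
+static inline u8 ath11k_example_mpdu_msdu_count(struct rx_mpdu_desc *desc)
+{
+ return FIELD_GET(RX_MPDU_DESC_INFO0_MSDU_COUNT, desc->info0);
+}
+
+static inline u16 ath11k_example_mpdu_peer_id(struct rx_mpdu_desc *desc)
+{
+ return FIELD_GET(RX_MPDU_DESC_META_DATA_PEER_ID, desc->meta_data);
+}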
+
+enum hal_rx_msdu_desc_reo_dest_ind {
+ HAL_RX_MSDU_DESC_REO_DEST_IND_TCL,
+ HAL_RX_MSDU_DESC_REO_DEST_IND_SW1,
+ HAL_RX_MSDU_DESC_REO_DEST_IND_SW2,
+ HAL_RX_MSDU_DESC_REO_DEST_IND_SW3,
+ HAL_RX_MSDU_DESC_REO_DEST_IND_SW4,
+ HAL_RX_MSDU_DESC_REO_DEST_IND_RELEASE,
+ HAL_RX_MSDU_DESC_REO_DEST_IND_FW,
+};
+
+#define RX_MSDU_DESC_INFO0_FIRST_MSDU_IN_MPDU BIT(0)
+#define RX_MSDU_DESC_INFO0_LAST_MSDU_IN_MPDU BIT(1)
+#define RX_MSDU_DESC_INFO0_MSDU_CONTINUATION BIT(2)
+#define RX_MSDU_DESC_INFO0_MSDU_LENGTH GENMASK(16, 3)
+#define RX_MSDU_DESC_INFO0_REO_DEST_IND GENMASK(21, 17)
+#define RX_MSDU_DESC_INFO0_MSDU_DROP BIT(22)
+#define RX_MSDU_DESC_INFO0_VALID_SA BIT(23)
+#define RX_MSDU_DESC_INFO0_SA_IDX_TIMEOUT BIT(24)
+#define RX_MSDU_DESC_INFO0_VALID_DA BIT(25)
+#define RX_MSDU_DESC_INFO0_DA_MCBC BIT(26)
+#define RX_MSDU_DESC_INFO0_DA_IDX_TIMEOUT BIT(27)
+
+#define HAL_RX_MSDU_PKT_LENGTH_GET(val) \
+ (FIELD_GET(RX_MSDU_DESC_INFO0_MSDU_LENGTH, (val)))
+
+struct rx_msdu_desc {
+ u32 info0;
+ u32 rsvd0;
+} __packed;
+
+/* rx_msdu_desc
+ *
+ * first_msdu_in_mpdu
+ * Indicates first msdu in mpdu.
+ *
+ * last_msdu_in_mpdu
+ * Indicates last msdu in mpdu. This flag can be true only when
+ * 'msdu_continuation' is set to 0. This implies that when an msdu
+ * is spread out over multiple buffers and thus msdu_continuation
+ * is set, 'last_msdu_in_mpdu' can be set only for the very last
+ * buffer of the msdu.
+ *
+ * When both first_msdu_in_mpdu and last_msdu_in_mpdu are set,
+ * the MPDU that this MSDU belongs to only contains a single MSDU.
+ *
+ * msdu_continuation
+ * When set, this MSDU buffer was not able to hold the entire MSDU.
+ * The next buffer will therefore contain additional information
+ * related to this MSDU.
+ *
+ * msdu_length
+ * Field is only valid in combination with 'first_msdu_in_mpdu'
+ * being set. Full MSDU length in bytes after decapsulation. The
+ * field is still valid for MPDU frames without A-MSDU, where it
+ * also represents the MSDU length after decapsulation. In case of
+ * RAW MPDUs, it indicates the length of the entire MPDU (without
+ * the FCS field).
+ *
+ * reo_destination_indication
+ * The id of the reo exit ring where the msdu frame shall push
+ * after (MPDU level) reordering has finished. Values are defined
+ * in enum %HAL_RX_MSDU_DESC_REO_DEST_IND_.
+ *
+ * msdu_drop
+ * Indicates that REO shall drop this MSDU and not forward it to
+ * any other ring.
+ *
+ * valid_sa
+ * Indicates OLE found a valid SA entry for this MSDU.
+ *
+ * sa_idx_timeout
+ * Indicates an unsuccessful MAC source address search due to
+ * the expiration of the search timer for this MSDU.
+ *
+ * valid_da
+ * When set, OLE found a valid DA entry for this MSDU.
+ *
+ * da_mcbc
+ * Field only valid if valid_da is set. Indicates the DA address
+ * is a Multicast or Broadcast address for this MSDU.
+ *
+ * da_idx_timeout
+ * Indicates an unsuccessful MAC destination address search due
+ * to the expiration of the search timer for this MSDU.
+ */
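+
+/* Illustrative sketch, not part of the driver: the continuation flag and
+ * HAL_RX_MSDU_PKT_LENGTH_GET() above are typically combined when
+ * reassembling an MSDU spread over several buffers. Hypothetical helper.
+ */
+static inline bool ath11k_example_msdu_fits_one_buf(struct rx_msdu_desc *desc)
+{
+ /* a single-buffer MSDU never has the continuation flag set */
+ return !(desc->info0 & RX_MSDU_DESC_INFO0_MSDU_CONTINUATION);
+}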
+
+enum hal_reo_dest_ring_buffer_type {
+ HAL_REO_DEST_RING_BUFFER_TYPE_MSDU,
+ HAL_REO_DEST_RING_BUFFER_TYPE_LINK_DESC,
+};
+
+enum hal_reo_dest_ring_push_reason {
+ HAL_REO_DEST_RING_PUSH_REASON_ERR_DETECTED,
+ HAL_REO_DEST_RING_PUSH_REASON_ROUTING_INSTRUCTION,
+};
+
+enum hal_reo_dest_ring_error_code {
+ HAL_REO_DEST_RING_ERROR_CODE_DESC_ADDR_ZERO,
+ HAL_REO_DEST_RING_ERROR_CODE_DESC_INVALID,
+ HAL_REO_DEST_RING_ERROR_CODE_AMPDU_IN_NON_BA,
+ HAL_REO_DEST_RING_ERROR_CODE_NON_BA_DUPLICATE,
+ HAL_REO_DEST_RING_ERROR_CODE_BA_DUPLICATE,
+ HAL_REO_DEST_RING_ERROR_CODE_FRAME_2K_JUMP,
+ HAL_REO_DEST_RING_ERROR_CODE_BAR_2K_JUMP,
+ HAL_REO_DEST_RING_ERROR_CODE_FRAME_OOR,
+ HAL_REO_DEST_RING_ERROR_CODE_BAR_OOR,
+ HAL_REO_DEST_RING_ERROR_CODE_NO_BA_SESSION,
+ HAL_REO_DEST_RING_ERROR_CODE_FRAME_SN_EQUALS_SSN,
+ HAL_REO_DEST_RING_ERROR_CODE_PN_CHECK_FAILED,
+ HAL_REO_DEST_RING_ERROR_CODE_2K_ERR_FLAG_SET,
+ HAL_REO_DEST_RING_ERROR_CODE_PN_ERR_FLAG_SET,
+ HAL_REO_DEST_RING_ERROR_CODE_DESC_BLOCKED,
+ HAL_REO_DEST_RING_ERROR_CODE_MAX,
+};
+
+#define HAL_REO_DEST_RING_INFO0_QUEUE_ADDR_HI GENMASK(7, 0)
+#define HAL_REO_DEST_RING_INFO0_BUFFER_TYPE BIT(8)
+#define HAL_REO_DEST_RING_INFO0_PUSH_REASON GENMASK(10, 9)
+#define HAL_REO_DEST_RING_INFO0_ERROR_CODE GENMASK(15, 11)
+#define HAL_REO_DEST_RING_INFO0_RX_QUEUE_NUM GENMASK(31, 16)
+
+#define HAL_REO_DEST_RING_INFO1_REORDER_INFO_VALID BIT(0)
+#define HAL_REO_DEST_RING_INFO1_REORDER_OPCODE GENMASK(4, 1)
+#define HAL_REO_DEST_RING_INFO1_REORDER_SLOT_IDX GENMASK(12, 5)
+
+#define HAL_REO_DEST_RING_INFO2_RING_ID GENMASK(27, 20)
+#define HAL_REO_DEST_RING_INFO2_LOOPING_COUNT GENMASK(31, 28)
+
+struct hal_reo_dest_ring {
+ struct ath11k_buffer_addr buf_addr_info;
+ struct rx_mpdu_desc rx_mpdu_info;
+ struct rx_msdu_desc rx_msdu_info;
+ u32 queue_addr_lo;
+ u32 info0; /* %HAL_REO_DEST_RING_INFO0_ */
+ u32 info1; /* %HAL_REO_DEST_RING_INFO1_ */
+ u32 rsvd0;
+ u32 rsvd1;
+ u32 rsvd2;
+ u32 rsvd3;
+ u32 rsvd4;
+ u32 rsvd5;
+ u32 info2; /* %HAL_REO_DEST_RING_INFO2_ */
+} __packed;
+
+/* hal_reo_dest_ring
+ *
+ * Producer: RXDMA
+ * Consumer: REO/SW/FW
+ *
+ * buf_addr_info
+ * Details of the physical address of a buffer or MSDU
+ * link descriptor.
+ *
+ * rx_mpdu_info
+ * General information related to the MPDU that is passed
+ * on from REO entrance ring to the REO destination ring.
+ *
+ * rx_msdu_info
+ * General information related to the MSDU that is passed
+ * on from RXDMA all the way to the REO destination ring.
+ *
+ * queue_addr_lo
+ * Address (lower 32 bits) of the REO queue descriptor.
+ *
+ * queue_addr_hi
+ * Address (upper 8 bits) of the REO queue descriptor.
+ *
+ * buffer_type
+ * Indicates the type of address provided in the buf_addr_info.
+ * Values are defined in enum %HAL_REO_DEST_RING_BUFFER_TYPE_.
+ *
+ * push_reason
+ * Reason for pushing this frame to this exit ring. Values are
+ * defined in enum %HAL_REO_DEST_RING_PUSH_REASON_.
+ *
+ * error_code
+ * Valid only when 'push_reason' indicates an error was detected.
+ * All error codes are defined in enum
+ * %HAL_REO_DEST_RING_ERROR_CODE_.
+ *
+ * rx_queue_num
+ * Indicates the REO MPDU reorder queue id from which this frame
+ * originated.
+ *
+ * reorder_info_valid
+ * When set, REO has been instructed to not perform the actual
+ * re-ordering of frames for this queue, but just to insert
+ * the reorder opcodes.
+ *
+ * reorder_opcode
+ * Field is valid when 'reorder_info_valid' is set. This field is
+ * always valid for debug purpose as well.
+ *
+ * reorder_slot_idx
+ * Valid only when 'reorder_info_valid' is set.
+ *
+ * ring_id
+ * The buffer pointer ring id.
+ * 0 - Idle ring
+ * 1 - N refers to other rings.
+ *
+ * looping_count
+ * Indicates the number of times the producer of entries into
+ * this ring has looped around the ring.
+ */
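+
+/* Illustrative sketch, not part of the driver: classifying a REO
+ * destination ring entry with the info0 masks above. Hypothetical
+ * helper.
+ */
+static inline bool
+ath11k_example_reo_dest_is_err(struct hal_reo_dest_ring *desc, u32 *err_code)
+{
+ u32 push_reason = FIELD_GET(HAL_REO_DEST_RING_INFO0_PUSH_REASON,
+			     desc->info0);
+
+ *err_code = FIELD_GET(HAL_REO_DEST_RING_INFO0_ERROR_CODE, desc->info0);
+
+ return push_reason == HAL_REO_DEST_RING_PUSH_REASON_ERR_DETECTED;
+}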
+
+enum hal_reo_entr_rxdma_ecode {
+ HAL_REO_ENTR_RING_RXDMA_ECODE_OVERFLOW_ERR,
+ HAL_REO_ENTR_RING_RXDMA_ECODE_MPDU_LEN_ERR,
+ HAL_REO_ENTR_RING_RXDMA_ECODE_FCS_ERR,
+ HAL_REO_ENTR_RING_RXDMA_ECODE_DECRYPT_ERR,
+ HAL_REO_ENTR_RING_RXDMA_ECODE_TKIP_MIC_ERR,
+ HAL_REO_ENTR_RING_RXDMA_ECODE_UNECRYPTED_ERR,
+ HAL_REO_ENTR_RING_RXDMA_ECODE_MSDU_LEN_ERR,
+ HAL_REO_ENTR_RING_RXDMA_ECODE_MSDU_LIMIT_ERR,
+ HAL_REO_ENTR_RING_RXDMA_ECODE_WIFI_PARSE_ERR,
+ HAL_REO_ENTR_RING_RXDMA_ECODE_AMSDU_PARSE_ERR,
+ HAL_REO_ENTR_RING_RXDMA_ECODE_SA_TIMEOUT_ERR,
+ HAL_REO_ENTR_RING_RXDMA_ECODE_DA_TIMEOUT_ERR,
+ HAL_REO_ENTR_RING_RXDMA_ECODE_FLOW_TIMEOUT_ERR,
+ HAL_REO_ENTR_RING_RXDMA_ECODE_FLUSH_REQUEST_ERR,
+ HAL_REO_ENTR_RING_RXDMA_ECODE_MAX,
+};
+
+#define HAL_REO_ENTR_RING_INFO0_QUEUE_ADDR_HI GENMASK(7, 0)
+#define HAL_REO_ENTR_RING_INFO0_MPDU_BYTE_COUNT GENMASK(21, 8)
+#define HAL_REO_ENTR_RING_INFO0_DEST_IND GENMASK(26, 22)
+#define HAL_REO_ENTR_RING_INFO0_FRAMELESS_BAR BIT(27)
+
+#define HAL_REO_ENTR_RING_INFO1_RXDMA_PUSH_REASON GENMASK(1, 0)
+#define HAL_REO_ENTR_RING_INFO1_RXDMA_ERROR_CODE GENMASK(6, 2)
+
+struct hal_reo_entrance_ring {
+ struct ath11k_buffer_addr buf_addr_info;
+ struct rx_mpdu_desc rx_mpdu_info;
+ u32 queue_addr_lo;
+ u32 info0; /* %HAL_REO_ENTR_RING_INFO0_ */
+ u32 info1; /* %HAL_REO_ENTR_RING_INFO1_ */
+ u32 info2; /* %HAL_REO_DEST_RING_INFO2_ */
+} __packed;
+
+/* hal_reo_entrance_ring
+ *
+ * Producer: RXDMA
+ * Consumer: REO
+ *
+ * buf_addr_info
+ * Details of the physical address of a buffer or MSDU
+ * link descriptor.
+ *
+ * rx_mpdu_info
+ * General information related to the MPDU that is passed
+ * on from REO entrance ring to the REO destination ring.
+ *
+ * queue_addr_lo
+ * Address (lower 32 bits) of the REO queue descriptor.
+ *
+ * queue_addr_hi
+ * Address (upper 8 bits) of the REO queue descriptor.
+ *
+ * mpdu_byte_count
+ * An approximation of the number of bytes received in this MPDU.
+ *	Used to keep stats on the amount of data flowing
+ * through a queue.
+ *
+ * reo_destination_indication
+ *	The id of the REO exit ring to which the MSDU frame shall be
+ *	pushed after (MPDU level) reordering has finished. Values are defined
+ * in enum %HAL_RX_MSDU_DESC_REO_DEST_IND_.
+ *
+ * frameless_bar
+ * Indicates that this REO entrance ring struct contains BAR info
+ * from a multi TID BAR frame. The original multi TID BAR frame
+ * itself contained all the REO info for the first TID, but all
+ * the subsequent TID info and their linkage to the REO descriptors
+ * is passed down as 'frameless' BAR info.
+ *
+ * The only fields valid in this descriptor when this bit is set
+ * are queue_addr_lo, queue_addr_hi, mpdu_sequence_number,
+ * bar_frame and peer_meta_data.
+ *
+ * rxdma_push_reason
+ * Reason for pushing this frame to this exit ring. Values are
+ * defined in enum %HAL_REO_DEST_RING_PUSH_REASON_.
+ *
+ * rxdma_error_code
+ *	Valid only when 'push_reason' indicates an error. All error codes are
+ * defined in enum %HAL_REO_ENTR_RING_RXDMA_ECODE_.
+ *
+ * ring_id
+ * The buffer pointer ring id.
+ * 0 - Idle ring
+ * 1 - N refers to other rings.
+ *
+ * looping_count
+ * Indicates the number of times the producer of entries into
+ * this ring has looped around the ring.
+ */
+
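+/* Editorial example (illustrative sketch, not part of the driver): the
+ * INFO1 masks above are decoded with FIELD_GET() from <linux/bitfield.h>.
+ * Given a hypothetical struct hal_reo_entrance_ring *ent:
+ *
+ *    u32 push_reason = FIELD_GET(HAL_REO_ENTR_RING_INFO1_RXDMA_PUSH_REASON,
+ *                                ent->info1);
+ *    u32 ecode = FIELD_GET(HAL_REO_ENTR_RING_INFO1_RXDMA_ERROR_CODE,
+ *                          ent->info1);
+ *
+ * 'ecode' then maps to enum hal_reo_entr_rxdma_ecode, e.g.
+ * HAL_REO_ENTR_RING_RXDMA_ECODE_FCS_ERR for an FCS failure.
+ */
+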
+#define HAL_SW_MON_RING_INFO0_RXDMA_PUSH_REASON GENMASK(1, 0)
+#define HAL_SW_MON_RING_INFO0_RXDMA_ERROR_CODE GENMASK(6, 2)
+#define HAL_SW_MON_RING_INFO0_MPDU_FRAG_NUMBER GENMASK(10, 7)
+#define HAL_SW_MON_RING_INFO0_FRAMELESS_BAR BIT(11)
+#define HAL_SW_MON_RING_INFO0_STATUS_BUF_CNT GENMASK(15, 12)
+#define HAL_SW_MON_RING_INFO0_END_OF_PPDU BIT(16)
+
+#define HAL_SW_MON_RING_INFO1_PHY_PPDU_ID GENMASK(15, 0)
+#define HAL_SW_MON_RING_INFO1_RING_ID GENMASK(27, 20)
+#define HAL_SW_MON_RING_INFO1_LOOPING_COUNT GENMASK(31, 28)
+
+struct hal_sw_monitor_ring {
+ struct ath11k_buffer_addr buf_addr_info;
+ struct rx_mpdu_desc rx_mpdu_info;
+ struct ath11k_buffer_addr status_buf_addr_info;
+ u32 info0;
+ u32 info1;
+} __packed;
+
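+/* Editorial example (illustrative sketch): given a hypothetical
+ * struct hal_sw_monitor_ring *sw_mon, the end-of-PPDU flag and the number
+ * of status buffers can be decoded from info0 as:
+ *
+ *    bool eop = FIELD_GET(HAL_SW_MON_RING_INFO0_END_OF_PPDU, sw_mon->info0);
+ *    u32 buf_cnt = FIELD_GET(HAL_SW_MON_RING_INFO0_STATUS_BUF_CNT,
+ *                            sw_mon->info0);
+ */
+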
+#define HAL_REO_CMD_HDR_INFO0_CMD_NUMBER GENMASK(15, 0)
+#define HAL_REO_CMD_HDR_INFO0_STATUS_REQUIRED BIT(16)
+
+struct hal_reo_cmd_hdr {
+ u32 info0;
+} __packed;
+
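+/* Editorial example (illustrative sketch): every REO command starts with
+ * this header. SW picks a command number so the eventual status descriptor
+ * can be correlated with the command; 'cmd_num' here is a hypothetical
+ * counter.
+ *
+ *    struct hal_reo_cmd_hdr hdr = {};
+ *
+ *    hdr.info0 = FIELD_PREP(HAL_REO_CMD_HDR_INFO0_CMD_NUMBER, cmd_num) |
+ *                HAL_REO_CMD_HDR_INFO0_STATUS_REQUIRED;
+ */
+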
+#define HAL_REO_GET_QUEUE_STATS_INFO0_QUEUE_ADDR_HI GENMASK(7, 0)
+#define HAL_REO_GET_QUEUE_STATS_INFO0_CLEAR_STATS BIT(8)
+
+struct hal_reo_get_queue_stats {
+ struct hal_reo_cmd_hdr cmd;
+ u32 queue_addr_lo;
+ u32 info0;
+ u32 rsvd0[6];
+} __packed;
+
+/* hal_reo_get_queue_stats
+ * Producer: SW
+ * Consumer: REO
+ *
+ * cmd
+ * Details for command execution tracking purposes.
+ *
+ * queue_addr_lo
+ * Address (lower 32 bits) of the REO queue descriptor.
+ *
+ * queue_addr_hi
+ * Address (upper 8 bits) of the REO queue descriptor.
+ *
+ * clear_stats
+ *	Clear stats setting. When set, clear the stats after
+ *	generating the status.
+ *
+ * Following stats will be cleared.
+ * Timeout_count
+ * Forward_due_to_bar_count
+ * Duplicate_count
+ * Frames_in_order_count
+ * BAR_received_count
+ * MPDU_Frames_processed_count
+ * MSDU_Frames_processed_count
+ * Total_processed_byte_count
+ * Late_receive_MPDU_count
+ * window_jump_2k
+ * Hole_count
+ */
+
+#define HAL_REO_FLUSH_QUEUE_INFO0_DESC_ADDR_HI GENMASK(7, 0)
+#define HAL_REO_FLUSH_QUEUE_INFO0_BLOCK_DESC_ADDR BIT(8)
+#define HAL_REO_FLUSH_QUEUE_INFO0_BLOCK_RESRC_IDX GENMASK(10, 9)
+
+struct hal_reo_flush_queue {
+ struct hal_reo_cmd_hdr cmd;
+ u32 desc_addr_lo;
+ u32 info0;
+ u32 rsvd0[6];
+} __packed;
+
+#define HAL_REO_FLUSH_CACHE_INFO0_CACHE_ADDR_HI GENMASK(7, 0)
+#define HAL_REO_FLUSH_CACHE_INFO0_FWD_ALL_MPDUS BIT(8)
+#define HAL_REO_FLUSH_CACHE_INFO0_RELEASE_BLOCK_IDX BIT(9)
+#define HAL_REO_FLUSH_CACHE_INFO0_BLOCK_RESRC_IDX GENMASK(11, 10)
+#define HAL_REO_FLUSH_CACHE_INFO0_FLUSH_WO_INVALIDATE BIT(12)
+#define HAL_REO_FLUSH_CACHE_INFO0_BLOCK_CACHE_USAGE BIT(13)
+#define HAL_REO_FLUSH_CACHE_INFO0_FLUSH_ALL BIT(14)
+
+struct hal_reo_flush_cache {
+ struct hal_reo_cmd_hdr cmd;
+ u32 cache_addr_lo;
+ u32 info0;
+ u32 rsvd0[6];
+} __packed;
+
+#define HAL_TCL_DATA_CMD_INFO0_DESC_TYPE BIT(0)
+#define HAL_TCL_DATA_CMD_INFO0_EPD BIT(1)
+#define HAL_TCL_DATA_CMD_INFO0_ENCAP_TYPE GENMASK(3, 2)
+#define HAL_TCL_DATA_CMD_INFO0_ENCRYPT_TYPE GENMASK(7, 4)
+#define HAL_TCL_DATA_CMD_INFO0_SRC_BUF_SWAP BIT(8)
+#define HAL_TCL_DATA_CMD_INFO0_LNK_META_SWAP BIT(9)
+#define HAL_TCL_DATA_CMD_INFO0_SEARCH_TYPE GENMASK(13, 12)
+#define HAL_TCL_DATA_CMD_INFO0_ADDR_EN GENMASK(15, 14)
+#define HAL_TCL_DATA_CMD_INFO0_CMD_NUM GENMASK(31, 16)
+
+#define HAL_TCL_DATA_CMD_INFO1_DATA_LEN GENMASK(15, 0)
+#define HAL_TCL_DATA_CMD_INFO1_IP4_CKSUM_EN BIT(16)
+#define HAL_TCL_DATA_CMD_INFO1_UDP4_CKSUM_EN BIT(17)
+#define HAL_TCL_DATA_CMD_INFO1_UDP6_CKSUM_EN BIT(18)
+#define HAL_TCL_DATA_CMD_INFO1_TCP4_CKSUM_EN BIT(19)
+#define HAL_TCL_DATA_CMD_INFO1_TCP6_CKSUM_EN BIT(20)
+#define HAL_TCL_DATA_CMD_INFO1_TO_FW BIT(21)
+#define HAL_TCL_DATA_CMD_INFO1_PKT_OFFSET GENMASK(31, 23)
+
+#define HAL_TCL_DATA_CMD_INFO2_BUF_TIMESTAMP GENMASK(18, 0)
+#define HAL_TCL_DATA_CMD_INFO2_BUF_T_VALID BIT(19)
+#define HAL_IPQ8074_TCL_DATA_CMD_INFO2_MESH_ENABLE BIT(20)
+#define HAL_TCL_DATA_CMD_INFO2_TID_OVERWRITE BIT(21)
+#define HAL_TCL_DATA_CMD_INFO2_TID GENMASK(25, 22)
+#define HAL_TCL_DATA_CMD_INFO2_LMAC_ID GENMASK(27, 26)
+
+#define HAL_TCL_DATA_CMD_INFO3_DSCP_TID_TABLE_IDX GENMASK(5, 0)
+#define HAL_TCL_DATA_CMD_INFO3_SEARCH_INDEX GENMASK(25, 6)
+#define HAL_TCL_DATA_CMD_INFO3_CACHE_SET_NUM GENMASK(29, 26)
+#define HAL_QCN9074_TCL_DATA_CMD_INFO3_MESH_ENABLE GENMASK(31, 30)
+
+#define HAL_TCL_DATA_CMD_INFO4_RING_ID GENMASK(27, 20)
+#define HAL_TCL_DATA_CMD_INFO4_LOOPING_COUNT GENMASK(31, 28)
+
+enum hal_encrypt_type {
+ HAL_ENCRYPT_TYPE_WEP_40,
+ HAL_ENCRYPT_TYPE_WEP_104,
+ HAL_ENCRYPT_TYPE_TKIP_NO_MIC,
+ HAL_ENCRYPT_TYPE_WEP_128,
+ HAL_ENCRYPT_TYPE_TKIP_MIC,
+ HAL_ENCRYPT_TYPE_WAPI,
+ HAL_ENCRYPT_TYPE_CCMP_128,
+ HAL_ENCRYPT_TYPE_OPEN,
+ HAL_ENCRYPT_TYPE_CCMP_256,
+ HAL_ENCRYPT_TYPE_GCMP_128,
+ HAL_ENCRYPT_TYPE_AES_GCMP_256,
+ HAL_ENCRYPT_TYPE_WAPI_GCM_SM4,
+};
+
+enum hal_tcl_encap_type {
+ HAL_TCL_ENCAP_TYPE_RAW,
+ HAL_TCL_ENCAP_TYPE_NATIVE_WIFI,
+ HAL_TCL_ENCAP_TYPE_ETHERNET,
+ HAL_TCL_ENCAP_TYPE_802_3 = 3,
+};
+
+enum hal_tcl_desc_type {
+ HAL_TCL_DESC_TYPE_BUFFER,
+ HAL_TCL_DESC_TYPE_EXT_DESC,
+};
+
+enum hal_wbm_htt_tx_comp_status {
+ HAL_WBM_REL_HTT_TX_COMP_STATUS_OK,
+ HAL_WBM_REL_HTT_TX_COMP_STATUS_DROP,
+ HAL_WBM_REL_HTT_TX_COMP_STATUS_TTL,
+ HAL_WBM_REL_HTT_TX_COMP_STATUS_REINJ,
+ HAL_WBM_REL_HTT_TX_COMP_STATUS_INSPECT,
+ HAL_WBM_REL_HTT_TX_COMP_STATUS_MEC_NOTIFY,
+};
+
+struct hal_tcl_data_cmd {
+ struct ath11k_buffer_addr buf_addr_info;
+ u32 info0;
+ u32 info1;
+ u32 info2;
+ u32 info3;
+ u32 info4;
+} __packed;
+
+/* hal_tcl_data_cmd
+ *
+ * buf_addr_info
+ * Details of the physical address of a buffer or MSDU
+ * link descriptor.
+ *
+ * desc_type
+ * Indicates the type of address provided in the buf_addr_info.
+ * Values are defined in enum %HAL_REO_DEST_RING_BUFFER_TYPE_.
+ *
+ * epd
+ *	When this bit is set, the input packet is of EPD type.
+ *
+ * encap_type
+ * Indicates the encapsulation that HW will perform. Values are
+ * defined in enum %HAL_TCL_ENCAP_TYPE_.
+ *
+ * encrypt_type
+ * Field only valid for encap_type: RAW
+ * Values are defined in enum %HAL_ENCRYPT_TYPE_.
+ *
+ * src_buffer_swap
+ * Treats source memory (packet buffer) organization as big-endian.
+ * 1'b0: Source memory is little endian
+ * 1'b1: Source memory is big endian
+ *
+ * link_meta_swap
+ * Treats link descriptor and Metadata as big-endian.
+ * 1'b0: memory is little endian
+ * 1'b1: memory is big endian
+ *
+ * search_type
+ * Search type select
+ * 0 - Normal search, 1 - Index based address search,
+ * 2 - Index based flow search
+ *
+ * addrx_en
+ * addry_en
+ * Address X/Y search enable in ASE correspondingly.
+ * 1'b0: Search disable
+ * 1'b1: Search Enable
+ *
+ * cmd_num
+ * This number can be used to match against status.
+ *
+ * data_length
+ * MSDU length in case of direct descriptor. Length of link
+ * extension descriptor in case of Link extension descriptor.
+ *
+ * *_checksum_en
+ * Enable checksum replacement for ipv4, udp_over_ipv4, ipv6,
+ * udp_over_ipv6, tcp_over_ipv4 and tcp_over_ipv6.
+ *
+ * to_fw
+ * Forward packet to FW along with classification result. The
+ *	packet will not be forwarded to TQM when this bit is set.
+ * 1'b0: Use classification result to forward the packet.
+ * 1'b1: Override classification result & forward packet only to fw
+ *
+ * packet_offset
+ * Packet offset from Metadata in case of direct buffer descriptor.
+ *
+ * buffer_timestamp
+ * buffer_timestamp_valid
+ *	Frame system entrance timestamp. It shall be filled in by the
+ *	first module (SW, TCL or TQM) that sees the frame.
+ *
+ * mesh_enable
+ * For raw WiFi frames, this indicates transmission to a mesh STA,
+ * enabling the interpretation of the 'Mesh Control Present' bit
+ * (bit 8) of QoS Control.
+ * For native WiFi frames, this indicates that a 'Mesh Control'
+ * field is present between the header and the LLC.
+ *
+ * hlos_tid_overwrite
+ *
+ * When set, TCL shall ignore the IP DSCP and VLAN PCP
+ * fields and use HLOS_TID as the final TID. Otherwise TCL
+ * shall consider the DSCP and PCP fields as well as HLOS_TID
+ *	and choose a final TID based on the configured priority.
+ *
+ * hlos_tid
+ * HLOS MSDU priority
+ * Field is used when HLOS_TID_overwrite is set.
+ *
+ * lmac_id
+ *	TCL uses this LMAC_ID in address search, i.e., while
+ *	finding the matching entry for the packet in the AST
+ *	corresponding to the given LMAC_ID.
+ *
+ * If LMAC ID is all 1s (=> value 3), it indicates wildcard
+ * match for any MAC
+ *
+ * dscp_tid_table_num
+ *	DSCP to TID mapping table number that needs to be used
+ * for the MSDU.
+ *
+ * search_index
+ * The index that will be used for index based address or
+ * flow search. The field is valid when 'search_type' is 1 or 2.
+ *
+ * cache_set_num
+ *
+ * Cache set number that should be used to cache the index
+ * based search results, for address and flow search. This
+ *	value should be equal to the LSB four bits of the hash value
+ *	of the match data, in case the search index points to an entry
+ *	which may also be used in content based search. The value can
+ *	be anything when the entry pointed to by the search index will
+ *	not be used for content based search.
+ *
+ * ring_id
+ * The buffer pointer ring ID.
+ * 0 refers to the IDLE ring
+ * 1 - N refers to other rings
+ *
+ * looping_count
+ *
+ * A count value that indicates the number of times the
+ * producer of entries into the Ring has looped around the
+ * ring.
+ *
+ * At initialization time, this value is set to 0. On the
+ *	first loop, this value is set to 1. After the max value
+ *	allowed by the number of bits for this field is reached, the
+ *	count value continues from 0 again.
+ *
+ *	In case SW is the consumer of the ring entries, it can
+ *	use this field to figure out up to where the producer of
+ *	entries has created new entries. This eliminates the need to
+ *	check where the 'head pointer' of the ring is located once
+ *	the SW starts processing an interrupt indicating that new
+ *	entries have been put into this ring.
+ *
+ *	Also note that SW, if it wants, only needs to look at the
+ *	LSB of this count value.
+ */
+
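+/* Editorial example (illustrative sketch): composing the first two info
+ * words of a TCL data command for a plain buffer carrying a native wifi
+ * frame; 'data_len' is a hypothetical MSDU length.
+ *
+ *    struct hal_tcl_data_cmd tcl_cmd = {};
+ *
+ *    tcl_cmd.info0 = FIELD_PREP(HAL_TCL_DATA_CMD_INFO0_DESC_TYPE,
+ *                               HAL_TCL_DESC_TYPE_BUFFER) |
+ *                    FIELD_PREP(HAL_TCL_DATA_CMD_INFO0_ENCAP_TYPE,
+ *                               HAL_TCL_ENCAP_TYPE_NATIVE_WIFI);
+ *    tcl_cmd.info1 = FIELD_PREP(HAL_TCL_DATA_CMD_INFO1_DATA_LEN, data_len);
+ */
+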
+#define HAL_TCL_DESC_LEN sizeof(struct hal_tcl_data_cmd)
+
+enum hal_tcl_gse_ctrl {
+ HAL_TCL_GSE_CTRL_RD_STAT,
+ HAL_TCL_GSE_CTRL_SRCH_DIS,
+ HAL_TCL_GSE_CTRL_WR_BK_SINGLE,
+ HAL_TCL_GSE_CTRL_WR_BK_ALL,
+ HAL_TCL_GSE_CTRL_INVAL_SINGLE,
+ HAL_TCL_GSE_CTRL_INVAL_ALL,
+ HAL_TCL_GSE_CTRL_WR_BK_INVAL_SINGLE,
+ HAL_TCL_GSE_CTRL_WR_BK_INVAL_ALL,
+ HAL_TCL_GSE_CTRL_CLR_STAT_SINGLE,
+};
+
+/* hal_tcl_gse_ctrl
+ *
+ * rd_stat
+ * Report or Read statistics
+ * srch_dis
+ * Search disable. Report only Hash.
+ * wr_bk_single
+ * Write Back single entry
+ * wr_bk_all
+ * Write Back entire cache entry
+ * inval_single
+ * Invalidate single cache entry
+ * inval_all
+ * Invalidate entire cache
+ * wr_bk_inval_single
+ * Write back and invalidate single entry in cache
+ * wr_bk_inval_all
+ * Write back and invalidate entire cache
+ * clr_stat_single
+ * Clear statistics for single entry
+ */
+
+#define HAL_TCL_GSE_CMD_INFO0_CTRL_BUF_ADDR_HI GENMASK(7, 0)
+#define HAL_TCL_GSE_CMD_INFO0_GSE_CTRL GENMASK(11, 8)
+#define HAL_TCL_GSE_CMD_INFO0_GSE_SEL BIT(12)
+#define HAL_TCL_GSE_CMD_INFO0_STATUS_DEST_RING_ID BIT(13)
+#define HAL_TCL_GSE_CMD_INFO0_SWAP BIT(14)
+
+#define HAL_TCL_GSE_CMD_INFO1_RING_ID GENMASK(27, 20)
+#define HAL_TCL_GSE_CMD_INFO1_LOOPING_COUNT GENMASK(31, 28)
+
+struct hal_tcl_gse_cmd {
+ u32 ctrl_buf_addr_lo;
+ u32 info0;
+ u32 meta_data[2];
+ u32 rsvd0[2];
+ u32 info1;
+} __packed;
+
+/* hal_tcl_gse_cmd
+ *
+ * ctrl_buf_addr_lo, ctrl_buf_addr_hi
+ * Address of a control buffer containing additional info needed
+ * for this command execution.
+ *
+ * gse_ctrl
+ * GSE control operations. This includes cache operations and table
+ * entry statistics read/clear operation. Values are defined in
+ * enum %HAL_TCL_GSE_CTRL.
+ *
+ * gse_sel
+ *	Selects the ASE/FSE to do the operation mentioned by gse_ctrl.
+ * 0: FSE select 1: ASE select
+ *
+ * status_destination_ring_id
+ *	TCL status ring to which the GSE status needs to be sent.
+ *
+ * swap
+ * Bit to enable byte swapping of contents of buffer.
+ *
+ * meta_data
+ * Meta data to be returned in the status descriptor
+ */
+
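+/* Editorial example (illustrative sketch): requesting statistics for an ASE
+ * entry via a GSE command; 'paddr' is a hypothetical DMA address of the
+ * control buffer.
+ *
+ *    struct hal_tcl_gse_cmd gse = {};
+ *
+ *    gse.ctrl_buf_addr_lo = lower_32_bits(paddr);
+ *    gse.info0 = FIELD_PREP(HAL_TCL_GSE_CMD_INFO0_CTRL_BUF_ADDR_HI,
+ *                           upper_32_bits(paddr)) |
+ *                FIELD_PREP(HAL_TCL_GSE_CMD_INFO0_GSE_CTRL,
+ *                           HAL_TCL_GSE_CTRL_RD_STAT) |
+ *                HAL_TCL_GSE_CMD_INFO0_GSE_SEL;
+ */
+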
+enum hal_tcl_cache_op_res {
+ HAL_TCL_CACHE_OP_RES_DONE,
+ HAL_TCL_CACHE_OP_RES_NOT_FOUND,
+ HAL_TCL_CACHE_OP_RES_TIMEOUT,
+};
+
+#define HAL_TCL_STATUS_RING_INFO0_GSE_CTRL GENMASK(3, 0)
+#define HAL_TCL_STATUS_RING_INFO0_GSE_SEL BIT(4)
+#define HAL_TCL_STATUS_RING_INFO0_CACHE_OP_RES GENMASK(6, 5)
+#define HAL_TCL_STATUS_RING_INFO0_MSDU_CNT GENMASK(31, 8)
+
+#define HAL_TCL_STATUS_RING_INFO1_HASH_IDX GENMASK(19, 0)
+
+#define HAL_TCL_STATUS_RING_INFO2_RING_ID GENMASK(27, 20)
+#define HAL_TCL_STATUS_RING_INFO2_LOOPING_COUNT GENMASK(31, 28)
+
+struct hal_tcl_status_ring {
+ u32 info0;
+ u32 msdu_byte_count;
+ u32 msdu_timestamp;
+ u32 meta_data[2];
+ u32 info1;
+ u32 rsvd0;
+ u32 info2;
+} __packed;
+
+/* hal_tcl_status_ring
+ *
+ * gse_ctrl
+ * GSE control operations. This includes cache operations and table
+ * entry statistics read/clear operation. Values are defined in
+ * enum %HAL_TCL_GSE_CTRL.
+ *
+ * gse_sel
+ *	Selects the ASE/FSE to do the operation mentioned by gse_ctrl.
+ * 0: FSE select 1: ASE select
+ *
+ * cache_op_res
+ * Cache operation result. Values are defined in enum
+ * %HAL_TCL_CACHE_OP_RES_.
+ *
+ * msdu_cnt
+ * msdu_byte_count
+ *	MSDU count of the entry and MSDU byte count for the entry.
+ *
+ * hash_indx
+ * Hash value of the entry in case of search failed or disabled.
+ */
+
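+/* Editorial example (illustrative sketch): checking the outcome of a GSE
+ * cache operation from a hypothetical TCL status ring entry 'status':
+ *
+ *    if (FIELD_GET(HAL_TCL_STATUS_RING_INFO0_CACHE_OP_RES, status->info0) !=
+ *        HAL_TCL_CACHE_OP_RES_DONE)
+ *            ... the operation missed or timed out ...
+ */
+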
+#define HAL_CE_SRC_DESC_ADDR_INFO_ADDR_HI GENMASK(7, 0)
+#define HAL_CE_SRC_DESC_ADDR_INFO_HASH_EN BIT(8)
+#define HAL_CE_SRC_DESC_ADDR_INFO_BYTE_SWAP BIT(9)
+#define HAL_CE_SRC_DESC_ADDR_INFO_DEST_SWAP BIT(10)
+#define HAL_CE_SRC_DESC_ADDR_INFO_GATHER BIT(11)
+#define HAL_CE_SRC_DESC_ADDR_INFO_LEN GENMASK(31, 16)
+
+#define HAL_CE_SRC_DESC_META_INFO_DATA GENMASK(15, 0)
+
+#define HAL_CE_SRC_DESC_FLAGS_RING_ID GENMASK(27, 20)
+#define HAL_CE_SRC_DESC_FLAGS_LOOP_CNT HAL_SRNG_DESC_LOOP_CNT
+
+struct hal_ce_srng_src_desc {
+ u32 buffer_addr_low;
+ u32 buffer_addr_info; /* %HAL_CE_SRC_DESC_ADDR_INFO_ */
+ u32 meta_info; /* %HAL_CE_SRC_DESC_META_INFO_ */
+ u32 flags; /* %HAL_CE_SRC_DESC_FLAGS_ */
+} __packed;
+
+/*
+ * hal_ce_srng_src_desc
+ *
+ * buffer_addr_lo
+ * LSB 32 bits of the 40 Bit Pointer to the source buffer
+ *
+ * buffer_addr_hi
+ * MSB 8 bits of the 40 Bit Pointer to the source buffer
+ *
+ * toeplitz_en
+ *	Enable generation of a 32-bit Toeplitz-LFSR hash for
+ *	the data transfer. In case of a gather, the field in the
+ *	first source ring entry of the gather copy cycle is taken
+ *	into account.
+ *
+ * src_swap
+ * Treats source memory organization as big-endian. For
+ * each dword read (4 bytes), the byte 0 is swapped with byte 3
+ * and byte 1 is swapped with byte 2.
+ *	In case of a gather, the field in the first source ring
+ *	entry of the gather copy cycle is taken into account.
+ *
+ * dest_swap
+ * Treats destination memory organization as big-endian.
+ * For each dword write (4 bytes), the byte 0 is swapped with
+ * byte 3 and byte 1 is swapped with byte 2.
+ *	In case of a gather, the field in the first source ring
+ *	entry of the gather copy cycle is taken into account.
+ *
+ * gather
+ * Enables gather of multiple copy engine source
+ * descriptors to one destination.
+ *
+ * ce_res_0
+ * Reserved
+ *
+ *
+ * length
+ * Length of the buffer in units of octets of the current
+ * descriptor
+ *
+ * fw_metadata
+ * Meta data used by FW.
+ *	In case of a gather, the field in the first source ring
+ *	entry of the gather copy cycle is taken into account.
+ *
+ * ce_res_1
+ * Reserved
+ *
+ * ce_res_2
+ * Reserved
+ *
+ * ring_id
+ * The buffer pointer ring ID.
+ * 0 refers to the IDLE ring
+ * 1 - N refers to other rings
+ * Helps with debugging when dumping ring contents.
+ *
+ * looping_count
+ * A count value that indicates the number of times the
+ * producer of entries into the Ring has looped around the
+ * ring.
+ *
+ * At initialization time, this value is set to 0. On the
+ *	first loop, this value is set to 1. After the max value
+ *	allowed by the number of bits for this field is reached, the
+ *	count value continues from 0 again.
+ *
+ *	In case SW is the consumer of the ring entries, it can
+ *	use this field to figure out up to where the producer of
+ *	entries has created new entries. This eliminates the need to
+ *	check where the 'head pointer' of the ring is located once
+ *	the SW starts processing an interrupt indicating that new
+ *	entries have been put into this ring.
+ *
+ *	Also note that SW, if it wants, only needs to look at the
+ *	LSB of this count value.
+ */
+
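+/* Editorial example (illustrative sketch): filling a copy engine source
+ * descriptor for a single (non-gather) transfer. Given a hypothetical
+ * descriptor pointer 'src', DMA address 'paddr' and length 'len':
+ *
+ *    src->buffer_addr_low = lower_32_bits(paddr);
+ *    src->buffer_addr_info = FIELD_PREP(HAL_CE_SRC_DESC_ADDR_INFO_ADDR_HI,
+ *                                       upper_32_bits(paddr)) |
+ *                            FIELD_PREP(HAL_CE_SRC_DESC_ADDR_INFO_LEN, len);
+ */
+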
+#define HAL_CE_DEST_DESC_ADDR_INFO_ADDR_HI GENMASK(7, 0)
+#define HAL_CE_DEST_DESC_ADDR_INFO_RING_ID GENMASK(27, 20)
+#define HAL_CE_DEST_DESC_ADDR_INFO_LOOP_CNT HAL_SRNG_DESC_LOOP_CNT
+
+struct hal_ce_srng_dest_desc {
+ u32 buffer_addr_low;
+ u32 buffer_addr_info; /* %HAL_CE_DEST_DESC_ADDR_INFO_ */
+} __packed;
+
+/* hal_ce_srng_dest_desc
+ *
+ * dst_buffer_low
+ * LSB 32 bits of the 40 Bit Pointer to the Destination
+ * buffer
+ *
+ * dst_buffer_high
+ * MSB 8 bits of the 40 Bit Pointer to the Destination
+ * buffer
+ *
+ * ce_res_4
+ * Reserved
+ *
+ * ring_id
+ * The buffer pointer ring ID.
+ * 0 refers to the IDLE ring
+ * 1 - N refers to other rings
+ * Helps with debugging when dumping ring contents.
+ *
+ * looping_count
+ * A count value that indicates the number of times the
+ * producer of entries into the Ring has looped around the
+ * ring.
+ *
+ * At initialization time, this value is set to 0. On the
+ *	first loop, this value is set to 1. After the max value
+ *	allowed by the number of bits for this field is reached, the
+ *	count value continues from 0 again.
+ *
+ *	In case SW is the consumer of the ring entries, it can
+ *	use this field to figure out up to where the producer of
+ *	entries has created new entries. This eliminates the need to
+ *	check where the 'head pointer' of the ring is located once
+ *	the SW starts processing an interrupt indicating that new
+ *	entries have been put into this ring.
+ *
+ *	Also note that SW, if it wants, only needs to look at the
+ *	LSB of this count value.
+ */
+
+#define HAL_CE_DST_STATUS_DESC_FLAGS_HASH_EN BIT(8)
+#define HAL_CE_DST_STATUS_DESC_FLAGS_BYTE_SWAP BIT(9)
+#define HAL_CE_DST_STATUS_DESC_FLAGS_DEST_SWAP BIT(10)
+#define HAL_CE_DST_STATUS_DESC_FLAGS_GATHER BIT(11)
+#define HAL_CE_DST_STATUS_DESC_FLAGS_LEN GENMASK(31, 16)
+
+#define HAL_CE_DST_STATUS_DESC_META_INFO_DATA GENMASK(15, 0)
+#define HAL_CE_DST_STATUS_DESC_META_INFO_RING_ID GENMASK(27, 20)
+#define HAL_CE_DST_STATUS_DESC_META_INFO_LOOP_CNT HAL_SRNG_DESC_LOOP_CNT
+
+struct hal_ce_srng_dst_status_desc {
+ u32 flags; /* %HAL_CE_DST_STATUS_DESC_FLAGS_ */
+ u32 toeplitz_hash0;
+ u32 toeplitz_hash1;
+ u32 meta_info; /* HAL_CE_DST_STATUS_DESC_META_INFO_ */
+} __packed;
+
+/* hal_ce_srng_dst_status_desc
+ *
+ * ce_res_5
+ * Reserved
+ *
+ * toeplitz_en
+ *
+ * src_swap
+ * Source memory buffer swapped
+ *
+ * dest_swap
+ * Destination memory buffer swapped
+ *
+ * gather
+ * Gather of multiple copy engine source descriptors to one
+ * destination enabled
+ *
+ * ce_res_6
+ * Reserved
+ *
+ * length
+ *	Sum of all the lengths of the source descriptors in the
+ *	gather chain
+ *
+ * toeplitz_hash_0
+ * 32 LS bits of 64 bit Toeplitz LFSR hash result
+ *
+ * toeplitz_hash_1
+ * 32 MS bits of 64 bit Toeplitz LFSR hash result
+ *
+ * fw_metadata
+ * Meta data used by FW
+ *	In case of a gather, the field in the first source ring
+ *	entry of the gather copy cycle is taken into account.
+ *
+ * ce_res_7
+ * Reserved
+ *
+ * ring_id
+ * The buffer pointer ring ID.
+ * 0 refers to the IDLE ring
+ * 1 - N refers to other rings
+ * Helps with debugging when dumping ring contents.
+ *
+ * looping_count
+ * A count value that indicates the number of times the
+ * producer of entries into the Ring has looped around the
+ * ring.
+ *
+ * At initialization time, this value is set to 0. On the
+ *	first loop, this value is set to 1. After the max value
+ *	allowed by the number of bits for this field is reached, the
+ *	count value continues from 0 again.
+ *
+ *	In case SW is the consumer of the ring entries, it can
+ *	use this field to figure out up to where the producer of
+ *	entries has created new entries. This eliminates the need to
+ *	check where the 'head pointer' of the ring is located once
+ *	the SW starts processing an interrupt indicating that new
+ *	entries have been put into this ring.
+ *
+ *	Also note that SW, if it wants, only needs to look at the
+ *	LSB of this count value.
+ */
+
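+/* Editorial example (illustrative sketch): reading the completed transfer
+ * length from a hypothetical destination status descriptor 'dst':
+ *
+ *    u32 nbytes = FIELD_GET(HAL_CE_DST_STATUS_DESC_FLAGS_LEN, dst->flags);
+ */
+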
+#define HAL_TX_RATE_STATS_INFO0_VALID BIT(0)
+#define HAL_TX_RATE_STATS_INFO0_BW GENMASK(2, 1)
+#define HAL_TX_RATE_STATS_INFO0_PKT_TYPE GENMASK(6, 3)
+#define HAL_TX_RATE_STATS_INFO0_STBC BIT(7)
+#define HAL_TX_RATE_STATS_INFO0_LDPC BIT(8)
+#define HAL_TX_RATE_STATS_INFO0_SGI GENMASK(10, 9)
+#define HAL_TX_RATE_STATS_INFO0_MCS GENMASK(14, 11)
+#define HAL_TX_RATE_STATS_INFO0_OFDMA_TX BIT(15)
+#define HAL_TX_RATE_STATS_INFO0_TONES_IN_RU GENMASK(27, 16)
+
+enum hal_tx_rate_stats_bw {
+ HAL_TX_RATE_STATS_BW_20,
+ HAL_TX_RATE_STATS_BW_40,
+ HAL_TX_RATE_STATS_BW_80,
+ HAL_TX_RATE_STATS_BW_160,
+};
+
+enum hal_tx_rate_stats_pkt_type {
+ HAL_TX_RATE_STATS_PKT_TYPE_11A,
+ HAL_TX_RATE_STATS_PKT_TYPE_11B,
+ HAL_TX_RATE_STATS_PKT_TYPE_11N,
+ HAL_TX_RATE_STATS_PKT_TYPE_11AC,
+ HAL_TX_RATE_STATS_PKT_TYPE_11AX,
+};
+
+enum hal_tx_rate_stats_sgi {
+ HAL_TX_RATE_STATS_SGI_08US,
+ HAL_TX_RATE_STATS_SGI_04US,
+ HAL_TX_RATE_STATS_SGI_16US,
+ HAL_TX_RATE_STATS_SGI_32US,
+};
+
+struct hal_tx_rate_stats {
+ u32 info0;
+ u32 tsf;
+} __packed;
+
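+/* Editorial example (illustrative sketch): the rate stats word is only
+ * meaningful when the valid bit is set; 'bw' below maps to enum
+ * hal_tx_rate_stats_bw. 'stats' is a hypothetical descriptor pointer.
+ *
+ *    if (FIELD_GET(HAL_TX_RATE_STATS_INFO0_VALID, stats->info0)) {
+ *            u32 mcs = FIELD_GET(HAL_TX_RATE_STATS_INFO0_MCS, stats->info0);
+ *            u32 bw = FIELD_GET(HAL_TX_RATE_STATS_INFO0_BW, stats->info0);
+ *    }
+ */
+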
+struct hal_wbm_link_desc {
+ struct ath11k_buffer_addr buf_addr_info;
+} __packed;
+
+/* hal_wbm_link_desc
+ *
+ * Producer: WBM
+ * Consumer: WBM
+ *
+ * buf_addr_info
+ * Details of the physical address of a buffer or MSDU
+ * link descriptor.
+ */
+
+enum hal_wbm_rel_src_module {
+ HAL_WBM_REL_SRC_MODULE_TQM,
+ HAL_WBM_REL_SRC_MODULE_RXDMA,
+ HAL_WBM_REL_SRC_MODULE_REO,
+ HAL_WBM_REL_SRC_MODULE_FW,
+ HAL_WBM_REL_SRC_MODULE_SW,
+};
+
+enum hal_wbm_rel_desc_type {
+ HAL_WBM_REL_DESC_TYPE_REL_MSDU,
+ HAL_WBM_REL_DESC_TYPE_MSDU_LINK,
+ HAL_WBM_REL_DESC_TYPE_MPDU_LINK,
+ HAL_WBM_REL_DESC_TYPE_MSDU_EXT,
+ HAL_WBM_REL_DESC_TYPE_QUEUE_EXT,
+};
+
+/* hal_wbm_rel_desc_type
+ *
+ * msdu_buffer
+ * The address points to an MSDU buffer
+ *
+ * msdu_link_descriptor
+ *	The address points to a Tx MSDU link descriptor
+ *
+ * mpdu_link_descriptor
+ * The address points to an MPDU link descriptor
+ *
+ * msdu_ext_descriptor
+ * The address points to an MSDU extension descriptor
+ *
+ * queue_ext_descriptor
+ *	The address points to a TQM queue extension descriptor. WBM should
+ *	treat this in the same way as a link descriptor.
+ */
+
+enum hal_wbm_rel_bm_act {
+ HAL_WBM_REL_BM_ACT_PUT_IN_IDLE,
+ HAL_WBM_REL_BM_ACT_REL_MSDU,
+};
+
+/* hal_wbm_rel_bm_act
+ *
+ * put_in_idle_list
+ * Put the buffer or descriptor back in the idle list. In case of MSDU or
+ *	MPDU link descriptor, BM does not need to check to release any
+ * individual MSDU buffers.
+ *
+ * release_msdu_list
+ * This BM action can only be used in combination with desc_type being
+ * msdu_link_descriptor. Field first_msdu_index points out which MSDU
+ * pointer in the MSDU link descriptor is the first of an MPDU that is
+ * released. BM shall release all the MSDU buffers linked to this first
+ * MSDU buffer pointer. All related MSDU buffer pointer entries shall be
+ * set to value 0, which represents the 'NULL' pointer. When all MSDU
+ * buffer pointers in the MSDU link descriptor are 'NULL', the MSDU link
+ * descriptor itself shall also be released.
+ */
+
+#define HAL_WBM_RELEASE_INFO0_REL_SRC_MODULE GENMASK(2, 0)
+#define HAL_WBM_RELEASE_INFO0_BM_ACTION GENMASK(5, 3)
+#define HAL_WBM_RELEASE_INFO0_DESC_TYPE GENMASK(8, 6)
+#define HAL_WBM_RELEASE_INFO0_FIRST_MSDU_IDX GENMASK(12, 9)
+#define HAL_WBM_RELEASE_INFO0_TQM_RELEASE_REASON GENMASK(16, 13)
+#define HAL_WBM_RELEASE_INFO0_RXDMA_PUSH_REASON GENMASK(18, 17)
+#define HAL_WBM_RELEASE_INFO0_RXDMA_ERROR_CODE GENMASK(23, 19)
+#define HAL_WBM_RELEASE_INFO0_REO_PUSH_REASON GENMASK(25, 24)
+#define HAL_WBM_RELEASE_INFO0_REO_ERROR_CODE GENMASK(30, 26)
+#define HAL_WBM_RELEASE_INFO0_WBM_INTERNAL_ERROR BIT(31)
+
+#define HAL_WBM_RELEASE_INFO1_TQM_STATUS_NUMBER GENMASK(23, 0)
+#define HAL_WBM_RELEASE_INFO1_TRANSMIT_COUNT GENMASK(30, 24)
+
+#define HAL_WBM_RELEASE_INFO2_ACK_FRAME_RSSI GENMASK(7, 0)
+#define HAL_WBM_RELEASE_INFO2_SW_REL_DETAILS_VALID BIT(8)
+#define HAL_WBM_RELEASE_INFO2_FIRST_MSDU BIT(9)
+#define HAL_WBM_RELEASE_INFO2_LAST_MSDU BIT(10)
+#define HAL_WBM_RELEASE_INFO2_MSDU_IN_AMSDU BIT(11)
+#define HAL_WBM_RELEASE_INFO2_FW_TX_NOTIF_FRAME BIT(12)
+#define HAL_WBM_RELEASE_INFO2_BUFFER_TIMESTAMP GENMASK(31, 13)
+
+#define HAL_WBM_RELEASE_INFO3_PEER_ID GENMASK(15, 0)
+#define HAL_WBM_RELEASE_INFO3_TID GENMASK(19, 16)
+#define HAL_WBM_RELEASE_INFO3_RING_ID GENMASK(27, 20)
+#define HAL_WBM_RELEASE_INFO3_LOOPING_COUNT GENMASK(31, 28)
+
+#define HAL_WBM_REL_HTT_TX_COMP_INFO0_STATUS GENMASK(12, 9)
+#define HAL_WBM_REL_HTT_TX_COMP_INFO0_REINJ_REASON GENMASK(16, 13)
+#define HAL_WBM_REL_HTT_TX_COMP_INFO0_EXP_FRAME BIT(17)
+
+struct hal_wbm_release_ring {
+ struct ath11k_buffer_addr buf_addr_info;
+ u32 info0;
+ u32 info1;
+ u32 info2;
+ struct hal_tx_rate_stats rate_stats;
+ u32 info3;
+} __packed;
+
+/* hal_wbm_release_ring
+ *
+ * Producer: SW/TQM/RXDMA/REO/SWITCH
+ * Consumer: WBM/SW/FW
+ *
+ * HTT tx status is overlaid on wbm_release ring on 4-byte words 2, 3, 4 and 5
+ * for software based completions.
+ *
+ * buf_addr_info
+ * Details of the physical address of the buffer or link descriptor.
+ *
+ * release_source_module
+ * Indicates which module initiated the release of this buffer/descriptor.
+ * Values are defined in enum %HAL_WBM_REL_SRC_MODULE_.
+ *
+ * bm_action
+ * Field only valid when the field return_buffer_manager in
+ * Released_buff_or_desc_addr_info indicates:
+ * WBM_IDLE_BUF_LIST / WBM_IDLE_DESC_LIST
+ * Values are defined in enum %HAL_WBM_REL_BM_ACT_.
+ *
+ * buffer_or_desc_type
+ * Field only valid when WBM is marked as the return_buffer_manager in
+ * the Released_Buffer_address_info. Indicates that type of buffer or
+ * descriptor is being released. Values are in enum %HAL_WBM_REL_DESC_TYPE.
+ *
+ * first_msdu_index
+ * Field only valid for the bm_action release_msdu_list. The index of the
+ * first MSDU in an MSDU link descriptor all belonging to the same MPDU.
+ *
+ * tqm_release_reason
+ * Field only valid when Release_source_module is set to release_source_TQM
+ * Release reasons are defined in enum %HAL_WBM_TQM_REL_REASON_.
+ *
+ * rxdma_push_reason
+ * reo_push_reason
+ * Indicates why rxdma/reo pushed the frame to this ring and values are
+ * defined in enum %HAL_REO_DEST_RING_PUSH_REASON_.
+ *
+ * rxdma_error_code
+ * Field only valid when 'rxdma_push_reason' set to 'error_detected'.
+ * Values are defined in enum %HAL_REO_ENTR_RING_RXDMA_ECODE_.
+ *
+ * reo_error_code
+ * Field only valid when 'reo_push_reason' set to 'error_detected'. Values
+ * are defined in enum %HAL_REO_DEST_RING_ERROR_CODE_.
+ *
+ * wbm_internal_error
+ * Is set when WBM got a buffer pointer but the action was to push it to
+ * the idle link descriptor ring or do link related activity OR
+ * Is set when WBM got a link buffer pointer but the action was to push it
+ * to the buffer descriptor ring.
+ *
+ * tqm_status_number
+ * The value in this field is equal to tqm_cmd_number in TQM command. It is
+ *	used to correlate the status with TQM commands. Only valid when
+ * release_source_module is TQM.
+ *
+ * transmit_count
+ * The number of times the frame has been transmitted, valid only when
+ *	the release source is TQM.
+ *
+ * ack_frame_rssi
+ * This field is only valid when the source is TQM. If this frame is
+ * removed as the result of the reception of an ACK or BA, this field
+ * indicates the RSSI of the received ACK or BA frame.
+ *
+ * sw_release_details_valid
+ *	This is set when WBM got a 'release_msdu_list' command from TQM and
+ *	the return buffer manager is not WBM. WBM will then de-aggregate all MSDUs
+ * and pass them one at a time on to the 'buffer owner'.
+ *
+ * first_msdu
+ * Field only valid when SW_release_details_valid is set.
+ * When set, this MSDU is the first MSDU pointed to in the
+ * 'release_msdu_list' command.
+ *
+ * last_msdu
+ * Field only valid when SW_release_details_valid is set.
+ * When set, this MSDU is the last MSDU pointed to in the
+ * 'release_msdu_list' command.
+ *
+ * msdu_part_of_amsdu
+ * Field only valid when SW_release_details_valid is set.
+ * When set, this MSDU was part of an A-MSDU in MPDU
+ *
+ * fw_tx_notify_frame
+ * Field only valid when SW_release_details_valid is set.
+ *
+ * buffer_timestamp
+ * Field only valid when SW_release_details_valid is set.
+ *	This is the buffer_timestamp field from the descriptor;
+ *	the timestamp is in units of 1024 us.
+ *
+ * struct hal_tx_rate_stats rate_stats
+ * Details for command execution tracking purposes.
+ *
+ * sw_peer_id
+ * tid
+ * Field only valid when Release_source_module is set to
+ * release_source_TQM
+ *
+ * 1) Release of msdu buffer due to drop_frame = 1. Flow is
+ * not fetched and hence sw_peer_id and tid = 0
+ *
+ *	buffer_or_desc_type = e_num 0 (MSDU_rel_buffer)
+ *	tqm_release_reason = e_num 1 (tqm_rr_rem_cmd_rem)
+ *
+ *	2) Release of msdu buffer because the flow is not fetched;
+ *	hence sw_peer_id and tid = 0
+ *
+ *	buffer_or_desc_type = e_num 0 (MSDU_rel_buffer)
+ *	tqm_release_reason = e_num 1 (tqm_rr_rem_cmd_rem)
+ *
+ *	3) Release of msdu link due to remove_mpdu or acked_mpdu
+ *	command.
+ *
+ *	buffer_or_desc_type = e_num 1 (msdu_link_descriptor)
+ *	tqm_release_reason can be: e_num 1 (tqm_rr_rem_cmd_rem),
+ *	e_num 2 (tqm_rr_rem_cmd_tx), e_num 3 (tqm_rr_rem_cmd_notx),
+ *	e_num 4 (tqm_rr_rem_cmd_aged)
+ *
+ * This field represents the TID from the TX_MSDU_FLOW
+ * descriptor or TX_MPDU_QUEUE descriptor
+ *
+ * ring_id
+ *	For debugging.
+ *	This field is filled in by the SRNG module.
+ *	It helps to identify the ring that is being looked at.
+ *
+ * looping_count
+ * A count value that indicates the number of times the
+ * producer of entries into the Buffer Manager Ring has looped
+ * around the ring.
+ *
+ * At initialization time, this value is set to 0. On the
+ *	first loop, this value is set to 1. After the max value
+ *	allowed by the number of bits for this field is reached, the
+ *	count value continues from 0 again.
+ *
+ *	In case SW is the consumer of the ring entries, it can
+ *	use this field to figure out up to where the producer of
+ *	entries has created new entries. This eliminates the need to
+ *	check where the 'head pointer' of the ring is located once
+ *	the SW starts processing an interrupt indicating that new
+ *	entries have been put into this ring.
+ *
+ *	Also note that SW, if it wants, only needs to look at the
+ *	LSB of this count value.
+ */
+
+/**
+ * enum hal_wbm_tqm_rel_reason - TQM release reason code
+ * @HAL_WBM_TQM_REL_REASON_FRAME_ACKED: ACK or BACK received for the frame
+ * @HAL_WBM_TQM_REL_REASON_CMD_REMOVE_MPDU: Command remove_mpdus initiated by SW
+ * @HAL_WBM_TQM_REL_REASON_CMD_REMOVE_TX: Command remove transmitted_mpdus
+ * initiated by sw.
+ * @HAL_WBM_TQM_REL_REASON_CMD_REMOVE_NOTX: Command remove untransmitted_mpdus
+ * initiated by sw.
+ * @HAL_WBM_TQM_REL_REASON_CMD_REMOVE_AGED_FRAMES: Command remove aged msdus or
+ * mpdus.
+ * @HAL_WBM_TQM_REL_REASON_CMD_REMOVE_RESEAON1: Remove command initiated by
+ * fw with fw_reason1.
+ * @HAL_WBM_TQM_REL_REASON_CMD_REMOVE_RESEAON2: Remove command initiated by
+ * fw with fw_reason2.
+ * @HAL_WBM_TQM_REL_REASON_CMD_REMOVE_RESEAON3: Remove command initiated by
+ * fw with fw_reason3.
+ */
+enum hal_wbm_tqm_rel_reason {
+ HAL_WBM_TQM_REL_REASON_FRAME_ACKED,
+ HAL_WBM_TQM_REL_REASON_CMD_REMOVE_MPDU,
+ HAL_WBM_TQM_REL_REASON_CMD_REMOVE_TX,
+ HAL_WBM_TQM_REL_REASON_CMD_REMOVE_NOTX,
+ HAL_WBM_TQM_REL_REASON_CMD_REMOVE_AGED_FRAMES,
+ HAL_WBM_TQM_REL_REASON_CMD_REMOVE_RESEAON1,
+ HAL_WBM_TQM_REL_REASON_CMD_REMOVE_RESEAON2,
+ HAL_WBM_TQM_REL_REASON_CMD_REMOVE_RESEAON3,
+};
+
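+/* Editorial example (illustrative sketch): recognizing a successful TQM tx
+ * completion in a hypothetical WBM release ring entry 'rel':
+ *
+ *    u32 src = FIELD_GET(HAL_WBM_RELEASE_INFO0_REL_SRC_MODULE, rel->info0);
+ *    u32 reason = FIELD_GET(HAL_WBM_RELEASE_INFO0_TQM_RELEASE_REASON,
+ *                           rel->info0);
+ *
+ *    if (src == HAL_WBM_REL_SRC_MODULE_TQM &&
+ *        reason == HAL_WBM_TQM_REL_REASON_FRAME_ACKED)
+ *            ... the frame was acked ...
+ */
+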
+struct hal_wbm_buffer_ring {
+ struct ath11k_buffer_addr buf_addr_info;
+};
+
+enum hal_desc_owner {
+ HAL_DESC_OWNER_WBM,
+ HAL_DESC_OWNER_SW,
+ HAL_DESC_OWNER_TQM,
+ HAL_DESC_OWNER_RXDMA,
+ HAL_DESC_OWNER_REO,
+ HAL_DESC_OWNER_SWITCH,
+};
+
+enum hal_desc_buf_type {
+ HAL_DESC_BUF_TYPE_TX_MSDU_LINK,
+ HAL_DESC_BUF_TYPE_TX_MPDU_LINK,
+ HAL_DESC_BUF_TYPE_TX_MPDU_QUEUE_HEAD,
+ HAL_DESC_BUF_TYPE_TX_MPDU_QUEUE_EXT,
+ HAL_DESC_BUF_TYPE_TX_FLOW,
+ HAL_DESC_BUF_TYPE_TX_BUFFER,
+ HAL_DESC_BUF_TYPE_RX_MSDU_LINK,
+ HAL_DESC_BUF_TYPE_RX_MPDU_LINK,
+ HAL_DESC_BUF_TYPE_RX_REO_QUEUE,
+ HAL_DESC_BUF_TYPE_RX_REO_QUEUE_EXT,
+ HAL_DESC_BUF_TYPE_RX_BUFFER,
+ HAL_DESC_BUF_TYPE_IDLE_LINK,
+};
+
+#define HAL_DESC_REO_OWNED 4
+#define HAL_DESC_REO_QUEUE_DESC 8
+#define HAL_DESC_REO_QUEUE_EXT_DESC 9
+#define HAL_DESC_REO_NON_QOS_TID 16
+
+#define HAL_DESC_HDR_INFO0_OWNER GENMASK(3, 0)
+#define HAL_DESC_HDR_INFO0_BUF_TYPE GENMASK(7, 4)
+#define HAL_DESC_HDR_INFO0_DBG_RESERVED GENMASK(31, 8)
+
+struct hal_desc_header {
+ u32 info0;
+} __packed;
+
+struct hal_rx_mpdu_link_ptr {
+ struct ath11k_buffer_addr addr_info;
+} __packed;
+
+struct hal_rx_msdu_details {
+ struct ath11k_buffer_addr buf_addr_info;
+ struct rx_msdu_desc rx_msdu_info;
+} __packed;
+
+#define HAL_RX_MSDU_LNK_INFO0_RX_QUEUE_NUMBER GENMASK(15, 0)
+#define HAL_RX_MSDU_LNK_INFO0_FIRST_MSDU_LNK BIT(16)
+
+struct hal_rx_msdu_link {
+ struct hal_desc_header desc_hdr;
+ struct ath11k_buffer_addr buf_addr_info;
+ u32 info0;
+ u32 pn[4];
+ struct hal_rx_msdu_details msdu_link[6];
+} __packed;
+
+struct hal_rx_reo_queue_ext {
+ struct hal_desc_header desc_hdr;
+ u32 rsvd;
+ struct hal_rx_mpdu_link_ptr mpdu_link[15];
+} __packed;
+
+/* hal_rx_reo_queue_ext
+ * Consumer: REO
+ * Producer: REO
+ *
+ * descriptor_header
+ * Details about which module owns this struct.
+ *
+ * mpdu_link
+ * Pointer to the next MPDU_link descriptor in the MPDU queue.
+ */
+
+enum hal_rx_reo_queue_pn_size {
+ HAL_RX_REO_QUEUE_PN_SIZE_24,
+ HAL_RX_REO_QUEUE_PN_SIZE_48,
+ HAL_RX_REO_QUEUE_PN_SIZE_128,
+};
+
+#define HAL_RX_REO_QUEUE_RX_QUEUE_NUMBER GENMASK(15, 0)
+
+#define HAL_RX_REO_QUEUE_INFO0_VLD BIT(0)
+#define HAL_RX_REO_QUEUE_INFO0_ASSOC_LNK_DESC_COUNTER GENMASK(2, 1)
+#define HAL_RX_REO_QUEUE_INFO0_DIS_DUP_DETECTION BIT(3)
+#define HAL_RX_REO_QUEUE_INFO0_SOFT_REORDER_EN BIT(4)
+#define HAL_RX_REO_QUEUE_INFO0_AC GENMASK(6, 5)
+#define HAL_RX_REO_QUEUE_INFO0_BAR BIT(7)
+#define HAL_RX_REO_QUEUE_INFO0_RETRY BIT(8)
+#define HAL_RX_REO_QUEUE_INFO0_CHECK_2K_MODE BIT(9)
+#define HAL_RX_REO_QUEUE_INFO0_OOR_MODE BIT(10)
+#define HAL_RX_REO_QUEUE_INFO0_BA_WINDOW_SIZE GENMASK(18, 11)
+#define HAL_RX_REO_QUEUE_INFO0_PN_CHECK BIT(19)
+#define HAL_RX_REO_QUEUE_INFO0_EVEN_PN BIT(20)
+#define HAL_RX_REO_QUEUE_INFO0_UNEVEN_PN BIT(21)
+#define HAL_RX_REO_QUEUE_INFO0_PN_HANDLE_ENABLE BIT(22)
+#define HAL_RX_REO_QUEUE_INFO0_PN_SIZE GENMASK(24, 23)
+#define HAL_RX_REO_QUEUE_INFO0_IGNORE_AMPDU_FLG BIT(25)
+
+#define HAL_RX_REO_QUEUE_INFO1_SVLD BIT(0)
+#define HAL_RX_REO_QUEUE_INFO1_SSN GENMASK(12, 1)
+#define HAL_RX_REO_QUEUE_INFO1_CURRENT_IDX GENMASK(20, 13)
+#define HAL_RX_REO_QUEUE_INFO1_SEQ_2K_ERR BIT(21)
+#define HAL_RX_REO_QUEUE_INFO1_PN_ERR BIT(22)
+#define HAL_RX_REO_QUEUE_INFO1_PN_VALID BIT(31)
+
+#define HAL_RX_REO_QUEUE_INFO2_MPDU_COUNT GENMASK(6, 0)
+#define HAL_RX_REO_QUEUE_INFO2_MSDU_COUNT		GENMASK(31, 7)
+
+#define HAL_RX_REO_QUEUE_INFO3_TIMEOUT_COUNT GENMASK(9, 4)
+#define HAL_RX_REO_QUEUE_INFO3_FWD_DUE_TO_BAR_CNT GENMASK(15, 10)
+#define HAL_RX_REO_QUEUE_INFO3_DUPLICATE_COUNT GENMASK(31, 16)
+
+#define HAL_RX_REO_QUEUE_INFO4_FRAME_IN_ORD_COUNT GENMASK(23, 0)
+#define HAL_RX_REO_QUEUE_INFO4_BAR_RECVD_COUNT GENMASK(31, 24)
+
+#define HAL_RX_REO_QUEUE_INFO5_LATE_RX_MPDU_COUNT GENMASK(11, 0)
+#define HAL_RX_REO_QUEUE_INFO5_WINDOW_JUMP_2K GENMASK(15, 12)
+#define HAL_RX_REO_QUEUE_INFO5_HOLE_COUNT GENMASK(31, 16)
+
+struct hal_rx_reo_queue {
+ struct hal_desc_header desc_hdr;
+ u32 rx_queue_num;
+ u32 info0;
+ u32 info1;
+ u32 pn[4];
+ u32 last_rx_enqueue_timestamp;
+ u32 last_rx_dequeue_timestamp;
+ u32 next_aging_queue[2];
+ u32 prev_aging_queue[2];
+ u32 rx_bitmap[8];
+ u32 info2;
+ u32 info3;
+ u32 info4;
+ u32 processed_mpdus;
+ u32 processed_msdus;
+ u32 processed_total_bytes;
+ u32 info5;
+ u32 rsvd[3];
+ struct hal_rx_reo_queue_ext ext_desc[];
+} __packed;
+
+/* hal_rx_reo_queue
+ *
+ * descriptor_header
+ * Details about which module owns this struct. Note that sub field
+ * Buffer_type shall be set to receive_reo_queue_descriptor.
+ *
+ * receive_queue_number
+ * Indicates the MPDU queue ID to which this MPDU link descriptor belongs.
+ *
+ * vld
+ * Valid bit indicating a session is established and the queue descriptor
+ * is valid.
+ * associated_link_descriptor_counter
+ * Indicates which of the 3 link descriptor counters shall be incremented
+ * or decremented when link descriptors are added or removed from this
+ * flow queue.
+ * disable_duplicate_detection
+ * When set, do not perform any duplicate detection.
+ * soft_reorder_enable
+ * When set, REO has been instructed to not perform the actual re-ordering
+ * of frames for this queue, but just to insert the reorder opcodes.
+ * ac
+ * Indicates the access category of the queue descriptor.
+ * bar
+ * Indicates if BAR has been received.
+ * retry
+ * Retry bit is checked if this bit is set.
+ * chk_2k_mode
+ * Indicates what type of operation is expected from Reo when the received
+ * frame SN falls within the 2K window.
+ * oor_mode
+ * Indicates what type of operation is expected when the received frame
+ * falls within the OOR window.
+ * ba_window_size
+ * Indicates the negotiated (window size + 1). Max of 256 bits.
+ *
+ *	A value of 255 means a 256-bit bitmap, 63 means a 64-bit bitmap, and
+ *	0 means a non-BA session with a window size of 0. These 3 values are
+ *	the main values validated, but other values should work as well.
+ *
+ *	A BA window size of 0 (=> one frame entry bitmap) means that there is
+ *	no additional rx_reo_queue_ext descriptor following rx_reo_queue in
+ *	memory.
+ * A BA window size of 1 - 105, means that there is 1 rx_reo_queue_ext.
+ * A BA window size of 106 - 210, means that there are 2 rx_reo_queue_ext.
+ * A BA window size of 211 - 256, means that there are 3 rx_reo_queue_ext.
+ * pn_check_needed, pn_shall_be_even, pn_shall_be_uneven, pn_handling_enable,
+ * pn_size
+ * REO shall perform the PN increment check, even number check, uneven
+ * number check, PN error check and size of the PN field check.
+ * ignore_ampdu_flag
+ * REO shall ignore the ampdu_flag on entrance descriptor for this queue.
+ *
+ * svld
+ *	The sequence number in the next field is a valid one.
+ * ssn
+ * Starting Sequence number of the session.
+ * current_index
+ * Points to last forwarded packet
+ * seq_2k_error_detected_flag
+ * REO has detected a 2k error jump in the sequence number and from that
+ * moment forward, all new frames are forwarded directly to FW, without
+ * duplicate detect, reordering, etc.
+ * pn_error_detected_flag
+ * REO has detected a PN error.
+ */
+
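+/* Editorial example (illustrative sketch): one way to size a REO queue
+ * descriptor from the ba_window_size ranges documented above (0 => no
+ * rx_reo_queue_ext, 1-105 => one, 106-210 => two, 211-256 => three):
+ *
+ *    u32 num_ext = DIV_ROUND_UP(ba_window_size, 105);
+ *    size_t qdesc_sz = sizeof(struct hal_rx_reo_queue) +
+ *                      num_ext * sizeof(struct hal_rx_reo_queue_ext);
+ */
+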
+#define HAL_REO_UPD_RX_QUEUE_INFO0_QUEUE_ADDR_HI GENMASK(7, 0)
+#define HAL_REO_UPD_RX_QUEUE_INFO0_UPD_RX_QUEUE_NUM BIT(8)
+#define HAL_REO_UPD_RX_QUEUE_INFO0_UPD_VLD BIT(9)
+#define HAL_REO_UPD_RX_QUEUE_INFO0_UPD_ASSOC_LNK_DESC_CNT BIT(10)
+#define HAL_REO_UPD_RX_QUEUE_INFO0_UPD_DIS_DUP_DETECTION BIT(11)
+#define HAL_REO_UPD_RX_QUEUE_INFO0_UPD_SOFT_REORDER_EN BIT(12)
+#define HAL_REO_UPD_RX_QUEUE_INFO0_UPD_AC BIT(13)
+#define HAL_REO_UPD_RX_QUEUE_INFO0_UPD_BAR BIT(14)
+#define HAL_REO_UPD_RX_QUEUE_INFO0_UPD_RETRY BIT(15)
+#define HAL_REO_UPD_RX_QUEUE_INFO0_UPD_CHECK_2K_MODE BIT(16)
+#define HAL_REO_UPD_RX_QUEUE_INFO0_UPD_OOR_MODE BIT(17)
+#define HAL_REO_UPD_RX_QUEUE_INFO0_UPD_BA_WINDOW_SIZE BIT(18)
+#define HAL_REO_UPD_RX_QUEUE_INFO0_UPD_PN_CHECK BIT(19)
+#define HAL_REO_UPD_RX_QUEUE_INFO0_UPD_EVEN_PN BIT(20)
+#define HAL_REO_UPD_RX_QUEUE_INFO0_UPD_UNEVEN_PN BIT(21)
+#define HAL_REO_UPD_RX_QUEUE_INFO0_UPD_PN_HANDLE_ENABLE BIT(22)
+#define HAL_REO_UPD_RX_QUEUE_INFO0_UPD_PN_SIZE BIT(23)
+#define HAL_REO_UPD_RX_QUEUE_INFO0_UPD_IGNORE_AMPDU_FLG BIT(24)
+#define HAL_REO_UPD_RX_QUEUE_INFO0_UPD_SVLD BIT(25)
+#define HAL_REO_UPD_RX_QUEUE_INFO0_UPD_SSN BIT(26)
+#define HAL_REO_UPD_RX_QUEUE_INFO0_UPD_SEQ_2K_ERR BIT(27)
+#define HAL_REO_UPD_RX_QUEUE_INFO0_UPD_PN_ERR BIT(28)
+#define HAL_REO_UPD_RX_QUEUE_INFO0_UPD_PN_VALID BIT(29)
+#define HAL_REO_UPD_RX_QUEUE_INFO0_UPD_PN BIT(30)
+
+#define HAL_REO_UPD_RX_QUEUE_INFO1_RX_QUEUE_NUMBER GENMASK(15, 0)
+#define HAL_REO_UPD_RX_QUEUE_INFO1_VLD BIT(16)
+#define HAL_REO_UPD_RX_QUEUE_INFO1_ASSOC_LNK_DESC_COUNTER GENMASK(18, 17)
+#define HAL_REO_UPD_RX_QUEUE_INFO1_DIS_DUP_DETECTION BIT(19)
+#define HAL_REO_UPD_RX_QUEUE_INFO1_SOFT_REORDER_EN BIT(20)
+#define HAL_REO_UPD_RX_QUEUE_INFO1_AC GENMASK(22, 21)
+#define HAL_REO_UPD_RX_QUEUE_INFO1_BAR BIT(23)
+#define HAL_REO_UPD_RX_QUEUE_INFO1_RETRY BIT(24)
+#define HAL_REO_UPD_RX_QUEUE_INFO1_CHECK_2K_MODE BIT(25)
+#define HAL_REO_UPD_RX_QUEUE_INFO1_OOR_MODE BIT(26)
+#define HAL_REO_UPD_RX_QUEUE_INFO1_PN_CHECK BIT(27)
+#define HAL_REO_UPD_RX_QUEUE_INFO1_EVEN_PN BIT(28)
+#define HAL_REO_UPD_RX_QUEUE_INFO1_UNEVEN_PN BIT(29)
+#define HAL_REO_UPD_RX_QUEUE_INFO1_PN_HANDLE_ENABLE BIT(30)
+#define HAL_REO_UPD_RX_QUEUE_INFO1_IGNORE_AMPDU_FLG BIT(31)
+
+#define HAL_REO_UPD_RX_QUEUE_INFO2_BA_WINDOW_SIZE GENMASK(7, 0)
+#define HAL_REO_UPD_RX_QUEUE_INFO2_PN_SIZE GENMASK(9, 8)
+#define HAL_REO_UPD_RX_QUEUE_INFO2_SVLD BIT(10)
+#define HAL_REO_UPD_RX_QUEUE_INFO2_SSN GENMASK(22, 11)
+#define HAL_REO_UPD_RX_QUEUE_INFO2_SEQ_2K_ERR BIT(23)
+#define HAL_REO_UPD_RX_QUEUE_INFO2_PN_ERR BIT(24)
+#define HAL_REO_UPD_RX_QUEUE_INFO2_PN_VALID BIT(25)
+
+struct hal_reo_update_rx_queue {
+ struct hal_reo_cmd_hdr cmd;
+ u32 queue_addr_lo;
+ u32 info0;
+ u32 info1;
+ u32 info2;
+ u32 pn[4];
+} __packed;
+
+#define HAL_REO_UNBLOCK_CACHE_INFO0_UNBLK_CACHE BIT(0)
+#define HAL_REO_UNBLOCK_CACHE_INFO0_RESOURCE_IDX GENMASK(2, 1)
+
+struct hal_reo_unblock_cache {
+ struct hal_reo_cmd_hdr cmd;
+ u32 info0;
+ u32 rsvd[7];
+} __packed;
+
+enum hal_reo_exec_status {
+ HAL_REO_EXEC_STATUS_SUCCESS,
+ HAL_REO_EXEC_STATUS_BLOCKED,
+ HAL_REO_EXEC_STATUS_FAILED,
+ HAL_REO_EXEC_STATUS_RESOURCE_BLOCKED,
+};
+
+#define HAL_REO_STATUS_HDR_INFO0_STATUS_NUM GENMASK(15, 0)
+#define HAL_REO_STATUS_HDR_INFO0_EXEC_TIME GENMASK(25, 16)
+#define HAL_REO_STATUS_HDR_INFO0_EXEC_STATUS GENMASK(27, 26)
+
+struct hal_reo_status_hdr {
+ u32 info0;
+ u32 timestamp;
+} __packed;
+
+/* hal_reo_status_hdr
+ * Producer: REO
+ * Consumer: SW
+ *
+ * status_num
+ *	The value in this field is equal to the value of the REO command
+ * number. This field helps to correlate the statuses with the REO
+ * commands.
+ *
+ * execution_time (in us)
+ * The amount of time REO took to execute the command. Note that
+ * this time does not include the duration of the command waiting
+ * in the command ring, before the execution started.
+ *
+ * execution_status
+ * Execution status of the command. Values are defined in
+ * enum %HAL_REO_EXEC_STATUS_.
+ */
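+
+/* Editorial example (illustrative sketch): checking how a REO command
+ * completed, given a hypothetical status header pointer 'hdr':
+ *
+ *    if (FIELD_GET(HAL_REO_STATUS_HDR_INFO0_EXEC_STATUS, hdr->info0) !=
+ *        HAL_REO_EXEC_STATUS_SUCCESS)
+ *            ... the command was blocked or failed ...
+ */
+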
+#define HAL_REO_GET_QUEUE_STATS_STATUS_INFO0_SSN GENMASK(11, 0)
+#define HAL_REO_GET_QUEUE_STATS_STATUS_INFO0_CUR_IDX GENMASK(19, 12)
+
+#define HAL_REO_GET_QUEUE_STATS_STATUS_INFO1_MPDU_COUNT GENMASK(6, 0)
+#define HAL_REO_GET_QUEUE_STATS_STATUS_INFO1_MSDU_COUNT GENMASK(31, 7)
+
+#define HAL_REO_GET_QUEUE_STATS_STATUS_INFO2_TIMEOUT_COUNT GENMASK(9, 4)
+#define HAL_REO_GET_QUEUE_STATS_STATUS_INFO2_FDTB_COUNT GENMASK(15, 10)
+#define HAL_REO_GET_QUEUE_STATS_STATUS_INFO2_DUPLICATE_COUNT GENMASK(31, 16)
+
+#define HAL_REO_GET_QUEUE_STATS_STATUS_INFO3_FIO_COUNT GENMASK(23, 0)
+#define HAL_REO_GET_QUEUE_STATS_STATUS_INFO3_BAR_RCVD_CNT GENMASK(31, 24)
+
+#define HAL_REO_GET_QUEUE_STATS_STATUS_INFO4_LATE_RX_MPDU GENMASK(11, 0)
+#define HAL_REO_GET_QUEUE_STATS_STATUS_INFO4_WINDOW_JMP2K GENMASK(15, 12)
+#define HAL_REO_GET_QUEUE_STATS_STATUS_INFO4_HOLE_COUNT GENMASK(31, 16)
+
+#define HAL_REO_GET_QUEUE_STATS_STATUS_INFO5_LOOPING_CNT GENMASK(31, 28)
+
+struct hal_reo_get_queue_stats_status {
+ struct hal_reo_status_hdr hdr;
+ u32 info0;
+ u32 pn[4];
+ u32 last_rx_enqueue_timestamp;
+ u32 last_rx_dequeue_timestamp;
+ u32 rx_bitmap[8];
+ u32 info1;
+ u32 info2;
+ u32 info3;
+ u32 num_mpdu_frames;
+ u32 num_msdu_frames;
+ u32 total_bytes;
+ u32 info4;
+ u32 info5;
+} __packed;
+
+/* hal_reo_get_queue_stats_status
+ * Producer: REO
+ * Consumer: SW
+ *
+ * status_hdr
+ * Details that can link this status with the original command. It
+ * also contains info on how long REO took to execute this command.
+ *
+ * ssn
+ * Starting Sequence number of the session, this changes whenever
+ * window moves (can be filled by SW then maintained by REO).
+ *
+ * current_index
+ * Points to last forwarded packet.
+ *
+ * pn
+ * Bits of the PN number.
+ *
+ * last_rx_enqueue_timestamp
+ * last_rx_dequeue_timestamp
+ * Timestamp of arrival of the last MPDU for this queue and
+ * Timestamp of forwarding an MPDU accordingly.
+ *
+ * rx_bitmap
+ * When a bit is set, the corresponding frame is currently held
+ *	in the re-order queue. The bitmap is fully managed by HW.
+ *
+ * current_mpdu_count
+ * current_msdu_count
+ * The number of MPDUs and MSDUs in the queue.
+ *
+ * timeout_count
+ * The number of times REO started forwarding frames even though
+ * there is a hole in the bitmap. Forwarding reason is timeout.
+ *
+ * forward_due_to_bar_count
+ * The number of times REO started forwarding frames even though
+ * there is a hole in the bitmap. Fwd reason is reception of BAR.
+ *
+ * duplicate_count
+ * The number of duplicate frames that have been detected.
+ *
+ * frames_in_order_count
+ * The number of frames that have been received in order (without
+ * a hole that prevented them from being forwarded immediately).
+ *
+ * bar_received_count
+ * The number of times a BAR frame is received.
+ *
+ * mpdu_frames_processed_count
+ * msdu_frames_processed_count
+ * The total number of MPDU/MSDU frames that have been processed.
+ *
+ * total_bytes
+ * An approximation of the number of bytes received for this queue.
+ *
+ * late_receive_mpdu_count
+ * The number of MPDUs received after the window had already moved
+ * on. The 'late' sequence window is defined as
+ * (Window SSN - 256) - (Window SSN - 1).
+ *
+ * window_jump_2k
+ * The number of times the window moved more than 2K
+ *
+ * hole_count
+ * The number of times a hole was created in the receive bitmap.
+ *
+ * looping_count
+ * A count value that indicates the number of times the producer of
+ * entries into this Ring has looped around the ring.
+ */
+
+#define HAL_REO_STATUS_LOOP_CNT GENMASK(31, 28)
+
+#define HAL_REO_FLUSH_QUEUE_INFO0_ERR_DETECTED BIT(0)
+#define HAL_REO_FLUSH_QUEUE_INFO0_RSVD GENMASK(31, 1)
+#define HAL_REO_FLUSH_QUEUE_INFO1_RSVD GENMASK(27, 0)
+
+struct hal_reo_flush_queue_status {
+ struct hal_reo_status_hdr hdr;
+ u32 info0;
+ u32 rsvd0[21];
+ u32 info1;
+} __packed;
+
+/* hal_reo_flush_queue_status
+ * Producer: REO
+ * Consumer: SW
+ *
+ * status_hdr
+ * Details that can link this status with the original command. It
+ * also contains info on how long REO took to execute this command.
+ *
+ * error_detected
+ * Status of blocking resource
+ *
+ * 0 - No error has been detected while executing this command
+ * 1 - Error detected. The resource to be used for blocking was
+ * already in use.
+ *
+ * looping_count
+ * A count value that indicates the number of times the producer of
+ * entries into this Ring has looped around the ring.
+ */
+
+#define HAL_REO_FLUSH_CACHE_STATUS_INFO0_IS_ERR BIT(0)
+#define HAL_REO_FLUSH_CACHE_STATUS_INFO0_BLOCK_ERR_CODE GENMASK(2, 1)
+#define HAL_REO_FLUSH_CACHE_STATUS_INFO0_FLUSH_STATUS_HIT BIT(8)
+#define HAL_REO_FLUSH_CACHE_STATUS_INFO0_FLUSH_DESC_TYPE GENMASK(11, 9)
+#define HAL_REO_FLUSH_CACHE_STATUS_INFO0_FLUSH_CLIENT_ID GENMASK(15, 12)
+#define HAL_REO_FLUSH_CACHE_STATUS_INFO0_FLUSH_ERR GENMASK(17, 16)
+#define HAL_REO_FLUSH_CACHE_STATUS_INFO0_FLUSH_COUNT GENMASK(25, 18)
+
+struct hal_reo_flush_cache_status {
+ struct hal_reo_status_hdr hdr;
+ u32 info0;
+ u32 rsvd0[21];
+ u32 info1;
+} __packed;
+
+/* hal_reo_flush_cache_status
+ * Producer: REO
+ * Consumer: SW
+ *
+ * status_hdr
+ * Details that can link this status with the original command. It
+ * also contains info on how long REO took to execute this command.
+ *
+ * error_detected
+ * Status for blocking resource handling
+ *
+ * 0 - No error has been detected while executing this command
+ * 1 - An error in the blocking resource management was detected
+ *
+ * block_error_details
+ * only valid when error_detected is set
+ *
+ * 0 - No blocking related errors found
+ * 1 - Blocking resource is already in use
+ * 2 - Resource requested to be unblocked, was not blocked
+ *
+ * cache_controller_flush_status_hit
+ * The status that the cache controller returned on executing the
+ * flush command.
+ *
+ * 0 - miss; 1 - hit
+ *
+ * cache_controller_flush_status_desc_type
+ * Flush descriptor type
+ *
+ * cache_controller_flush_status_client_id
+ * Module who made the flush request
+ *
+ * In REO, this is always 0
+ *
+ * cache_controller_flush_status_error
+ * Error condition
+ *
+ * 0 - No error found
+ * 1 - HW interface is still busy
+ * 2 - Line currently locked. Used for one line flush command
+ * 3 - At least one line is still locked.
+ * Used for cache flush command.
+ *
+ * cache_controller_flush_count
+ * The number of lines that were actually flushed out
+ *
+ * looping_count
+ * A count value that indicates the number of times the producer of
+ * entries into this Ring has looped around the ring.
+ */
+
+#define HAL_REO_UNBLOCK_CACHE_STATUS_INFO0_IS_ERR BIT(0)
+#define HAL_REO_UNBLOCK_CACHE_STATUS_INFO0_TYPE BIT(1)
+
+struct hal_reo_unblock_cache_status {
+ struct hal_reo_status_hdr hdr;
+ u32 info0;
+ u32 rsvd0[21];
+ u32 info1;
+} __packed;
+
+/* hal_reo_unblock_cache_status
+ * Producer: REO
+ * Consumer: SW
+ *
+ * status_hdr
+ * Details that can link this status with the original command. It
+ * also contains info on how long REO took to execute this command.
+ *
+ * error_detected
+ * 0 - No error has been detected while executing this command
+ * 1 - The blocking resource was not in use, and therefore it could
+ * not be unblocked.
+ *
+ * unblock_type
+ * Reference to the type of unblock command
+ * 0 - Unblock a blocking resource
+ *	1 - The entire cache usage is unblocked
+ *
+ * looping_count
+ * A count value that indicates the number of times the producer of
+ * entries into this Ring has looped around the ring.
+ */
+
+#define HAL_REO_FLUSH_TIMEOUT_STATUS_INFO0_IS_ERR BIT(0)
+#define HAL_REO_FLUSH_TIMEOUT_STATUS_INFO0_LIST_EMPTY BIT(1)
+
+#define HAL_REO_FLUSH_TIMEOUT_STATUS_INFO1_REL_DESC_COUNT GENMASK(15, 0)
+#define HAL_REO_FLUSH_TIMEOUT_STATUS_INFO1_FWD_BUF_COUNT GENMASK(31, 16)
+
+struct hal_reo_flush_timeout_list_status {
+ struct hal_reo_status_hdr hdr;
+ u32 info0;
+ u32 info1;
+ u32 rsvd0[20];
+ u32 info2;
+} __packed;
+
+/* hal_reo_flush_timeout_list_status
+ * Producer: REO
+ * Consumer: SW
+ *
+ * status_hdr
+ * Details that can link this status with the original command. It
+ * also contains info on how long REO took to execute this command.
+ *
+ * error_detected
+ * 0 - No error has been detected while executing this command
+ * 1 - Command not properly executed and returned with error
+ *
+ * timeout_list_empty
+ * When set, REO has depleted the timeout list and all entries are
+ * gone.
+ *
+ * release_desc_count
+ * Producer: SW; Consumer: REO
+ *	The number of link descriptors released
+ *
+ * forward_buf_count
+ * Producer: SW; Consumer: REO
+ * The number of buffers forwarded to the REO destination rings
+ *
+ * looping_count
+ * A count value that indicates the number of times the producer of
+ * entries into this Ring has looped around the ring.
+ */
+
+#define HAL_REO_DESC_THRESH_STATUS_INFO0_THRESH_INDEX GENMASK(1, 0)
+#define HAL_REO_DESC_THRESH_STATUS_INFO1_LINK_DESC_COUNTER0 GENMASK(23, 0)
+#define HAL_REO_DESC_THRESH_STATUS_INFO2_LINK_DESC_COUNTER1 GENMASK(23, 0)
+#define HAL_REO_DESC_THRESH_STATUS_INFO3_LINK_DESC_COUNTER2 GENMASK(23, 0)
+#define HAL_REO_DESC_THRESH_STATUS_INFO4_LINK_DESC_COUNTER_SUM GENMASK(25, 0)
+
+struct hal_reo_desc_thresh_reached_status {
+ struct hal_reo_status_hdr hdr;
+ u32 info0;
+ u32 info1;
+ u32 info2;
+ u32 info3;
+ u32 info4;
+ u32 rsvd0[17];
+ u32 info5;
+} __packed;
+
+/* hal_reo_desc_thresh_reached_status
+ * Producer: REO
+ * Consumer: SW
+ *
+ * status_hdr
+ * Details that can link this status with the original command. It
+ * also contains info on how long REO took to execute this command.
+ *
+ * threshold_index
+ * The index of the threshold register whose value got reached
+ *
+ * link_descriptor_counter0
+ * link_descriptor_counter1
+ * link_descriptor_counter2
+ * link_descriptor_counter_sum
+ * Value of the respective counters at generation of this message
+ *
+ * looping_count
+ * A count value that indicates the number of times the producer of
+ * entries into this Ring has looped around the ring.
+ */
+
+#endif /* ATH11K_HAL_DESC_H */
diff --git a/drivers/net/wireless/ath/ath11k/hal_rx.c b/drivers/net/wireless/ath/ath11k/hal_rx.c
new file mode 100644
index 0000000000..e5ed5efb13
--- /dev/null
+++ b/drivers/net/wireless/ath/ath11k/hal_rx.c
@@ -0,0 +1,1611 @@
+// SPDX-License-Identifier: BSD-3-Clause-Clear
+/*
+ * Copyright (c) 2018-2019 The Linux Foundation. All rights reserved.
+ */
+
+#include "debug.h"
+#include "hal.h"
+#include "hal_tx.h"
+#include "hal_rx.h"
+#include "hal_desc.h"
+#include "hif.h"
+
+static void ath11k_hal_reo_set_desc_hdr(struct hal_desc_header *hdr,
+ u8 owner, u8 buffer_type, u32 magic)
+{
+ hdr->info0 = FIELD_PREP(HAL_DESC_HDR_INFO0_OWNER, owner) |
+ FIELD_PREP(HAL_DESC_HDR_INFO0_BUF_TYPE, buffer_type);
+
+ /* Magic pattern in reserved bits for debugging */
+ hdr->info0 |= FIELD_PREP(HAL_DESC_HDR_INFO0_DBG_RESERVED, magic);
+}
+
+static int ath11k_hal_reo_cmd_queue_stats(struct hal_tlv_hdr *tlv,
+ struct ath11k_hal_reo_cmd *cmd)
+{
+ struct hal_reo_get_queue_stats *desc;
+
+ tlv->tl = FIELD_PREP(HAL_TLV_HDR_TAG, HAL_REO_GET_QUEUE_STATS) |
+ FIELD_PREP(HAL_TLV_HDR_LEN, sizeof(*desc));
+
+ desc = (struct hal_reo_get_queue_stats *)tlv->value;
+ memset_startat(desc, 0, queue_addr_lo);
+
+ desc->cmd.info0 &= ~HAL_REO_CMD_HDR_INFO0_STATUS_REQUIRED;
+ if (cmd->flag & HAL_REO_CMD_FLG_NEED_STATUS)
+ desc->cmd.info0 |= HAL_REO_CMD_HDR_INFO0_STATUS_REQUIRED;
+
+ desc->queue_addr_lo = cmd->addr_lo;
+ desc->info0 = FIELD_PREP(HAL_REO_GET_QUEUE_STATS_INFO0_QUEUE_ADDR_HI,
+ cmd->addr_hi);
+ if (cmd->flag & HAL_REO_CMD_FLG_STATS_CLEAR)
+ desc->info0 |= HAL_REO_GET_QUEUE_STATS_INFO0_CLEAR_STATS;
+
+ return FIELD_GET(HAL_REO_CMD_HDR_INFO0_CMD_NUMBER, desc->cmd.info0);
+}
+
+static int ath11k_hal_reo_cmd_flush_cache(struct ath11k_hal *hal, struct hal_tlv_hdr *tlv,
+ struct ath11k_hal_reo_cmd *cmd)
+{
+ struct hal_reo_flush_cache *desc;
+ u8 avail_slot = ffz(hal->avail_blk_resource);
+
+ if (cmd->flag & HAL_REO_CMD_FLG_FLUSH_BLOCK_LATER) {
+ if (avail_slot >= HAL_MAX_AVAIL_BLK_RES)
+ return -ENOSPC;
+
+ hal->current_blk_index = avail_slot;
+ }
+
+ tlv->tl = FIELD_PREP(HAL_TLV_HDR_TAG, HAL_REO_FLUSH_CACHE) |
+ FIELD_PREP(HAL_TLV_HDR_LEN, sizeof(*desc));
+
+ desc = (struct hal_reo_flush_cache *)tlv->value;
+ memset_startat(desc, 0, cache_addr_lo);
+
+ desc->cmd.info0 &= ~HAL_REO_CMD_HDR_INFO0_STATUS_REQUIRED;
+ if (cmd->flag & HAL_REO_CMD_FLG_NEED_STATUS)
+ desc->cmd.info0 |= HAL_REO_CMD_HDR_INFO0_STATUS_REQUIRED;
+
+ desc->cache_addr_lo = cmd->addr_lo;
+ desc->info0 = FIELD_PREP(HAL_REO_FLUSH_CACHE_INFO0_CACHE_ADDR_HI,
+ cmd->addr_hi);
+
+ if (cmd->flag & HAL_REO_CMD_FLG_FLUSH_FWD_ALL_MPDUS)
+ desc->info0 |= HAL_REO_FLUSH_CACHE_INFO0_FWD_ALL_MPDUS;
+
+ if (cmd->flag & HAL_REO_CMD_FLG_FLUSH_BLOCK_LATER) {
+ desc->info0 |= HAL_REO_FLUSH_CACHE_INFO0_BLOCK_CACHE_USAGE;
+ desc->info0 |=
+ FIELD_PREP(HAL_REO_FLUSH_CACHE_INFO0_BLOCK_RESRC_IDX,
+ avail_slot);
+ }
+
+ if (cmd->flag & HAL_REO_CMD_FLG_FLUSH_NO_INVAL)
+ desc->info0 |= HAL_REO_FLUSH_CACHE_INFO0_FLUSH_WO_INVALIDATE;
+
+ if (cmd->flag & HAL_REO_CMD_FLG_FLUSH_ALL)
+ desc->info0 |= HAL_REO_FLUSH_CACHE_INFO0_FLUSH_ALL;
+
+ return FIELD_GET(HAL_REO_CMD_HDR_INFO0_CMD_NUMBER, desc->cmd.info0);
+}
+
+static int ath11k_hal_reo_cmd_update_rx_queue(struct hal_tlv_hdr *tlv,
+ struct ath11k_hal_reo_cmd *cmd)
+{
+ struct hal_reo_update_rx_queue *desc;
+
+ tlv->tl = FIELD_PREP(HAL_TLV_HDR_TAG, HAL_REO_UPDATE_RX_REO_QUEUE) |
+ FIELD_PREP(HAL_TLV_HDR_LEN, sizeof(*desc));
+
+ desc = (struct hal_reo_update_rx_queue *)tlv->value;
+ memset_startat(desc, 0, queue_addr_lo);
+
+ desc->cmd.info0 &= ~HAL_REO_CMD_HDR_INFO0_STATUS_REQUIRED;
+ if (cmd->flag & HAL_REO_CMD_FLG_NEED_STATUS)
+ desc->cmd.info0 |= HAL_REO_CMD_HDR_INFO0_STATUS_REQUIRED;
+
+ desc->queue_addr_lo = cmd->addr_lo;
+ desc->info0 =
+ FIELD_PREP(HAL_REO_UPD_RX_QUEUE_INFO0_QUEUE_ADDR_HI,
+ cmd->addr_hi) |
+ FIELD_PREP(HAL_REO_UPD_RX_QUEUE_INFO0_UPD_RX_QUEUE_NUM,
+ !!(cmd->upd0 & HAL_REO_CMD_UPD0_RX_QUEUE_NUM)) |
+ FIELD_PREP(HAL_REO_UPD_RX_QUEUE_INFO0_UPD_VLD,
+ !!(cmd->upd0 & HAL_REO_CMD_UPD0_VLD)) |
+ FIELD_PREP(HAL_REO_UPD_RX_QUEUE_INFO0_UPD_ASSOC_LNK_DESC_CNT,
+ !!(cmd->upd0 & HAL_REO_CMD_UPD0_ALDC)) |
+ FIELD_PREP(HAL_REO_UPD_RX_QUEUE_INFO0_UPD_DIS_DUP_DETECTION,
+ !!(cmd->upd0 & HAL_REO_CMD_UPD0_DIS_DUP_DETECTION)) |
+ FIELD_PREP(HAL_REO_UPD_RX_QUEUE_INFO0_UPD_SOFT_REORDER_EN,
+ !!(cmd->upd0 & HAL_REO_CMD_UPD0_SOFT_REORDER_EN)) |
+ FIELD_PREP(HAL_REO_UPD_RX_QUEUE_INFO0_UPD_AC,
+ !!(cmd->upd0 & HAL_REO_CMD_UPD0_AC)) |
+ FIELD_PREP(HAL_REO_UPD_RX_QUEUE_INFO0_UPD_BAR,
+ !!(cmd->upd0 & HAL_REO_CMD_UPD0_BAR)) |
+ FIELD_PREP(HAL_REO_UPD_RX_QUEUE_INFO0_UPD_RETRY,
+ !!(cmd->upd0 & HAL_REO_CMD_UPD0_RETRY)) |
+ FIELD_PREP(HAL_REO_UPD_RX_QUEUE_INFO0_UPD_CHECK_2K_MODE,
+ !!(cmd->upd0 & HAL_REO_CMD_UPD0_CHECK_2K_MODE)) |
+ FIELD_PREP(HAL_REO_UPD_RX_QUEUE_INFO0_UPD_OOR_MODE,
+ !!(cmd->upd0 & HAL_REO_CMD_UPD0_OOR_MODE)) |
+ FIELD_PREP(HAL_REO_UPD_RX_QUEUE_INFO0_UPD_BA_WINDOW_SIZE,
+ !!(cmd->upd0 & HAL_REO_CMD_UPD0_BA_WINDOW_SIZE)) |
+ FIELD_PREP(HAL_REO_UPD_RX_QUEUE_INFO0_UPD_PN_CHECK,
+ !!(cmd->upd0 & HAL_REO_CMD_UPD0_PN_CHECK)) |
+ FIELD_PREP(HAL_REO_UPD_RX_QUEUE_INFO0_UPD_EVEN_PN,
+ !!(cmd->upd0 & HAL_REO_CMD_UPD0_EVEN_PN)) |
+ FIELD_PREP(HAL_REO_UPD_RX_QUEUE_INFO0_UPD_UNEVEN_PN,
+ !!(cmd->upd0 & HAL_REO_CMD_UPD0_UNEVEN_PN)) |
+ FIELD_PREP(HAL_REO_UPD_RX_QUEUE_INFO0_UPD_PN_HANDLE_ENABLE,
+ !!(cmd->upd0 & HAL_REO_CMD_UPD0_PN_HANDLE_ENABLE)) |
+ FIELD_PREP(HAL_REO_UPD_RX_QUEUE_INFO0_UPD_PN_SIZE,
+ !!(cmd->upd0 & HAL_REO_CMD_UPD0_PN_SIZE)) |
+ FIELD_PREP(HAL_REO_UPD_RX_QUEUE_INFO0_UPD_IGNORE_AMPDU_FLG,
+ !!(cmd->upd0 & HAL_REO_CMD_UPD0_IGNORE_AMPDU_FLG)) |
+ FIELD_PREP(HAL_REO_UPD_RX_QUEUE_INFO0_UPD_SVLD,
+ !!(cmd->upd0 & HAL_REO_CMD_UPD0_SVLD)) |
+ FIELD_PREP(HAL_REO_UPD_RX_QUEUE_INFO0_UPD_SSN,
+ !!(cmd->upd0 & HAL_REO_CMD_UPD0_SSN)) |
+ FIELD_PREP(HAL_REO_UPD_RX_QUEUE_INFO0_UPD_SEQ_2K_ERR,
+ !!(cmd->upd0 & HAL_REO_CMD_UPD0_SEQ_2K_ERR)) |
+ FIELD_PREP(HAL_REO_UPD_RX_QUEUE_INFO0_UPD_PN_VALID,
+ !!(cmd->upd0 & HAL_REO_CMD_UPD0_PN_VALID)) |
+ FIELD_PREP(HAL_REO_UPD_RX_QUEUE_INFO0_UPD_PN,
+ !!(cmd->upd0 & HAL_REO_CMD_UPD0_PN));
+
+ desc->info1 =
+ FIELD_PREP(HAL_REO_UPD_RX_QUEUE_INFO1_RX_QUEUE_NUMBER,
+ cmd->rx_queue_num) |
+ FIELD_PREP(HAL_REO_UPD_RX_QUEUE_INFO1_VLD,
+ !!(cmd->upd1 & HAL_REO_CMD_UPD1_VLD)) |
+ FIELD_PREP(HAL_REO_UPD_RX_QUEUE_INFO1_ASSOC_LNK_DESC_COUNTER,
+ FIELD_GET(HAL_REO_CMD_UPD1_ALDC, cmd->upd1)) |
+ FIELD_PREP(HAL_REO_UPD_RX_QUEUE_INFO1_DIS_DUP_DETECTION,
+ !!(cmd->upd1 & HAL_REO_CMD_UPD1_DIS_DUP_DETECTION)) |
+ FIELD_PREP(HAL_REO_UPD_RX_QUEUE_INFO1_SOFT_REORDER_EN,
+ !!(cmd->upd1 & HAL_REO_CMD_UPD1_SOFT_REORDER_EN)) |
+ FIELD_PREP(HAL_REO_UPD_RX_QUEUE_INFO1_AC,
+ FIELD_GET(HAL_REO_CMD_UPD1_AC, cmd->upd1)) |
+ FIELD_PREP(HAL_REO_UPD_RX_QUEUE_INFO1_BAR,
+ !!(cmd->upd1 & HAL_REO_CMD_UPD1_BAR)) |
+ FIELD_PREP(HAL_REO_UPD_RX_QUEUE_INFO1_CHECK_2K_MODE,
+ !!(cmd->upd1 & HAL_REO_CMD_UPD1_CHECK_2K_MODE)) |
+ FIELD_PREP(HAL_REO_UPD_RX_QUEUE_INFO1_RETRY,
+ !!(cmd->upd1 & HAL_REO_CMD_UPD1_RETRY)) |
+ FIELD_PREP(HAL_REO_UPD_RX_QUEUE_INFO1_OOR_MODE,
+ !!(cmd->upd1 & HAL_REO_CMD_UPD1_OOR_MODE)) |
+ FIELD_PREP(HAL_REO_UPD_RX_QUEUE_INFO1_PN_CHECK,
+ !!(cmd->upd1 & HAL_REO_CMD_UPD1_PN_CHECK)) |
+ FIELD_PREP(HAL_REO_UPD_RX_QUEUE_INFO1_EVEN_PN,
+ !!(cmd->upd1 & HAL_REO_CMD_UPD1_EVEN_PN)) |
+ FIELD_PREP(HAL_REO_UPD_RX_QUEUE_INFO1_UNEVEN_PN,
+ !!(cmd->upd1 & HAL_REO_CMD_UPD1_UNEVEN_PN)) |
+ FIELD_PREP(HAL_REO_UPD_RX_QUEUE_INFO1_PN_HANDLE_ENABLE,
+ !!(cmd->upd1 & HAL_REO_CMD_UPD1_PN_HANDLE_ENABLE)) |
+ FIELD_PREP(HAL_REO_UPD_RX_QUEUE_INFO1_IGNORE_AMPDU_FLG,
+ !!(cmd->upd1 & HAL_REO_CMD_UPD1_IGNORE_AMPDU_FLG));
+
+ if (cmd->pn_size == 24)
+ cmd->pn_size = HAL_RX_REO_QUEUE_PN_SIZE_24;
+ else if (cmd->pn_size == 48)
+ cmd->pn_size = HAL_RX_REO_QUEUE_PN_SIZE_48;
+ else if (cmd->pn_size == 128)
+ cmd->pn_size = HAL_RX_REO_QUEUE_PN_SIZE_128;
+
+ if (cmd->ba_window_size < 1)
+ cmd->ba_window_size = 1;
+
+ if (cmd->ba_window_size == 1)
+ cmd->ba_window_size++;
+
+ desc->info2 =
+ FIELD_PREP(HAL_REO_UPD_RX_QUEUE_INFO2_BA_WINDOW_SIZE,
+ cmd->ba_window_size - 1) |
+ FIELD_PREP(HAL_REO_UPD_RX_QUEUE_INFO2_PN_SIZE, cmd->pn_size) |
+ FIELD_PREP(HAL_REO_UPD_RX_QUEUE_INFO2_SVLD,
+ !!(cmd->upd2 & HAL_REO_CMD_UPD2_SVLD)) |
+ FIELD_PREP(HAL_REO_UPD_RX_QUEUE_INFO2_SSN,
+ FIELD_GET(HAL_REO_CMD_UPD2_SSN, cmd->upd2)) |
+ FIELD_PREP(HAL_REO_UPD_RX_QUEUE_INFO2_SEQ_2K_ERR,
+ !!(cmd->upd2 & HAL_REO_CMD_UPD2_SEQ_2K_ERR)) |
+ FIELD_PREP(HAL_REO_UPD_RX_QUEUE_INFO2_PN_ERR,
+ !!(cmd->upd2 & HAL_REO_CMD_UPD2_PN_ERR));
+
+ return FIELD_GET(HAL_REO_CMD_HDR_INFO0_CMD_NUMBER, desc->cmd.info0);
+}
+
+int ath11k_hal_reo_cmd_send(struct ath11k_base *ab, struct hal_srng *srng,
+ enum hal_reo_cmd_type type,
+ struct ath11k_hal_reo_cmd *cmd)
+{
+ struct hal_tlv_hdr *reo_desc;
+ int ret;
+
+ spin_lock_bh(&srng->lock);
+
+ ath11k_hal_srng_access_begin(ab, srng);
+ reo_desc = (struct hal_tlv_hdr *)ath11k_hal_srng_src_get_next_entry(ab, srng);
+ if (!reo_desc) {
+ ret = -ENOBUFS;
+ goto out;
+ }
+
+ switch (type) {
+ case HAL_REO_CMD_GET_QUEUE_STATS:
+ ret = ath11k_hal_reo_cmd_queue_stats(reo_desc, cmd);
+ break;
+ case HAL_REO_CMD_FLUSH_CACHE:
+ ret = ath11k_hal_reo_cmd_flush_cache(&ab->hal, reo_desc, cmd);
+ break;
+ case HAL_REO_CMD_UPDATE_RX_QUEUE:
+ ret = ath11k_hal_reo_cmd_update_rx_queue(reo_desc, cmd);
+ break;
+ case HAL_REO_CMD_FLUSH_QUEUE:
+ case HAL_REO_CMD_UNBLOCK_CACHE:
+ case HAL_REO_CMD_FLUSH_TIMEOUT_LIST:
+ ath11k_warn(ab, "Unsupported reo command %d\n", type);
+ ret = -ENOTSUPP;
+ break;
+ default:
+ ath11k_warn(ab, "Unknown reo command %d\n", type);
+ ret = -EINVAL;
+ break;
+ }
+
+ ath11k_dp_shadow_start_timer(ab, srng, &ab->dp.reo_cmd_timer);
+
+out:
+ ath11k_hal_srng_access_end(ab, srng);
+ spin_unlock_bh(&srng->lock);
+
+ return ret;
+}
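+
+/* Usage sketch (editor's illustration, not part of this file): a caller
+ * holding the REO command SRNG and the DMA address of a queue descriptor
+ * could request queue statistics with a status event roughly like this;
+ * 'paddr' and 'srng' are assumed to come from the caller's context.
+ *
+ *	struct ath11k_hal_reo_cmd cmd = {};
+ *
+ *	cmd.addr_lo = lower_32_bits(paddr);
+ *	cmd.addr_hi = upper_32_bits(paddr);
+ *	cmd.flag |= HAL_REO_CMD_FLG_NEED_STATUS;
+ *	cmd_num = ath11k_hal_reo_cmd_send(ab, srng,
+ *					  HAL_REO_CMD_GET_QUEUE_STATS, &cmd);
+ *
+ * The returned command number (or -errno) is what a later TLV on the REO
+ * status ring reports back via ath11k_hal_reo_process_status().
+ */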
+
+void ath11k_hal_rx_buf_addr_info_set(void *desc, dma_addr_t paddr,
+ u32 cookie, u8 manager)
+{
+ struct ath11k_buffer_addr *binfo = (struct ath11k_buffer_addr *)desc;
+ u32 paddr_lo, paddr_hi;
+
+ paddr_lo = lower_32_bits(paddr);
+ paddr_hi = upper_32_bits(paddr);
+ binfo->info0 = FIELD_PREP(BUFFER_ADDR_INFO0_ADDR, paddr_lo);
+ binfo->info1 = FIELD_PREP(BUFFER_ADDR_INFO1_ADDR, paddr_hi) |
+ FIELD_PREP(BUFFER_ADDR_INFO1_SW_COOKIE, cookie) |
+ FIELD_PREP(BUFFER_ADDR_INFO1_RET_BUF_MGR, manager);
+}
+
+void ath11k_hal_rx_buf_addr_info_get(void *desc, dma_addr_t *paddr,
+ u32 *cookie, u8 *rbm)
+{
+ struct ath11k_buffer_addr *binfo = (struct ath11k_buffer_addr *)desc;
+
+ *paddr =
+ (((u64)FIELD_GET(BUFFER_ADDR_INFO1_ADDR, binfo->info1)) << 32) |
+ FIELD_GET(BUFFER_ADDR_INFO0_ADDR, binfo->info0);
+ *cookie = FIELD_GET(BUFFER_ADDR_INFO1_SW_COOKIE, binfo->info1);
+ *rbm = FIELD_GET(BUFFER_ADDR_INFO1_RET_BUF_MGR, binfo->info1);
+}
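+
+/* Round-trip sketch (editor's illustration): the pair above splits a DMA
+ * address across info0/info1 and reassembles it, so for any address and
+ * cookie that fit the field widths:
+ *
+ *	ath11k_hal_rx_buf_addr_info_set(desc, paddr, cookie,
+ *					HAL_RX_BUF_RBM_SW3_BM);
+ *	ath11k_hal_rx_buf_addr_info_get(desc, &paddr_out, &cookie_out, &rbm);
+ *	// now paddr_out == paddr, cookie_out == cookie,
+ *	// rbm == HAL_RX_BUF_RBM_SW3_BM
+ */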
+
+void ath11k_hal_rx_msdu_link_info_get(void *link_desc, u32 *num_msdus,
+ u32 *msdu_cookies,
+ enum hal_rx_buf_return_buf_manager *rbm)
+{
+ struct hal_rx_msdu_link *link = (struct hal_rx_msdu_link *)link_desc;
+ struct hal_rx_msdu_details *msdu;
+ int i;
+
+ *num_msdus = HAL_NUM_RX_MSDUS_PER_LINK_DESC;
+
+ msdu = &link->msdu_link[0];
+ *rbm = FIELD_GET(BUFFER_ADDR_INFO1_RET_BUF_MGR,
+ msdu->buf_addr_info.info1);
+
+ for (i = 0; i < *num_msdus; i++) {
+ msdu = &link->msdu_link[i];
+
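+ /* A zero buffer address marks the end of the valid entries in
+ * this link descriptor.
+ */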
+ if (!FIELD_GET(BUFFER_ADDR_INFO0_ADDR,
+ msdu->buf_addr_info.info0)) {
+ *num_msdus = i;
+ break;
+ }
+ *msdu_cookies = FIELD_GET(BUFFER_ADDR_INFO1_SW_COOKIE,
+ msdu->buf_addr_info.info1);
+ msdu_cookies++;
+ }
+}
+
+int ath11k_hal_desc_reo_parse_err(struct ath11k_base *ab, u32 *rx_desc,
+ dma_addr_t *paddr, u32 *desc_bank)
+{
+ struct hal_reo_dest_ring *desc = (struct hal_reo_dest_ring *)rx_desc;
+ enum hal_reo_dest_ring_push_reason push_reason;
+ enum hal_reo_dest_ring_error_code err_code;
+
+ push_reason = FIELD_GET(HAL_REO_DEST_RING_INFO0_PUSH_REASON,
+ desc->info0);
+ err_code = FIELD_GET(HAL_REO_DEST_RING_INFO0_ERROR_CODE,
+ desc->info0);
+ ab->soc_stats.reo_error[err_code]++;
+
+ if (push_reason != HAL_REO_DEST_RING_PUSH_REASON_ERR_DETECTED &&
+ push_reason != HAL_REO_DEST_RING_PUSH_REASON_ROUTING_INSTRUCTION) {
+ ath11k_warn(ab, "expected error push reason code, received %d\n",
+ push_reason);
+ return -EINVAL;
+ }
+
+ if (FIELD_GET(HAL_REO_DEST_RING_INFO0_BUFFER_TYPE, desc->info0) !=
+ HAL_REO_DEST_RING_BUFFER_TYPE_LINK_DESC) {
+ ath11k_warn(ab, "expected buffer type link_desc");
+ return -EINVAL;
+ }
+
+ ath11k_hal_rx_reo_ent_paddr_get(ab, rx_desc, paddr, desc_bank);
+
+ return 0;
+}
+
+int ath11k_hal_wbm_desc_parse_err(struct ath11k_base *ab, void *desc,
+ struct hal_rx_wbm_rel_info *rel_info)
+{
+ struct hal_wbm_release_ring *wbm_desc = desc;
+ enum hal_wbm_rel_desc_type type;
+ enum hal_wbm_rel_src_module rel_src;
+ enum hal_rx_buf_return_buf_manager ret_buf_mgr;
+
+ type = FIELD_GET(HAL_WBM_RELEASE_INFO0_DESC_TYPE,
+ wbm_desc->info0);
+ /* Only the released-MSDU buffer type is expected here */
+ if (type != HAL_WBM_REL_DESC_TYPE_REL_MSDU) {
+ WARN_ON(1);
+ return -EINVAL;
+ }
+
+ rel_src = FIELD_GET(HAL_WBM_RELEASE_INFO0_REL_SRC_MODULE,
+ wbm_desc->info0);
+ if (rel_src != HAL_WBM_REL_SRC_MODULE_RXDMA &&
+ rel_src != HAL_WBM_REL_SRC_MODULE_REO)
+ return -EINVAL;
+
+ ret_buf_mgr = FIELD_GET(BUFFER_ADDR_INFO1_RET_BUF_MGR,
+ wbm_desc->buf_addr_info.info1);
+ if (ret_buf_mgr != HAL_RX_BUF_RBM_SW3_BM) {
+ ab->soc_stats.invalid_rbm++;
+ return -EINVAL;
+ }
+
+ rel_info->cookie = FIELD_GET(BUFFER_ADDR_INFO1_SW_COOKIE,
+ wbm_desc->buf_addr_info.info1);
+ rel_info->err_rel_src = rel_src;
+ if (rel_src == HAL_WBM_REL_SRC_MODULE_REO) {
+ rel_info->push_reason =
+ FIELD_GET(HAL_WBM_RELEASE_INFO0_REO_PUSH_REASON,
+ wbm_desc->info0);
+ rel_info->err_code =
+ FIELD_GET(HAL_WBM_RELEASE_INFO0_REO_ERROR_CODE,
+ wbm_desc->info0);
+ } else {
+ rel_info->push_reason =
+ FIELD_GET(HAL_WBM_RELEASE_INFO0_RXDMA_PUSH_REASON,
+ wbm_desc->info0);
+ rel_info->err_code =
+ FIELD_GET(HAL_WBM_RELEASE_INFO0_RXDMA_ERROR_CODE,
+ wbm_desc->info0);
+ }
+
+ rel_info->first_msdu = FIELD_GET(HAL_WBM_RELEASE_INFO2_FIRST_MSDU,
+ wbm_desc->info2);
+ rel_info->last_msdu = FIELD_GET(HAL_WBM_RELEASE_INFO2_LAST_MSDU,
+ wbm_desc->info2);
+ return 0;
+}
+
+void ath11k_hal_rx_reo_ent_paddr_get(struct ath11k_base *ab, void *desc,
+ dma_addr_t *paddr, u32 *desc_bank)
+{
+ struct ath11k_buffer_addr *buff_addr = desc;
+
+ *paddr = ((u64)(FIELD_GET(BUFFER_ADDR_INFO1_ADDR, buff_addr->info1)) << 32) |
+ FIELD_GET(BUFFER_ADDR_INFO0_ADDR, buff_addr->info0);
+
+ *desc_bank = FIELD_GET(BUFFER_ADDR_INFO1_SW_COOKIE, buff_addr->info1);
+}
+
+void ath11k_hal_rx_msdu_link_desc_set(struct ath11k_base *ab, void *desc,
+ void *link_desc,
+ enum hal_wbm_rel_bm_act action)
+{
+ struct hal_wbm_release_ring *dst_desc = desc;
+ struct hal_wbm_release_ring *src_desc = link_desc;
+
+ dst_desc->buf_addr_info = src_desc->buf_addr_info;
+ dst_desc->info0 |= FIELD_PREP(HAL_WBM_RELEASE_INFO0_REL_SRC_MODULE,
+ HAL_WBM_REL_SRC_MODULE_SW) |
+ FIELD_PREP(HAL_WBM_RELEASE_INFO0_BM_ACTION, action) |
+ FIELD_PREP(HAL_WBM_RELEASE_INFO0_DESC_TYPE,
+ HAL_WBM_REL_DESC_TYPE_MSDU_LINK);
+}
+
+void ath11k_hal_reo_status_queue_stats(struct ath11k_base *ab, u32 *reo_desc,
+ struct hal_reo_status *status)
+{
+ struct hal_tlv_hdr *tlv = (struct hal_tlv_hdr *)reo_desc;
+ struct hal_reo_get_queue_stats_status *desc =
+ (struct hal_reo_get_queue_stats_status *)tlv->value;
+
+ status->uniform_hdr.cmd_num =
+ FIELD_GET(HAL_REO_STATUS_HDR_INFO0_STATUS_NUM,
+ desc->hdr.info0);
+ status->uniform_hdr.cmd_status =
+ FIELD_GET(HAL_REO_STATUS_HDR_INFO0_EXEC_STATUS,
+ desc->hdr.info0);
+
+ ath11k_dbg(ab, ATH11K_DBG_HAL, "Queue stats status:\n");
+ ath11k_dbg(ab, ATH11K_DBG_HAL, "header: cmd_num %d status %d\n",
+ status->uniform_hdr.cmd_num,
+ status->uniform_hdr.cmd_status);
+ ath11k_dbg(ab, ATH11K_DBG_HAL, "ssn %ld cur_idx %ld\n",
+ FIELD_GET(HAL_REO_GET_QUEUE_STATS_STATUS_INFO0_SSN,
+ desc->info0),
+ FIELD_GET(HAL_REO_GET_QUEUE_STATS_STATUS_INFO0_CUR_IDX,
+ desc->info0));
+ ath11k_dbg(ab, ATH11K_DBG_HAL, "pn = [%08x, %08x, %08x, %08x]\n",
+ desc->pn[0], desc->pn[1], desc->pn[2], desc->pn[3]);
+ ath11k_dbg(ab, ATH11K_DBG_HAL,
+ "last_rx: enqueue_tstamp %08x dequeue_tstamp %08x\n",
+ desc->last_rx_enqueue_timestamp,
+ desc->last_rx_dequeue_timestamp);
+ ath11k_dbg(ab, ATH11K_DBG_HAL,
+ "rx_bitmap [%08x %08x %08x %08x %08x %08x %08x %08x]\n",
+ desc->rx_bitmap[0], desc->rx_bitmap[1], desc->rx_bitmap[2],
+ desc->rx_bitmap[3], desc->rx_bitmap[4], desc->rx_bitmap[5],
+ desc->rx_bitmap[6], desc->rx_bitmap[7]);
+ ath11k_dbg(ab, ATH11K_DBG_HAL, "count: cur_mpdu %ld cur_msdu %ld\n",
+ FIELD_GET(HAL_REO_GET_QUEUE_STATS_STATUS_INFO1_MPDU_COUNT,
+ desc->info1),
+ FIELD_GET(HAL_REO_GET_QUEUE_STATS_STATUS_INFO1_MSDU_COUNT,
+ desc->info1));
+ ath11k_dbg(ab, ATH11K_DBG_HAL, "fwd_timeout %ld fwd_bar %ld dup_count %ld\n",
+ FIELD_GET(HAL_REO_GET_QUEUE_STATS_STATUS_INFO2_TIMEOUT_COUNT,
+ desc->info2),
+ FIELD_GET(HAL_REO_GET_QUEUE_STATS_STATUS_INFO2_FDTB_COUNT,
+ desc->info2),
+ FIELD_GET(HAL_REO_GET_QUEUE_STATS_STATUS_INFO2_DUPLICATE_COUNT,
+ desc->info2));
+ ath11k_dbg(ab, ATH11K_DBG_HAL, "frames_in_order %ld bar_rcvd %ld\n",
+ FIELD_GET(HAL_REO_GET_QUEUE_STATS_STATUS_INFO3_FIO_COUNT,
+ desc->info3),
+ FIELD_GET(HAL_REO_GET_QUEUE_STATS_STATUS_INFO3_BAR_RCVD_CNT,
+ desc->info3));
+ ath11k_dbg(ab, ATH11K_DBG_HAL, "num_mpdus %d num_msdus %d total_bytes %d\n",
+ desc->num_mpdu_frames, desc->num_msdu_frames,
+ desc->total_bytes);
+ ath11k_dbg(ab, ATH11K_DBG_HAL, "late_rcvd %ld win_jump_2k %ld hole_cnt %ld\n",
+ FIELD_GET(HAL_REO_GET_QUEUE_STATS_STATUS_INFO4_LATE_RX_MPDU,
+ desc->info4),
+ FIELD_GET(HAL_REO_GET_QUEUE_STATS_STATUS_INFO4_WINDOW_JMP2K,
+ desc->info4),
+ FIELD_GET(HAL_REO_GET_QUEUE_STATS_STATUS_INFO4_HOLE_COUNT,
+ desc->info4));
+ ath11k_dbg(ab, ATH11K_DBG_HAL, "looping count %ld\n",
+ FIELD_GET(HAL_REO_GET_QUEUE_STATS_STATUS_INFO5_LOOPING_CNT,
+ desc->info5));
+}
+
+int ath11k_hal_reo_process_status(u8 *reo_desc, u8 *status)
+{
+ struct hal_tlv_hdr *tlv = (struct hal_tlv_hdr *)reo_desc;
+ struct hal_reo_status_hdr *hdr;
+
+ hdr = (struct hal_reo_status_hdr *)tlv->value;
+ *status = FIELD_GET(HAL_REO_STATUS_HDR_INFO0_EXEC_STATUS, hdr->info0);
+
+ return FIELD_GET(HAL_REO_STATUS_HDR_INFO0_STATUS_NUM, hdr->info0);
+}
+
+void ath11k_hal_reo_flush_queue_status(struct ath11k_base *ab, u32 *reo_desc,
+ struct hal_reo_status *status)
+{
+ struct hal_tlv_hdr *tlv = (struct hal_tlv_hdr *)reo_desc;
+ struct hal_reo_flush_queue_status *desc =
+ (struct hal_reo_flush_queue_status *)tlv->value;
+
+ status->uniform_hdr.cmd_num =
+ FIELD_GET(HAL_REO_STATUS_HDR_INFO0_STATUS_NUM,
+ desc->hdr.info0);
+ status->uniform_hdr.cmd_status =
+ FIELD_GET(HAL_REO_STATUS_HDR_INFO0_EXEC_STATUS,
+ desc->hdr.info0);
+ status->u.flush_queue.err_detected =
+ FIELD_GET(HAL_REO_FLUSH_QUEUE_INFO0_ERR_DETECTED,
+ desc->info0);
+}
+
+void ath11k_hal_reo_flush_cache_status(struct ath11k_base *ab, u32 *reo_desc,
+ struct hal_reo_status *status)
+{
+ struct ath11k_hal *hal = &ab->hal;
+ struct hal_tlv_hdr *tlv = (struct hal_tlv_hdr *)reo_desc;
+ struct hal_reo_flush_cache_status *desc =
+ (struct hal_reo_flush_cache_status *)tlv->value;
+
+ status->uniform_hdr.cmd_num =
+ FIELD_GET(HAL_REO_STATUS_HDR_INFO0_STATUS_NUM,
+ desc->hdr.info0);
+ status->uniform_hdr.cmd_status =
+ FIELD_GET(HAL_REO_STATUS_HDR_INFO0_EXEC_STATUS,
+ desc->hdr.info0);
+
+ status->u.flush_cache.err_detected =
+ FIELD_GET(HAL_REO_FLUSH_CACHE_STATUS_INFO0_IS_ERR,
+ desc->info0);
+ status->u.flush_cache.err_code =
+ FIELD_GET(HAL_REO_FLUSH_CACHE_STATUS_INFO0_BLOCK_ERR_CODE,
+ desc->info0);
+ if (!status->u.flush_cache.err_code)
+ hal->avail_blk_resource |= BIT(hal->current_blk_index);
+
+ status->u.flush_cache.cache_controller_flush_status_hit =
+ FIELD_GET(HAL_REO_FLUSH_CACHE_STATUS_INFO0_FLUSH_STATUS_HIT,
+ desc->info0);
+
+ status->u.flush_cache.cache_controller_flush_status_desc_type =
+ FIELD_GET(HAL_REO_FLUSH_CACHE_STATUS_INFO0_FLUSH_DESC_TYPE,
+ desc->info0);
+ status->u.flush_cache.cache_controller_flush_status_client_id =
+ FIELD_GET(HAL_REO_FLUSH_CACHE_STATUS_INFO0_FLUSH_CLIENT_ID,
+ desc->info0);
+ status->u.flush_cache.cache_controller_flush_status_err =
+ FIELD_GET(HAL_REO_FLUSH_CACHE_STATUS_INFO0_FLUSH_ERR,
+ desc->info0);
+ status->u.flush_cache.cache_controller_flush_status_cnt =
+ FIELD_GET(HAL_REO_FLUSH_CACHE_STATUS_INFO0_FLUSH_COUNT,
+ desc->info0);
+}
+
+void ath11k_hal_reo_unblk_cache_status(struct ath11k_base *ab, u32 *reo_desc,
+ struct hal_reo_status *status)
+{
+ struct ath11k_hal *hal = &ab->hal;
+ struct hal_tlv_hdr *tlv = (struct hal_tlv_hdr *)reo_desc;
+ struct hal_reo_unblock_cache_status *desc =
+ (struct hal_reo_unblock_cache_status *)tlv->value;
+
+ status->uniform_hdr.cmd_num =
+ FIELD_GET(HAL_REO_STATUS_HDR_INFO0_STATUS_NUM,
+ desc->hdr.info0);
+ status->uniform_hdr.cmd_status =
+ FIELD_GET(HAL_REO_STATUS_HDR_INFO0_EXEC_STATUS,
+ desc->hdr.info0);
+
+ status->u.unblock_cache.err_detected =
+ FIELD_GET(HAL_REO_UNBLOCK_CACHE_STATUS_INFO0_IS_ERR,
+ desc->info0);
+ status->u.unblock_cache.unblock_type =
+ FIELD_GET(HAL_REO_UNBLOCK_CACHE_STATUS_INFO0_TYPE,
+ desc->info0);
+
+ if (!status->u.unblock_cache.err_detected &&
+ status->u.unblock_cache.unblock_type ==
+ HAL_REO_STATUS_UNBLOCK_BLOCKING_RESOURCE)
+ hal->avail_blk_resource &= ~BIT(hal->current_blk_index);
+}
+
+void ath11k_hal_reo_flush_timeout_list_status(struct ath11k_base *ab,
+ u32 *reo_desc,
+ struct hal_reo_status *status)
+{
+ struct hal_tlv_hdr *tlv = (struct hal_tlv_hdr *)reo_desc;
+ struct hal_reo_flush_timeout_list_status *desc =
+ (struct hal_reo_flush_timeout_list_status *)tlv->value;
+
+ status->uniform_hdr.cmd_num =
+ FIELD_GET(HAL_REO_STATUS_HDR_INFO0_STATUS_NUM,
+ desc->hdr.info0);
+ status->uniform_hdr.cmd_status =
+ FIELD_GET(HAL_REO_STATUS_HDR_INFO0_EXEC_STATUS,
+ desc->hdr.info0);
+
+ status->u.timeout_list.err_detected =
+ FIELD_GET(HAL_REO_FLUSH_TIMEOUT_STATUS_INFO0_IS_ERR,
+ desc->info0);
+ status->u.timeout_list.list_empty =
+ FIELD_GET(HAL_REO_FLUSH_TIMEOUT_STATUS_INFO0_LIST_EMPTY,
+ desc->info0);
+
+ status->u.timeout_list.release_desc_cnt =
+ FIELD_GET(HAL_REO_FLUSH_TIMEOUT_STATUS_INFO1_REL_DESC_COUNT,
+ desc->info1);
+ status->u.timeout_list.fwd_buf_cnt =
+ FIELD_GET(HAL_REO_FLUSH_TIMEOUT_STATUS_INFO1_FWD_BUF_COUNT,
+ desc->info1);
+}
+
+void ath11k_hal_reo_desc_thresh_reached_status(struct ath11k_base *ab,
+ u32 *reo_desc,
+ struct hal_reo_status *status)
+{
+ struct hal_tlv_hdr *tlv = (struct hal_tlv_hdr *)reo_desc;
+ struct hal_reo_desc_thresh_reached_status *desc =
+ (struct hal_reo_desc_thresh_reached_status *)tlv->value;
+
+ status->uniform_hdr.cmd_num =
+ FIELD_GET(HAL_REO_STATUS_HDR_INFO0_STATUS_NUM,
+ desc->hdr.info0);
+ status->uniform_hdr.cmd_status =
+ FIELD_GET(HAL_REO_STATUS_HDR_INFO0_EXEC_STATUS,
+ desc->hdr.info0);
+
+ status->u.desc_thresh_reached.threshold_idx =
+ FIELD_GET(HAL_REO_DESC_THRESH_STATUS_INFO0_THRESH_INDEX,
+ desc->info0);
+
+ status->u.desc_thresh_reached.link_desc_counter0 =
+ FIELD_GET(HAL_REO_DESC_THRESH_STATUS_INFO1_LINK_DESC_COUNTER0,
+ desc->info1);
+
+ status->u.desc_thresh_reached.link_desc_counter1 =
+ FIELD_GET(HAL_REO_DESC_THRESH_STATUS_INFO2_LINK_DESC_COUNTER1,
+ desc->info2);
+
+ status->u.desc_thresh_reached.link_desc_counter2 =
+ FIELD_GET(HAL_REO_DESC_THRESH_STATUS_INFO3_LINK_DESC_COUNTER2,
+ desc->info3);
+
+ status->u.desc_thresh_reached.link_desc_counter_sum =
+ FIELD_GET(HAL_REO_DESC_THRESH_STATUS_INFO4_LINK_DESC_COUNTER_SUM,
+ desc->info4);
+}
+
+void ath11k_hal_reo_update_rx_reo_queue_status(struct ath11k_base *ab,
+ u32 *reo_desc,
+ struct hal_reo_status *status)
+{
+ struct hal_tlv_hdr *tlv = (struct hal_tlv_hdr *)reo_desc;
+ struct hal_reo_status_hdr *desc =
+ (struct hal_reo_status_hdr *)tlv->value;
+
+ status->uniform_hdr.cmd_num =
+ FIELD_GET(HAL_REO_STATUS_HDR_INFO0_STATUS_NUM,
+ desc->info0);
+ status->uniform_hdr.cmd_status =
+ FIELD_GET(HAL_REO_STATUS_HDR_INFO0_EXEC_STATUS,
+ desc->info0);
+}
+
+u32 ath11k_hal_reo_qdesc_size(u32 ba_window_size, u8 tid)
+{
+ u32 num_ext_desc;
+
+ if (ba_window_size <= 1) {
+ if (tid != HAL_DESC_REO_NON_QOS_TID)
+ num_ext_desc = 1;
+ else
+ num_ext_desc = 0;
+ } else if (ba_window_size <= 105) {
+ num_ext_desc = 1;
+ } else if (ba_window_size <= 210) {
+ num_ext_desc = 2;
+ } else {
+ num_ext_desc = 3;
+ }
+
+ return sizeof(struct hal_rx_reo_queue) +
+ (num_ext_desc * sizeof(struct hal_rx_reo_queue_ext));
+}
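+
+/* Worked example: a QoS TID with the common BA window size of 64 lands in
+ * the "<= 105" bucket above, so the queue needs one extension descriptor:
+ * sizeof(struct hal_rx_reo_queue) + sizeof(struct hal_rx_reo_queue_ext).
+ * A non-QoS TID with window size 1 needs no extension descriptor at all.
+ */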
+
+void ath11k_hal_reo_qdesc_setup(void *vaddr, int tid, u32 ba_window_size,
+ u32 start_seq, enum hal_pn_type type)
+{
+ struct hal_rx_reo_queue *qdesc = (struct hal_rx_reo_queue *)vaddr;
+ struct hal_rx_reo_queue_ext *ext_desc;
+
+ memset(qdesc, 0, sizeof(*qdesc));
+
+ ath11k_hal_reo_set_desc_hdr(&qdesc->desc_hdr, HAL_DESC_REO_OWNED,
+ HAL_DESC_REO_QUEUE_DESC,
+ REO_QUEUE_DESC_MAGIC_DEBUG_PATTERN_0);
+
+ qdesc->rx_queue_num = FIELD_PREP(HAL_RX_REO_QUEUE_RX_QUEUE_NUMBER, tid);
+
+ qdesc->info0 =
+ FIELD_PREP(HAL_RX_REO_QUEUE_INFO0_VLD, 1) |
+ FIELD_PREP(HAL_RX_REO_QUEUE_INFO0_ASSOC_LNK_DESC_COUNTER, 1) |
+ FIELD_PREP(HAL_RX_REO_QUEUE_INFO0_AC, ath11k_tid_to_ac(tid));
+
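+ /* Clamp to a minimum window of 1; QoS TIDs are then bumped to at
+ * least 2, so only the non-QoS TID keeps a window of 1 and gets the
+ * retry bit set below.
+ */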
+ if (ba_window_size < 1)
+ ba_window_size = 1;
+
+ if (ba_window_size == 1 && tid != HAL_DESC_REO_NON_QOS_TID)
+ ba_window_size++;
+
+ if (ba_window_size == 1)
+ qdesc->info0 |= FIELD_PREP(HAL_RX_REO_QUEUE_INFO0_RETRY, 1);
+
+ qdesc->info0 |= FIELD_PREP(HAL_RX_REO_QUEUE_INFO0_BA_WINDOW_SIZE,
+ ba_window_size - 1);
+ switch (type) {
+ case HAL_PN_TYPE_NONE:
+ case HAL_PN_TYPE_WAPI_EVEN:
+ case HAL_PN_TYPE_WAPI_UNEVEN:
+ break;
+ case HAL_PN_TYPE_WPA:
+ qdesc->info0 |=
+ FIELD_PREP(HAL_RX_REO_QUEUE_INFO0_PN_CHECK, 1) |
+ FIELD_PREP(HAL_RX_REO_QUEUE_INFO0_PN_SIZE,
+ HAL_RX_REO_QUEUE_PN_SIZE_48);
+ break;
+ }
+
+ /* TODO: Set the ignore-AMPDU flag based on BA window size and/or
+ * AMPDU capabilities
+ */
+ qdesc->info0 |= FIELD_PREP(HAL_RX_REO_QUEUE_INFO0_IGNORE_AMPDU_FLG, 1);
+
+ qdesc->info1 |= FIELD_PREP(HAL_RX_REO_QUEUE_INFO1_SVLD, 0);
+
+ if (start_seq <= 0xfff)
+ qdesc->info1 = FIELD_PREP(HAL_RX_REO_QUEUE_INFO1_SSN,
+ start_seq);
+
+ if (tid == HAL_DESC_REO_NON_QOS_TID)
+ return;
+
+ ext_desc = qdesc->ext_desc;
+
+ /* TODO: HW queue descriptors are currently allocated for the max BA
+ * window size for all QoS TIDs so that the same descriptor can be
+ * reused later when an ADDBA request is received. This should be
+ * changed to allocate HW queue descriptors based on the negotiated
+ * BA window size (0 for non-BA cases), reallocate when the BA window
+ * size changes, and also send a WMI message to the FW to update the
+ * REO queue descriptor in the Rx peer entry as part of
+ * dp_rx_tid_update.
+ */
+ memset(ext_desc, 0, sizeof(*ext_desc));
+ ath11k_hal_reo_set_desc_hdr(&ext_desc->desc_hdr, HAL_DESC_REO_OWNED,
+ HAL_DESC_REO_QUEUE_EXT_DESC,
+ REO_QUEUE_DESC_MAGIC_DEBUG_PATTERN_1);
+ ext_desc++;
+ memset(ext_desc, 0, sizeof(*ext_desc));
+ ath11k_hal_reo_set_desc_hdr(&ext_desc->desc_hdr, HAL_DESC_REO_OWNED,
+ HAL_DESC_REO_QUEUE_EXT_DESC,
+ REO_QUEUE_DESC_MAGIC_DEBUG_PATTERN_2);
+ ext_desc++;
+ memset(ext_desc, 0, sizeof(*ext_desc));
+ ath11k_hal_reo_set_desc_hdr(&ext_desc->desc_hdr, HAL_DESC_REO_OWNED,
+ HAL_DESC_REO_QUEUE_EXT_DESC,
+ REO_QUEUE_DESC_MAGIC_DEBUG_PATTERN_3);
+}
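+
+/* Typical pairing (editor's sketch of how the DP layer is expected to use
+ * these helpers): size the descriptor first, then initialize the aligned
+ * buffer in place.
+ *
+ *	hw_desc_sz = ath11k_hal_reo_qdesc_size(ba_win_sz, tid);
+ *	vaddr = kzalloc(hw_desc_sz + HAL_LINK_DESC_ALIGN - 1, GFP_ATOMIC);
+ *	vaddr_aligned = PTR_ALIGN(vaddr, HAL_LINK_DESC_ALIGN);
+ *	ath11k_hal_reo_qdesc_setup(vaddr_aligned, tid, ba_win_sz, ssn,
+ *				   pn_type);
+ */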
+
+void ath11k_hal_reo_init_cmd_ring(struct ath11k_base *ab,
+ struct hal_srng *srng)
+{
+ struct hal_srng_params params;
+ struct hal_tlv_hdr *tlv;
+ struct hal_reo_get_queue_stats *desc;
+ int i, cmd_num = 1;
+ int entry_size;
+ u8 *entry;
+
+ memset(&params, 0, sizeof(params));
+
+ entry_size = ath11k_hal_srng_get_entrysize(ab, HAL_REO_CMD);
+ ath11k_hal_srng_get_params(ab, srng, &params);
+ entry = (u8 *)params.ring_base_vaddr;
+
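+ /* Pre-program every entry with an increasing command number so that
+ * a REO status can later be correlated with the command that
+ * produced it.
+ */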
+ for (i = 0; i < params.num_entries; i++) {
+ tlv = (struct hal_tlv_hdr *)entry;
+ desc = (struct hal_reo_get_queue_stats *)tlv->value;
+ desc->cmd.info0 =
+ FIELD_PREP(HAL_REO_CMD_HDR_INFO0_CMD_NUMBER, cmd_num++);
+ entry += entry_size;
+ }
+}
+
+#define HAL_MAX_UL_MU_USERS 37
+static inline void
+ath11k_hal_rx_handle_ofdma_info(void *rx_tlv,
+ struct hal_rx_user_status *rx_user_status)
+{
+ struct hal_rx_ppdu_end_user_stats *ppdu_end_user =
+ (struct hal_rx_ppdu_end_user_stats *)rx_tlv;
+
+ rx_user_status->ul_ofdma_user_v0_word0 = __le32_to_cpu(ppdu_end_user->info6);
+
+ rx_user_status->ul_ofdma_user_v0_word1 = __le32_to_cpu(ppdu_end_user->rsvd2[10]);
+}
+
+static inline void
+ath11k_hal_rx_populate_byte_count(void *rx_tlv, void *ppduinfo,
+ struct hal_rx_user_status *rx_user_status)
+{
+ struct hal_rx_ppdu_end_user_stats *ppdu_end_user =
+ (struct hal_rx_ppdu_end_user_stats *)rx_tlv;
+
+ rx_user_status->mpdu_ok_byte_count =
+ FIELD_GET(HAL_RX_PPDU_END_USER_STATS_RSVD2_6_MPDU_OK_BYTE_COUNT,
+ __le32_to_cpu(ppdu_end_user->rsvd2[6]));
+ rx_user_status->mpdu_err_byte_count =
+ FIELD_GET(HAL_RX_PPDU_END_USER_STATS_RSVD2_8_MPDU_ERR_BYTE_COUNT,
+ __le32_to_cpu(ppdu_end_user->rsvd2[8]));
+}
+
+static inline void
+ath11k_hal_rx_populate_mu_user_info(void *rx_tlv, struct hal_rx_mon_ppdu_info *ppdu_info,
+ struct hal_rx_user_status *rx_user_status)
+{
+ rx_user_status->ast_index = ppdu_info->ast_index;
+ rx_user_status->tid = ppdu_info->tid;
+ rx_user_status->tcp_msdu_count =
+ ppdu_info->tcp_msdu_count;
+ rx_user_status->udp_msdu_count =
+ ppdu_info->udp_msdu_count;
+ rx_user_status->other_msdu_count =
+ ppdu_info->other_msdu_count;
+ rx_user_status->frame_control = ppdu_info->frame_control;
+ rx_user_status->frame_control_info_valid =
+ ppdu_info->frame_control_info_valid;
+ rx_user_status->data_sequence_control_info_valid =
+ ppdu_info->data_sequence_control_info_valid;
+ rx_user_status->first_data_seq_ctrl =
+ ppdu_info->first_data_seq_ctrl;
+ rx_user_status->preamble_type = ppdu_info->preamble_type;
+ rx_user_status->ht_flags = ppdu_info->ht_flags;
+ rx_user_status->vht_flags = ppdu_info->vht_flags;
+ rx_user_status->he_flags = ppdu_info->he_flags;
+ rx_user_status->rs_flags = ppdu_info->rs_flags;
+
+ rx_user_status->mpdu_cnt_fcs_ok =
+ ppdu_info->num_mpdu_fcs_ok;
+ rx_user_status->mpdu_cnt_fcs_err =
+ ppdu_info->num_mpdu_fcs_err;
+
+ ath11k_hal_rx_populate_byte_count(rx_tlv, ppdu_info, rx_user_status);
+}
+
+static u16 ath11k_hal_rx_mpduinfo_get_peerid(struct ath11k_base *ab,
+ struct hal_rx_mpdu_info *mpdu_info)
+{
+ return ab->hw_params.hw_ops->mpdu_info_get_peerid(mpdu_info);
+}
+
+static enum hal_rx_mon_status
+ath11k_hal_rx_parse_mon_status_tlv(struct ath11k_base *ab,
+ struct hal_rx_mon_ppdu_info *ppdu_info,
+ u32 tlv_tag, u8 *tlv_data, u32 userid)
+{
+ u32 info0, info1, value;
+ u8 he_dcm = 0, he_stbc = 0;
+ u16 he_gi = 0, he_ltf = 0;
+
+ switch (tlv_tag) {
+ case HAL_RX_PPDU_START: {
+ struct hal_rx_ppdu_start *ppdu_start =
+ (struct hal_rx_ppdu_start *)tlv_data;
+
+ ppdu_info->ppdu_id =
+ FIELD_GET(HAL_RX_PPDU_START_INFO0_PPDU_ID,
+ __le32_to_cpu(ppdu_start->info0));
+ ppdu_info->chan_num = __le32_to_cpu(ppdu_start->chan_num);
+ ppdu_info->ppdu_ts = __le32_to_cpu(ppdu_start->ppdu_start_ts);
+ break;
+ }
+ case HAL_RX_PPDU_END_USER_STATS: {
+ struct hal_rx_ppdu_end_user_stats *eu_stats =
+ (struct hal_rx_ppdu_end_user_stats *)tlv_data;
+
+ info0 = __le32_to_cpu(eu_stats->info0);
+ info1 = __le32_to_cpu(eu_stats->info1);
+
+ ppdu_info->ast_index =
+ FIELD_GET(HAL_RX_PPDU_END_USER_STATS_INFO2_AST_INDEX,
+ __le32_to_cpu(eu_stats->info2));
+ ppdu_info->tid =
+ ffs(FIELD_GET(HAL_RX_PPDU_END_USER_STATS_INFO6_TID_BITMAP,
+ __le32_to_cpu(eu_stats->info6))) - 1;
+ ppdu_info->tcp_msdu_count =
+ FIELD_GET(HAL_RX_PPDU_END_USER_STATS_INFO4_TCP_MSDU_CNT,
+ __le32_to_cpu(eu_stats->info4));
+ ppdu_info->udp_msdu_count =
+ FIELD_GET(HAL_RX_PPDU_END_USER_STATS_INFO4_UDP_MSDU_CNT,
+ __le32_to_cpu(eu_stats->info4));
+ ppdu_info->other_msdu_count =
+ FIELD_GET(HAL_RX_PPDU_END_USER_STATS_INFO5_OTHER_MSDU_CNT,
+ __le32_to_cpu(eu_stats->info5));
+ ppdu_info->tcp_ack_msdu_count =
+ FIELD_GET(HAL_RX_PPDU_END_USER_STATS_INFO5_TCP_ACK_MSDU_CNT,
+ __le32_to_cpu(eu_stats->info5));
+ ppdu_info->preamble_type =
+ FIELD_GET(HAL_RX_PPDU_END_USER_STATS_INFO1_PKT_TYPE, info1);
+ ppdu_info->num_mpdu_fcs_ok =
+ FIELD_GET(HAL_RX_PPDU_END_USER_STATS_INFO1_MPDU_CNT_FCS_OK,
+ info1);
+ ppdu_info->num_mpdu_fcs_err =
+ FIELD_GET(HAL_RX_PPDU_END_USER_STATS_INFO0_MPDU_CNT_FCS_ERR,
+ info0);
+ switch (ppdu_info->preamble_type) {
+ case HAL_RX_PREAMBLE_11N:
+ ppdu_info->ht_flags = 1;
+ break;
+ case HAL_RX_PREAMBLE_11AC:
+ ppdu_info->vht_flags = 1;
+ break;
+ case HAL_RX_PREAMBLE_11AX:
+ ppdu_info->he_flags = 1;
+ break;
+ default:
+ break;
+ }
+
+ if (userid < HAL_MAX_UL_MU_USERS) {
+ struct hal_rx_user_status *rxuser_stats =
+ &ppdu_info->userstats;
+
+ ath11k_hal_rx_handle_ofdma_info(tlv_data, rxuser_stats);
+ ath11k_hal_rx_populate_mu_user_info(tlv_data, ppdu_info,
+ rxuser_stats);
+ }
+ ppdu_info->userstats.mpdu_fcs_ok_bitmap[0] =
+ __le32_to_cpu(eu_stats->rsvd1[0]);
+ ppdu_info->userstats.mpdu_fcs_ok_bitmap[1] =
+ __le32_to_cpu(eu_stats->rsvd1[1]);
+
+ break;
+ }
+ case HAL_RX_PPDU_END_USER_STATS_EXT: {
+ struct hal_rx_ppdu_end_user_stats_ext *eu_stats =
+ (struct hal_rx_ppdu_end_user_stats_ext *)tlv_data;
+ ppdu_info->userstats.mpdu_fcs_ok_bitmap[2] = eu_stats->info1;
+ ppdu_info->userstats.mpdu_fcs_ok_bitmap[3] = eu_stats->info2;
+ ppdu_info->userstats.mpdu_fcs_ok_bitmap[4] = eu_stats->info3;
+ ppdu_info->userstats.mpdu_fcs_ok_bitmap[5] = eu_stats->info4;
+ ppdu_info->userstats.mpdu_fcs_ok_bitmap[6] = eu_stats->info5;
+ ppdu_info->userstats.mpdu_fcs_ok_bitmap[7] = eu_stats->info6;
+ break;
+ }
+ case HAL_PHYRX_HT_SIG: {
+ struct hal_rx_ht_sig_info *ht_sig =
+ (struct hal_rx_ht_sig_info *)tlv_data;
+
+ info0 = __le32_to_cpu(ht_sig->info0);
+ info1 = __le32_to_cpu(ht_sig->info1);
+
+ ppdu_info->mcs = FIELD_GET(HAL_RX_HT_SIG_INFO_INFO0_MCS, info0);
+ ppdu_info->bw = FIELD_GET(HAL_RX_HT_SIG_INFO_INFO0_BW, info0);
+ ppdu_info->is_stbc = FIELD_GET(HAL_RX_HT_SIG_INFO_INFO1_STBC,
+ info1);
+ ppdu_info->ldpc = FIELD_GET(HAL_RX_HT_SIG_INFO_INFO1_FEC_CODING, info1);
+ ppdu_info->gi = info1 & HAL_RX_HT_SIG_INFO_INFO1_GI;
+
+ switch (ppdu_info->mcs) {
+ case 0 ... 7:
+ ppdu_info->nss = 1;
+ break;
+ case 8 ... 15:
+ ppdu_info->nss = 2;
+ break;
+ case 16 ... 23:
+ ppdu_info->nss = 3;
+ break;
+ case 24 ... 31:
+ ppdu_info->nss = 4;
+ break;
+ }
+
+ if (ppdu_info->nss > 1)
+ ppdu_info->mcs = ppdu_info->mcs % 8;
+
+ ppdu_info->reception_type = HAL_RX_RECEPTION_TYPE_SU;
+ break;
+ }
+ case HAL_PHYRX_L_SIG_B: {
+ struct hal_rx_lsig_b_info *lsigb =
+ (struct hal_rx_lsig_b_info *)tlv_data;
+
+ ppdu_info->rate = FIELD_GET(HAL_RX_LSIG_B_INFO_INFO0_RATE,
+ __le32_to_cpu(lsigb->info0));
+ ppdu_info->reception_type = HAL_RX_RECEPTION_TYPE_SU;
+ break;
+ }
+ case HAL_PHYRX_L_SIG_A: {
+ struct hal_rx_lsig_a_info *lsiga =
+ (struct hal_rx_lsig_a_info *)tlv_data;
+
+ ppdu_info->rate = FIELD_GET(HAL_RX_LSIG_A_INFO_INFO0_RATE,
+ __le32_to_cpu(lsiga->info0));
+ ppdu_info->reception_type = HAL_RX_RECEPTION_TYPE_SU;
+ break;
+ }
+ case HAL_PHYRX_VHT_SIG_A: {
+ struct hal_rx_vht_sig_a_info *vht_sig =
+ (struct hal_rx_vht_sig_a_info *)tlv_data;
+ u32 nsts;
+ u32 group_id;
+ u8 gi_setting;
+
+ info0 = __le32_to_cpu(vht_sig->info0);
+ info1 = __le32_to_cpu(vht_sig->info1);
+
+ ppdu_info->ldpc = FIELD_GET(HAL_RX_VHT_SIG_A_INFO_INFO1_SU_MU_CODING,
+ info1);
+ ppdu_info->mcs = FIELD_GET(HAL_RX_VHT_SIG_A_INFO_INFO1_MCS,
+ info1);
+ gi_setting = FIELD_GET(HAL_RX_VHT_SIG_A_INFO_INFO1_GI_SETTING,
+ info1);
+ switch (gi_setting) {
+ case HAL_RX_VHT_SIG_A_NORMAL_GI:
+ ppdu_info->gi = HAL_RX_GI_0_8_US;
+ break;
+ case HAL_RX_VHT_SIG_A_SHORT_GI:
+ case HAL_RX_VHT_SIG_A_SHORT_GI_AMBIGUITY:
+ ppdu_info->gi = HAL_RX_GI_0_4_US;
+ break;
+ }
+
+ ppdu_info->is_stbc = info0 & HAL_RX_VHT_SIG_A_INFO_INFO0_STBC;
+ nsts = FIELD_GET(HAL_RX_VHT_SIG_A_INFO_INFO0_NSTS, info0);
+ if (ppdu_info->is_stbc && nsts > 0)
+ nsts = ((nsts + 1) >> 1) - 1;
+
+ ppdu_info->nss = (nsts & VHT_SIG_SU_NSS_MASK) + 1;
+ ppdu_info->bw = FIELD_GET(HAL_RX_VHT_SIG_A_INFO_INFO0_BW,
+ info0);
+ ppdu_info->beamformed = info1 &
+ HAL_RX_VHT_SIG_A_INFO_INFO1_BEAMFORMED;
+ group_id = FIELD_GET(HAL_RX_VHT_SIG_A_INFO_INFO0_GROUP_ID,
+ info0);
+ if (group_id == 0 || group_id == 63)
+ ppdu_info->reception_type = HAL_RX_RECEPTION_TYPE_SU;
+ else
+ ppdu_info->reception_type =
+ HAL_RX_RECEPTION_TYPE_MU_MIMO;
+ ppdu_info->vht_flag_values5 = group_id;
+ ppdu_info->vht_flag_values3[0] = (((ppdu_info->mcs) << 4) |
+ ppdu_info->nss);
+ ppdu_info->vht_flag_values2 = ppdu_info->bw;
+ ppdu_info->vht_flag_values4 =
+ FIELD_GET(HAL_RX_VHT_SIG_A_INFO_INFO1_SU_MU_CODING, info1);
+ break;
+ }
+ case HAL_PHYRX_HE_SIG_A_SU: {
+ struct hal_rx_he_sig_a_su_info *he_sig_a =
+ (struct hal_rx_he_sig_a_su_info *)tlv_data;
+
+ ppdu_info->he_flags = 1;
+ info0 = __le32_to_cpu(he_sig_a->info0);
+ info1 = __le32_to_cpu(he_sig_a->info1);
+
+ value = FIELD_GET(HAL_RX_HE_SIG_A_SU_INFO_INFO0_FORMAT_IND, info0);
+
+ if (value == 0)
+ ppdu_info->he_data1 = IEEE80211_RADIOTAP_HE_DATA1_FORMAT_TRIG;
+ else
+ ppdu_info->he_data1 = IEEE80211_RADIOTAP_HE_DATA1_FORMAT_SU;
+
+ ppdu_info->he_data1 |=
+ IEEE80211_RADIOTAP_HE_DATA1_BSS_COLOR_KNOWN |
+ IEEE80211_RADIOTAP_HE_DATA1_BEAM_CHANGE_KNOWN |
+ IEEE80211_RADIOTAP_HE_DATA1_UL_DL_KNOWN |
+ IEEE80211_RADIOTAP_HE_DATA1_DATA_MCS_KNOWN |
+ IEEE80211_RADIOTAP_HE_DATA1_DATA_DCM_KNOWN |
+ IEEE80211_RADIOTAP_HE_DATA1_CODING_KNOWN |
+ IEEE80211_RADIOTAP_HE_DATA1_LDPC_XSYMSEG_KNOWN |
+ IEEE80211_RADIOTAP_HE_DATA1_STBC_KNOWN |
+ IEEE80211_RADIOTAP_HE_DATA1_BW_RU_ALLOC_KNOWN |
+ IEEE80211_RADIOTAP_HE_DATA1_DOPPLER_KNOWN;
+
+ ppdu_info->he_data2 |=
+ IEEE80211_RADIOTAP_HE_DATA2_GI_KNOWN |
+ IEEE80211_RADIOTAP_HE_DATA2_TXBF_KNOWN |
+ IEEE80211_RADIOTAP_HE_DATA2_PE_DISAMBIG_KNOWN |
+ IEEE80211_RADIOTAP_HE_DATA2_TXOP_KNOWN |
+ IEEE80211_RADIOTAP_HE_DATA2_NUM_LTF_SYMS_KNOWN |
+ IEEE80211_RADIOTAP_HE_DATA2_PRE_FEC_PAD_KNOWN |
+ IEEE80211_RADIOTAP_HE_DATA2_MIDAMBLE_KNOWN;
+
+ value = FIELD_GET(HAL_RX_HE_SIG_A_SU_INFO_INFO0_BSS_COLOR, info0);
+ ppdu_info->he_data3 =
+ FIELD_PREP(IEEE80211_RADIOTAP_HE_DATA3_BSS_COLOR, value);
+ value = FIELD_GET(HAL_RX_HE_SIG_A_SU_INFO_INFO0_BEAM_CHANGE, info0);
+ ppdu_info->he_data3 |=
+ FIELD_PREP(IEEE80211_RADIOTAP_HE_DATA3_BEAM_CHANGE, value);
+ value = FIELD_GET(HAL_RX_HE_SIG_A_SU_INFO_INFO0_DL_UL_FLAG, info0);
+ ppdu_info->he_data3 |=
+ FIELD_PREP(IEEE80211_RADIOTAP_HE_DATA3_UL_DL, value);
+ value = FIELD_GET(HAL_RX_HE_SIG_A_SU_INFO_INFO0_TRANSMIT_MCS, info0);
+ ppdu_info->mcs = value;
+ ppdu_info->he_data3 |=
+ FIELD_PREP(IEEE80211_RADIOTAP_HE_DATA3_DATA_MCS, value);
+
+ he_dcm = FIELD_GET(HAL_RX_HE_SIG_A_SU_INFO_INFO0_DCM, info0);
+ ppdu_info->dcm = he_dcm;
+ ppdu_info->he_data3 |=
+ FIELD_PREP(IEEE80211_RADIOTAP_HE_DATA3_DATA_DCM, he_dcm);
+ value = FIELD_GET(HAL_RX_HE_SIG_A_SU_INFO_INFO1_CODING, info1);
+ ppdu_info->ldpc = (value == HAL_RX_SU_MU_CODING_LDPC) ? 1 : 0;
+ ppdu_info->he_data3 |=
+ FIELD_PREP(IEEE80211_RADIOTAP_HE_DATA3_CODING, value);
+ value = FIELD_GET(HAL_RX_HE_SIG_A_SU_INFO_INFO1_LDPC_EXTRA, info1);
+ ppdu_info->he_data3 |=
+ FIELD_PREP(IEEE80211_RADIOTAP_HE_DATA3_LDPC_XSYMSEG, value);
+ he_stbc = FIELD_GET(HAL_RX_HE_SIG_A_SU_INFO_INFO1_STBC, info1);
+ ppdu_info->is_stbc = he_stbc;
+ ppdu_info->he_data3 |=
+ FIELD_PREP(IEEE80211_RADIOTAP_HE_DATA3_STBC, he_stbc);
+
+ /* data4 */
+ value = FIELD_GET(HAL_RX_HE_SIG_A_SU_INFO_INFO0_SPATIAL_REUSE, info0);
+ ppdu_info->he_data4 =
+ FIELD_PREP(IEEE80211_RADIOTAP_HE_DATA4_SU_MU_SPTL_REUSE, value);
+
+ /* data5 */
+ value = FIELD_GET(HAL_RX_HE_SIG_A_SU_INFO_INFO0_TRANSMIT_BW, info0);
+ ppdu_info->bw = value;
+ ppdu_info->he_data5 =
+ FIELD_PREP(IEEE80211_RADIOTAP_HE_DATA5_DATA_BW_RU_ALLOC, value);
+ value = FIELD_GET(HAL_RX_HE_SIG_A_SU_INFO_INFO0_CP_LTF_SIZE, info0);
+ switch (value) {
+ case 0:
+ he_gi = HE_GI_0_8;
+ he_ltf = HE_LTF_1_X;
+ break;
+ case 1:
+ he_gi = HE_GI_0_8;
+ he_ltf = HE_LTF_2_X;
+ break;
+ case 2:
+ he_gi = HE_GI_1_6;
+ he_ltf = HE_LTF_2_X;
+ break;
+ case 3:
+ if (he_dcm && he_stbc) {
+ he_gi = HE_GI_0_8;
+ he_ltf = HE_LTF_4_X;
+ } else {
+ he_gi = HE_GI_3_2;
+ he_ltf = HE_LTF_4_X;
+ }
+ break;
+ }
+ ppdu_info->gi = he_gi;
+ he_gi = (he_gi != 0) ? he_gi - 1 : 0;
+ ppdu_info->he_data5 |= FIELD_PREP(IEEE80211_RADIOTAP_HE_DATA5_GI, he_gi);
+ ppdu_info->ltf_size = he_ltf;
+ ppdu_info->he_data5 |=
+ FIELD_PREP(IEEE80211_RADIOTAP_HE_DATA5_LTF_SIZE,
+ (he_ltf == HE_LTF_4_X) ? he_ltf - 1 : he_ltf);
+
+ value = FIELD_GET(HAL_RX_HE_SIG_A_SU_INFO_INFO0_NSTS, info0);
+ ppdu_info->he_data5 |=
+ FIELD_PREP(IEEE80211_RADIOTAP_HE_DATA5_NUM_LTF_SYMS, value);
+
+ value = FIELD_GET(HAL_RX_HE_SIG_A_SU_INFO_INFO1_PKT_EXT_FACTOR, info1);
+ ppdu_info->he_data5 |=
+ FIELD_PREP(IEEE80211_RADIOTAP_HE_DATA5_PRE_FEC_PAD, value);
+
+ value = FIELD_GET(HAL_RX_HE_SIG_A_SU_INFO_INFO1_TXBF, info1);
+ ppdu_info->beamformed = value;
+ ppdu_info->he_data5 |=
+ FIELD_PREP(IEEE80211_RADIOTAP_HE_DATA5_TXBF, value);
+ value = FIELD_GET(HAL_RX_HE_SIG_A_SU_INFO_INFO1_PKT_EXT_PE_DISAM, info1);
+ ppdu_info->he_data5 |=
+ FIELD_PREP(IEEE80211_RADIOTAP_HE_DATA5_PE_DISAMBIG, value);
+
+ /* data6 */
+ value = FIELD_GET(HAL_RX_HE_SIG_A_SU_INFO_INFO0_NSTS, info0);
+ value++;
+ ppdu_info->nss = value;
+ ppdu_info->he_data6 =
+ FIELD_PREP(IEEE80211_RADIOTAP_HE_DATA6_NSTS, value);
+ value = FIELD_GET(HAL_RX_HE_SIG_A_SU_INFO_INFO1_DOPPLER_IND, info1);
+ ppdu_info->he_data6 |=
+ FIELD_PREP(IEEE80211_RADIOTAP_HE_DATA6_DOPPLER, value);
+ value = FIELD_GET(HAL_RX_HE_SIG_A_SU_INFO_INFO1_TXOP_DURATION, info1);
+ ppdu_info->he_data6 |=
+ FIELD_PREP(IEEE80211_RADIOTAP_HE_DATA6_TXOP, value);
+
+ ppdu_info->reception_type = HAL_RX_RECEPTION_TYPE_SU;
+ break;
+ }
+ case HAL_PHYRX_HE_SIG_A_MU_DL: {
+ struct hal_rx_he_sig_a_mu_dl_info *he_sig_a_mu_dl =
+ (struct hal_rx_he_sig_a_mu_dl_info *)tlv_data;
+
+ info0 = __le32_to_cpu(he_sig_a_mu_dl->info0);
+ info1 = __le32_to_cpu(he_sig_a_mu_dl->info1);
+
+ ppdu_info->he_mu_flags = 1;
+
+ ppdu_info->he_data1 = IEEE80211_RADIOTAP_HE_DATA1_FORMAT_MU;
+ ppdu_info->he_data1 |=
+ IEEE80211_RADIOTAP_HE_DATA1_BSS_COLOR_KNOWN |
+ IEEE80211_RADIOTAP_HE_DATA1_UL_DL_KNOWN |
+ IEEE80211_RADIOTAP_HE_DATA1_LDPC_XSYMSEG_KNOWN |
+ IEEE80211_RADIOTAP_HE_DATA1_STBC_KNOWN |
+ IEEE80211_RADIOTAP_HE_DATA1_BW_RU_ALLOC_KNOWN |
+ IEEE80211_RADIOTAP_HE_DATA1_DOPPLER_KNOWN;
+
+ ppdu_info->he_data2 =
+ IEEE80211_RADIOTAP_HE_DATA2_GI_KNOWN |
+ IEEE80211_RADIOTAP_HE_DATA2_NUM_LTF_SYMS_KNOWN |
+ IEEE80211_RADIOTAP_HE_DATA2_PRE_FEC_PAD_KNOWN |
+ IEEE80211_RADIOTAP_HE_DATA2_PE_DISAMBIG_KNOWN |
+ IEEE80211_RADIOTAP_HE_DATA2_TXOP_KNOWN |
+ IEEE80211_RADIOTAP_HE_DATA2_MIDAMBLE_KNOWN;
+
+ /*data3*/
+ value = FIELD_GET(HAL_RX_HE_SIG_A_MU_DL_INFO_INFO0_BSS_COLOR, info0);
+ ppdu_info->he_data3 =
+ FIELD_PREP(IEEE80211_RADIOTAP_HE_DATA3_BSS_COLOR, value);
+
+ value = FIELD_GET(HAL_RX_HE_SIG_A_MU_DL_INFO_INFO0_UL_FLAG, info0);
+ ppdu_info->he_data3 |=
+ FIELD_PREP(IEEE80211_RADIOTAP_HE_DATA3_UL_DL, value);
+
+ value = FIELD_GET(HAL_RX_HE_SIG_A_MU_DL_INFO_INFO1_LDPC_EXTRA, info1);
+ ppdu_info->he_data3 |=
+ FIELD_PREP(IEEE80211_RADIOTAP_HE_DATA3_LDPC_XSYMSEG, value);
+
+ value = FIELD_GET(HAL_RX_HE_SIG_A_MU_DL_INFO_INFO1_STBC, info1);
+ he_stbc = value;
+ ppdu_info->he_data3 |=
+ FIELD_PREP(IEEE80211_RADIOTAP_HE_DATA3_STBC, value);
+
+ /*data4*/
+ value = FIELD_GET(HAL_RX_HE_SIG_A_MU_DL_INFO_INFO0_SPATIAL_REUSE, info0);
+ ppdu_info->he_data4 =
+ FIELD_PREP(IEEE80211_RADIOTAP_HE_DATA4_SU_MU_SPTL_REUSE, value);
+
+ /*data5*/
+ value = FIELD_GET(HAL_RX_HE_SIG_A_MU_DL_INFO_INFO0_TRANSMIT_BW, info0);
+ ppdu_info->bw = value;
+ ppdu_info->he_data5 =
+ FIELD_PREP(IEEE80211_RADIOTAP_HE_DATA5_DATA_BW_RU_ALLOC, value);
+
+ value = FIELD_GET(HAL_RX_HE_SIG_A_MU_DL_INFO_INFO0_CP_LTF_SIZE, info0);
+ switch (value) {
+ case 0:
+ he_gi = HE_GI_0_8;
+ he_ltf = HE_LTF_4_X;
+ break;
+ case 1:
+ he_gi = HE_GI_0_8;
+ he_ltf = HE_LTF_2_X;
+ break;
+ case 2:
+ he_gi = HE_GI_1_6;
+ he_ltf = HE_LTF_2_X;
+ break;
+ case 3:
+ he_gi = HE_GI_3_2;
+ he_ltf = HE_LTF_4_X;
+ break;
+ }
+ ppdu_info->gi = he_gi;
+ he_gi = (he_gi != 0) ? he_gi - 1 : 0;
+ ppdu_info->he_data5 |= FIELD_PREP(IEEE80211_RADIOTAP_HE_DATA5_GI, he_gi);
+ ppdu_info->ltf_size = he_ltf;
+ ppdu_info->he_data5 |=
+ FIELD_PREP(IEEE80211_RADIOTAP_HE_DATA5_LTF_SIZE,
+ (he_ltf == HE_LTF_4_X) ? he_ltf - 1 : he_ltf);
+
+ value = FIELD_GET(HAL_RX_HE_SIG_A_MU_DL_INFO_INFO1_NUM_LTF_SYMB, info1);
+ ppdu_info->he_data5 |=
+ FIELD_PREP(IEEE80211_RADIOTAP_HE_DATA5_NUM_LTF_SYMS, value);
+
+ value = FIELD_GET(HAL_RX_HE_SIG_A_MU_DL_INFO_INFO1_PKT_EXT_FACTOR,
+ info1);
+ ppdu_info->he_data5 |=
+ FIELD_PREP(IEEE80211_RADIOTAP_HE_DATA5_PRE_FEC_PAD, value);
+
+ value = FIELD_GET(HAL_RX_HE_SIG_A_MU_DL_INFO_INFO1_PKT_EXT_PE_DISAM,
+ info1);
+ ppdu_info->he_data5 |=
+ FIELD_PREP(IEEE80211_RADIOTAP_HE_DATA5_PE_DISAMBIG, value);
+
+ /*data6*/
+ value = FIELD_GET(HAL_RX_HE_SIG_A_MU_DL_INFO_INFO0_DOPPLER_INDICATION,
+ info0);
+ ppdu_info->he_data6 |=
+ FIELD_PREP(IEEE80211_RADIOTAP_HE_DATA6_DOPPLER, value);
+
+ value = FIELD_GET(HAL_RX_HE_SIG_A_MU_DL_INFO_INFO1_TXOP_DURATION, info1);
+ ppdu_info->he_data6 |=
+ FIELD_PREP(IEEE80211_RADIOTAP_HE_DATA6_TXOP, value);
+
+ /* HE-MU Flags */
+ /* HE-MU-flags1 */
+ ppdu_info->he_flags1 =
+ IEEE80211_RADIOTAP_HE_MU_FLAGS1_SIG_B_MCS_KNOWN |
+ IEEE80211_RADIOTAP_HE_MU_FLAGS1_SIG_B_DCM_KNOWN |
+ IEEE80211_RADIOTAP_HE_MU_FLAGS1_SIG_B_COMP_KNOWN |
+ IEEE80211_RADIOTAP_HE_MU_FLAGS1_SIG_B_SYMS_USERS_KNOWN |
+ IEEE80211_RADIOTAP_HE_MU_FLAGS1_CH1_RU_KNOWN;
+
+ value = FIELD_GET(HAL_RX_HE_SIG_A_MU_DL_INFO_INFO0_MCS_OF_SIGB, info0);
+ ppdu_info->he_flags1 |=
+ FIELD_PREP(IEEE80211_RADIOTAP_HE_MU_FLAGS1_SIG_B_MCS_KNOWN,
+ value);
+ value = FIELD_GET(HAL_RX_HE_SIG_A_MU_DL_INFO_INFO0_DCM_OF_SIGB, info0);
+ ppdu_info->he_flags1 |=
+ FIELD_PREP(IEEE80211_RADIOTAP_HE_MU_FLAGS1_SIG_B_DCM_KNOWN,
+ value);
+
+ /* HE-MU-flags2 */
+ ppdu_info->he_flags2 =
+ IEEE80211_RADIOTAP_HE_MU_FLAGS2_BW_FROM_SIG_A_BW_KNOWN;
+
+ value = FIELD_GET(HAL_RX_HE_SIG_A_MU_DL_INFO_INFO0_TRANSMIT_BW, info0);
+ ppdu_info->he_flags2 |=
+ FIELD_PREP(IEEE80211_RADIOTAP_HE_MU_FLAGS2_BW_FROM_SIG_A_BW,
+ value);
+ value = FIELD_GET(HAL_RX_HE_SIG_A_MU_DL_INFO_INFO0_COMP_MODE_SIGB, info0);
+ ppdu_info->he_flags2 |=
+ FIELD_PREP(IEEE80211_RADIOTAP_HE_MU_FLAGS2_SIG_B_COMP, value);
+ value = FIELD_GET(HAL_RX_HE_SIG_A_MU_DL_INFO_INFO0_NUM_SIGB_SYMB, info0);
+ value = value - 1;
+ ppdu_info->he_flags2 |=
+ FIELD_PREP(IEEE80211_RADIOTAP_HE_MU_FLAGS2_SIG_B_SYMS_USERS,
+ value);
+
+ ppdu_info->is_stbc = info1 &
+ HAL_RX_HE_SIG_A_MU_DL_INFO_INFO1_STBC;
+ ppdu_info->reception_type = HAL_RX_RECEPTION_TYPE_MU_MIMO;
+ break;
+ }
+ case HAL_PHYRX_HE_SIG_B1_MU: {
+ struct hal_rx_he_sig_b1_mu_info *he_sig_b1_mu =
+ (struct hal_rx_he_sig_b1_mu_info *)tlv_data;
+ u16 ru_tones;
+
+ info0 = __le32_to_cpu(he_sig_b1_mu->info0);
+
+ ru_tones = FIELD_GET(HAL_RX_HE_SIG_B1_MU_INFO_INFO0_RU_ALLOCATION,
+ info0);
+ ppdu_info->ru_alloc =
+ ath11k_mac_phy_he_ru_to_nl80211_he_ru_alloc(ru_tones);
+ ppdu_info->he_RU[0] = ru_tones;
+ ppdu_info->reception_type = HAL_RX_RECEPTION_TYPE_MU_MIMO;
+ break;
+ }
+ case HAL_PHYRX_HE_SIG_B2_MU: {
+ struct hal_rx_he_sig_b2_mu_info *he_sig_b2_mu =
+ (struct hal_rx_he_sig_b2_mu_info *)tlv_data;
+
+ info0 = __le32_to_cpu(he_sig_b2_mu->info0);
+
+ ppdu_info->he_data1 |= IEEE80211_RADIOTAP_HE_DATA1_DATA_MCS_KNOWN |
+ IEEE80211_RADIOTAP_HE_DATA1_CODING_KNOWN;
+
+ ppdu_info->mcs =
+ FIELD_GET(HAL_RX_HE_SIG_B2_MU_INFO_INFO0_STA_MCS, info0);
+ ppdu_info->he_data3 |=
+ FIELD_PREP(IEEE80211_RADIOTAP_HE_DATA3_DATA_MCS, ppdu_info->mcs);
+
+ value = FIELD_GET(HAL_RX_HE_SIG_B2_MU_INFO_INFO0_STA_CODING, info0);
+ ppdu_info->ldpc = value;
+ ppdu_info->he_data3 |=
+ FIELD_PREP(IEEE80211_RADIOTAP_HE_DATA3_CODING, value);
+
+ value = FIELD_GET(HAL_RX_HE_SIG_B2_MU_INFO_INFO0_STA_ID, info0);
+ ppdu_info->he_data4 |=
+ FIELD_PREP(IEEE80211_RADIOTAP_HE_DATA4_MU_STA_ID, value);
+
+ ppdu_info->nss =
+ FIELD_GET(HAL_RX_HE_SIG_B2_MU_INFO_INFO0_STA_NSTS, info0) + 1;
+ break;
+ }
+ case HAL_PHYRX_HE_SIG_B2_OFDMA: {
+ struct hal_rx_he_sig_b2_ofdma_info *he_sig_b2_ofdma =
+ (struct hal_rx_he_sig_b2_ofdma_info *)tlv_data;
+
+ info0 = __le32_to_cpu(he_sig_b2_ofdma->info0);
+
+ ppdu_info->he_data1 |=
+ IEEE80211_RADIOTAP_HE_DATA1_DATA_MCS_KNOWN |
+ IEEE80211_RADIOTAP_HE_DATA1_DATA_DCM_KNOWN |
+ IEEE80211_RADIOTAP_HE_DATA1_CODING_KNOWN;
+
+ /* HE-data2 */
+ ppdu_info->he_data2 |= IEEE80211_RADIOTAP_HE_DATA2_TXBF_KNOWN;
+
+ ppdu_info->mcs =
+ FIELD_GET(HAL_RX_HE_SIG_B2_OFDMA_INFO_INFO0_STA_MCS,
+ info0);
+ ppdu_info->he_data3 |=
+ FIELD_PREP(IEEE80211_RADIOTAP_HE_DATA3_DATA_MCS, ppdu_info->mcs);
+
+ value = FIELD_GET(HAL_RX_HE_SIG_B2_OFDMA_INFO_INFO0_STA_DCM, info0);
+ he_dcm = value;
+ ppdu_info->he_data3 |=
+ FIELD_PREP(IEEE80211_RADIOTAP_HE_DATA3_DATA_DCM, value);
+
+ value = FIELD_GET(HAL_RX_HE_SIG_B2_OFDMA_INFO_INFO0_STA_CODING, info0);
+ ppdu_info->ldpc = value;
+ ppdu_info->he_data3 |=
+ FIELD_PREP(IEEE80211_RADIOTAP_HE_DATA3_CODING, value);
+
+ /* HE-data4 */
+ value = FIELD_GET(HAL_RX_HE_SIG_B2_OFDMA_INFO_INFO0_STA_ID, info0);
+ ppdu_info->he_data4 |=
+ FIELD_PREP(IEEE80211_RADIOTAP_HE_DATA4_MU_STA_ID, value);
+
+ ppdu_info->nss =
+ FIELD_GET(HAL_RX_HE_SIG_B2_OFDMA_INFO_INFO0_STA_NSTS,
+ info0) + 1;
+ ppdu_info->beamformed =
+ info0 & HAL_RX_HE_SIG_B2_OFDMA_INFO_INFO0_STA_TXBF;
+ ppdu_info->reception_type = HAL_RX_RECEPTION_TYPE_MU_OFDMA;
+ break;
+ }
+ case HAL_PHYRX_RSSI_LEGACY: {
+ int i;
+ bool db2dbm = test_bit(WMI_TLV_SERVICE_HW_DB2DBM_CONVERSION_SUPPORT,
+ ab->wmi_ab.svc_map);
+ struct hal_rx_phyrx_rssi_legacy_info *rssi =
+ (struct hal_rx_phyrx_rssi_legacy_info *)tlv_data;
+
+ /* TODO: Note that the combined RSSI is not accurate in the MU
+ * case; the RSSI for MU receptions needs to be retrieved from
+ * the PHYRX_OTHER_RECEIVE_INFO TLV.
+ */
+ ppdu_info->rssi_comb =
+ FIELD_GET(HAL_RX_PHYRX_RSSI_LEGACY_INFO_INFO0_RSSI_COMB,
+ __le32_to_cpu(rssi->info0));
+
+ if (db2dbm) {
+ for (i = 0; i < ARRAY_SIZE(rssi->preamble); i++) {
+ ppdu_info->rssi_chain_pri20[i] =
+ le32_get_bits(rssi->preamble[i].rssi_2040,
+ HAL_RX_PHYRX_RSSI_PREAMBLE_PRI20);
+ }
+ }
+ break;
+ }
+ case HAL_RX_MPDU_START: {
+ struct hal_rx_mpdu_info *mpdu_info =
+ (struct hal_rx_mpdu_info *)tlv_data;
+ u16 peer_id;
+
+ peer_id = ath11k_hal_rx_mpduinfo_get_peerid(ab, mpdu_info);
+ if (peer_id)
+ ppdu_info->peer_id = peer_id;
+ break;
+ }
+ case HAL_RXPCU_PPDU_END_INFO: {
+ struct hal_rx_ppdu_end_duration *ppdu_rx_duration =
+ (struct hal_rx_ppdu_end_duration *)tlv_data;
+ ppdu_info->rx_duration =
+ FIELD_GET(HAL_RX_PPDU_END_DURATION,
+ __le32_to_cpu(ppdu_rx_duration->info0));
+ ppdu_info->tsft = __le32_to_cpu(ppdu_rx_duration->rsvd0[1]);
+ ppdu_info->tsft = (ppdu_info->tsft << 32) |
+ __le32_to_cpu(ppdu_rx_duration->rsvd0[0]);
+ break;
+ }
+ case HAL_DUMMY:
+ return HAL_RX_MON_STATUS_BUF_DONE;
+ case HAL_RX_PPDU_END_STATUS_DONE:
+ case 0:
+ return HAL_RX_MON_STATUS_PPDU_DONE;
+ default:
+ break;
+ }
+
+ return HAL_RX_MON_STATUS_PPDU_NOT_DONE;
+}
+
+enum hal_rx_mon_status
+ath11k_hal_rx_parse_mon_status(struct ath11k_base *ab,
+ struct hal_rx_mon_ppdu_info *ppdu_info,
+ struct sk_buff *skb)
+{
+ struct hal_tlv_hdr *tlv;
+ enum hal_rx_mon_status hal_status = HAL_RX_MON_STATUS_BUF_DONE;
+ u16 tlv_tag;
+ u16 tlv_len;
+ u32 tlv_userid = 0;
+ u8 *ptr = skb->data;
+
+ do {
+ tlv = (struct hal_tlv_hdr *)ptr;
+ tlv_tag = FIELD_GET(HAL_TLV_HDR_TAG, tlv->tl);
+ tlv_len = FIELD_GET(HAL_TLV_HDR_LEN, tlv->tl);
+ tlv_userid = FIELD_GET(HAL_TLV_USR_ID, tlv->tl);
+ ptr += sizeof(*tlv);
+
+ /* The actual length of PPDU_END is the combined length of the many
+ * PHY TLVs that follow. Skip the TLV header and the
+ * rx_rxpcu_classification_overview that follows it to get to the
+ * next TLV.
+ */
+ if (tlv_tag == HAL_RX_PPDU_END)
+ tlv_len = sizeof(struct hal_rx_rxpcu_classification_overview);
+
+ hal_status = ath11k_hal_rx_parse_mon_status_tlv(ab, ppdu_info,
+ tlv_tag, ptr, tlv_userid);
+ ptr += tlv_len;
+ ptr = PTR_ALIGN(ptr, HAL_TLV_ALIGN);
+
+ if ((ptr - skb->data) >= DP_RX_BUFFER_SIZE)
+ break;
+ } while (hal_status == HAL_RX_MON_STATUS_PPDU_NOT_DONE);
+
+ return hal_status;
+}
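+
+/* Caller sketch (editor's illustration): the monitor status path feeds each
+ * status buffer through the TLV walk above until a complete PPDU has been
+ * parsed; 'ppdu_info' and 'skb' are assumed to come from the caller.
+ *
+ *	memset(&ppdu_info, 0, sizeof(ppdu_info));
+ *	hal_status = ath11k_hal_rx_parse_mon_status(ab, &ppdu_info, skb);
+ *	if (hal_status == HAL_RX_MON_STATUS_PPDU_DONE)
+ *		// ppdu_info now describes the received PPDU
+ */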
+
+void ath11k_hal_rx_reo_ent_buf_paddr_get(void *rx_desc, dma_addr_t *paddr,
+ u32 *sw_cookie, void **pp_buf_addr,
+ u8 *rbm, u32 *msdu_cnt)
+{
+ struct hal_reo_entrance_ring *reo_ent_ring =
+ (struct hal_reo_entrance_ring *)rx_desc;
+ struct ath11k_buffer_addr *buf_addr_info;
+ struct rx_mpdu_desc *rx_mpdu_desc_info_details;
+
+ rx_mpdu_desc_info_details =
+ (struct rx_mpdu_desc *)&reo_ent_ring->rx_mpdu_info;
+
+ *msdu_cnt = FIELD_GET(RX_MPDU_DESC_INFO0_MSDU_COUNT,
+ rx_mpdu_desc_info_details->info0);
+
+ buf_addr_info = (struct ath11k_buffer_addr *)&reo_ent_ring->buf_addr_info;
+
+ *paddr = (((u64)FIELD_GET(BUFFER_ADDR_INFO1_ADDR,
+ buf_addr_info->info1)) << 32) |
+ FIELD_GET(BUFFER_ADDR_INFO0_ADDR,
+ buf_addr_info->info0);
+
+ *sw_cookie = FIELD_GET(BUFFER_ADDR_INFO1_SW_COOKIE,
+ buf_addr_info->info1);
+ *rbm = FIELD_GET(BUFFER_ADDR_INFO1_RET_BUF_MGR,
+ buf_addr_info->info1);
+
+ *pp_buf_addr = (void *)buf_addr_info;
+}
+
+void
+ath11k_hal_rx_sw_mon_ring_buf_paddr_get(void *rx_desc,
+ struct hal_sw_mon_ring_entries *sw_mon_entries)
+{
+ struct hal_sw_monitor_ring *sw_mon_ring = rx_desc;
+ struct ath11k_buffer_addr *buf_addr_info;
+ struct ath11k_buffer_addr *status_buf_addr_info;
+ struct rx_mpdu_desc *rx_mpdu_desc_info_details;
+
+ rx_mpdu_desc_info_details = &sw_mon_ring->rx_mpdu_info;
+
+ sw_mon_entries->msdu_cnt = FIELD_GET(RX_MPDU_DESC_INFO0_MSDU_COUNT,
+ rx_mpdu_desc_info_details->info0);
+
+ buf_addr_info = &sw_mon_ring->buf_addr_info;
+ status_buf_addr_info = &sw_mon_ring->status_buf_addr_info;
+
+ sw_mon_entries->mon_dst_paddr = (((u64)FIELD_GET(BUFFER_ADDR_INFO1_ADDR,
+ buf_addr_info->info1)) << 32) |
+ FIELD_GET(BUFFER_ADDR_INFO0_ADDR,
+ buf_addr_info->info0);
+
+ sw_mon_entries->mon_status_paddr =
+ (((u64)FIELD_GET(BUFFER_ADDR_INFO1_ADDR,
+ status_buf_addr_info->info1)) << 32) |
+ FIELD_GET(BUFFER_ADDR_INFO0_ADDR,
+ status_buf_addr_info->info0);
+
+ sw_mon_entries->mon_dst_sw_cookie = FIELD_GET(BUFFER_ADDR_INFO1_SW_COOKIE,
+ buf_addr_info->info1);
+
+ sw_mon_entries->mon_status_sw_cookie = FIELD_GET(BUFFER_ADDR_INFO1_SW_COOKIE,
+ status_buf_addr_info->info1);
+
+ sw_mon_entries->status_buf_count = FIELD_GET(HAL_SW_MON_RING_INFO0_STATUS_BUF_CNT,
+ sw_mon_ring->info0);
+
+ sw_mon_entries->dst_buf_addr_info = buf_addr_info;
+ sw_mon_entries->status_buf_addr_info = status_buf_addr_info;
+
+ sw_mon_entries->ppdu_id =
+ FIELD_GET(HAL_SW_MON_RING_INFO1_PHY_PPDU_ID, sw_mon_ring->info1);
+}
diff --git a/drivers/net/wireless/ath/ath11k/hal_rx.h b/drivers/net/wireless/ath/ath11k/hal_rx.h
new file mode 100644
index 0000000000..61bd8416c4
--- /dev/null
+++ b/drivers/net/wireless/ath/ath11k/hal_rx.h
@@ -0,0 +1,513 @@
+/* SPDX-License-Identifier: BSD-3-Clause-Clear */
+/*
+ * Copyright (c) 2018-2019 The Linux Foundation. All rights reserved.
+ */
+
+#ifndef ATH11K_HAL_RX_H
+#define ATH11K_HAL_RX_H
+
+struct hal_rx_wbm_rel_info {
+ u32 cookie;
+ enum hal_wbm_rel_src_module err_rel_src;
+ enum hal_reo_dest_ring_push_reason push_reason;
+ u32 err_code;
+ bool first_msdu;
+ bool last_msdu;
+};
+
+#define HAL_INVALID_PEERID 0xffff
+#define VHT_SIG_SU_NSS_MASK 0x7
+
+#define HAL_RX_MAX_MCS 12
+#define HAL_RX_MAX_NSS 8
+
+struct hal_rx_mon_status_tlv_hdr {
+ u32 hdr;
+ u8 value[];
+};
+
+enum hal_rx_su_mu_coding {
+ HAL_RX_SU_MU_CODING_BCC,
+ HAL_RX_SU_MU_CODING_LDPC,
+ HAL_RX_SU_MU_CODING_MAX,
+};
+
+enum hal_rx_gi {
+ HAL_RX_GI_0_8_US,
+ HAL_RX_GI_0_4_US,
+ HAL_RX_GI_1_6_US,
+ HAL_RX_GI_3_2_US,
+ HAL_RX_GI_MAX,
+};
+
+enum hal_rx_bw {
+ HAL_RX_BW_20MHZ,
+ HAL_RX_BW_40MHZ,
+ HAL_RX_BW_80MHZ,
+ HAL_RX_BW_160MHZ,
+ HAL_RX_BW_MAX,
+};
+
+enum hal_rx_preamble {
+ HAL_RX_PREAMBLE_11A,
+ HAL_RX_PREAMBLE_11B,
+ HAL_RX_PREAMBLE_11N,
+ HAL_RX_PREAMBLE_11AC,
+ HAL_RX_PREAMBLE_11AX,
+ HAL_RX_PREAMBLE_MAX,
+};
+
+enum hal_rx_reception_type {
+ HAL_RX_RECEPTION_TYPE_SU,
+ HAL_RX_RECEPTION_TYPE_MU_MIMO,
+ HAL_RX_RECEPTION_TYPE_MU_OFDMA,
+ HAL_RX_RECEPTION_TYPE_MU_OFDMA_MIMO,
+ HAL_RX_RECEPTION_TYPE_MAX,
+};
+
+#define HAL_RX_FCS_LEN 4
+
+enum hal_rx_mon_status {
+ HAL_RX_MON_STATUS_PPDU_NOT_DONE,
+ HAL_RX_MON_STATUS_PPDU_DONE,
+ HAL_RX_MON_STATUS_BUF_DONE,
+};
+
+struct hal_rx_user_status {
+ u32 mcs:4,
+ nss:3,
+ ofdma_info_valid:1,
+ dl_ofdma_ru_start_index:7,
+ dl_ofdma_ru_width:7,
+ dl_ofdma_ru_size:8;
+ u32 ul_ofdma_user_v0_word0;
+ u32 ul_ofdma_user_v0_word1;
+ u32 ast_index;
+ u32 tid;
+ u16 tcp_msdu_count;
+ u16 udp_msdu_count;
+ u16 other_msdu_count;
+ u16 frame_control;
+ u8 frame_control_info_valid;
+ u8 data_sequence_control_info_valid;
+ u16 first_data_seq_ctrl;
+ u32 preamble_type;
+ u16 ht_flags;
+ u16 vht_flags;
+ u16 he_flags;
+ u8 rs_flags;
+ u32 mpdu_cnt_fcs_ok;
+ u32 mpdu_cnt_fcs_err;
+ u32 mpdu_fcs_ok_bitmap[8];
+ u32 mpdu_ok_byte_count;
+ u32 mpdu_err_byte_count;
+};
+
+#define HAL_TLV_STATUS_PPDU_NOT_DONE HAL_RX_MON_STATUS_PPDU_NOT_DONE
+#define HAL_TLV_STATUS_PPDU_DONE HAL_RX_MON_STATUS_PPDU_DONE
+#define HAL_TLV_STATUS_BUF_DONE HAL_RX_MON_STATUS_BUF_DONE
+
+struct hal_sw_mon_ring_entries {
+ dma_addr_t mon_dst_paddr;
+ dma_addr_t mon_status_paddr;
+ u32 mon_dst_sw_cookie;
+ u32 mon_status_sw_cookie;
+ void *dst_buf_addr_info;
+ void *status_buf_addr_info;
+ u16 ppdu_id;
+ u8 status_buf_count;
+ u8 msdu_cnt;
+ bool end_of_ppdu;
+ bool drop_ppdu;
+};
+
+struct hal_rx_mon_ppdu_info {
+ u32 ppdu_id;
+ u32 ppdu_ts;
+ u32 num_mpdu_fcs_ok;
+ u32 num_mpdu_fcs_err;
+ u32 preamble_type;
+ u16 chan_num;
+ u16 tcp_msdu_count;
+ u16 tcp_ack_msdu_count;
+ u16 udp_msdu_count;
+ u16 other_msdu_count;
+ u16 peer_id;
+ u8 rate;
+ u8 mcs;
+ u8 nss;
+ u8 bw;
+ u8 vht_flag_values1;
+ u8 vht_flag_values2;
+ u8 vht_flag_values3[4];
+ u8 vht_flag_values4;
+ u8 vht_flag_values5;
+ u16 vht_flag_values6;
+ u8 is_stbc;
+ u8 gi;
+ u8 ldpc;
+ u8 beamformed;
+ u8 rssi_comb;
+ u8 rssi_chain_pri20[HAL_RX_MAX_NSS];
+ u8 tid;
+ u16 ht_flags;
+ u16 vht_flags;
+ u16 he_flags;
+ u16 he_mu_flags;
+ u8 dcm;
+ u8 ru_alloc;
+ u8 reception_type;
+ u64 tsft;
+ u64 rx_duration;
+ u16 frame_control;
+ u32 ast_index;
+ u8 rs_fcs_err;
+ u8 rs_flags;
+ u8 cck_flag;
+ u8 ofdm_flag;
+ u8 ulofdma_flag;
+ u8 frame_control_info_valid;
+ u16 he_per_user_1;
+ u16 he_per_user_2;
+ u8 he_per_user_position;
+ u8 he_per_user_known;
+ u16 he_flags1;
+ u16 he_flags2;
+ u8 he_RU[4];
+ u16 he_data1;
+ u16 he_data2;
+ u16 he_data3;
+ u16 he_data4;
+ u16 he_data5;
+ u16 he_data6;
+ u32 ppdu_len;
+ u32 prev_ppdu_id;
+ u32 device_id;
+ u16 first_data_seq_ctrl;
+ u8 monitor_direct_used;
+ u8 data_sequence_control_info_valid;
+ u8 ltf_size;
+ u8 rxpcu_filter_pass;
+ char rssi_chain[8][8];
+ struct hal_rx_user_status userstats;
+};
+
+#define HAL_RX_PPDU_START_INFO0_PPDU_ID GENMASK(15, 0)
+
+struct hal_rx_ppdu_start {
+ __le32 info0;
+ __le32 chan_num;
+ __le32 ppdu_start_ts;
+} __packed;
+
+#define HAL_RX_PPDU_END_USER_STATS_INFO0_MPDU_CNT_FCS_ERR GENMASK(25, 16)
+
+#define HAL_RX_PPDU_END_USER_STATS_INFO1_MPDU_CNT_FCS_OK GENMASK(8, 0)
+#define HAL_RX_PPDU_END_USER_STATS_INFO1_FC_VALID BIT(9)
+#define HAL_RX_PPDU_END_USER_STATS_INFO1_QOS_CTRL_VALID BIT(10)
+#define HAL_RX_PPDU_END_USER_STATS_INFO1_HT_CTRL_VALID BIT(11)
+#define HAL_RX_PPDU_END_USER_STATS_INFO1_PKT_TYPE GENMASK(23, 20)
+
+#define HAL_RX_PPDU_END_USER_STATS_INFO2_AST_INDEX GENMASK(15, 0)
+#define HAL_RX_PPDU_END_USER_STATS_INFO2_FRAME_CTRL GENMASK(31, 16)
+
+#define HAL_RX_PPDU_END_USER_STATS_INFO3_QOS_CTRL GENMASK(31, 16)
+
+#define HAL_RX_PPDU_END_USER_STATS_INFO4_UDP_MSDU_CNT GENMASK(15, 0)
+#define HAL_RX_PPDU_END_USER_STATS_INFO4_TCP_MSDU_CNT GENMASK(31, 16)
+
+#define HAL_RX_PPDU_END_USER_STATS_INFO5_OTHER_MSDU_CNT GENMASK(15, 0)
+#define HAL_RX_PPDU_END_USER_STATS_INFO5_TCP_ACK_MSDU_CNT GENMASK(31, 16)
+
+#define HAL_RX_PPDU_END_USER_STATS_INFO6_TID_BITMAP GENMASK(15, 0)
+#define HAL_RX_PPDU_END_USER_STATS_INFO6_TID_EOSP_BITMAP GENMASK(31, 16)
+
+#define HAL_RX_PPDU_END_USER_STATS_RSVD2_6_MPDU_OK_BYTE_COUNT GENMASK(24, 0)
+#define HAL_RX_PPDU_END_USER_STATS_RSVD2_8_MPDU_ERR_BYTE_COUNT GENMASK(24, 0)
+
+struct hal_rx_ppdu_end_user_stats {
+ __le32 rsvd0[2];
+ __le32 info0;
+ __le32 info1;
+ __le32 info2;
+ __le32 info3;
+ __le32 ht_ctrl;
+ __le32 rsvd1[2];
+ __le32 info4;
+ __le32 info5;
+ __le32 info6;
+ __le32 rsvd2[11];
+} __packed;
+
+struct hal_rx_ppdu_end_user_stats_ext {
+ u32 info0;
+ u32 info1;
+ u32 info2;
+ u32 info3;
+ u32 info4;
+ u32 info5;
+ u32 info6;
+} __packed;
+
+#define HAL_RX_HT_SIG_INFO_INFO0_MCS GENMASK(6, 0)
+#define HAL_RX_HT_SIG_INFO_INFO0_BW BIT(7)
+
+#define HAL_RX_HT_SIG_INFO_INFO1_STBC GENMASK(5, 4)
+#define HAL_RX_HT_SIG_INFO_INFO1_FEC_CODING BIT(6)
+#define HAL_RX_HT_SIG_INFO_INFO1_GI BIT(7)
+
+struct hal_rx_ht_sig_info {
+ __le32 info0;
+ __le32 info1;
+} __packed;
+
+#define HAL_RX_LSIG_B_INFO_INFO0_RATE GENMASK(3, 0)
+#define HAL_RX_LSIG_B_INFO_INFO0_LEN GENMASK(15, 4)
+
+struct hal_rx_lsig_b_info {
+ __le32 info0;
+} __packed;
+
+#define HAL_RX_LSIG_A_INFO_INFO0_RATE GENMASK(3, 0)
+#define HAL_RX_LSIG_A_INFO_INFO0_LEN GENMASK(16, 5)
+#define HAL_RX_LSIG_A_INFO_INFO0_PKT_TYPE GENMASK(27, 24)
+
+struct hal_rx_lsig_a_info {
+ __le32 info0;
+} __packed;
+
+#define HAL_RX_VHT_SIG_A_INFO_INFO0_BW GENMASK(1, 0)
+#define HAL_RX_VHT_SIG_A_INFO_INFO0_STBC BIT(3)
+#define HAL_RX_VHT_SIG_A_INFO_INFO0_GROUP_ID GENMASK(9, 4)
+#define HAL_RX_VHT_SIG_A_INFO_INFO0_NSTS GENMASK(21, 10)
+
+#define HAL_RX_VHT_SIG_A_INFO_INFO1_GI_SETTING GENMASK(1, 0)
+#define HAL_RX_VHT_SIG_A_INFO_INFO1_SU_MU_CODING BIT(2)
+#define HAL_RX_VHT_SIG_A_INFO_INFO1_MCS GENMASK(7, 4)
+#define HAL_RX_VHT_SIG_A_INFO_INFO1_BEAMFORMED BIT(8)
+
+struct hal_rx_vht_sig_a_info {
+ __le32 info0;
+ __le32 info1;
+} __packed;
+
+enum hal_rx_vht_sig_a_gi_setting {
+ HAL_RX_VHT_SIG_A_NORMAL_GI = 0,
+ HAL_RX_VHT_SIG_A_SHORT_GI = 1,
+ HAL_RX_VHT_SIG_A_SHORT_GI_AMBIGUITY = 3,
+};
+
+#define HAL_RX_SU_MU_CODING_LDPC 0x01
+
+#define HE_GI_0_8 0
+#define HE_GI_0_4 1
+#define HE_GI_1_6 2
+#define HE_GI_3_2 3
+
+#define HE_LTF_1_X 0
+#define HE_LTF_2_X 1
+#define HE_LTF_4_X 2
+#define HE_LTF_UNKNOWN 3
+
+#define HAL_RX_HE_SIG_A_SU_INFO_INFO0_TRANSMIT_MCS GENMASK(6, 3)
+#define HAL_RX_HE_SIG_A_SU_INFO_INFO0_DCM BIT(7)
+#define HAL_RX_HE_SIG_A_SU_INFO_INFO0_TRANSMIT_BW GENMASK(20, 19)
+#define HAL_RX_HE_SIG_A_SU_INFO_INFO0_CP_LTF_SIZE GENMASK(22, 21)
+#define HAL_RX_HE_SIG_A_SU_INFO_INFO0_NSTS GENMASK(25, 23)
+#define HAL_RX_HE_SIG_A_SU_INFO_INFO0_BSS_COLOR GENMASK(13, 8)
+#define HAL_RX_HE_SIG_A_SU_INFO_INFO0_SPATIAL_REUSE GENMASK(18, 15)
+#define HAL_RX_HE_SIG_A_SU_INFO_INFO0_FORMAT_IND BIT(0)
+#define HAL_RX_HE_SIG_A_SU_INFO_INFO0_BEAM_CHANGE BIT(1)
+#define HAL_RX_HE_SIG_A_SU_INFO_INFO0_DL_UL_FLAG BIT(2)
+
+#define HAL_RX_HE_SIG_A_SU_INFO_INFO1_TXOP_DURATION GENMASK(6, 0)
+#define HAL_RX_HE_SIG_A_SU_INFO_INFO1_CODING BIT(7)
+#define HAL_RX_HE_SIG_A_SU_INFO_INFO1_LDPC_EXTRA BIT(8)
+#define HAL_RX_HE_SIG_A_SU_INFO_INFO1_STBC BIT(9)
+#define HAL_RX_HE_SIG_A_SU_INFO_INFO1_TXBF BIT(10)
+#define HAL_RX_HE_SIG_A_SU_INFO_INFO1_PKT_EXT_FACTOR GENMASK(12, 11)
+#define HAL_RX_HE_SIG_A_SU_INFO_INFO1_PKT_EXT_PE_DISAM BIT(13)
+#define HAL_RX_HE_SIG_A_SU_INFO_INFO1_DOPPLER_IND BIT(15)
+
+struct hal_rx_he_sig_a_su_info {
+ __le32 info0;
+ __le32 info1;
+} __packed;
+
+#define HAL_RX_HE_SIG_A_MU_DL_INFO_INFO0_UL_FLAG BIT(1)
+#define HAL_RX_HE_SIG_A_MU_DL_INFO_INFO0_MCS_OF_SIGB GENMASK(3, 1)
+#define HAL_RX_HE_SIG_A_MU_DL_INFO_INFO0_DCM_OF_SIGB BIT(4)
+#define HAL_RX_HE_SIG_A_MU_DL_INFO_INFO0_BSS_COLOR GENMASK(10, 5)
+#define HAL_RX_HE_SIG_A_MU_DL_INFO_INFO0_SPATIAL_REUSE GENMASK(14, 11)
+#define HAL_RX_HE_SIG_A_MU_DL_INFO_INFO0_TRANSMIT_BW GENMASK(17, 15)
+#define HAL_RX_HE_SIG_A_MU_DL_INFO_INFO0_NUM_SIGB_SYMB GENMASK(21, 18)
+#define HAL_RX_HE_SIG_A_MU_DL_INFO_INFO0_COMP_MODE_SIGB BIT(22)
+#define HAL_RX_HE_SIG_A_MU_DL_INFO_INFO0_CP_LTF_SIZE GENMASK(24, 23)
+#define HAL_RX_HE_SIG_A_MU_DL_INFO_INFO0_DOPPLER_INDICATION BIT(25)
+
+#define HAL_RX_HE_SIG_A_MU_DL_INFO_INFO1_TXOP_DURATION GENMASK(6, 0)
+#define HAL_RX_HE_SIG_A_MU_DL_INFO_INFO1_CODING BIT(7)
+#define HAL_RX_HE_SIG_A_MU_DL_INFO_INFO1_NUM_LTF_SYMB GENMASK(10, 8)
+#define HAL_RX_HE_SIG_A_MU_DL_INFO_INFO1_LDPC_EXTRA BIT(11)
+#define HAL_RX_HE_SIG_A_MU_DL_INFO_INFO1_STBC BIT(12)
+#define HAL_RX_HE_SIG_A_MU_DL_INFO_INFO1_TXBF BIT(10)
+#define HAL_RX_HE_SIG_A_MU_DL_INFO_INFO1_PKT_EXT_FACTOR GENMASK(14, 13)
+#define HAL_RX_HE_SIG_A_MU_DL_INFO_INFO1_PKT_EXT_PE_DISAM BIT(15)
+
+struct hal_rx_he_sig_a_mu_dl_info {
+ __le32 info0;
+ __le32 info1;
+} __packed;
+
+#define HAL_RX_HE_SIG_B1_MU_INFO_INFO0_RU_ALLOCATION GENMASK(7, 0)
+
+struct hal_rx_he_sig_b1_mu_info {
+ __le32 info0;
+} __packed;
+
+#define HAL_RX_HE_SIG_B2_MU_INFO_INFO0_STA_ID GENMASK(10, 0)
+#define HAL_RX_HE_SIG_B2_MU_INFO_INFO0_STA_MCS GENMASK(18, 15)
+#define HAL_RX_HE_SIG_B2_MU_INFO_INFO0_STA_CODING BIT(20)
+#define HAL_RX_HE_SIG_B2_MU_INFO_INFO0_STA_NSTS GENMASK(31, 29)
+
+struct hal_rx_he_sig_b2_mu_info {
+ __le32 info0;
+} __packed;
+
+#define HAL_RX_HE_SIG_B2_OFDMA_INFO_INFO0_STA_ID GENMASK(10, 0)
+#define HAL_RX_HE_SIG_B2_OFDMA_INFO_INFO0_STA_NSTS GENMASK(13, 11)
+#define HAL_RX_HE_SIG_B2_OFDMA_INFO_INFO0_STA_TXBF BIT(14)
+#define HAL_RX_HE_SIG_B2_OFDMA_INFO_INFO0_STA_MCS GENMASK(18, 15)
+#define HAL_RX_HE_SIG_B2_OFDMA_INFO_INFO0_STA_DCM BIT(19)
+#define HAL_RX_HE_SIG_B2_OFDMA_INFO_INFO0_STA_CODING BIT(20)
+
+struct hal_rx_he_sig_b2_ofdma_info {
+ __le32 info0;
+} __packed;
+
+#define HAL_RX_PHYRX_RSSI_LEGACY_INFO_INFO0_RSSI_COMB GENMASK(15, 8)
+
+#define HAL_RX_PHYRX_RSSI_PREAMBLE_PRI20 GENMASK(7, 0)
+
+struct hal_rx_phyrx_chain_rssi {
+ __le32 rssi_2040;
+ __le32 rssi_80;
+} __packed;
+
+struct hal_rx_phyrx_rssi_legacy_info {
+ __le32 rsvd[3];
+ struct hal_rx_phyrx_chain_rssi pre_rssi[HAL_RX_MAX_NSS];
+ struct hal_rx_phyrx_chain_rssi preamble[HAL_RX_MAX_NSS];
+ __le32 info0;
+} __packed;
+
+#define HAL_RX_MPDU_INFO_INFO0_PEERID GENMASK(31, 16)
+#define HAL_RX_MPDU_INFO_INFO0_PEERID_WCN6855 GENMASK(15, 0)
+#define HAL_RX_MPDU_INFO_INFO1_MPDU_LEN GENMASK(13, 0)
+
+struct hal_rx_mpdu_info_ipq8074 {
+ __le32 rsvd0;
+ __le32 info0;
+ __le32 rsvd1[11];
+ __le32 info1;
+ __le32 rsvd2[9];
+} __packed;
+
+struct hal_rx_mpdu_info_qcn9074 {
+ __le32 rsvd0[10];
+ __le32 info0;
+ __le32 rsvd1[2];
+ __le32 info1;
+ __le32 rsvd2[9];
+} __packed;
+
+struct hal_rx_mpdu_info_wcn6855 {
+ __le32 rsvd0[8];
+ __le32 info0;
+ __le32 rsvd1[14];
+} __packed;
+
+struct hal_rx_mpdu_info {
+ union {
+ struct hal_rx_mpdu_info_ipq8074 ipq8074;
+ struct hal_rx_mpdu_info_qcn9074 qcn9074;
+ struct hal_rx_mpdu_info_wcn6855 wcn6855;
+ } u;
+} __packed;
+
+#define HAL_RX_PPDU_END_DURATION GENMASK(23, 0)
+struct hal_rx_ppdu_end_duration {
+ __le32 rsvd0[9];
+ __le32 info0;
+ __le32 rsvd1[4];
+} __packed;
+
+struct hal_rx_rxpcu_classification_overview {
+ u32 rsvd0;
+} __packed;
+
+struct hal_rx_msdu_desc_info {
+ u32 msdu_flags;
+ u16 msdu_len; /* 14 bits for length */
+};
+
+#define HAL_RX_NUM_MSDU_DESC 6
+struct hal_rx_msdu_list {
+ struct hal_rx_msdu_desc_info msdu_info[HAL_RX_NUM_MSDU_DESC];
+ u32 sw_cookie[HAL_RX_NUM_MSDU_DESC];
+ u8 rbm[HAL_RX_NUM_MSDU_DESC];
+};
+
+void ath11k_hal_reo_status_queue_stats(struct ath11k_base *ab, u32 *reo_desc,
+ struct hal_reo_status *status);
+void ath11k_hal_reo_flush_queue_status(struct ath11k_base *ab, u32 *reo_desc,
+ struct hal_reo_status *status);
+void ath11k_hal_reo_flush_cache_status(struct ath11k_base *ab, u32 *reo_desc,
+ struct hal_reo_status *status);
+void ath11k_hal_reo_unblk_cache_status(struct ath11k_base *ab, u32 *reo_desc,
+ struct hal_reo_status *status);
+void ath11k_hal_reo_flush_timeout_list_status(struct ath11k_base *ab,
+ u32 *reo_desc,
+ struct hal_reo_status *status);
+void ath11k_hal_reo_desc_thresh_reached_status(struct ath11k_base *ab,
+ u32 *reo_desc,
+ struct hal_reo_status *status);
+void ath11k_hal_reo_update_rx_reo_queue_status(struct ath11k_base *ab,
+ u32 *reo_desc,
+ struct hal_reo_status *status);
+int ath11k_hal_reo_process_status(u8 *reo_desc, u8 *status);
+void ath11k_hal_rx_msdu_link_info_get(void *link_desc, u32 *num_msdus,
+ u32 *msdu_cookies,
+ enum hal_rx_buf_return_buf_manager *rbm);
+void ath11k_hal_rx_msdu_link_desc_set(struct ath11k_base *ab, void *desc,
+ void *link_desc,
+ enum hal_wbm_rel_bm_act action);
+void ath11k_hal_rx_buf_addr_info_set(void *desc, dma_addr_t paddr,
+ u32 cookie, u8 manager);
+void ath11k_hal_rx_buf_addr_info_get(void *desc, dma_addr_t *paddr,
+ u32 *cookie, u8 *rbm);
+int ath11k_hal_desc_reo_parse_err(struct ath11k_base *ab, u32 *rx_desc,
+ dma_addr_t *paddr, u32 *desc_bank);
+int ath11k_hal_wbm_desc_parse_err(struct ath11k_base *ab, void *desc,
+ struct hal_rx_wbm_rel_info *rel_info);
+void ath11k_hal_rx_reo_ent_paddr_get(struct ath11k_base *ab, void *desc,
+ dma_addr_t *paddr, u32 *desc_bank);
+void ath11k_hal_rx_reo_ent_buf_paddr_get(void *rx_desc,
+ dma_addr_t *paddr, u32 *sw_cookie,
+ void **pp_buf_addr_info, u8 *rbm,
+ u32 *msdu_cnt);
+void
+ath11k_hal_rx_sw_mon_ring_buf_paddr_get(void *rx_desc,
+ struct hal_sw_mon_ring_entries *sw_mon_ent);
+enum hal_rx_mon_status
+ath11k_hal_rx_parse_mon_status(struct ath11k_base *ab,
+ struct hal_rx_mon_ppdu_info *ppdu_info,
+ struct sk_buff *skb);
+
+#define REO_QUEUE_DESC_MAGIC_DEBUG_PATTERN_0 0xDDBEEF
+#define REO_QUEUE_DESC_MAGIC_DEBUG_PATTERN_1 0xADBEEF
+#define REO_QUEUE_DESC_MAGIC_DEBUG_PATTERN_2 0xBDBEEF
+#define REO_QUEUE_DESC_MAGIC_DEBUG_PATTERN_3 0xCDBEEF
+#endif
diff --git a/drivers/net/wireless/ath/ath11k/hal_tx.c b/drivers/net/wireless/ath/ath11k/hal_tx.c
new file mode 100644
index 0000000000..d1b0e36e04
--- /dev/null
+++ b/drivers/net/wireless/ath/ath11k/hal_tx.c
@@ -0,0 +1,160 @@
+// SPDX-License-Identifier: BSD-3-Clause-Clear
+/*
+ * Copyright (c) 2018-2019 The Linux Foundation. All rights reserved.
+ * Copyright (c) 2022 Qualcomm Innovation Center, Inc. All rights reserved.
+ */
+
+#include "hal_desc.h"
+#include "hal.h"
+#include "hal_tx.h"
+#include "hif.h"
+
+#define DSCP_TID_MAP_TBL_ENTRY_SIZE 64
+
+/* dscp_tid_map - Default DSCP-TID mapping
+ *
+ * DSCP TID
+ * 000000 0
+ * 001000 1
+ * 010000 2
+ * 011000 3
+ * 100000 4
+ * 101000 5
+ * 110000 6
+ * 111000 7
+ */
+static const u8 dscp_tid_map[DSCP_TID_MAP_TBL_ENTRY_SIZE] = {
+ 0, 0, 0, 0, 0, 0, 0, 0,
+ 1, 1, 1, 1, 1, 1, 1, 1,
+ 2, 2, 2, 2, 2, 2, 2, 2,
+ 3, 3, 3, 3, 3, 3, 3, 3,
+ 4, 4, 4, 4, 4, 4, 4, 4,
+ 5, 5, 5, 5, 5, 5, 5, 5,
+ 6, 6, 6, 6, 6, 6, 6, 6,
+ 7, 7, 7, 7, 7, 7, 7, 7,
+};
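+
+/* Illustrative lookup: a packet marked DSCP 46 (EF, 0b101110) indexes
+ * dscp_tid_map[46] and is assigned TID 5, i.e. only the three high-order
+ * DSCP bits select the TID in this default table.
+ */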
+
+void ath11k_hal_tx_cmd_desc_setup(struct ath11k_base *ab, void *cmd,
+ struct hal_tx_info *ti)
+{
+ struct hal_tcl_data_cmd *tcl_cmd = (struct hal_tcl_data_cmd *)cmd;
+
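+ /* The DMA address is split across the descriptor: the low 32 bits of
+ * ti->paddr land in buf_addr_info.info0 and the remaining high bits
+ * (ti->paddr >> HAL_ADDR_MSB_REG_SHIFT) in info1, together with the
+ * return buffer manager id and the sw cookie (desc_id) used to match
+ * the frame on tx completion.
+ */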
+ tcl_cmd->buf_addr_info.info0 =
+ FIELD_PREP(BUFFER_ADDR_INFO0_ADDR, ti->paddr);
+ tcl_cmd->buf_addr_info.info1 =
+ FIELD_PREP(BUFFER_ADDR_INFO1_ADDR,
+ ((u64)ti->paddr >> HAL_ADDR_MSB_REG_SHIFT));
+ tcl_cmd->buf_addr_info.info1 |=
+ FIELD_PREP(BUFFER_ADDR_INFO1_RET_BUF_MGR, ti->rbm_id) |
+ FIELD_PREP(BUFFER_ADDR_INFO1_SW_COOKIE, ti->desc_id);
+
+ tcl_cmd->info0 =
+ FIELD_PREP(HAL_TCL_DATA_CMD_INFO0_DESC_TYPE, ti->type) |
+ FIELD_PREP(HAL_TCL_DATA_CMD_INFO0_ENCAP_TYPE, ti->encap_type) |
+ FIELD_PREP(HAL_TCL_DATA_CMD_INFO0_ENCRYPT_TYPE,
+ ti->encrypt_type) |
+ FIELD_PREP(HAL_TCL_DATA_CMD_INFO0_SEARCH_TYPE,
+ ti->search_type) |
+ FIELD_PREP(HAL_TCL_DATA_CMD_INFO0_ADDR_EN,
+ ti->addr_search_flags) |
+ FIELD_PREP(HAL_TCL_DATA_CMD_INFO0_CMD_NUM,
+ ti->meta_data_flags);
+
+ tcl_cmd->info1 = ti->flags0 |
+ FIELD_PREP(HAL_TCL_DATA_CMD_INFO1_DATA_LEN, ti->data_len) |
+ FIELD_PREP(HAL_TCL_DATA_CMD_INFO1_PKT_OFFSET, ti->pkt_offset);
+
+ tcl_cmd->info2 = ti->flags1 |
+ FIELD_PREP(HAL_TCL_DATA_CMD_INFO2_TID, ti->tid) |
+ FIELD_PREP(HAL_TCL_DATA_CMD_INFO2_LMAC_ID, ti->lmac_id);
+
+ tcl_cmd->info3 = FIELD_PREP(HAL_TCL_DATA_CMD_INFO3_DSCP_TID_TABLE_IDX,
+ ti->dscp_tid_tbl_idx) |
+ FIELD_PREP(HAL_TCL_DATA_CMD_INFO3_SEARCH_INDEX,
+ ti->bss_ast_idx) |
+ FIELD_PREP(HAL_TCL_DATA_CMD_INFO3_CACHE_SET_NUM,
+ ti->bss_ast_hash);
+ tcl_cmd->info4 = 0;
+
+ if (ti->enable_mesh)
+ ab->hw_params.hw_ops->tx_mesh_enable(ab, tcl_cmd);
+}
+
+void ath11k_hal_tx_set_dscp_tid_map(struct ath11k_base *ab, int id)
+{
+ u32 ctrl_reg_val;
+ u32 addr;
+ u8 hw_map_val[HAL_DSCP_TID_TBL_SIZE];
+ int i;
+ u32 value;
+ int cnt = 0;
+
+ ctrl_reg_val = ath11k_hif_read32(ab, HAL_SEQ_WCSS_UMAC_TCL_REG +
+ HAL_TCL1_RING_CMN_CTRL_REG);
+ /* Enable read/write access */
+ ctrl_reg_val |= HAL_TCL1_RING_CMN_CTRL_DSCP_TID_MAP_PROG_EN;
+ ath11k_hif_write32(ab, HAL_SEQ_WCSS_UMAC_TCL_REG +
+ HAL_TCL1_RING_CMN_CTRL_REG, ctrl_reg_val);
+
+ addr = HAL_SEQ_WCSS_UMAC_TCL_REG + HAL_TCL1_RING_DSCP_TID_MAP +
+ (4 * id * (HAL_DSCP_TID_TBL_SIZE / 4));
+
+ /* Each DSCP-TID mapping occupies three bits, so eight mappings
+ * (24 bits, i.e. three bytes) are configured per iteration.
+ */
+ for (i = 0; i < DSCP_TID_MAP_TBL_ENTRY_SIZE; i += 8) {
+ value = FIELD_PREP(HAL_TCL1_RING_FIELD_DSCP_TID_MAP0,
+ dscp_tid_map[i]) |
+ FIELD_PREP(HAL_TCL1_RING_FIELD_DSCP_TID_MAP1,
+ dscp_tid_map[i + 1]) |
+ FIELD_PREP(HAL_TCL1_RING_FIELD_DSCP_TID_MAP2,
+ dscp_tid_map[i + 2]) |
+ FIELD_PREP(HAL_TCL1_RING_FIELD_DSCP_TID_MAP3,
+ dscp_tid_map[i + 3]) |
+ FIELD_PREP(HAL_TCL1_RING_FIELD_DSCP_TID_MAP4,
+ dscp_tid_map[i + 4]) |
+ FIELD_PREP(HAL_TCL1_RING_FIELD_DSCP_TID_MAP5,
+ dscp_tid_map[i + 5]) |
+ FIELD_PREP(HAL_TCL1_RING_FIELD_DSCP_TID_MAP6,
+ dscp_tid_map[i + 6]) |
+ FIELD_PREP(HAL_TCL1_RING_FIELD_DSCP_TID_MAP7,
+ dscp_tid_map[i + 7]);
+ memcpy(&hw_map_val[cnt], (u8 *)&value, 3);
+ cnt += 3;
+ }
+
+ for (i = 0; i < HAL_DSCP_TID_TBL_SIZE; i += 4) {
+ ath11k_hif_write32(ab, addr, *(u32 *)&hw_map_val[i]);
+ addr += 4;
+ }
+
+ /* Disable read/write access */
+ ctrl_reg_val = ath11k_hif_read32(ab, HAL_SEQ_WCSS_UMAC_TCL_REG +
+ HAL_TCL1_RING_CMN_CTRL_REG);
+ ctrl_reg_val &= ~HAL_TCL1_RING_CMN_CTRL_DSCP_TID_MAP_PROG_EN;
+ ath11k_hif_write32(ab, HAL_SEQ_WCSS_UMAC_TCL_REG +
+ HAL_TCL1_RING_CMN_CTRL_REG,
+ ctrl_reg_val);
+}
+
+void ath11k_hal_tx_init_data_ring(struct ath11k_base *ab, struct hal_srng *srng)
+{
+ struct hal_srng_params params;
+ struct hal_tlv_hdr *tlv;
+ int i, entry_size;
+ u8 *desc;
+
+ memset(&params, 0, sizeof(params));
+
+ entry_size = ath11k_hal_srng_get_entrysize(ab, HAL_TCL_DATA);
+ ath11k_hal_srng_get_params(ab, srng, &params);
+ desc = (u8 *)params.ring_base_vaddr;
+
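+ /* Pre-stamp every ring entry with a TCL_DATA_CMD TLV header so that
+ * the hot tx path only has to fill in the command body.
+ */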
+ for (i = 0; i < params.num_entries; i++) {
+ tlv = (struct hal_tlv_hdr *)desc;
+ tlv->tl = FIELD_PREP(HAL_TLV_HDR_TAG, HAL_TCL_DATA_CMD) |
+ FIELD_PREP(HAL_TLV_HDR_LEN,
+ sizeof(struct hal_tcl_data_cmd));
+ desc += entry_size;
+ }
+}
diff --git a/drivers/net/wireless/ath/ath11k/hal_tx.h b/drivers/net/wireless/ath/ath11k/hal_tx.h
new file mode 100644
index 0000000000..c5e88364af
--- /dev/null
+++ b/drivers/net/wireless/ath/ath11k/hal_tx.h
@@ -0,0 +1,74 @@
+/* SPDX-License-Identifier: BSD-3-Clause-Clear */
+/*
+ * Copyright (c) 2018-2019 The Linux Foundation. All rights reserved.
+ * Copyright (c) 2022 Qualcomm Innovation Center, Inc. All rights reserved.
+ */
+
+#ifndef ATH11K_HAL_TX_H
+#define ATH11K_HAL_TX_H
+
+#include "hal_desc.h"
+#include "core.h"
+
+#define HAL_TX_ADDRX_EN 1
+#define HAL_TX_ADDRY_EN 2
+
+#define HAL_TX_ADDR_SEARCH_DEFAULT 0
+#define HAL_TX_ADDR_SEARCH_INDEX 1
+
+struct hal_tx_info {
+ u16 meta_data_flags; /* %HAL_TCL_DATA_CMD_INFO0_META_ */
+ u8 ring_id;
+ u32 desc_id;
+ enum hal_tcl_desc_type type;
+ enum hal_tcl_encap_type encap_type;
+ dma_addr_t paddr;
+ u32 data_len;
+ u32 pkt_offset;
+ enum hal_encrypt_type encrypt_type;
+ u32 flags0; /* %HAL_TCL_DATA_CMD_INFO1_ */
+ u32 flags1; /* %HAL_TCL_DATA_CMD_INFO2_ */
+ u16 addr_search_flags; /* %HAL_TCL_DATA_CMD_INFO0_ADDR(X/Y)_ */
+ u16 bss_ast_hash;
+ u16 bss_ast_idx;
+ u8 tid;
+ u8 search_type; /* %HAL_TX_ADDR_SEARCH_ */
+ u8 lmac_id;
+ u8 dscp_tid_tbl_idx;
+ bool enable_mesh;
+ u8 rbm_id;
+};
+
+/* TODO: Check if the actual desc macros can be used instead */
+#define HAL_TX_STATUS_FLAGS_FIRST_MSDU BIT(0)
+#define HAL_TX_STATUS_FLAGS_LAST_MSDU BIT(1)
+#define HAL_TX_STATUS_FLAGS_MSDU_IN_AMSDU BIT(2)
+#define HAL_TX_STATUS_FLAGS_RATE_STATS_VALID BIT(3)
+#define HAL_TX_STATUS_FLAGS_RATE_LDPC BIT(4)
+#define HAL_TX_STATUS_FLAGS_RATE_STBC BIT(5)
+#define HAL_TX_STATUS_FLAGS_OFDMA BIT(6)
+
+#define HAL_TX_STATUS_DESC_LEN sizeof(struct hal_wbm_release_ring)
+
+/* Tx status parsed from srng desc */
+struct hal_tx_status {
+ enum hal_wbm_rel_src_module buf_rel_source;
+ enum hal_wbm_tqm_rel_reason status;
+ u8 ack_rssi;
+ u32 flags; /* %HAL_TX_STATUS_FLAGS_ */
+ u32 ppdu_id;
+ u8 try_cnt;
+ u8 tid;
+ u16 peer_id;
+ u32 rate_stats;
+};
+
+void ath11k_hal_tx_cmd_desc_setup(struct ath11k_base *ab, void *cmd,
+ struct hal_tx_info *ti);
+void ath11k_hal_tx_set_dscp_tid_map(struct ath11k_base *ab, int id);
+int ath11k_hal_reo_cmd_send(struct ath11k_base *ab, struct hal_srng *srng,
+ enum hal_reo_cmd_type type,
+ struct ath11k_hal_reo_cmd *cmd);
+void ath11k_hal_tx_init_data_ring(struct ath11k_base *ab,
+ struct hal_srng *srng);
+#endif
diff --git a/drivers/net/wireless/ath/ath11k/hif.h b/drivers/net/wireless/ath/ath11k/hif.h
new file mode 100644
index 0000000000..659b80d2ab
--- /dev/null
+++ b/drivers/net/wireless/ath/ath11k/hif.h
@@ -0,0 +1,148 @@
+/* SPDX-License-Identifier: BSD-3-Clause-Clear */
+/*
+ * Copyright (c) 2019-2020 The Linux Foundation. All rights reserved.
+ */
+
+#ifndef _HIF_H_
+#define _HIF_H_
+
+#include "core.h"
+
+struct ath11k_hif_ops {
+ u32 (*read32)(struct ath11k_base *sc, u32 address);
+ void (*write32)(struct ath11k_base *sc, u32 address, u32 data);
+ int (*read)(struct ath11k_base *ab, void *buf, u32 start, u32 end);
+ void (*irq_enable)(struct ath11k_base *sc);
+ void (*irq_disable)(struct ath11k_base *sc);
+ int (*start)(struct ath11k_base *sc);
+ void (*stop)(struct ath11k_base *sc);
+ int (*power_up)(struct ath11k_base *sc);
+ void (*power_down)(struct ath11k_base *sc);
+ int (*suspend)(struct ath11k_base *ab);
+ int (*resume)(struct ath11k_base *ab);
+ int (*map_service_to_pipe)(struct ath11k_base *sc, u16 service_id,
+ u8 *ul_pipe, u8 *dl_pipe);
+ int (*get_user_msi_vector)(struct ath11k_base *ab, char *user_name,
+ int *num_vectors, u32 *user_base_data,
+ u32 *base_vector);
+ void (*get_msi_address)(struct ath11k_base *ab, u32 *msi_addr_lo,
+ u32 *msi_addr_hi);
+ void (*ce_irq_enable)(struct ath11k_base *ab);
+ void (*ce_irq_disable)(struct ath11k_base *ab);
+ void (*get_ce_msi_idx)(struct ath11k_base *ab, u32 ce_id, u32 *msi_idx);
+};
+
+static inline void ath11k_hif_ce_irq_enable(struct ath11k_base *ab)
+{
+ if (ab->hif.ops->ce_irq_enable)
+ ab->hif.ops->ce_irq_enable(ab);
+}
+
+static inline void ath11k_hif_ce_irq_disable(struct ath11k_base *ab)
+{
+ if (ab->hif.ops->ce_irq_disable)
+ ab->hif.ops->ce_irq_disable(ab);
+}
+
+static inline int ath11k_hif_start(struct ath11k_base *sc)
+{
+ return sc->hif.ops->start(sc);
+}
+
+static inline void ath11k_hif_stop(struct ath11k_base *sc)
+{
+ sc->hif.ops->stop(sc);
+}
+
+static inline void ath11k_hif_irq_enable(struct ath11k_base *sc)
+{
+ sc->hif.ops->irq_enable(sc);
+}
+
+static inline void ath11k_hif_irq_disable(struct ath11k_base *sc)
+{
+ sc->hif.ops->irq_disable(sc);
+}
+
+static inline int ath11k_hif_power_up(struct ath11k_base *sc)
+{
+ return sc->hif.ops->power_up(sc);
+}
+
+static inline void ath11k_hif_power_down(struct ath11k_base *sc)
+{
+ sc->hif.ops->power_down(sc);
+}
+
+static inline int ath11k_hif_suspend(struct ath11k_base *ab)
+{
+ if (ab->hif.ops->suspend)
+ return ab->hif.ops->suspend(ab);
+
+ return 0;
+}
+
+static inline int ath11k_hif_resume(struct ath11k_base *ab)
+{
+ if (ab->hif.ops->resume)
+ return ab->hif.ops->resume(ab);
+
+ return 0;
+}
+
+static inline u32 ath11k_hif_read32(struct ath11k_base *sc, u32 address)
+{
+ return sc->hif.ops->read32(sc, address);
+}
+
+static inline void ath11k_hif_write32(struct ath11k_base *sc, u32 address, u32 data)
+{
+ sc->hif.ops->write32(sc, address, data);
+}
+
+static inline int ath11k_hif_read(struct ath11k_base *ab, void *buf,
+ u32 start, u32 end)
+{
+ if (!ab->hif.ops->read)
+ return -EOPNOTSUPP;
+
+ return ab->hif.ops->read(ab, buf, start, end);
+}
+
+static inline int ath11k_hif_map_service_to_pipe(struct ath11k_base *sc, u16 service_id,
+ u8 *ul_pipe, u8 *dl_pipe)
+{
+ return sc->hif.ops->map_service_to_pipe(sc, service_id, ul_pipe, dl_pipe);
+}
+
+static inline int ath11k_get_user_msi_vector(struct ath11k_base *ab, char *user_name,
+ int *num_vectors, u32 *user_base_data,
+ u32 *base_vector)
+{
+ if (!ab->hif.ops->get_user_msi_vector)
+ return -EOPNOTSUPP;
+
+ return ab->hif.ops->get_user_msi_vector(ab, user_name, num_vectors,
+ user_base_data,
+ base_vector);
+}
+
+static inline void ath11k_get_msi_address(struct ath11k_base *ab, u32 *msi_addr_lo,
+ u32 *msi_addr_hi)
+{
+ if (!ab->hif.ops->get_msi_address)
+ return;
+
+ ab->hif.ops->get_msi_address(ab, msi_addr_lo, msi_addr_hi);
+}
+
+static inline void ath11k_get_ce_msi_idx(struct ath11k_base *ab, u32 ce_id,
+ u32 *msi_data_idx)
+{
+ if (ab->hif.ops->get_ce_msi_idx)
+ ab->hif.ops->get_ce_msi_idx(ab, ce_id, msi_data_idx);
+ else
+ *msi_data_idx = ce_id;
+}
+
+#endif /* _HIF_H_ */
diff --git a/drivers/net/wireless/ath/ath11k/htc.c b/drivers/net/wireless/ath/ath11k/htc.c
new file mode 100644
index 0000000000..2c2e425c86
--- /dev/null
+++ b/drivers/net/wireless/ath/ath11k/htc.c
@@ -0,0 +1,844 @@
+// SPDX-License-Identifier: BSD-3-Clause-Clear
+/*
+ * Copyright (c) 2018-2019 The Linux Foundation. All rights reserved.
+ */
+#include <linux/skbuff.h>
+#include <linux/ctype.h>
+
+#include "debug.h"
+#include "hif.h"
+
+struct sk_buff *ath11k_htc_alloc_skb(struct ath11k_base *ab, int size)
+{
+ struct sk_buff *skb;
+
+ skb = dev_alloc_skb(size + sizeof(struct ath11k_htc_hdr));
+ if (!skb)
+ return NULL;
+
+ skb_reserve(skb, sizeof(struct ath11k_htc_hdr));
+
+ /* FW/HTC requires 4-byte aligned streams */
+ if (!IS_ALIGNED((unsigned long)skb->data, 4))
+ ath11k_warn(ab, "Unaligned HTC tx skb\n");
+
+ return skb;
+}
+
+static void ath11k_htc_control_tx_complete(struct ath11k_base *ab,
+ struct sk_buff *skb)
+{
+ kfree_skb(skb);
+}
+
+static struct sk_buff *ath11k_htc_build_tx_ctrl_skb(void *ab)
+{
+ struct sk_buff *skb;
+ struct ath11k_skb_cb *skb_cb;
+
+ skb = dev_alloc_skb(ATH11K_HTC_CONTROL_BUFFER_SIZE);
+ if (!skb)
+ return NULL;
+
+ skb_reserve(skb, sizeof(struct ath11k_htc_hdr));
+ WARN_ON_ONCE(!IS_ALIGNED((unsigned long)skb->data, 4));
+
+ skb_cb = ATH11K_SKB_CB(skb);
+ memset(skb_cb, 0, sizeof(*skb_cb));
+
+ return skb;
+}
+
+static void ath11k_htc_prepare_tx_skb(struct ath11k_htc_ep *ep,
+ struct sk_buff *skb)
+{
+ struct ath11k_htc_hdr *hdr;
+
+ hdr = (struct ath11k_htc_hdr *)skb->data;
+
+ memset(hdr, 0, sizeof(*hdr));
+ hdr->htc_info = FIELD_PREP(HTC_HDR_ENDPOINTID, ep->eid) |
+ FIELD_PREP(HTC_HDR_PAYLOADLEN,
+ (skb->len - sizeof(*hdr)));
+
+ if (ep->tx_credit_flow_enabled)
+ hdr->htc_info |= FIELD_PREP(HTC_HDR_FLAGS,
+ ATH11K_HTC_FLAG_NEED_CREDIT_UPDATE);
+
+ spin_lock_bh(&ep->htc->tx_lock);
+ hdr->ctrl_info = FIELD_PREP(HTC_HDR_CONTROLBYTES1, ep->seq_no++);
+ spin_unlock_bh(&ep->htc->tx_lock);
+}
+
+int ath11k_htc_send(struct ath11k_htc *htc,
+ enum ath11k_htc_ep_id eid,
+ struct sk_buff *skb)
+{
+ struct ath11k_htc_ep *ep = &htc->endpoint[eid];
+ struct ath11k_skb_cb *skb_cb = ATH11K_SKB_CB(skb);
+ struct device *dev = htc->ab->dev;
+ struct ath11k_base *ab = htc->ab;
+ int credits = 0;
+ int ret;
+ bool credit_flow_enabled = (ab->hw_params.credit_flow &&
+ ep->tx_credit_flow_enabled);
+
+ if (eid >= ATH11K_HTC_EP_COUNT) {
+ ath11k_warn(ab, "Invalid endpoint id: %d\n", eid);
+ return -ENOENT;
+ }
+
+ skb_push(skb, sizeof(struct ath11k_htc_hdr));
+
+ if (credit_flow_enabled) {
+ credits = DIV_ROUND_UP(skb->len, htc->target_credit_size);
+ spin_lock_bh(&htc->tx_lock);
+ if (ep->tx_credits < credits) {
+ ath11k_dbg(ab, ATH11K_DBG_HTC,
+ "ep %d insufficient credits required %d total %d\n",
+ eid, credits, ep->tx_credits);
+ spin_unlock_bh(&htc->tx_lock);
+ ret = -EAGAIN;
+ goto err_pull;
+ }
+ ep->tx_credits -= credits;
+ ath11k_dbg(ab, ATH11K_DBG_HTC,
+ "ep %d credits consumed %d total %d\n",
+ eid, credits, ep->tx_credits);
+ spin_unlock_bh(&htc->tx_lock);
+ }
+
+ ath11k_htc_prepare_tx_skb(ep, skb);
+
+ skb_cb->eid = eid;
+ skb_cb->paddr = dma_map_single(dev, skb->data, skb->len, DMA_TO_DEVICE);
+ ret = dma_mapping_error(dev, skb_cb->paddr);
+ if (ret) {
+ ret = -EIO;
+ goto err_credits;
+ }
+
+ ath11k_dbg(ab, ATH11K_DBG_HTC, "tx skb %p eid %d paddr %pad\n",
+ skb, skb_cb->eid, &skb_cb->paddr);
+
+ ret = ath11k_ce_send(htc->ab, skb, ep->ul_pipe_id, ep->eid);
+ if (ret)
+ goto err_unmap;
+
+ return 0;
+
+err_unmap:
+ dma_unmap_single(dev, skb_cb->paddr, skb->len, DMA_TO_DEVICE);
+err_credits:
+ if (credit_flow_enabled) {
+ spin_lock_bh(&htc->tx_lock);
+ ep->tx_credits += credits;
+ ath11k_dbg(ab, ATH11K_DBG_HTC,
+ "ep %d credits reverted %d total %d\n",
+ eid, credits, ep->tx_credits);
+ spin_unlock_bh(&htc->tx_lock);
+
+ if (ep->ep_ops.ep_tx_credits)
+ ep->ep_ops.ep_tx_credits(htc->ab);
+ }
+err_pull:
+ skb_pull(skb, sizeof(struct ath11k_htc_hdr));
+ return ret;
+}
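+
+/* Credit accounting sketch: with credit flow enabled, a frame of length
+ * len consumes DIV_ROUND_UP(len, target_credit_size) credits up front;
+ * credits are returned either via a CREDITS trailer record (see
+ * ath11k_htc_process_credit_report()) or, on a send failure, via the
+ * rollback in the error path above.
+ */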
+
+static void
+ath11k_htc_process_credit_report(struct ath11k_htc *htc,
+ const struct ath11k_htc_credit_report *report,
+ int len,
+ enum ath11k_htc_ep_id eid)
+{
+ struct ath11k_base *ab = htc->ab;
+ struct ath11k_htc_ep *ep;
+ int i, n_reports;
+
+ if (len % sizeof(*report))
+ ath11k_warn(ab, "Uneven credit report len %d", len);
+
+ n_reports = len / sizeof(*report);
+
+ spin_lock_bh(&htc->tx_lock);
+ for (i = 0; i < n_reports; i++, report++) {
+ if (report->eid >= ATH11K_HTC_EP_COUNT)
+ break;
+
+ ep = &htc->endpoint[report->eid];
+ ep->tx_credits += report->credits;
+
+ ath11k_dbg(ab, ATH11K_DBG_HTC, "ep %d credits got %d total %d\n",
+ report->eid, report->credits, ep->tx_credits);
+
+ if (ep->ep_ops.ep_tx_credits) {
+ spin_unlock_bh(&htc->tx_lock);
+ ep->ep_ops.ep_tx_credits(htc->ab);
+ spin_lock_bh(&htc->tx_lock);
+ }
+ }
+ spin_unlock_bh(&htc->tx_lock);
+}
+
+static int ath11k_htc_process_trailer(struct ath11k_htc *htc,
+ u8 *buffer,
+ int length,
+ enum ath11k_htc_ep_id src_eid)
+{
+ struct ath11k_base *ab = htc->ab;
+ int status = 0;
+ struct ath11k_htc_record *record;
+ size_t len;
+
+ while (length > 0) {
+ record = (struct ath11k_htc_record *)buffer;
+
+ if (length < sizeof(record->hdr)) {
+ status = -EINVAL;
+ break;
+ }
+
+ if (record->hdr.len > length) {
+ /* no room left in buffer for record */
+ ath11k_warn(ab, "Invalid record length: %d\n",
+ record->hdr.len);
+ status = -EINVAL;
+ break;
+ }
+
+ if (ab->hw_params.credit_flow) {
+ switch (record->hdr.id) {
+ case ATH11K_HTC_RECORD_CREDITS:
+ len = sizeof(struct ath11k_htc_credit_report);
+ if (record->hdr.len < len) {
+ ath11k_warn(ab, "Credit report too long\n");
+ status = -EINVAL;
+ break;
+ }
+ ath11k_htc_process_credit_report(htc,
+ record->credit_report,
+ record->hdr.len,
+ src_eid);
+ break;
+ default:
+ ath11k_warn(ab, "Unhandled record: id:%d length:%d\n",
+ record->hdr.id, record->hdr.len);
+ break;
+ }
+ }
+
+ if (status)
+ break;
+
+ /* multiple records may be present in a trailer */
+ buffer += sizeof(record->hdr) + record->hdr.len;
+ length -= sizeof(record->hdr) + record->hdr.len;
+ }
+
+ return status;
+}
+
+static void ath11k_htc_suspend_complete(struct ath11k_base *ab, bool ack)
+{
+ ath11k_dbg(ab, ATH11K_DBG_BOOT, "suspend complete %d\n", ack);
+
+ if (ack)
+ set_bit(ATH11K_FLAG_HTC_SUSPEND_COMPLETE, &ab->dev_flags);
+ else
+ clear_bit(ATH11K_FLAG_HTC_SUSPEND_COMPLETE, &ab->dev_flags);
+
+ complete(&ab->htc_suspend);
+}
+
+void ath11k_htc_tx_completion_handler(struct ath11k_base *ab,
+ struct sk_buff *skb)
+{
+ struct ath11k_htc *htc = &ab->htc;
+ struct ath11k_htc_ep *ep;
+ void (*ep_tx_complete)(struct ath11k_base *, struct sk_buff *);
+ u8 eid;
+
+ eid = ATH11K_SKB_CB(skb)->eid;
+ if (eid >= ATH11K_HTC_EP_COUNT) {
+ dev_kfree_skb_any(skb);
+ return;
+ }
+
+ ep = &htc->endpoint[eid];
+ spin_lock_bh(&htc->tx_lock);
+ ep_tx_complete = ep->ep_ops.ep_tx_complete;
+ spin_unlock_bh(&htc->tx_lock);
+ if (!ep_tx_complete) {
+ dev_kfree_skb_any(skb);
+ return;
+ }
+ ep_tx_complete(htc->ab, skb);
+}
+
+static void ath11k_htc_wakeup_from_suspend(struct ath11k_base *ab)
+{
+ ath11k_dbg(ab, ATH11K_DBG_BOOT, "wakeup from suspend is received\n");
+}
+
+void ath11k_htc_rx_completion_handler(struct ath11k_base *ab,
+ struct sk_buff *skb)
+{
+ int status = 0;
+ struct ath11k_htc *htc = &ab->htc;
+ struct ath11k_htc_hdr *hdr;
+ struct ath11k_htc_ep *ep;
+ u16 payload_len;
+ u32 message_id, trailer_len = 0;
+ size_t min_len;
+ u8 eid;
+ bool trailer_present;
+
+ hdr = (struct ath11k_htc_hdr *)skb->data;
+ skb_pull(skb, sizeof(*hdr));
+
+ eid = FIELD_GET(HTC_HDR_ENDPOINTID, hdr->htc_info);
+
+ if (eid >= ATH11K_HTC_EP_COUNT) {
+ ath11k_warn(ab, "HTC Rx: invalid eid %d\n", eid);
+ goto out;
+ }
+
+ ep = &htc->endpoint[eid];
+
+ payload_len = FIELD_GET(HTC_HDR_PAYLOADLEN, hdr->htc_info);
+
+ if (payload_len + sizeof(*hdr) > ATH11K_HTC_MAX_LEN) {
+ ath11k_warn(ab, "HTC rx frame too long, len: %zu\n",
+ payload_len + sizeof(*hdr));
+ goto out;
+ }
+
+ if (skb->len < payload_len) {
+ ath11k_warn(ab, "HTC Rx: insufficient length, got %d, expected %d\n",
+ skb->len, payload_len);
+ goto out;
+ }
+
+ /* get flags to check for trailer */
+ trailer_present = (FIELD_GET(HTC_HDR_FLAGS, hdr->htc_info)) &
+ ATH11K_HTC_FLAG_TRAILER_PRESENT;
+
+ ath11k_dbg(ab, ATH11K_DBG_HTC, "rx ep %d skb %p trailer_present %d\n",
+ eid, skb, trailer_present);
+
+ if (trailer_present) {
+ u8 *trailer;
+
+ trailer_len = FIELD_GET(HTC_HDR_CONTROLBYTES0, hdr->ctrl_info);
+ min_len = sizeof(struct ath11k_htc_record_hdr);
+
+ if ((trailer_len < min_len) ||
+ (trailer_len > payload_len)) {
+ ath11k_warn(ab, "Invalid trailer length: %d\n",
+ trailer_len);
+ goto out;
+ }
+
+ trailer = (u8 *)hdr;
+ trailer += sizeof(*hdr);
+ trailer += payload_len;
+ trailer -= trailer_len;
+ status = ath11k_htc_process_trailer(htc, trailer,
+ trailer_len, eid);
+ if (status)
+ goto out;
+
+ skb_trim(skb, skb->len - trailer_len);
+ }
+
+ if (trailer_len >= payload_len)
+ /* zero length packet with trailer data, just drop these */
+ goto out;
+
+ if (eid == ATH11K_HTC_EP_0) {
+ struct ath11k_htc_msg *msg = (struct ath11k_htc_msg *)skb->data;
+
+ message_id = FIELD_GET(HTC_MSG_MESSAGEID, msg->msg_svc_id);
+
+ ath11k_dbg(ab, ATH11K_DBG_HTC, "rx ep %d skb %p message_id %d\n",
+ eid, skb, message_id);
+
+ switch (message_id) {
+ case ATH11K_HTC_MSG_READY_ID:
+ case ATH11K_HTC_MSG_CONNECT_SERVICE_RESP_ID:
+ /* handle HTC control message */
+ if (completion_done(&htc->ctl_resp)) {
+ /* this is a fatal error, target should not be
+ * sending unsolicited messages on the ep 0
+ */
+ ath11k_warn(ab, "HTC rx ctrl still processing\n");
+ complete(&htc->ctl_resp);
+ goto out;
+ }
+
+ htc->control_resp_len =
+ min_t(int, skb->len,
+ ATH11K_HTC_MAX_CTRL_MSG_LEN);
+
+ memcpy(htc->control_resp_buffer, skb->data,
+ htc->control_resp_len);
+
+ complete(&htc->ctl_resp);
+ break;
+ case ATH11K_HTC_MSG_SEND_SUSPEND_COMPLETE:
+ ath11k_htc_suspend_complete(ab, true);
+ break;
+ case ATH11K_HTC_MSG_NACK_SUSPEND:
+ ath11k_htc_suspend_complete(ab, false);
+ break;
+ case ATH11K_HTC_MSG_WAKEUP_FROM_SUSPEND_ID:
+ ath11k_htc_wakeup_from_suspend(ab);
+ break;
+ default:
+ ath11k_warn(ab, "ignoring unsolicited htc ep0 event %ld\n",
+ FIELD_GET(HTC_MSG_MESSAGEID, msg->msg_svc_id));
+ break;
+ }
+ goto out;
+ }
+
+ ep->ep_ops.ep_rx_complete(ab, skb);
+
+ /* poll tx completion for interrupt disabled CE's */
+ ath11k_ce_poll_send_completed(ab, ep->ul_pipe_id);
+
+ /* skb is now owned by the rx completion handler */
+ skb = NULL;
+out:
+ kfree_skb(skb);
+}
+
+static void ath11k_htc_control_rx_complete(struct ath11k_base *ab,
+ struct sk_buff *skb)
+{
+ /* This is unexpected. FW is not supposed to send regular rx on this
+ * endpoint.
+ */
+ ath11k_warn(ab, "unexpected htc rx\n");
+ kfree_skb(skb);
+}
+
+static const char *htc_service_name(enum ath11k_htc_svc_id id)
+{
+ switch (id) {
+ case ATH11K_HTC_SVC_ID_RESERVED:
+ return "Reserved";
+ case ATH11K_HTC_SVC_ID_RSVD_CTRL:
+ return "Control";
+ case ATH11K_HTC_SVC_ID_WMI_CONTROL:
+ return "WMI";
+ case ATH11K_HTC_SVC_ID_WMI_DATA_BE:
+ return "DATA BE";
+ case ATH11K_HTC_SVC_ID_WMI_DATA_BK:
+ return "DATA BK";
+ case ATH11K_HTC_SVC_ID_WMI_DATA_VI:
+ return "DATA VI";
+ case ATH11K_HTC_SVC_ID_WMI_DATA_VO:
+ return "DATA VO";
+ case ATH11K_HTC_SVC_ID_WMI_CONTROL_MAC1:
+ return "WMI MAC1";
+ case ATH11K_HTC_SVC_ID_WMI_CONTROL_MAC2:
+ return "WMI MAC2";
+ case ATH11K_HTC_SVC_ID_NMI_CONTROL:
+ return "NMI Control";
+ case ATH11K_HTC_SVC_ID_NMI_DATA:
+ return "NMI Data";
+ case ATH11K_HTC_SVC_ID_HTT_DATA_MSG:
+ return "HTT Data";
+ case ATH11K_HTC_SVC_ID_TEST_RAW_STREAMS:
+ return "RAW";
+ case ATH11K_HTC_SVC_ID_IPA_TX:
+ return "IPA TX";
+ case ATH11K_HTC_SVC_ID_PKT_LOG:
+ return "PKT LOG";
+ }
+
+ return "Unknown";
+}
+
+static void ath11k_htc_reset_endpoint_states(struct ath11k_htc *htc)
+{
+ struct ath11k_htc_ep *ep;
+ int i;
+
+ for (i = ATH11K_HTC_EP_0; i < ATH11K_HTC_EP_COUNT; i++) {
+ ep = &htc->endpoint[i];
+ ep->service_id = ATH11K_HTC_SVC_ID_UNUSED;
+ ep->max_ep_message_len = 0;
+ ep->max_tx_queue_depth = 0;
+ ep->eid = i;
+ ep->htc = htc;
+ ep->tx_credit_flow_enabled = true;
+ }
+}
+
+static u8 ath11k_htc_get_credit_allocation(struct ath11k_htc *htc,
+ u16 service_id)
+{
+ u8 i, allocation = 0;
+
+ for (i = 0; i < ATH11K_HTC_MAX_SERVICE_ALLOC_ENTRIES; i++) {
+ if (htc->service_alloc_table[i].service_id == service_id) {
+ allocation =
+ htc->service_alloc_table[i].credit_allocation;
+ }
+ }
+
+ return allocation;
+}
+
+static int ath11k_htc_setup_target_buffer_assignments(struct ath11k_htc *htc)
+{
+ struct ath11k_htc_svc_tx_credits *serv_entry;
+ u32 svc_id[] = {
+ ATH11K_HTC_SVC_ID_WMI_CONTROL,
+ ATH11K_HTC_SVC_ID_WMI_CONTROL_MAC1,
+ ATH11K_HTC_SVC_ID_WMI_CONTROL_MAC2,
+ };
+ int i, credits;
+
+ credits = htc->total_transmit_credits;
+ serv_entry = htc->service_alloc_table;
+
+ if ((htc->wmi_ep_count == 0) ||
+ (htc->wmi_ep_count > ARRAY_SIZE(svc_id)))
+ return -EINVAL;
+
+ /* Divide credits among number of endpoints for WMI */
+ credits = credits / htc->wmi_ep_count;
+ for (i = 0; i < htc->wmi_ep_count; i++) {
+ serv_entry[i].service_id = svc_id[i];
+ serv_entry[i].credit_allocation = credits;
+ }
+
+ return 0;
+}
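+
+/* For example, with total_transmit_credits = 60 and wmi_ep_count = 3, each
+ * of the three WMI services (CONTROL, CONTROL_MAC1, CONTROL_MAC2) is
+ * allotted 20 credits.
+ */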
+
+int ath11k_htc_wait_target(struct ath11k_htc *htc)
+{
+ int i, status = 0;
+ struct ath11k_base *ab = htc->ab;
+ unsigned long time_left;
+ struct ath11k_htc_ready *ready;
+ u16 message_id;
+ u16 credit_count;
+ u16 credit_size;
+
+ time_left = wait_for_completion_timeout(&htc->ctl_resp,
+ ATH11K_HTC_WAIT_TIMEOUT_HZ);
+ if (!time_left) {
+ ath11k_warn(ab, "failed to receive control response completion, polling..\n");
+
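+ /* No interrupt may have been delivered yet this early in boot, so
+ * service each copy engine once by polling before waiting again.
+ */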
+ for (i = 0; i < ab->hw_params.ce_count; i++)
+ ath11k_ce_per_engine_service(htc->ab, i);
+
+ time_left =
+ wait_for_completion_timeout(&htc->ctl_resp,
+ ATH11K_HTC_WAIT_TIMEOUT_HZ);
+
+ if (!time_left)
+ status = -ETIMEDOUT;
+ }
+
+ if (status < 0) {
+ ath11k_warn(ab, "ctl_resp never came in (%d)\n", status);
+ return status;
+ }
+
+ if (htc->control_resp_len < sizeof(*ready)) {
+ ath11k_warn(ab, "Invalid HTC ready msg len:%d\n",
+ htc->control_resp_len);
+ return -ECOMM;
+ }
+
+ ready = (struct ath11k_htc_ready *)htc->control_resp_buffer;
+ message_id = FIELD_GET(HTC_MSG_MESSAGEID, ready->id_credit_count);
+ credit_count = FIELD_GET(HTC_READY_MSG_CREDITCOUNT,
+ ready->id_credit_count);
+ credit_size = FIELD_GET(HTC_READY_MSG_CREDITSIZE, ready->size_ep);
+
+ if (message_id != ATH11K_HTC_MSG_READY_ID) {
+ ath11k_warn(ab, "Invalid HTC ready msg: 0x%x\n", message_id);
+ return -ECOMM;
+ }
+
+ htc->total_transmit_credits = credit_count;
+ htc->target_credit_size = credit_size;
+
+ ath11k_dbg(ab, ATH11K_DBG_HTC,
+ "target ready total_transmit_credits %d target_credit_size %d\n",
+ htc->total_transmit_credits, htc->target_credit_size);
+
+ if ((htc->total_transmit_credits == 0) ||
+ (htc->target_credit_size == 0)) {
+ ath11k_warn(ab, "Invalid credit size received\n");
+ return -ECOMM;
+ }
+
+ /* For QCA6390, wmi endpoint uses 1 credit to avoid
+ * back-to-back write.
+ */
+ if (ab->hw_params.supports_shadow_regs)
+ htc->total_transmit_credits = 1;
+
+ ath11k_htc_setup_target_buffer_assignments(htc);
+
+ return 0;
+}
+
+int ath11k_htc_connect_service(struct ath11k_htc *htc,
+ struct ath11k_htc_svc_conn_req *conn_req,
+ struct ath11k_htc_svc_conn_resp *conn_resp)
+{
+ struct ath11k_base *ab = htc->ab;
+ struct ath11k_htc_conn_svc *req_msg;
+ struct ath11k_htc_conn_svc_resp resp_msg_dummy;
+ struct ath11k_htc_conn_svc_resp *resp_msg = &resp_msg_dummy;
+ enum ath11k_htc_ep_id assigned_eid = ATH11K_HTC_EP_COUNT;
+ struct ath11k_htc_ep *ep;
+ struct sk_buff *skb;
+ unsigned int max_msg_size = 0;
+ int length, status;
+ unsigned long time_left;
+ bool disable_credit_flow_ctrl = false;
+ u16 message_id, service_id, flags = 0;
+ u8 tx_alloc = 0;
+
+ /* special case for HTC pseudo control service */
+ if (conn_req->service_id == ATH11K_HTC_SVC_ID_RSVD_CTRL) {
+ disable_credit_flow_ctrl = true;
+ assigned_eid = ATH11K_HTC_EP_0;
+ max_msg_size = ATH11K_HTC_MAX_CTRL_MSG_LEN;
+ memset(&resp_msg_dummy, 0, sizeof(resp_msg_dummy));
+ goto setup;
+ }
+
+ tx_alloc = ath11k_htc_get_credit_allocation(htc,
+ conn_req->service_id);
+ if (!tx_alloc)
+ ath11k_dbg(ab, ATH11K_DBG_BOOT,
+ "htc service %s does not allocate target credits\n",
+ htc_service_name(conn_req->service_id));
+
+ skb = ath11k_htc_build_tx_ctrl_skb(htc->ab);
+ if (!skb) {
+ ath11k_warn(ab, "Failed to allocate HTC packet\n");
+ return -ENOMEM;
+ }
+
+ length = sizeof(*req_msg);
+ skb_put(skb, length);
+ memset(skb->data, 0, length);
+
+ req_msg = (struct ath11k_htc_conn_svc *)skb->data;
+ req_msg->msg_svc_id = FIELD_PREP(HTC_MSG_MESSAGEID,
+ ATH11K_HTC_MSG_CONNECT_SERVICE_ID);
+
+ flags |= FIELD_PREP(ATH11K_HTC_CONN_FLAGS_RECV_ALLOC, tx_alloc);
+
+ /* Only enable credit flow control for WMI ctrl service */
+ if (!(conn_req->service_id == ATH11K_HTC_SVC_ID_WMI_CONTROL ||
+ conn_req->service_id == ATH11K_HTC_SVC_ID_WMI_CONTROL_MAC1 ||
+ conn_req->service_id == ATH11K_HTC_SVC_ID_WMI_CONTROL_MAC2)) {
+ flags |= ATH11K_HTC_CONN_FLAGS_DISABLE_CREDIT_FLOW_CTRL;
+ disable_credit_flow_ctrl = true;
+ }
+
+ if (!ab->hw_params.credit_flow) {
+ flags |= ATH11K_HTC_CONN_FLAGS_DISABLE_CREDIT_FLOW_CTRL;
+ disable_credit_flow_ctrl = true;
+ }
+
+ req_msg->flags_len = FIELD_PREP(HTC_SVC_MSG_CONNECTIONFLAGS, flags);
+ req_msg->msg_svc_id |= FIELD_PREP(HTC_SVC_MSG_SERVICE_ID,
+ conn_req->service_id);
+
+ reinit_completion(&htc->ctl_resp);
+
+ status = ath11k_htc_send(htc, ATH11K_HTC_EP_0, skb);
+ if (status) {
+ kfree_skb(skb);
+ return status;
+ }
+
+ /* wait for response */
+ time_left = wait_for_completion_timeout(&htc->ctl_resp,
+ ATH11K_HTC_CONN_SVC_TIMEOUT_HZ);
+ if (!time_left) {
+ ath11k_err(ab, "Service connect timeout\n");
+ return -ETIMEDOUT;
+ }
+
+ /* we controlled the buffer creation, it's aligned */
+ resp_msg = (struct ath11k_htc_conn_svc_resp *)htc->control_resp_buffer;
+ message_id = FIELD_GET(HTC_MSG_MESSAGEID, resp_msg->msg_svc_id);
+ service_id = FIELD_GET(HTC_SVC_RESP_MSG_SERVICEID,
+ resp_msg->msg_svc_id);
+
+ if ((message_id != ATH11K_HTC_MSG_CONNECT_SERVICE_RESP_ID) ||
+ (htc->control_resp_len < sizeof(*resp_msg))) {
+ ath11k_err(ab, "Invalid resp message ID 0x%x", message_id);
+ return -EPROTO;
+ }
+
+ ath11k_dbg(ab, ATH11K_DBG_HTC,
+ "service %s connect response status 0x%lx assigned ep 0x%lx\n",
+ htc_service_name(service_id),
+ FIELD_GET(HTC_SVC_RESP_MSG_STATUS, resp_msg->flags_len),
+ FIELD_GET(HTC_SVC_RESP_MSG_ENDPOINTID, resp_msg->flags_len));
+
+ conn_resp->connect_resp_code = FIELD_GET(HTC_SVC_RESP_MSG_STATUS,
+ resp_msg->flags_len);
+
+ /* check response status */
+ if (conn_resp->connect_resp_code != ATH11K_HTC_CONN_SVC_STATUS_SUCCESS) {
+ ath11k_err(ab, "HTC Service %s connect request failed: 0x%x)\n",
+ htc_service_name(service_id),
+ conn_resp->connect_resp_code);
+ return -EPROTO;
+ }
+
+ assigned_eid = (enum ath11k_htc_ep_id)FIELD_GET(
+ HTC_SVC_RESP_MSG_ENDPOINTID,
+ resp_msg->flags_len);
+
+ max_msg_size = FIELD_GET(HTC_SVC_RESP_MSG_MAXMSGSIZE,
+ resp_msg->flags_len);
+
+setup:
+
+ if (assigned_eid >= ATH11K_HTC_EP_COUNT)
+ return -EPROTO;
+
+ if (max_msg_size == 0)
+ return -EPROTO;
+
+ ep = &htc->endpoint[assigned_eid];
+ ep->eid = assigned_eid;
+
+ if (ep->service_id != ATH11K_HTC_SVC_ID_UNUSED)
+ return -EPROTO;
+
+ /* return assigned endpoint to caller */
+ conn_resp->eid = assigned_eid;
+ conn_resp->max_msg_len = FIELD_GET(HTC_SVC_RESP_MSG_MAXMSGSIZE,
+ resp_msg->flags_len);
+
+ /* setup the endpoint */
+ ep->service_id = conn_req->service_id;
+ ep->max_tx_queue_depth = conn_req->max_send_queue_depth;
+ ep->max_ep_message_len = FIELD_GET(HTC_SVC_RESP_MSG_MAXMSGSIZE,
+ resp_msg->flags_len);
+ ep->tx_credits = tx_alloc;
+
+ /* copy all the callbacks */
+ ep->ep_ops = conn_req->ep_ops;
+
+ status = ath11k_hif_map_service_to_pipe(htc->ab,
+ ep->service_id,
+ &ep->ul_pipe_id,
+ &ep->dl_pipe_id);
+ if (status)
+ return status;
+
+ ath11k_dbg(ab, ATH11K_DBG_BOOT,
+ "htc service '%s' ul pipe %d dl pipe %d eid %d ready\n",
+ htc_service_name(ep->service_id), ep->ul_pipe_id,
+ ep->dl_pipe_id, ep->eid);
+
+ if (disable_credit_flow_ctrl && ep->tx_credit_flow_enabled) {
+ ep->tx_credit_flow_enabled = false;
+ ath11k_dbg(ab, ATH11K_DBG_BOOT,
+ "htc service '%s' eid %d tx flow control disabled\n",
+ htc_service_name(ep->service_id), assigned_eid);
+ }
+
+ return status;
+}
+
+int ath11k_htc_start(struct ath11k_htc *htc)
+{
+ struct sk_buff *skb;
+ int status = 0;
+ struct ath11k_base *ab = htc->ab;
+ struct ath11k_htc_setup_complete_extended *msg;
+
+ skb = ath11k_htc_build_tx_ctrl_skb(htc->ab);
+ if (!skb)
+ return -ENOMEM;
+
+ skb_put(skb, sizeof(*msg));
+ memset(skb->data, 0, skb->len);
+
+ msg = (struct ath11k_htc_setup_complete_extended *)skb->data;
+ msg->msg_id = FIELD_PREP(HTC_MSG_MESSAGEID,
+ ATH11K_HTC_MSG_SETUP_COMPLETE_EX_ID);
+
+ if (ab->hw_params.credit_flow)
+ ath11k_dbg(ab, ATH11K_DBG_HTC, "using tx credit flow control\n");
+ else
+ msg->flags |= ATH11K_GLOBAL_DISABLE_CREDIT_FLOW;
+
+ status = ath11k_htc_send(htc, ATH11K_HTC_EP_0, skb);
+ if (status) {
+ kfree_skb(skb);
+ return status;
+ }
+
+ return 0;
+}
+
+int ath11k_htc_init(struct ath11k_base *ab)
+{
+ struct ath11k_htc *htc = &ab->htc;
+ struct ath11k_htc_svc_conn_req conn_req;
+ struct ath11k_htc_svc_conn_resp conn_resp;
+ int ret;
+
+ spin_lock_init(&htc->tx_lock);
+
+ ath11k_htc_reset_endpoint_states(htc);
+
+ htc->ab = ab;
+
+ switch (ab->wmi_ab.preferred_hw_mode) {
+ case WMI_HOST_HW_MODE_SINGLE:
+ htc->wmi_ep_count = 1;
+ break;
+ case WMI_HOST_HW_MODE_DBS:
+ case WMI_HOST_HW_MODE_DBS_OR_SBS:
+ htc->wmi_ep_count = 2;
+ break;
+ case WMI_HOST_HW_MODE_DBS_SBS:
+ htc->wmi_ep_count = 3;
+ break;
+ default:
+ htc->wmi_ep_count = ab->hw_params.max_radios;
+ break;
+ }
+
+ /* setup our pseudo HTC control endpoint connection */
+ memset(&conn_req, 0, sizeof(conn_req));
+ memset(&conn_resp, 0, sizeof(conn_resp));
+ conn_req.ep_ops.ep_tx_complete = ath11k_htc_control_tx_complete;
+ conn_req.ep_ops.ep_rx_complete = ath11k_htc_control_rx_complete;
+ conn_req.max_send_queue_depth = ATH11K_NUM_CONTROL_TX_BUFFERS;
+ conn_req.service_id = ATH11K_HTC_SVC_ID_RSVD_CTRL;
+
+ /* connect fake service */
+ ret = ath11k_htc_connect_service(htc, &conn_req, &conn_resp);
+ if (ret) {
+ ath11k_err(ab, "could not connect to htc service (%d)\n", ret);
+ return ret;
+ }
+
+ init_completion(&htc->ctl_resp);
+
+ return 0;
+}
diff --git a/drivers/net/wireless/ath/ath11k/htc.h b/drivers/net/wireless/ath/ath11k/htc.h
new file mode 100644
index 0000000000..f429b37cfd
--- /dev/null
+++ b/drivers/net/wireless/ath/ath11k/htc.h
@@ -0,0 +1,312 @@
+/* SPDX-License-Identifier: BSD-3-Clause-Clear */
+/*
+ * Copyright (c) 2018-2019 The Linux Foundation. All rights reserved.
+ */
+
+#ifndef ATH11K_HTC_H
+#define ATH11K_HTC_H
+
+#include <linux/kernel.h>
+#include <linux/list.h>
+#include <linux/bug.h>
+#include <linux/skbuff.h>
+#include <linux/timer.h>
+
+struct ath11k_base;
+
+#define HTC_HDR_ENDPOINTID GENMASK(7, 0)
+#define HTC_HDR_FLAGS GENMASK(15, 8)
+#define HTC_HDR_PAYLOADLEN GENMASK(31, 16)
+#define HTC_HDR_CONTROLBYTES0 GENMASK(7, 0)
+#define HTC_HDR_CONTROLBYTES1 GENMASK(15, 8)
+#define HTC_HDR_RESERVED GENMASK(31, 16)
+
+#define HTC_SVC_MSG_SERVICE_ID GENMASK(31, 16)
+#define HTC_SVC_MSG_CONNECTIONFLAGS GENMASK(15, 0)
+#define HTC_SVC_MSG_SERVICEMETALENGTH GENMASK(23, 16)
+#define HTC_READY_MSG_CREDITCOUNT GENMASK(31, 16)
+#define HTC_READY_MSG_CREDITSIZE GENMASK(15, 0)
+#define HTC_READY_MSG_MAXENDPOINTS GENMASK(23, 16)
+
+#define HTC_READY_EX_MSG_HTCVERSION GENMASK(7, 0)
+#define HTC_READY_EX_MSG_MAXMSGSPERHTCBUNDLE GENMASK(15, 8)
+
+#define HTC_SVC_RESP_MSG_SERVICEID GENMASK(31, 16)
+#define HTC_SVC_RESP_MSG_STATUS GENMASK(7, 0)
+#define HTC_SVC_RESP_MSG_ENDPOINTID GENMASK(15, 8)
+#define HTC_SVC_RESP_MSG_MAXMSGSIZE GENMASK(31, 16)
+#define HTC_SVC_RESP_MSG_SERVICEMETALENGTH GENMASK(7, 0)
+
+#define HTC_MSG_MESSAGEID GENMASK(15, 0)
+#define HTC_SETUP_COMPLETE_EX_MSG_SETUPFLAGS GENMASK(31, 0)
+#define HTC_SETUP_COMPLETE_EX_MSG_MAXMSGSPERBUNDLEDRECV GENMASK(7, 0)
+#define HTC_SETUP_COMPLETE_EX_MSG_RSVD0 GENMASK(15, 8)
+#define HTC_SETUP_COMPLETE_EX_MSG_RSVD1 GENMASK(23, 16)
+#define HTC_SETUP_COMPLETE_EX_MSG_RSVD2 GENMASK(31, 24)
+
+enum ath11k_htc_tx_flags {
+ ATH11K_HTC_FLAG_NEED_CREDIT_UPDATE = 0x01,
+ ATH11K_HTC_FLAG_SEND_BUNDLE = 0x02
+};
+
+enum ath11k_htc_rx_flags {
+ ATH11K_HTC_FLAG_TRAILER_PRESENT = 0x02,
+ ATH11K_HTC_FLAG_BUNDLE_MASK = 0xF0
+};
+
+struct ath11k_htc_hdr {
+ u32 htc_info;
+ u32 ctrl_info;
+} __packed __aligned(4);
+
+enum ath11k_htc_msg_id {
+ ATH11K_HTC_MSG_READY_ID = 1,
+ ATH11K_HTC_MSG_CONNECT_SERVICE_ID = 2,
+ ATH11K_HTC_MSG_CONNECT_SERVICE_RESP_ID = 3,
+ ATH11K_HTC_MSG_SETUP_COMPLETE_ID = 4,
+ ATH11K_HTC_MSG_SETUP_COMPLETE_EX_ID = 5,
+ ATH11K_HTC_MSG_SEND_SUSPEND_COMPLETE = 6,
+ ATH11K_HTC_MSG_NACK_SUSPEND = 7,
+ ATH11K_HTC_MSG_WAKEUP_FROM_SUSPEND_ID = 8,
+};
+
+enum ath11k_htc_version {
+ ATH11K_HTC_VERSION_2P0 = 0x00, /* 2.0 */
+ ATH11K_HTC_VERSION_2P1 = 0x01, /* 2.1 */
+};
+
+#define ATH11K_HTC_CONN_FLAGS_THRESHOLD_LEVEL_MASK GENMASK(1, 0)
+#define ATH11K_HTC_CONN_FLAGS_RECV_ALLOC GENMASK(15, 8)
+
+enum ath11k_htc_conn_flags {
+ ATH11K_HTC_CONN_FLAGS_THRESHOLD_LEVEL_ONE_FOURTH = 0x0,
+ ATH11K_HTC_CONN_FLAGS_THRESHOLD_LEVEL_ONE_HALF = 0x1,
+ ATH11K_HTC_CONN_FLAGS_THRESHOLD_LEVEL_THREE_FOURTHS = 0x2,
+ ATH11K_HTC_CONN_FLAGS_THRESHOLD_LEVEL_UNITY = 0x3,
+ ATH11K_HTC_CONN_FLAGS_REDUCE_CREDIT_DRIBBLE = 0x4,
+ ATH11K_HTC_CONN_FLAGS_DISABLE_CREDIT_FLOW_CTRL = 0x8,
+};
+
+enum ath11k_htc_conn_svc_status {
+ ATH11K_HTC_CONN_SVC_STATUS_SUCCESS = 0,
+ ATH11K_HTC_CONN_SVC_STATUS_NOT_FOUND = 1,
+ ATH11K_HTC_CONN_SVC_STATUS_FAILED = 2,
+ ATH11K_HTC_CONN_SVC_STATUS_NO_RESOURCES = 3,
+ ATH11K_HTC_CONN_SVC_STATUS_NO_MORE_EP = 4
+};
+
+struct ath11k_htc_ready {
+ u32 id_credit_count;
+ u32 size_ep;
+} __packed;
+
+struct ath11k_htc_ready_extended {
+ struct ath11k_htc_ready base;
+ u32 ver_bundle;
+} __packed;
+
+struct ath11k_htc_conn_svc {
+ u32 msg_svc_id;
+ u32 flags_len;
+} __packed;
+
+struct ath11k_htc_conn_svc_resp {
+ u32 msg_svc_id;
+ u32 flags_len;
+ u32 svc_meta_pad;
+} __packed;
+
+#define ATH11K_GLOBAL_DISABLE_CREDIT_FLOW BIT(1)
+
+struct ath11k_htc_setup_complete_extended {
+ u32 msg_id;
+ u32 flags;
+ u32 max_msgs_per_bundled_recv;
+} __packed;
+
+struct ath11k_htc_msg {
+ u32 msg_svc_id;
+ u32 flags_len;
+} __packed __aligned(4);
+
+enum ath11k_htc_record_id {
+ ATH11K_HTC_RECORD_NULL = 0,
+ ATH11K_HTC_RECORD_CREDITS = 1
+};
+
+struct ath11k_htc_record_hdr {
+ u8 id; /* @enum ath11k_htc_record_id */
+ u8 len;
+ u8 pad0;
+ u8 pad1;
+} __packed;
+
+struct ath11k_htc_credit_report {
+ u8 eid; /* @enum ath11k_htc_ep_id */
+ u8 credits;
+ u8 pad0;
+ u8 pad1;
+} __packed;
+
+struct ath11k_htc_record {
+ struct ath11k_htc_record_hdr hdr;
+ union {
+ struct ath11k_htc_credit_report credit_report[0];
+ u8 payload[0];
+ };
+} __packed __aligned(4);
+
+/* note: the trailer offset is dynamic depending
+ * on payload length. this is only a struct layout draft
+ */
+struct ath11k_htc_frame {
+ struct ath11k_htc_hdr hdr;
+ union {
+ struct ath11k_htc_msg msg;
+ u8 payload[0];
+ };
+ struct ath11k_htc_record trailer[0];
+} __packed __aligned(4);
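+
+/* e.g. a frame carrying an 8-byte trailer has its records starting at
+ * offset sizeof(hdr) + payload_len - 8, which is how the rx path locates
+ * them in ath11k_htc_rx_completion_handler().
+ */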
+
+enum ath11k_htc_svc_gid {
+ ATH11K_HTC_SVC_GRP_RSVD = 0,
+ ATH11K_HTC_SVC_GRP_WMI = 1,
+ ATH11K_HTC_SVC_GRP_NMI = 2,
+ ATH11K_HTC_SVC_GRP_HTT = 3,
+ ATH11K_HTC_SVC_GRP_CFG = 4,
+ ATH11K_HTC_SVC_GRP_IPA = 5,
+ ATH11K_HTC_SVC_GRP_PKTLOG = 6,
+
+ ATH11K_HTC_SVC_GRP_TEST = 254,
+ ATH11K_HTC_SVC_GRP_LAST = 255,
+};
+
+#define SVC(group, idx) \
+ (int)(((int)(group) << 8) | (int)(idx))
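+/* e.g. SVC(ATH11K_HTC_SVC_GRP_WMI, 0) == 0x0100, the WMI control service */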
+
+enum ath11k_htc_svc_id {
+ /* NOTE: service ID of 0x0000 is reserved and should never be used */
+ ATH11K_HTC_SVC_ID_RESERVED = 0x0000,
+ ATH11K_HTC_SVC_ID_UNUSED = ATH11K_HTC_SVC_ID_RESERVED,
+
+ ATH11K_HTC_SVC_ID_RSVD_CTRL = SVC(ATH11K_HTC_SVC_GRP_RSVD, 1),
+ ATH11K_HTC_SVC_ID_WMI_CONTROL = SVC(ATH11K_HTC_SVC_GRP_WMI, 0),
+ ATH11K_HTC_SVC_ID_WMI_DATA_BE = SVC(ATH11K_HTC_SVC_GRP_WMI, 1),
+ ATH11K_HTC_SVC_ID_WMI_DATA_BK = SVC(ATH11K_HTC_SVC_GRP_WMI, 2),
+ ATH11K_HTC_SVC_ID_WMI_DATA_VI = SVC(ATH11K_HTC_SVC_GRP_WMI, 3),
+ ATH11K_HTC_SVC_ID_WMI_DATA_VO = SVC(ATH11K_HTC_SVC_GRP_WMI, 4),
+ ATH11K_HTC_SVC_ID_WMI_CONTROL_MAC1 = SVC(ATH11K_HTC_SVC_GRP_WMI, 5),
+ ATH11K_HTC_SVC_ID_WMI_CONTROL_MAC2 = SVC(ATH11K_HTC_SVC_GRP_WMI, 6),
+
+ ATH11K_HTC_SVC_ID_NMI_CONTROL = SVC(ATH11K_HTC_SVC_GRP_NMI, 0),
+ ATH11K_HTC_SVC_ID_NMI_DATA = SVC(ATH11K_HTC_SVC_GRP_NMI, 1),
+
+ ATH11K_HTC_SVC_ID_HTT_DATA_MSG = SVC(ATH11K_HTC_SVC_GRP_HTT, 0),
+
+ /* raw stream service (i.e. flash, tcmd, calibration apps) */
+ ATH11K_HTC_SVC_ID_TEST_RAW_STREAMS = SVC(ATH11K_HTC_SVC_GRP_TEST, 0),
+ ATH11K_HTC_SVC_ID_IPA_TX = SVC(ATH11K_HTC_SVC_GRP_IPA, 0),
+ ATH11K_HTC_SVC_ID_PKT_LOG = SVC(ATH11K_HTC_SVC_GRP_PKTLOG, 0),
+};
+
+#undef SVC
+
+enum ath11k_htc_ep_id {
+ ATH11K_HTC_EP_UNUSED = -1,
+ ATH11K_HTC_EP_0 = 0,
+ ATH11K_HTC_EP_1 = 1,
+ ATH11K_HTC_EP_2,
+ ATH11K_HTC_EP_3,
+ ATH11K_HTC_EP_4,
+ ATH11K_HTC_EP_5,
+ ATH11K_HTC_EP_6,
+ ATH11K_HTC_EP_7,
+ ATH11K_HTC_EP_8,
+ ATH11K_HTC_EP_COUNT,
+};
+
+struct ath11k_htc_ep_ops {
+ void (*ep_tx_complete)(struct ath11k_base *, struct sk_buff *);
+ void (*ep_rx_complete)(struct ath11k_base *, struct sk_buff *);
+ void (*ep_tx_credits)(struct ath11k_base *);
+};
+
+/* service connection information */
+struct ath11k_htc_svc_conn_req {
+ u16 service_id;
+ struct ath11k_htc_ep_ops ep_ops;
+ int max_send_queue_depth;
+};
+
+/* service connection response information */
+struct ath11k_htc_svc_conn_resp {
+ u8 buffer_len;
+ u8 actual_len;
+ enum ath11k_htc_ep_id eid;
+ unsigned int max_msg_len;
+ u8 connect_resp_code;
+};
+
+#define ATH11K_NUM_CONTROL_TX_BUFFERS 2
+#define ATH11K_HTC_MAX_LEN 4096
+#define ATH11K_HTC_MAX_CTRL_MSG_LEN 256
+#define ATH11K_HTC_WAIT_TIMEOUT_HZ (1 * HZ)
+#define ATH11K_HTC_CONTROL_BUFFER_SIZE (ATH11K_HTC_MAX_CTRL_MSG_LEN + \
+ sizeof(struct ath11k_htc_hdr))
+#define ATH11K_HTC_CONN_SVC_TIMEOUT_HZ (1 * HZ)
+#define ATH11K_HTC_MAX_SERVICE_ALLOC_ENTRIES 8
+
+struct ath11k_htc_ep {
+ struct ath11k_htc *htc;
+ enum ath11k_htc_ep_id eid;
+ enum ath11k_htc_svc_id service_id;
+ struct ath11k_htc_ep_ops ep_ops;
+
+ int max_tx_queue_depth;
+ int max_ep_message_len;
+ u8 ul_pipe_id;
+ u8 dl_pipe_id;
+
+ u8 seq_no; /* for debugging */
+ int tx_credits;
+ bool tx_credit_flow_enabled;
+};
+
+struct ath11k_htc_svc_tx_credits {
+ u16 service_id;
+ u8 credit_allocation;
+};
+
+struct ath11k_htc {
+ struct ath11k_base *ab;
+ struct ath11k_htc_ep endpoint[ATH11K_HTC_EP_COUNT];
+
+ /* protects endpoints */
+ spinlock_t tx_lock;
+
+ u8 control_resp_buffer[ATH11K_HTC_MAX_CTRL_MSG_LEN];
+ int control_resp_len;
+
+ struct completion ctl_resp;
+
+ int total_transmit_credits;
+ struct ath11k_htc_svc_tx_credits
+ service_alloc_table[ATH11K_HTC_MAX_SERVICE_ALLOC_ENTRIES];
+ int target_credit_size;
+ u8 wmi_ep_count;
+};
+
+int ath11k_htc_init(struct ath11k_base *ab);
+int ath11k_htc_wait_target(struct ath11k_htc *htc);
+int ath11k_htc_start(struct ath11k_htc *htc);
+int ath11k_htc_connect_service(struct ath11k_htc *htc,
+ struct ath11k_htc_svc_conn_req *conn_req,
+ struct ath11k_htc_svc_conn_resp *conn_resp);
+int ath11k_htc_send(struct ath11k_htc *htc, enum ath11k_htc_ep_id eid,
+ struct sk_buff *packet);
+struct sk_buff *ath11k_htc_alloc_skb(struct ath11k_base *ab, int size);
+void ath11k_htc_rx_completion_handler(struct ath11k_base *ab,
+ struct sk_buff *skb);
+void ath11k_htc_tx_completion_handler(struct ath11k_base *ab,
+ struct sk_buff *skb);
+#endif
diff --git a/drivers/net/wireless/ath/ath11k/hw.c b/drivers/net/wireless/ath/ath11k/hw.c
new file mode 100644
index 0000000000..d7b5ec6e69
--- /dev/null
+++ b/drivers/net/wireless/ath/ath11k/hw.c
@@ -0,0 +1,2858 @@
+// SPDX-License-Identifier: BSD-3-Clause-Clear
+/*
+ * Copyright (c) 2018-2020 The Linux Foundation. All rights reserved.
+ * Copyright (c) 2022, Qualcomm Innovation Center, Inc. All rights reserved.
+ */
+
+#include <linux/types.h>
+#include <linux/bitops.h>
+#include <linux/bitfield.h>
+
+#include "core.h"
+#include "ce.h"
+#include "hif.h"
+#include "hal.h"
+#include "hw.h"
+
+/* Map from pdev index to hw mac index */
+static u8 ath11k_hw_ipq8074_mac_from_pdev_id(int pdev_idx)
+{
+ switch (pdev_idx) {
+ case 0:
+ return 0;
+ case 1:
+ return 2;
+ case 2:
+ return 1;
+ default:
+ return ATH11K_INVALID_HW_MAC_ID;
+ }
+}
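+
+/* Note the swap above: on IPQ8074, pdev 1 drives hw MAC 2 and pdev 2
+ * drives hw MAC 1, unlike the identity mapping used for IPQ6018 below.
+ */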
+
+static u8 ath11k_hw_ipq6018_mac_from_pdev_id(int pdev_idx)
+{
+ return pdev_idx;
+}
+
+static void ath11k_hw_ipq8074_tx_mesh_enable(struct ath11k_base *ab,
+ struct hal_tcl_data_cmd *tcl_cmd)
+{
+ tcl_cmd->info2 |= FIELD_PREP(HAL_IPQ8074_TCL_DATA_CMD_INFO2_MESH_ENABLE,
+ true);
+}
+
+static void ath11k_hw_qcn9074_tx_mesh_enable(struct ath11k_base *ab,
+ struct hal_tcl_data_cmd *tcl_cmd)
+{
+ tcl_cmd->info3 |= FIELD_PREP(HAL_QCN9074_TCL_DATA_CMD_INFO3_MESH_ENABLE,
+ true);
+}
+
+static void ath11k_hw_wcn6855_tx_mesh_enable(struct ath11k_base *ab,
+ struct hal_tcl_data_cmd *tcl_cmd)
+{
+ tcl_cmd->info3 |= FIELD_PREP(HAL_QCN9074_TCL_DATA_CMD_INFO3_MESH_ENABLE,
+ true);
+}
+
+static void ath11k_init_wmi_config_qca6390(struct ath11k_base *ab,
+ struct target_resource_config *config)
+{
+ config->num_vdevs = 4;
+ config->num_peers = 16;
+ config->num_tids = 32;
+
+ config->num_offload_peers = 3;
+ config->num_offload_reorder_buffs = 3;
+ config->num_peer_keys = TARGET_NUM_PEER_KEYS;
+ config->ast_skid_limit = TARGET_AST_SKID_LIMIT;
+ config->tx_chain_mask = (1 << ab->target_caps.num_rf_chains) - 1;
+ config->rx_chain_mask = (1 << ab->target_caps.num_rf_chains) - 1;
+ config->rx_timeout_pri[0] = TARGET_RX_TIMEOUT_LO_PRI;
+ config->rx_timeout_pri[1] = TARGET_RX_TIMEOUT_LO_PRI;
+ config->rx_timeout_pri[2] = TARGET_RX_TIMEOUT_LO_PRI;
+ config->rx_timeout_pri[3] = TARGET_RX_TIMEOUT_HI_PRI;
+ config->rx_decap_mode = TARGET_DECAP_MODE_NATIVE_WIFI;
+ config->scan_max_pending_req = TARGET_SCAN_MAX_PENDING_REQS;
+ config->bmiss_offload_max_vdev = TARGET_BMISS_OFFLOAD_MAX_VDEV;
+ config->roam_offload_max_vdev = TARGET_ROAM_OFFLOAD_MAX_VDEV;
+ config->roam_offload_max_ap_profiles = TARGET_ROAM_OFFLOAD_MAX_AP_PROFILES;
+ config->num_mcast_groups = 0;
+ config->num_mcast_table_elems = 0;
+ config->mcast2ucast_mode = 0;
+ config->tx_dbg_log_size = TARGET_TX_DBG_LOG_SIZE;
+ config->num_wds_entries = 0;
+ config->dma_burst_size = 0;
+ config->rx_skip_defrag_timeout_dup_detection_check = 0;
+ config->vow_config = TARGET_VOW_CONFIG;
+ config->gtk_offload_max_vdev = 2;
+ config->num_msdu_desc = 0x400;
+ config->beacon_tx_offload_max_vdev = 2;
+ config->rx_batchmode = TARGET_RX_BATCHMODE;
+
+ config->peer_map_unmap_v2_support = 0;
+ config->use_pdev_id = 1;
+ config->max_frag_entries = 0xa;
+ config->num_tdls_vdevs = 0x1;
+ config->num_tdls_conn_table_entries = 8;
+ config->num_multicast_filter_entries = 0x20;
+ config->num_wow_filters = 0x16;
+ config->num_keep_alive_pattern = 0;
+ config->flag1 |= WMI_RSRC_CFG_FLAG1_BSS_CHANNEL_INFO_64;
+}
+
+static void ath11k_hw_ipq8074_reo_setup(struct ath11k_base *ab)
+{
+ u32 reo_base = HAL_SEQ_WCSS_UMAC_REO_REG;
+ u32 val;
+ /* Each hash entry uses three bits to map to a particular ring. */
+ u32 ring_hash_map = HAL_HASH_ROUTING_RING_SW1 << 0 |
+ HAL_HASH_ROUTING_RING_SW2 << 3 |
+ HAL_HASH_ROUTING_RING_SW3 << 6 |
+ HAL_HASH_ROUTING_RING_SW4 << 9 |
+ HAL_HASH_ROUTING_RING_SW1 << 12 |
+ HAL_HASH_ROUTING_RING_SW2 << 15 |
+ HAL_HASH_ROUTING_RING_SW3 << 18 |
+ HAL_HASH_ROUTING_RING_SW4 << 21;
+
+ val = ath11k_hif_read32(ab, reo_base + HAL_REO1_GEN_ENABLE);
+
+ val &= ~HAL_REO1_GEN_ENABLE_FRAG_DST_RING;
+ val |= FIELD_PREP(HAL_REO1_GEN_ENABLE_FRAG_DST_RING,
+ HAL_SRNG_RING_ID_REO2SW1) |
+ FIELD_PREP(HAL_REO1_GEN_ENABLE_AGING_LIST_ENABLE, 1) |
+ FIELD_PREP(HAL_REO1_GEN_ENABLE_AGING_FLUSH_ENABLE, 1);
+ ath11k_hif_write32(ab, reo_base + HAL_REO1_GEN_ENABLE, val);
+
+ ath11k_hif_write32(ab, reo_base + HAL_REO1_AGING_THRESH_IX_0(ab),
+ HAL_DEFAULT_REO_TIMEOUT_USEC);
+ ath11k_hif_write32(ab, reo_base + HAL_REO1_AGING_THRESH_IX_1(ab),
+ HAL_DEFAULT_REO_TIMEOUT_USEC);
+ ath11k_hif_write32(ab, reo_base + HAL_REO1_AGING_THRESH_IX_2(ab),
+ HAL_DEFAULT_REO_TIMEOUT_USEC);
+ ath11k_hif_write32(ab, reo_base + HAL_REO1_AGING_THRESH_IX_3(ab),
+ HAL_DEFAULT_REO_TIMEOUT_USEC);
+
+ ath11k_hif_write32(ab, reo_base + HAL_REO1_DEST_RING_CTRL_IX_0,
+ FIELD_PREP(HAL_REO_DEST_RING_CTRL_HASH_RING_MAP,
+ ring_hash_map));
+ ath11k_hif_write32(ab, reo_base + HAL_REO1_DEST_RING_CTRL_IX_1,
+ FIELD_PREP(HAL_REO_DEST_RING_CTRL_HASH_RING_MAP,
+ ring_hash_map));
+ ath11k_hif_write32(ab, reo_base + HAL_REO1_DEST_RING_CTRL_IX_2,
+ FIELD_PREP(HAL_REO_DEST_RING_CTRL_HASH_RING_MAP,
+ ring_hash_map));
+ ath11k_hif_write32(ab, reo_base + HAL_REO1_DEST_RING_CTRL_IX_3,
+ FIELD_PREP(HAL_REO_DEST_RING_CTRL_HASH_RING_MAP,
+ ring_hash_map));
+}
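+
+/* Decode sketch for the 3-bit hash map programmed above (illustration
+ * only, compiled out; the helper name is hypothetical): REO hashes a
+ * frame to a value in 0..7 and the destination ring is the matching
+ * 3-bit field of ring_hash_map.
+ */
+#if 0
+static u32 example_reo_ring_for_hash(u32 ring_hash_map, u32 hash)
+{
+	return (ring_hash_map >> (hash * 3)) & GENMASK(2, 0);
+}
+#endif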
+
+static void ath11k_init_wmi_config_ipq8074(struct ath11k_base *ab,
+ struct target_resource_config *config)
+{
+ config->num_vdevs = ab->num_radios * TARGET_NUM_VDEVS(ab);
+
+ if (ab->num_radios == 2) {
+ config->num_peers = TARGET_NUM_PEERS(ab, DBS);
+ config->num_tids = TARGET_NUM_TIDS(ab, DBS);
+ } else if (ab->num_radios == 3) {
+ config->num_peers = TARGET_NUM_PEERS(ab, DBS_SBS);
+ config->num_tids = TARGET_NUM_TIDS(ab, DBS_SBS);
+ } else {
+ /* Control should not reach here */
+ config->num_peers = TARGET_NUM_PEERS(ab, SINGLE);
+ config->num_tids = TARGET_NUM_TIDS(ab, SINGLE);
+ }
+ config->num_offload_peers = TARGET_NUM_OFFLD_PEERS;
+ config->num_offload_reorder_buffs = TARGET_NUM_OFFLD_REORDER_BUFFS;
+ config->num_peer_keys = TARGET_NUM_PEER_KEYS;
+ config->ast_skid_limit = TARGET_AST_SKID_LIMIT;
+ config->tx_chain_mask = (1 << ab->target_caps.num_rf_chains) - 1;
+ config->rx_chain_mask = (1 << ab->target_caps.num_rf_chains) - 1;
+ config->rx_timeout_pri[0] = TARGET_RX_TIMEOUT_LO_PRI;
+ config->rx_timeout_pri[1] = TARGET_RX_TIMEOUT_LO_PRI;
+ config->rx_timeout_pri[2] = TARGET_RX_TIMEOUT_LO_PRI;
+ config->rx_timeout_pri[3] = TARGET_RX_TIMEOUT_HI_PRI;
+
+ if (test_bit(ATH11K_FLAG_RAW_MODE, &ab->dev_flags))
+ config->rx_decap_mode = TARGET_DECAP_MODE_RAW;
+ else
+ config->rx_decap_mode = TARGET_DECAP_MODE_NATIVE_WIFI;
+
+ config->scan_max_pending_req = TARGET_SCAN_MAX_PENDING_REQS;
+ config->bmiss_offload_max_vdev = TARGET_BMISS_OFFLOAD_MAX_VDEV;
+ config->roam_offload_max_vdev = TARGET_ROAM_OFFLOAD_MAX_VDEV;
+ config->roam_offload_max_ap_profiles = TARGET_ROAM_OFFLOAD_MAX_AP_PROFILES;
+ config->num_mcast_groups = TARGET_NUM_MCAST_GROUPS;
+ config->num_mcast_table_elems = TARGET_NUM_MCAST_TABLE_ELEMS;
+ config->mcast2ucast_mode = TARGET_MCAST2UCAST_MODE;
+ config->tx_dbg_log_size = TARGET_TX_DBG_LOG_SIZE;
+ config->num_wds_entries = TARGET_NUM_WDS_ENTRIES;
+ config->dma_burst_size = TARGET_DMA_BURST_SIZE;
+ config->rx_skip_defrag_timeout_dup_detection_check =
+ TARGET_RX_SKIP_DEFRAG_TIMEOUT_DUP_DETECTION_CHECK;
+ config->vow_config = TARGET_VOW_CONFIG;
+ config->gtk_offload_max_vdev = TARGET_GTK_OFFLOAD_MAX_VDEV;
+ config->num_msdu_desc = TARGET_NUM_MSDU_DESC;
+ config->beacon_tx_offload_max_vdev = ab->num_radios * TARGET_MAX_BCN_OFFLD;
+ config->rx_batchmode = TARGET_RX_BATCHMODE;
+ config->peer_map_unmap_v2_support = 1;
+ config->twt_ap_pdev_count = ab->num_radios;
+ config->twt_ap_sta_count = 1000;
+ config->flag1 |= WMI_RSRC_CFG_FLAG1_BSS_CHANNEL_INFO_64;
+ config->flag1 |= WMI_RSRC_CFG_FLAG1_ACK_RSSI;
+ config->ema_max_vap_cnt = ab->num_radios;
+ config->ema_max_profile_period = TARGET_EMA_MAX_PROFILE_PERIOD;
+ config->beacon_tx_offload_max_vdev += config->ema_max_vap_cnt;
+}
+
+static int ath11k_hw_mac_id_to_pdev_id_ipq8074(struct ath11k_hw_params *hw,
+ int mac_id)
+{
+ return mac_id;
+}
+
+static int ath11k_hw_mac_id_to_srng_id_ipq8074(struct ath11k_hw_params *hw,
+ int mac_id)
+{
+ return 0;
+}
+
+static int ath11k_hw_mac_id_to_pdev_id_qca6390(struct ath11k_hw_params *hw,
+ int mac_id)
+{
+ return 0;
+}
+
+static int ath11k_hw_mac_id_to_srng_id_qca6390(struct ath11k_hw_params *hw,
+ int mac_id)
+{
+ return mac_id;
+}
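+
+/* Taken together, the two pairs above encode the chip families' pdev
+ * models: IPQ8074-family targets expose one pdev per hw MAC, so mac_id
+ * maps directly to pdev_id and every MAC uses SRNG 0, while
+ * QCA6390-family targets expose a single pdev, so every MAC maps to
+ * pdev 0 and mac_id selects the SRNG instead.
+ */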
+
+static bool ath11k_hw_ipq8074_rx_desc_get_first_msdu(struct hal_rx_desc *desc)
+{
+ return !!FIELD_GET(RX_MSDU_END_INFO2_FIRST_MSDU,
+ __le32_to_cpu(desc->u.ipq8074.msdu_end.info2));
+}
+
+static bool ath11k_hw_ipq8074_rx_desc_get_last_msdu(struct hal_rx_desc *desc)
+{
+ return !!FIELD_GET(RX_MSDU_END_INFO2_LAST_MSDU,
+ __le32_to_cpu(desc->u.ipq8074.msdu_end.info2));
+}
+
+static u8 ath11k_hw_ipq8074_rx_desc_get_l3_pad_bytes(struct hal_rx_desc *desc)
+{
+ return FIELD_GET(RX_MSDU_END_INFO2_L3_HDR_PADDING,
+ __le32_to_cpu(desc->u.ipq8074.msdu_end.info2));
+}
+
+static u8 *ath11k_hw_ipq8074_rx_desc_get_hdr_status(struct hal_rx_desc *desc)
+{
+ return desc->u.ipq8074.hdr_status;
+}
+
+static bool ath11k_hw_ipq8074_rx_desc_encrypt_valid(struct hal_rx_desc *desc)
+{
+ return __le32_to_cpu(desc->u.ipq8074.mpdu_start.info1) &
+ RX_MPDU_START_INFO1_ENCRYPT_INFO_VALID;
+}
+
+static u32 ath11k_hw_ipq8074_rx_desc_get_encrypt_type(struct hal_rx_desc *desc)
+{
+ return FIELD_GET(RX_MPDU_START_INFO2_ENC_TYPE,
+ __le32_to_cpu(desc->u.ipq8074.mpdu_start.info2));
+}
+
+static u8 ath11k_hw_ipq8074_rx_desc_get_decap_type(struct hal_rx_desc *desc)
+{
+ return FIELD_GET(RX_MSDU_START_INFO2_DECAP_FORMAT,
+ __le32_to_cpu(desc->u.ipq8074.msdu_start.info2));
+}
+
+static u8 ath11k_hw_ipq8074_rx_desc_get_mesh_ctl(struct hal_rx_desc *desc)
+{
+ return FIELD_GET(RX_MSDU_START_INFO2_MESH_CTRL_PRESENT,
+ __le32_to_cpu(desc->u.ipq8074.msdu_start.info2));
+}
+
+static bool ath11k_hw_ipq8074_rx_desc_get_ldpc_support(struct hal_rx_desc *desc)
+{
+ return FIELD_GET(RX_MSDU_START_INFO2_LDPC,
+ __le32_to_cpu(desc->u.ipq8074.msdu_start.info2));
+}
+
+static bool ath11k_hw_ipq8074_rx_desc_get_mpdu_seq_ctl_vld(struct hal_rx_desc *desc)
+{
+ return !!FIELD_GET(RX_MPDU_START_INFO1_MPDU_SEQ_CTRL_VALID,
+ __le32_to_cpu(desc->u.ipq8074.mpdu_start.info1));
+}
+
+static bool ath11k_hw_ipq8074_rx_desc_get_mpdu_fc_valid(struct hal_rx_desc *desc)
+{
+ return !!FIELD_GET(RX_MPDU_START_INFO1_MPDU_FCTRL_VALID,
+ __le32_to_cpu(desc->u.ipq8074.mpdu_start.info1));
+}
+
+static u16 ath11k_hw_ipq8074_rx_desc_get_mpdu_start_seq_no(struct hal_rx_desc *desc)
+{
+ return FIELD_GET(RX_MPDU_START_INFO1_MPDU_SEQ_NUM,
+ __le32_to_cpu(desc->u.ipq8074.mpdu_start.info1));
+}
+
+static u16 ath11k_hw_ipq8074_rx_desc_get_msdu_len(struct hal_rx_desc *desc)
+{
+ return FIELD_GET(RX_MSDU_START_INFO1_MSDU_LENGTH,
+ __le32_to_cpu(desc->u.ipq8074.msdu_start.info1));
+}
+
+static u8 ath11k_hw_ipq8074_rx_desc_get_msdu_sgi(struct hal_rx_desc *desc)
+{
+ return FIELD_GET(RX_MSDU_START_INFO3_SGI,
+ __le32_to_cpu(desc->u.ipq8074.msdu_start.info3));
+}
+
+static u8 ath11k_hw_ipq8074_rx_desc_get_msdu_rate_mcs(struct hal_rx_desc *desc)
+{
+ return FIELD_GET(RX_MSDU_START_INFO3_RATE_MCS,
+ __le32_to_cpu(desc->u.ipq8074.msdu_start.info3));
+}
+
+static u8 ath11k_hw_ipq8074_rx_desc_get_msdu_rx_bw(struct hal_rx_desc *desc)
+{
+ return FIELD_GET(RX_MSDU_START_INFO3_RECV_BW,
+ __le32_to_cpu(desc->u.ipq8074.msdu_start.info3));
+}
+
+static u32 ath11k_hw_ipq8074_rx_desc_get_msdu_freq(struct hal_rx_desc *desc)
+{
+ return __le32_to_cpu(desc->u.ipq8074.msdu_start.phy_meta_data);
+}
+
+static u8 ath11k_hw_ipq8074_rx_desc_get_msdu_pkt_type(struct hal_rx_desc *desc)
+{
+ return FIELD_GET(RX_MSDU_START_INFO3_PKT_TYPE,
+ __le32_to_cpu(desc->u.ipq8074.msdu_start.info3));
+}
+
+static u8 ath11k_hw_ipq8074_rx_desc_get_msdu_nss(struct hal_rx_desc *desc)
+{
+ return FIELD_GET(RX_MSDU_START_INFO3_MIMO_SS_BITMAP,
+ __le32_to_cpu(desc->u.ipq8074.msdu_start.info3));
+}
+
+static u8 ath11k_hw_ipq8074_rx_desc_get_mpdu_tid(struct hal_rx_desc *desc)
+{
+ return FIELD_GET(RX_MPDU_START_INFO2_TID,
+ __le32_to_cpu(desc->u.ipq8074.mpdu_start.info2));
+}
+
+static u16 ath11k_hw_ipq8074_rx_desc_get_mpdu_peer_id(struct hal_rx_desc *desc)
+{
+ return __le16_to_cpu(desc->u.ipq8074.mpdu_start.sw_peer_id);
+}
+
+static void ath11k_hw_ipq8074_rx_desc_copy_attn_end(struct hal_rx_desc *fdesc,
+ struct hal_rx_desc *ldesc)
+{
+ memcpy((u8 *)&fdesc->u.ipq8074.msdu_end, (u8 *)&ldesc->u.ipq8074.msdu_end,
+ sizeof(struct rx_msdu_end_ipq8074));
+ memcpy((u8 *)&fdesc->u.ipq8074.attention, (u8 *)&ldesc->u.ipq8074.attention,
+ sizeof(struct rx_attention));
+ memcpy((u8 *)&fdesc->u.ipq8074.mpdu_end, (u8 *)&ldesc->u.ipq8074.mpdu_end,
+ sizeof(struct rx_mpdu_end));
+}
+
+static u32 ath11k_hw_ipq8074_rx_desc_get_mpdu_start_tag(struct hal_rx_desc *desc)
+{
+ return FIELD_GET(HAL_TLV_HDR_TAG,
+ __le32_to_cpu(desc->u.ipq8074.mpdu_start_tag));
+}
+
+static u32 ath11k_hw_ipq8074_rx_desc_get_mpdu_ppdu_id(struct hal_rx_desc *desc)
+{
+ return __le16_to_cpu(desc->u.ipq8074.mpdu_start.phy_ppdu_id);
+}
+
+static void ath11k_hw_ipq8074_rx_desc_set_msdu_len(struct hal_rx_desc *desc, u16 len)
+{
+ u32 info = __le32_to_cpu(desc->u.ipq8074.msdu_start.info1);
+
+ info &= ~RX_MSDU_START_INFO1_MSDU_LENGTH;
+ info |= FIELD_PREP(RX_MSDU_START_INFO1_MSDU_LENGTH, len);
+
+ desc->u.ipq8074.msdu_start.info1 = __cpu_to_le32(info);
+}
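+
+/* The setter above is a standard read-modify-write on a little-endian
+ * descriptor word: convert to CPU order, clear the field mask, insert
+ * the new value with FIELD_PREP(), convert back. A self-contained
+ * sketch of the same pattern (illustration only, compiled out; the
+ * helper name is hypothetical):
+ */
+#if 0
+static void example_set_msdu_len(__le32 *word, u16 len)
+{
+	u32 tmp = __le32_to_cpu(*word);		/* to CPU byte order */
+
+	tmp &= ~RX_MSDU_START_INFO1_MSDU_LENGTH;	/* clear old length */
+	tmp |= FIELD_PREP(RX_MSDU_START_INFO1_MSDU_LENGTH, len);
+	*word = __cpu_to_le32(tmp);		/* back to little endian */
+}
+#endif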
+
+static bool ath11k_hw_ipq8074_rx_desc_mac_addr2_valid(struct hal_rx_desc *desc)
+{
+ return __le32_to_cpu(desc->u.ipq8074.mpdu_start.info1) &
+ RX_MPDU_START_INFO1_MAC_ADDR2_VALID;
+}
+
+static u8 *ath11k_hw_ipq8074_rx_desc_mpdu_start_addr2(struct hal_rx_desc *desc)
+{
+ return desc->u.ipq8074.mpdu_start.addr2;
+}
+
+static
+struct rx_attention *ath11k_hw_ipq8074_rx_desc_get_attention(struct hal_rx_desc *desc)
+{
+ return &desc->u.ipq8074.attention;
+}
+
+static u8 *ath11k_hw_ipq8074_rx_desc_get_msdu_payload(struct hal_rx_desc *desc)
+{
+ return &desc->u.ipq8074.msdu_payload[0];
+}
+
+static bool ath11k_hw_qcn9074_rx_desc_get_first_msdu(struct hal_rx_desc *desc)
+{
+ return !!FIELD_GET(RX_MSDU_END_INFO4_FIRST_MSDU,
+ __le16_to_cpu(desc->u.qcn9074.msdu_end.info4));
+}
+
+static bool ath11k_hw_qcn9074_rx_desc_get_last_msdu(struct hal_rx_desc *desc)
+{
+ return !!FIELD_GET(RX_MSDU_END_INFO4_LAST_MSDU,
+ __le16_to_cpu(desc->u.qcn9074.msdu_end.info4));
+}
+
+static u8 ath11k_hw_qcn9074_rx_desc_get_l3_pad_bytes(struct hal_rx_desc *desc)
+{
+ return FIELD_GET(RX_MSDU_END_INFO4_L3_HDR_PADDING,
+ __le16_to_cpu(desc->u.qcn9074.msdu_end.info4));
+}
+
+static u8 *ath11k_hw_qcn9074_rx_desc_get_hdr_status(struct hal_rx_desc *desc)
+{
+ return desc->u.qcn9074.hdr_status;
+}
+
+static bool ath11k_hw_qcn9074_rx_desc_encrypt_valid(struct hal_rx_desc *desc)
+{
+ return __le32_to_cpu(desc->u.qcn9074.mpdu_start.info11) &
+ RX_MPDU_START_INFO11_ENCRYPT_INFO_VALID;
+}
+
+static u32 ath11k_hw_qcn9074_rx_desc_get_encrypt_type(struct hal_rx_desc *desc)
+{
+ return FIELD_GET(RX_MPDU_START_INFO9_ENC_TYPE,
+ __le32_to_cpu(desc->u.qcn9074.mpdu_start.info9));
+}
+
+static u8 ath11k_hw_qcn9074_rx_desc_get_decap_type(struct hal_rx_desc *desc)
+{
+ return FIELD_GET(RX_MSDU_START_INFO2_DECAP_FORMAT,
+ __le32_to_cpu(desc->u.qcn9074.msdu_start.info2));
+}
+
+static u8 ath11k_hw_qcn9074_rx_desc_get_mesh_ctl(struct hal_rx_desc *desc)
+{
+ return FIELD_GET(RX_MSDU_START_INFO2_MESH_CTRL_PRESENT,
+ __le32_to_cpu(desc->u.qcn9074.msdu_start.info2));
+}
+
+static bool ath11k_hw_qcn9074_rx_desc_get_ldpc_support(struct hal_rx_desc *desc)
+{
+ return FIELD_GET(RX_MSDU_START_INFO2_LDPC,
+ __le32_to_cpu(desc->u.qcn9074.msdu_start.info2));
+}
+
+static bool ath11k_hw_qcn9074_rx_desc_get_mpdu_seq_ctl_vld(struct hal_rx_desc *desc)
+{
+ return !!FIELD_GET(RX_MPDU_START_INFO11_MPDU_SEQ_CTRL_VALID,
+ __le32_to_cpu(desc->u.qcn9074.mpdu_start.info11));
+}
+
+static bool ath11k_hw_qcn9074_rx_desc_get_mpdu_fc_valid(struct hal_rx_desc *desc)
+{
+ return !!FIELD_GET(RX_MPDU_START_INFO11_MPDU_FCTRL_VALID,
+ __le32_to_cpu(desc->u.qcn9074.mpdu_start.info11));
+}
+
+static u16 ath11k_hw_qcn9074_rx_desc_get_mpdu_start_seq_no(struct hal_rx_desc *desc)
+{
+ return FIELD_GET(RX_MPDU_START_INFO11_MPDU_SEQ_NUM,
+ __le32_to_cpu(desc->u.qcn9074.mpdu_start.info11));
+}
+
+static u16 ath11k_hw_qcn9074_rx_desc_get_msdu_len(struct hal_rx_desc *desc)
+{
+ return FIELD_GET(RX_MSDU_START_INFO1_MSDU_LENGTH,
+ __le32_to_cpu(desc->u.qcn9074.msdu_start.info1));
+}
+
+static u8 ath11k_hw_qcn9074_rx_desc_get_msdu_sgi(struct hal_rx_desc *desc)
+{
+ return FIELD_GET(RX_MSDU_START_INFO3_SGI,
+ __le32_to_cpu(desc->u.qcn9074.msdu_start.info3));
+}
+
+static u8 ath11k_hw_qcn9074_rx_desc_get_msdu_rate_mcs(struct hal_rx_desc *desc)
+{
+ return FIELD_GET(RX_MSDU_START_INFO3_RATE_MCS,
+ __le32_to_cpu(desc->u.qcn9074.msdu_start.info3));
+}
+
+static u8 ath11k_hw_qcn9074_rx_desc_get_msdu_rx_bw(struct hal_rx_desc *desc)
+{
+ return FIELD_GET(RX_MSDU_START_INFO3_RECV_BW,
+ __le32_to_cpu(desc->u.qcn9074.msdu_start.info3));
+}
+
+static u32 ath11k_hw_qcn9074_rx_desc_get_msdu_freq(struct hal_rx_desc *desc)
+{
+ return __le32_to_cpu(desc->u.qcn9074.msdu_start.phy_meta_data);
+}
+
+static u8 ath11k_hw_qcn9074_rx_desc_get_msdu_pkt_type(struct hal_rx_desc *desc)
+{
+ return FIELD_GET(RX_MSDU_START_INFO3_PKT_TYPE,
+ __le32_to_cpu(desc->u.qcn9074.msdu_start.info3));
+}
+
+static u8 ath11k_hw_qcn9074_rx_desc_get_msdu_nss(struct hal_rx_desc *desc)
+{
+ return FIELD_GET(RX_MSDU_START_INFO3_MIMO_SS_BITMAP,
+ __le32_to_cpu(desc->u.qcn9074.msdu_start.info3));
+}
+
+static u8 ath11k_hw_qcn9074_rx_desc_get_mpdu_tid(struct hal_rx_desc *desc)
+{
+ return FIELD_GET(RX_MPDU_START_INFO9_TID,
+ __le32_to_cpu(desc->u.qcn9074.mpdu_start.info9));
+}
+
+static u16 ath11k_hw_qcn9074_rx_desc_get_mpdu_peer_id(struct hal_rx_desc *desc)
+{
+ return __le16_to_cpu(desc->u.qcn9074.mpdu_start.sw_peer_id);
+}
+
+static void ath11k_hw_qcn9074_rx_desc_copy_attn_end(struct hal_rx_desc *fdesc,
+ struct hal_rx_desc *ldesc)
+{
+ memcpy((u8 *)&fdesc->u.qcn9074.msdu_end, (u8 *)&ldesc->u.qcn9074.msdu_end,
+ sizeof(struct rx_msdu_end_qcn9074));
+ memcpy((u8 *)&fdesc->u.qcn9074.attention, (u8 *)&ldesc->u.qcn9074.attention,
+ sizeof(struct rx_attention));
+ memcpy((u8 *)&fdesc->u.qcn9074.mpdu_end, (u8 *)&ldesc->u.qcn9074.mpdu_end,
+ sizeof(struct rx_mpdu_end));
+}
+
+static u32 ath11k_hw_qcn9074_rx_desc_get_mpdu_start_tag(struct hal_rx_desc *desc)
+{
+ return FIELD_GET(HAL_TLV_HDR_TAG,
+ __le32_to_cpu(desc->u.qcn9074.mpdu_start_tag));
+}
+
+static u32 ath11k_hw_qcn9074_rx_desc_get_mpdu_ppdu_id(struct hal_rx_desc *desc)
+{
+ return __le16_to_cpu(desc->u.qcn9074.mpdu_start.phy_ppdu_id);
+}
+
+static void ath11k_hw_qcn9074_rx_desc_set_msdu_len(struct hal_rx_desc *desc, u16 len)
+{
+ u32 info = __le32_to_cpu(desc->u.qcn9074.msdu_start.info1);
+
+ info &= ~RX_MSDU_START_INFO1_MSDU_LENGTH;
+ info |= FIELD_PREP(RX_MSDU_START_INFO1_MSDU_LENGTH, len);
+
+ desc->u.qcn9074.msdu_start.info1 = __cpu_to_le32(info);
+}
+
+static
+struct rx_attention *ath11k_hw_qcn9074_rx_desc_get_attention(struct hal_rx_desc *desc)
+{
+ return &desc->u.qcn9074.attention;
+}
+
+static u8 *ath11k_hw_qcn9074_rx_desc_get_msdu_payload(struct hal_rx_desc *desc)
+{
+ return &desc->u.qcn9074.msdu_payload[0];
+}
+
+static bool ath11k_hw_ipq9074_rx_desc_mac_addr2_valid(struct hal_rx_desc *desc)
+{
+ return __le32_to_cpu(desc->u.qcn9074.mpdu_start.info11) &
+ RX_MPDU_START_INFO11_MAC_ADDR2_VALID;
+}
+
+static u8 *ath11k_hw_ipq9074_rx_desc_mpdu_start_addr2(struct hal_rx_desc *desc)
+{
+ return desc->u.qcn9074.mpdu_start.addr2;
+}
+
+static bool ath11k_hw_wcn6855_rx_desc_get_first_msdu(struct hal_rx_desc *desc)
+{
+ return !!FIELD_GET(RX_MSDU_END_INFO2_FIRST_MSDU_WCN6855,
+ __le32_to_cpu(desc->u.wcn6855.msdu_end.info2));
+}
+
+static bool ath11k_hw_wcn6855_rx_desc_get_last_msdu(struct hal_rx_desc *desc)
+{
+ return !!FIELD_GET(RX_MSDU_END_INFO2_LAST_MSDU_WCN6855,
+ __le32_to_cpu(desc->u.wcn6855.msdu_end.info2));
+}
+
+static u8 ath11k_hw_wcn6855_rx_desc_get_l3_pad_bytes(struct hal_rx_desc *desc)
+{
+ return FIELD_GET(RX_MSDU_END_INFO2_L3_HDR_PADDING,
+ __le32_to_cpu(desc->u.wcn6855.msdu_end.info2));
+}
+
+static u8 *ath11k_hw_wcn6855_rx_desc_get_hdr_status(struct hal_rx_desc *desc)
+{
+ return desc->u.wcn6855.hdr_status;
+}
+
+static bool ath11k_hw_wcn6855_rx_desc_encrypt_valid(struct hal_rx_desc *desc)
+{
+ return __le32_to_cpu(desc->u.wcn6855.mpdu_start.info1) &
+ RX_MPDU_START_INFO1_ENCRYPT_INFO_VALID;
+}
+
+static u32 ath11k_hw_wcn6855_rx_desc_get_encrypt_type(struct hal_rx_desc *desc)
+{
+ return FIELD_GET(RX_MPDU_START_INFO2_ENC_TYPE,
+ __le32_to_cpu(desc->u.wcn6855.mpdu_start.info2));
+}
+
+static u8 ath11k_hw_wcn6855_rx_desc_get_decap_type(struct hal_rx_desc *desc)
+{
+ return FIELD_GET(RX_MSDU_START_INFO2_DECAP_FORMAT,
+ __le32_to_cpu(desc->u.wcn6855.msdu_start.info2));
+}
+
+static u8 ath11k_hw_wcn6855_rx_desc_get_mesh_ctl(struct hal_rx_desc *desc)
+{
+ return FIELD_GET(RX_MSDU_START_INFO2_MESH_CTRL_PRESENT,
+ __le32_to_cpu(desc->u.wcn6855.msdu_start.info2));
+}
+
+static bool ath11k_hw_wcn6855_rx_desc_get_mpdu_seq_ctl_vld(struct hal_rx_desc *desc)
+{
+ return !!FIELD_GET(RX_MPDU_START_INFO1_MPDU_SEQ_CTRL_VALID,
+ __le32_to_cpu(desc->u.wcn6855.mpdu_start.info1));
+}
+
+static bool ath11k_hw_wcn6855_rx_desc_get_mpdu_fc_valid(struct hal_rx_desc *desc)
+{
+ return !!FIELD_GET(RX_MPDU_START_INFO1_MPDU_FCTRL_VALID,
+ __le32_to_cpu(desc->u.wcn6855.mpdu_start.info1));
+}
+
+static u16 ath11k_hw_wcn6855_rx_desc_get_mpdu_start_seq_no(struct hal_rx_desc *desc)
+{
+ return FIELD_GET(RX_MPDU_START_INFO1_MPDU_SEQ_NUM,
+ __le32_to_cpu(desc->u.wcn6855.mpdu_start.info1));
+}
+
+static u16 ath11k_hw_wcn6855_rx_desc_get_msdu_len(struct hal_rx_desc *desc)
+{
+ return FIELD_GET(RX_MSDU_START_INFO1_MSDU_LENGTH,
+ __le32_to_cpu(desc->u.wcn6855.msdu_start.info1));
+}
+
+static u8 ath11k_hw_wcn6855_rx_desc_get_msdu_sgi(struct hal_rx_desc *desc)
+{
+ return FIELD_GET(RX_MSDU_START_INFO3_SGI,
+ __le32_to_cpu(desc->u.wcn6855.msdu_start.info3));
+}
+
+static u8 ath11k_hw_wcn6855_rx_desc_get_msdu_rate_mcs(struct hal_rx_desc *desc)
+{
+ return FIELD_GET(RX_MSDU_START_INFO3_RATE_MCS,
+ __le32_to_cpu(desc->u.wcn6855.msdu_start.info3));
+}
+
+static u8 ath11k_hw_wcn6855_rx_desc_get_msdu_rx_bw(struct hal_rx_desc *desc)
+{
+ return FIELD_GET(RX_MSDU_START_INFO3_RECV_BW,
+ __le32_to_cpu(desc->u.wcn6855.msdu_start.info3));
+}
+
+static u32 ath11k_hw_wcn6855_rx_desc_get_msdu_freq(struct hal_rx_desc *desc)
+{
+ return __le32_to_cpu(desc->u.wcn6855.msdu_start.phy_meta_data);
+}
+
+static u8 ath11k_hw_wcn6855_rx_desc_get_msdu_pkt_type(struct hal_rx_desc *desc)
+{
+ return FIELD_GET(RX_MSDU_START_INFO3_PKT_TYPE,
+ __le32_to_cpu(desc->u.wcn6855.msdu_start.info3));
+}
+
+static u8 ath11k_hw_wcn6855_rx_desc_get_msdu_nss(struct hal_rx_desc *desc)
+{
+ return FIELD_GET(RX_MSDU_START_INFO3_MIMO_SS_BITMAP,
+ __le32_to_cpu(desc->u.wcn6855.msdu_start.info3));
+}
+
+static u8 ath11k_hw_wcn6855_rx_desc_get_mpdu_tid(struct hal_rx_desc *desc)
+{
+ return FIELD_GET(RX_MPDU_START_INFO2_TID_WCN6855,
+ __le32_to_cpu(desc->u.wcn6855.mpdu_start.info2));
+}
+
+static u16 ath11k_hw_wcn6855_rx_desc_get_mpdu_peer_id(struct hal_rx_desc *desc)
+{
+ return __le16_to_cpu(desc->u.wcn6855.mpdu_start.sw_peer_id);
+}
+
+static void ath11k_hw_wcn6855_rx_desc_copy_attn_end(struct hal_rx_desc *fdesc,
+ struct hal_rx_desc *ldesc)
+{
+ memcpy((u8 *)&fdesc->u.wcn6855.msdu_end, (u8 *)&ldesc->u.wcn6855.msdu_end,
+ sizeof(struct rx_msdu_end_wcn6855));
+ memcpy((u8 *)&fdesc->u.wcn6855.attention, (u8 *)&ldesc->u.wcn6855.attention,
+ sizeof(struct rx_attention));
+ memcpy((u8 *)&fdesc->u.wcn6855.mpdu_end, (u8 *)&ldesc->u.wcn6855.mpdu_end,
+ sizeof(struct rx_mpdu_end));
+}
+
+static u32 ath11k_hw_wcn6855_rx_desc_get_mpdu_start_tag(struct hal_rx_desc *desc)
+{
+ return FIELD_GET(HAL_TLV_HDR_TAG,
+ __le32_to_cpu(desc->u.wcn6855.mpdu_start_tag));
+}
+
+static u32 ath11k_hw_wcn6855_rx_desc_get_mpdu_ppdu_id(struct hal_rx_desc *desc)
+{
+ return __le16_to_cpu(desc->u.wcn6855.mpdu_start.phy_ppdu_id);
+}
+
+static void ath11k_hw_wcn6855_rx_desc_set_msdu_len(struct hal_rx_desc *desc, u16 len)
+{
+ u32 info = __le32_to_cpu(desc->u.wcn6855.msdu_start.info1);
+
+ info &= ~RX_MSDU_START_INFO1_MSDU_LENGTH;
+ info |= FIELD_PREP(RX_MSDU_START_INFO1_MSDU_LENGTH, len);
+
+ desc->u.wcn6855.msdu_start.info1 = __cpu_to_le32(info);
+}
+
+static
+struct rx_attention *ath11k_hw_wcn6855_rx_desc_get_attention(struct hal_rx_desc *desc)
+{
+ return &desc->u.wcn6855.attention;
+}
+
+static u8 *ath11k_hw_wcn6855_rx_desc_get_msdu_payload(struct hal_rx_desc *desc)
+{
+ return &desc->u.wcn6855.msdu_payload[0];
+}
+
+static bool ath11k_hw_wcn6855_rx_desc_mac_addr2_valid(struct hal_rx_desc *desc)
+{
+ return __le32_to_cpu(desc->u.wcn6855.mpdu_start.info1) &
+ RX_MPDU_START_INFO1_MAC_ADDR2_VALID;
+}
+
+static u8 *ath11k_hw_wcn6855_rx_desc_mpdu_start_addr2(struct hal_rx_desc *desc)
+{
+ return desc->u.wcn6855.mpdu_start.addr2;
+}
+
+static void ath11k_hw_wcn6855_reo_setup(struct ath11k_base *ab)
+{
+ u32 reo_base = HAL_SEQ_WCSS_UMAC_REO_REG;
+ u32 val;
+ /* Each hash entry uses four bits to map to a particular ring. */
+ u32 ring_hash_map = HAL_HASH_ROUTING_RING_SW1 << 0 |
+ HAL_HASH_ROUTING_RING_SW2 << 4 |
+ HAL_HASH_ROUTING_RING_SW3 << 8 |
+ HAL_HASH_ROUTING_RING_SW4 << 12 |
+ HAL_HASH_ROUTING_RING_SW1 << 16 |
+ HAL_HASH_ROUTING_RING_SW2 << 20 |
+ HAL_HASH_ROUTING_RING_SW3 << 24 |
+ HAL_HASH_ROUTING_RING_SW4 << 28;
+
+ val = ath11k_hif_read32(ab, reo_base + HAL_REO1_GEN_ENABLE);
+ val |= FIELD_PREP(HAL_REO1_GEN_ENABLE_AGING_LIST_ENABLE, 1) |
+ FIELD_PREP(HAL_REO1_GEN_ENABLE_AGING_FLUSH_ENABLE, 1);
+ ath11k_hif_write32(ab, reo_base + HAL_REO1_GEN_ENABLE, val);
+
+ val = ath11k_hif_read32(ab, reo_base + HAL_REO1_MISC_CTL(ab));
+ val &= ~HAL_REO1_MISC_CTL_FRAGMENT_DST_RING;
+ val |= FIELD_PREP(HAL_REO1_MISC_CTL_FRAGMENT_DST_RING, HAL_SRNG_RING_ID_REO2SW1);
+ ath11k_hif_write32(ab, reo_base + HAL_REO1_MISC_CTL(ab), val);
+
+ ath11k_hif_write32(ab, reo_base + HAL_REO1_AGING_THRESH_IX_0(ab),
+ HAL_DEFAULT_REO_TIMEOUT_USEC);
+ ath11k_hif_write32(ab, reo_base + HAL_REO1_AGING_THRESH_IX_1(ab),
+ HAL_DEFAULT_REO_TIMEOUT_USEC);
+ ath11k_hif_write32(ab, reo_base + HAL_REO1_AGING_THRESH_IX_2(ab),
+ HAL_DEFAULT_REO_TIMEOUT_USEC);
+ ath11k_hif_write32(ab, reo_base + HAL_REO1_AGING_THRESH_IX_3(ab),
+ HAL_DEFAULT_REO_TIMEOUT_USEC);
+
+ ath11k_hif_write32(ab, reo_base + HAL_REO1_DEST_RING_CTRL_IX_2,
+ ring_hash_map);
+ ath11k_hif_write32(ab, reo_base + HAL_REO1_DEST_RING_CTRL_IX_3,
+ ring_hash_map);
+}
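+
+/* Note the differences from the IPQ8074 setup above: the fragment
+ * destination ring is configured via the HAL_REO1_MISC_CTL register,
+ * only the IX_2 and IX_3 destination-control registers are programmed,
+ * and the 4-bit hash map is written without a FIELD_PREP() wrap.
+ */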
+
+static void ath11k_hw_ipq5018_reo_setup(struct ath11k_base *ab)
+{
+ u32 reo_base = HAL_SEQ_WCSS_UMAC_REO_REG;
+ u32 val;
+
+	/* Each hash entry uses four bits to map to a particular ring. */
+ u32 ring_hash_map = HAL_HASH_ROUTING_RING_SW1 << 0 |
+ HAL_HASH_ROUTING_RING_SW2 << 4 |
+ HAL_HASH_ROUTING_RING_SW3 << 8 |
+ HAL_HASH_ROUTING_RING_SW4 << 12 |
+ HAL_HASH_ROUTING_RING_SW1 << 16 |
+ HAL_HASH_ROUTING_RING_SW2 << 20 |
+ HAL_HASH_ROUTING_RING_SW3 << 24 |
+ HAL_HASH_ROUTING_RING_SW4 << 28;
+
+ val = ath11k_hif_read32(ab, reo_base + HAL_REO1_GEN_ENABLE);
+
+ val &= ~HAL_REO1_GEN_ENABLE_FRAG_DST_RING;
+ val |= FIELD_PREP(HAL_REO1_GEN_ENABLE_FRAG_DST_RING,
+ HAL_SRNG_RING_ID_REO2SW1) |
+ FIELD_PREP(HAL_REO1_GEN_ENABLE_AGING_LIST_ENABLE, 1) |
+ FIELD_PREP(HAL_REO1_GEN_ENABLE_AGING_FLUSH_ENABLE, 1);
+ ath11k_hif_write32(ab, reo_base + HAL_REO1_GEN_ENABLE, val);
+
+ ath11k_hif_write32(ab, reo_base + HAL_REO1_AGING_THRESH_IX_0(ab),
+ HAL_DEFAULT_REO_TIMEOUT_USEC);
+ ath11k_hif_write32(ab, reo_base + HAL_REO1_AGING_THRESH_IX_1(ab),
+ HAL_DEFAULT_REO_TIMEOUT_USEC);
+ ath11k_hif_write32(ab, reo_base + HAL_REO1_AGING_THRESH_IX_2(ab),
+ HAL_DEFAULT_REO_TIMEOUT_USEC);
+ ath11k_hif_write32(ab, reo_base + HAL_REO1_AGING_THRESH_IX_3(ab),
+ HAL_DEFAULT_REO_TIMEOUT_USEC);
+
+ ath11k_hif_write32(ab, reo_base + HAL_REO1_DEST_RING_CTRL_IX_0,
+ ring_hash_map);
+ ath11k_hif_write32(ab, reo_base + HAL_REO1_DEST_RING_CTRL_IX_1,
+ ring_hash_map);
+ ath11k_hif_write32(ab, reo_base + HAL_REO1_DEST_RING_CTRL_IX_2,
+ ring_hash_map);
+ ath11k_hif_write32(ab, reo_base + HAL_REO1_DEST_RING_CTRL_IX_3,
+ ring_hash_map);
+}
+
+static u16
+ath11k_hw_ipq8074_mpdu_info_get_peerid(struct hal_rx_mpdu_info *mpdu_info)
+{
+	return FIELD_GET(HAL_RX_MPDU_INFO_INFO0_PEERID,
+			 __le32_to_cpu(mpdu_info->u.ipq8074.info0));
+}
+
+static u16
+ath11k_hw_qcn9074_mpdu_info_get_peerid(struct hal_rx_mpdu_info *mpdu_info)
+{
+	return FIELD_GET(HAL_RX_MPDU_INFO_INFO0_PEERID,
+			 __le32_to_cpu(mpdu_info->u.qcn9074.info0));
+}
+
+static u16
+ath11k_hw_wcn6855_mpdu_info_get_peerid(struct hal_rx_mpdu_info *mpdu_info)
+{
+	return FIELD_GET(HAL_RX_MPDU_INFO_INFO0_PEERID_WCN6855,
+			 __le32_to_cpu(mpdu_info->u.wcn6855.info0));
+}
+
+static bool ath11k_hw_wcn6855_rx_desc_get_ldpc_support(struct hal_rx_desc *desc)
+{
+ return FIELD_GET(RX_MSDU_START_INFO2_LDPC,
+ __le32_to_cpu(desc->u.wcn6855.msdu_start.info2));
+}
+
+static u32 ath11k_hw_ipq8074_get_tcl_ring_selector(struct sk_buff *skb)
+{
+	/* Default ring selection is based on the current processor
+	 * number: one of the three TCL rings is selected from
+	 * smp_processor_id(). If that ring is full or busy, we fall
+	 * back to the other available rings. If all rings are full,
+	 * the packet is dropped.
+	 *
+	 * TODO: Add throttling logic when all rings are full
+	 */
+	return smp_processor_id();
+}
+
+static u32 ath11k_hw_wcn6750_get_tcl_ring_selector(struct sk_buff *skb)
+{
+	/* Select the TCL ring based on the flow hash of the SKB instead
+	 * of the CPU ID. Since the applications generating the traffic
+	 * can be scheduled across multiple CPUs, packets of the same
+	 * flow could otherwise end up on different TCL rings, which can
+	 * result in out-of-order arrival at the receiver.
+	 */
+	return skb_get_hash(skb);
+}
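+
+/* Both selectors return an unbounded u32; the transmit path is
+ * expected to reduce it to a valid TCL ring index (e.g. modulo the
+ * number of TCL rings) before use.
+ */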
+
+const struct ath11k_hw_ops ipq8074_ops = {
+ .get_hw_mac_from_pdev_id = ath11k_hw_ipq8074_mac_from_pdev_id,
+ .wmi_init_config = ath11k_init_wmi_config_ipq8074,
+ .mac_id_to_pdev_id = ath11k_hw_mac_id_to_pdev_id_ipq8074,
+ .mac_id_to_srng_id = ath11k_hw_mac_id_to_srng_id_ipq8074,
+ .tx_mesh_enable = ath11k_hw_ipq8074_tx_mesh_enable,
+ .rx_desc_get_first_msdu = ath11k_hw_ipq8074_rx_desc_get_first_msdu,
+ .rx_desc_get_last_msdu = ath11k_hw_ipq8074_rx_desc_get_last_msdu,
+ .rx_desc_get_l3_pad_bytes = ath11k_hw_ipq8074_rx_desc_get_l3_pad_bytes,
+ .rx_desc_get_hdr_status = ath11k_hw_ipq8074_rx_desc_get_hdr_status,
+ .rx_desc_encrypt_valid = ath11k_hw_ipq8074_rx_desc_encrypt_valid,
+ .rx_desc_get_encrypt_type = ath11k_hw_ipq8074_rx_desc_get_encrypt_type,
+ .rx_desc_get_decap_type = ath11k_hw_ipq8074_rx_desc_get_decap_type,
+ .rx_desc_get_mesh_ctl = ath11k_hw_ipq8074_rx_desc_get_mesh_ctl,
+ .rx_desc_get_ldpc_support = ath11k_hw_ipq8074_rx_desc_get_ldpc_support,
+ .rx_desc_get_mpdu_seq_ctl_vld = ath11k_hw_ipq8074_rx_desc_get_mpdu_seq_ctl_vld,
+ .rx_desc_get_mpdu_fc_valid = ath11k_hw_ipq8074_rx_desc_get_mpdu_fc_valid,
+ .rx_desc_get_mpdu_start_seq_no = ath11k_hw_ipq8074_rx_desc_get_mpdu_start_seq_no,
+ .rx_desc_get_msdu_len = ath11k_hw_ipq8074_rx_desc_get_msdu_len,
+ .rx_desc_get_msdu_sgi = ath11k_hw_ipq8074_rx_desc_get_msdu_sgi,
+ .rx_desc_get_msdu_rate_mcs = ath11k_hw_ipq8074_rx_desc_get_msdu_rate_mcs,
+ .rx_desc_get_msdu_rx_bw = ath11k_hw_ipq8074_rx_desc_get_msdu_rx_bw,
+ .rx_desc_get_msdu_freq = ath11k_hw_ipq8074_rx_desc_get_msdu_freq,
+ .rx_desc_get_msdu_pkt_type = ath11k_hw_ipq8074_rx_desc_get_msdu_pkt_type,
+ .rx_desc_get_msdu_nss = ath11k_hw_ipq8074_rx_desc_get_msdu_nss,
+ .rx_desc_get_mpdu_tid = ath11k_hw_ipq8074_rx_desc_get_mpdu_tid,
+ .rx_desc_get_mpdu_peer_id = ath11k_hw_ipq8074_rx_desc_get_mpdu_peer_id,
+ .rx_desc_copy_attn_end_tlv = ath11k_hw_ipq8074_rx_desc_copy_attn_end,
+ .rx_desc_get_mpdu_start_tag = ath11k_hw_ipq8074_rx_desc_get_mpdu_start_tag,
+ .rx_desc_get_mpdu_ppdu_id = ath11k_hw_ipq8074_rx_desc_get_mpdu_ppdu_id,
+ .rx_desc_set_msdu_len = ath11k_hw_ipq8074_rx_desc_set_msdu_len,
+ .rx_desc_get_attention = ath11k_hw_ipq8074_rx_desc_get_attention,
+ .rx_desc_get_msdu_payload = ath11k_hw_ipq8074_rx_desc_get_msdu_payload,
+ .reo_setup = ath11k_hw_ipq8074_reo_setup,
+ .mpdu_info_get_peerid = ath11k_hw_ipq8074_mpdu_info_get_peerid,
+ .rx_desc_mac_addr2_valid = ath11k_hw_ipq8074_rx_desc_mac_addr2_valid,
+ .rx_desc_mpdu_start_addr2 = ath11k_hw_ipq8074_rx_desc_mpdu_start_addr2,
+ .get_ring_selector = ath11k_hw_ipq8074_get_tcl_ring_selector,
+};
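+
+/* The hw_ops tables in this file are reached through the per-device
+ * hw_params, so a typical call site looks roughly like (illustrative
+ * only):
+ *
+ *	msdu_len = ab->hw_params.hw_ops->rx_desc_get_msdu_len(rx_desc);
+ *
+ * keeping the rest of the driver independent of the per-chip
+ * descriptor layouts handled above.
+ */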
+
+const struct ath11k_hw_ops ipq6018_ops = {
+ .get_hw_mac_from_pdev_id = ath11k_hw_ipq6018_mac_from_pdev_id,
+ .wmi_init_config = ath11k_init_wmi_config_ipq8074,
+ .mac_id_to_pdev_id = ath11k_hw_mac_id_to_pdev_id_ipq8074,
+ .mac_id_to_srng_id = ath11k_hw_mac_id_to_srng_id_ipq8074,
+ .tx_mesh_enable = ath11k_hw_ipq8074_tx_mesh_enable,
+ .rx_desc_get_first_msdu = ath11k_hw_ipq8074_rx_desc_get_first_msdu,
+ .rx_desc_get_last_msdu = ath11k_hw_ipq8074_rx_desc_get_last_msdu,
+ .rx_desc_get_l3_pad_bytes = ath11k_hw_ipq8074_rx_desc_get_l3_pad_bytes,
+ .rx_desc_get_hdr_status = ath11k_hw_ipq8074_rx_desc_get_hdr_status,
+ .rx_desc_encrypt_valid = ath11k_hw_ipq8074_rx_desc_encrypt_valid,
+ .rx_desc_get_encrypt_type = ath11k_hw_ipq8074_rx_desc_get_encrypt_type,
+ .rx_desc_get_decap_type = ath11k_hw_ipq8074_rx_desc_get_decap_type,
+ .rx_desc_get_mesh_ctl = ath11k_hw_ipq8074_rx_desc_get_mesh_ctl,
+ .rx_desc_get_ldpc_support = ath11k_hw_ipq8074_rx_desc_get_ldpc_support,
+ .rx_desc_get_mpdu_seq_ctl_vld = ath11k_hw_ipq8074_rx_desc_get_mpdu_seq_ctl_vld,
+ .rx_desc_get_mpdu_fc_valid = ath11k_hw_ipq8074_rx_desc_get_mpdu_fc_valid,
+ .rx_desc_get_mpdu_start_seq_no = ath11k_hw_ipq8074_rx_desc_get_mpdu_start_seq_no,
+ .rx_desc_get_msdu_len = ath11k_hw_ipq8074_rx_desc_get_msdu_len,
+ .rx_desc_get_msdu_sgi = ath11k_hw_ipq8074_rx_desc_get_msdu_sgi,
+ .rx_desc_get_msdu_rate_mcs = ath11k_hw_ipq8074_rx_desc_get_msdu_rate_mcs,
+ .rx_desc_get_msdu_rx_bw = ath11k_hw_ipq8074_rx_desc_get_msdu_rx_bw,
+ .rx_desc_get_msdu_freq = ath11k_hw_ipq8074_rx_desc_get_msdu_freq,
+ .rx_desc_get_msdu_pkt_type = ath11k_hw_ipq8074_rx_desc_get_msdu_pkt_type,
+ .rx_desc_get_msdu_nss = ath11k_hw_ipq8074_rx_desc_get_msdu_nss,
+ .rx_desc_get_mpdu_tid = ath11k_hw_ipq8074_rx_desc_get_mpdu_tid,
+ .rx_desc_get_mpdu_peer_id = ath11k_hw_ipq8074_rx_desc_get_mpdu_peer_id,
+ .rx_desc_copy_attn_end_tlv = ath11k_hw_ipq8074_rx_desc_copy_attn_end,
+ .rx_desc_get_mpdu_start_tag = ath11k_hw_ipq8074_rx_desc_get_mpdu_start_tag,
+ .rx_desc_get_mpdu_ppdu_id = ath11k_hw_ipq8074_rx_desc_get_mpdu_ppdu_id,
+ .rx_desc_set_msdu_len = ath11k_hw_ipq8074_rx_desc_set_msdu_len,
+ .rx_desc_get_attention = ath11k_hw_ipq8074_rx_desc_get_attention,
+ .rx_desc_get_msdu_payload = ath11k_hw_ipq8074_rx_desc_get_msdu_payload,
+ .reo_setup = ath11k_hw_ipq8074_reo_setup,
+ .mpdu_info_get_peerid = ath11k_hw_ipq8074_mpdu_info_get_peerid,
+ .rx_desc_mac_addr2_valid = ath11k_hw_ipq8074_rx_desc_mac_addr2_valid,
+ .rx_desc_mpdu_start_addr2 = ath11k_hw_ipq8074_rx_desc_mpdu_start_addr2,
+ .get_ring_selector = ath11k_hw_ipq8074_get_tcl_ring_selector,
+};
+
+const struct ath11k_hw_ops qca6390_ops = {
+ .get_hw_mac_from_pdev_id = ath11k_hw_ipq8074_mac_from_pdev_id,
+ .wmi_init_config = ath11k_init_wmi_config_qca6390,
+ .mac_id_to_pdev_id = ath11k_hw_mac_id_to_pdev_id_qca6390,
+ .mac_id_to_srng_id = ath11k_hw_mac_id_to_srng_id_qca6390,
+ .tx_mesh_enable = ath11k_hw_ipq8074_tx_mesh_enable,
+ .rx_desc_get_first_msdu = ath11k_hw_ipq8074_rx_desc_get_first_msdu,
+ .rx_desc_get_last_msdu = ath11k_hw_ipq8074_rx_desc_get_last_msdu,
+ .rx_desc_get_l3_pad_bytes = ath11k_hw_ipq8074_rx_desc_get_l3_pad_bytes,
+ .rx_desc_get_hdr_status = ath11k_hw_ipq8074_rx_desc_get_hdr_status,
+ .rx_desc_encrypt_valid = ath11k_hw_ipq8074_rx_desc_encrypt_valid,
+ .rx_desc_get_encrypt_type = ath11k_hw_ipq8074_rx_desc_get_encrypt_type,
+ .rx_desc_get_decap_type = ath11k_hw_ipq8074_rx_desc_get_decap_type,
+ .rx_desc_get_mesh_ctl = ath11k_hw_ipq8074_rx_desc_get_mesh_ctl,
+ .rx_desc_get_ldpc_support = ath11k_hw_ipq8074_rx_desc_get_ldpc_support,
+ .rx_desc_get_mpdu_seq_ctl_vld = ath11k_hw_ipq8074_rx_desc_get_mpdu_seq_ctl_vld,
+ .rx_desc_get_mpdu_fc_valid = ath11k_hw_ipq8074_rx_desc_get_mpdu_fc_valid,
+ .rx_desc_get_mpdu_start_seq_no = ath11k_hw_ipq8074_rx_desc_get_mpdu_start_seq_no,
+ .rx_desc_get_msdu_len = ath11k_hw_ipq8074_rx_desc_get_msdu_len,
+ .rx_desc_get_msdu_sgi = ath11k_hw_ipq8074_rx_desc_get_msdu_sgi,
+ .rx_desc_get_msdu_rate_mcs = ath11k_hw_ipq8074_rx_desc_get_msdu_rate_mcs,
+ .rx_desc_get_msdu_rx_bw = ath11k_hw_ipq8074_rx_desc_get_msdu_rx_bw,
+ .rx_desc_get_msdu_freq = ath11k_hw_ipq8074_rx_desc_get_msdu_freq,
+ .rx_desc_get_msdu_pkt_type = ath11k_hw_ipq8074_rx_desc_get_msdu_pkt_type,
+ .rx_desc_get_msdu_nss = ath11k_hw_ipq8074_rx_desc_get_msdu_nss,
+ .rx_desc_get_mpdu_tid = ath11k_hw_ipq8074_rx_desc_get_mpdu_tid,
+ .rx_desc_get_mpdu_peer_id = ath11k_hw_ipq8074_rx_desc_get_mpdu_peer_id,
+ .rx_desc_copy_attn_end_tlv = ath11k_hw_ipq8074_rx_desc_copy_attn_end,
+ .rx_desc_get_mpdu_start_tag = ath11k_hw_ipq8074_rx_desc_get_mpdu_start_tag,
+ .rx_desc_get_mpdu_ppdu_id = ath11k_hw_ipq8074_rx_desc_get_mpdu_ppdu_id,
+ .rx_desc_set_msdu_len = ath11k_hw_ipq8074_rx_desc_set_msdu_len,
+ .rx_desc_get_attention = ath11k_hw_ipq8074_rx_desc_get_attention,
+ .rx_desc_get_msdu_payload = ath11k_hw_ipq8074_rx_desc_get_msdu_payload,
+ .reo_setup = ath11k_hw_ipq8074_reo_setup,
+ .mpdu_info_get_peerid = ath11k_hw_ipq8074_mpdu_info_get_peerid,
+ .rx_desc_mac_addr2_valid = ath11k_hw_ipq8074_rx_desc_mac_addr2_valid,
+ .rx_desc_mpdu_start_addr2 = ath11k_hw_ipq8074_rx_desc_mpdu_start_addr2,
+ .get_ring_selector = ath11k_hw_ipq8074_get_tcl_ring_selector,
+};
+
+const struct ath11k_hw_ops qcn9074_ops = {
+ .get_hw_mac_from_pdev_id = ath11k_hw_ipq6018_mac_from_pdev_id,
+ .wmi_init_config = ath11k_init_wmi_config_ipq8074,
+ .mac_id_to_pdev_id = ath11k_hw_mac_id_to_pdev_id_ipq8074,
+ .mac_id_to_srng_id = ath11k_hw_mac_id_to_srng_id_ipq8074,
+ .tx_mesh_enable = ath11k_hw_qcn9074_tx_mesh_enable,
+ .rx_desc_get_first_msdu = ath11k_hw_qcn9074_rx_desc_get_first_msdu,
+ .rx_desc_get_last_msdu = ath11k_hw_qcn9074_rx_desc_get_last_msdu,
+ .rx_desc_get_l3_pad_bytes = ath11k_hw_qcn9074_rx_desc_get_l3_pad_bytes,
+ .rx_desc_get_hdr_status = ath11k_hw_qcn9074_rx_desc_get_hdr_status,
+ .rx_desc_encrypt_valid = ath11k_hw_qcn9074_rx_desc_encrypt_valid,
+ .rx_desc_get_encrypt_type = ath11k_hw_qcn9074_rx_desc_get_encrypt_type,
+ .rx_desc_get_decap_type = ath11k_hw_qcn9074_rx_desc_get_decap_type,
+ .rx_desc_get_mesh_ctl = ath11k_hw_qcn9074_rx_desc_get_mesh_ctl,
+ .rx_desc_get_ldpc_support = ath11k_hw_qcn9074_rx_desc_get_ldpc_support,
+ .rx_desc_get_mpdu_seq_ctl_vld = ath11k_hw_qcn9074_rx_desc_get_mpdu_seq_ctl_vld,
+ .rx_desc_get_mpdu_fc_valid = ath11k_hw_qcn9074_rx_desc_get_mpdu_fc_valid,
+ .rx_desc_get_mpdu_start_seq_no = ath11k_hw_qcn9074_rx_desc_get_mpdu_start_seq_no,
+ .rx_desc_get_msdu_len = ath11k_hw_qcn9074_rx_desc_get_msdu_len,
+ .rx_desc_get_msdu_sgi = ath11k_hw_qcn9074_rx_desc_get_msdu_sgi,
+ .rx_desc_get_msdu_rate_mcs = ath11k_hw_qcn9074_rx_desc_get_msdu_rate_mcs,
+ .rx_desc_get_msdu_rx_bw = ath11k_hw_qcn9074_rx_desc_get_msdu_rx_bw,
+ .rx_desc_get_msdu_freq = ath11k_hw_qcn9074_rx_desc_get_msdu_freq,
+ .rx_desc_get_msdu_pkt_type = ath11k_hw_qcn9074_rx_desc_get_msdu_pkt_type,
+ .rx_desc_get_msdu_nss = ath11k_hw_qcn9074_rx_desc_get_msdu_nss,
+ .rx_desc_get_mpdu_tid = ath11k_hw_qcn9074_rx_desc_get_mpdu_tid,
+ .rx_desc_get_mpdu_peer_id = ath11k_hw_qcn9074_rx_desc_get_mpdu_peer_id,
+ .rx_desc_copy_attn_end_tlv = ath11k_hw_qcn9074_rx_desc_copy_attn_end,
+ .rx_desc_get_mpdu_start_tag = ath11k_hw_qcn9074_rx_desc_get_mpdu_start_tag,
+ .rx_desc_get_mpdu_ppdu_id = ath11k_hw_qcn9074_rx_desc_get_mpdu_ppdu_id,
+ .rx_desc_set_msdu_len = ath11k_hw_qcn9074_rx_desc_set_msdu_len,
+ .rx_desc_get_attention = ath11k_hw_qcn9074_rx_desc_get_attention,
+ .rx_desc_get_msdu_payload = ath11k_hw_qcn9074_rx_desc_get_msdu_payload,
+ .reo_setup = ath11k_hw_ipq8074_reo_setup,
+ .mpdu_info_get_peerid = ath11k_hw_qcn9074_mpdu_info_get_peerid,
+ .rx_desc_mac_addr2_valid = ath11k_hw_ipq9074_rx_desc_mac_addr2_valid,
+ .rx_desc_mpdu_start_addr2 = ath11k_hw_ipq9074_rx_desc_mpdu_start_addr2,
+ .get_ring_selector = ath11k_hw_ipq8074_get_tcl_ring_selector,
+};
+
+const struct ath11k_hw_ops wcn6855_ops = {
+ .get_hw_mac_from_pdev_id = ath11k_hw_ipq8074_mac_from_pdev_id,
+ .wmi_init_config = ath11k_init_wmi_config_qca6390,
+ .mac_id_to_pdev_id = ath11k_hw_mac_id_to_pdev_id_qca6390,
+ .mac_id_to_srng_id = ath11k_hw_mac_id_to_srng_id_qca6390,
+ .tx_mesh_enable = ath11k_hw_wcn6855_tx_mesh_enable,
+ .rx_desc_get_first_msdu = ath11k_hw_wcn6855_rx_desc_get_first_msdu,
+ .rx_desc_get_last_msdu = ath11k_hw_wcn6855_rx_desc_get_last_msdu,
+ .rx_desc_get_l3_pad_bytes = ath11k_hw_wcn6855_rx_desc_get_l3_pad_bytes,
+ .rx_desc_get_hdr_status = ath11k_hw_wcn6855_rx_desc_get_hdr_status,
+ .rx_desc_encrypt_valid = ath11k_hw_wcn6855_rx_desc_encrypt_valid,
+ .rx_desc_get_encrypt_type = ath11k_hw_wcn6855_rx_desc_get_encrypt_type,
+ .rx_desc_get_decap_type = ath11k_hw_wcn6855_rx_desc_get_decap_type,
+ .rx_desc_get_mesh_ctl = ath11k_hw_wcn6855_rx_desc_get_mesh_ctl,
+ .rx_desc_get_ldpc_support = ath11k_hw_wcn6855_rx_desc_get_ldpc_support,
+ .rx_desc_get_mpdu_seq_ctl_vld = ath11k_hw_wcn6855_rx_desc_get_mpdu_seq_ctl_vld,
+ .rx_desc_get_mpdu_fc_valid = ath11k_hw_wcn6855_rx_desc_get_mpdu_fc_valid,
+ .rx_desc_get_mpdu_start_seq_no = ath11k_hw_wcn6855_rx_desc_get_mpdu_start_seq_no,
+ .rx_desc_get_msdu_len = ath11k_hw_wcn6855_rx_desc_get_msdu_len,
+ .rx_desc_get_msdu_sgi = ath11k_hw_wcn6855_rx_desc_get_msdu_sgi,
+ .rx_desc_get_msdu_rate_mcs = ath11k_hw_wcn6855_rx_desc_get_msdu_rate_mcs,
+ .rx_desc_get_msdu_rx_bw = ath11k_hw_wcn6855_rx_desc_get_msdu_rx_bw,
+ .rx_desc_get_msdu_freq = ath11k_hw_wcn6855_rx_desc_get_msdu_freq,
+ .rx_desc_get_msdu_pkt_type = ath11k_hw_wcn6855_rx_desc_get_msdu_pkt_type,
+ .rx_desc_get_msdu_nss = ath11k_hw_wcn6855_rx_desc_get_msdu_nss,
+ .rx_desc_get_mpdu_tid = ath11k_hw_wcn6855_rx_desc_get_mpdu_tid,
+ .rx_desc_get_mpdu_peer_id = ath11k_hw_wcn6855_rx_desc_get_mpdu_peer_id,
+ .rx_desc_copy_attn_end_tlv = ath11k_hw_wcn6855_rx_desc_copy_attn_end,
+ .rx_desc_get_mpdu_start_tag = ath11k_hw_wcn6855_rx_desc_get_mpdu_start_tag,
+ .rx_desc_get_mpdu_ppdu_id = ath11k_hw_wcn6855_rx_desc_get_mpdu_ppdu_id,
+ .rx_desc_set_msdu_len = ath11k_hw_wcn6855_rx_desc_set_msdu_len,
+ .rx_desc_get_attention = ath11k_hw_wcn6855_rx_desc_get_attention,
+ .rx_desc_get_msdu_payload = ath11k_hw_wcn6855_rx_desc_get_msdu_payload,
+ .reo_setup = ath11k_hw_wcn6855_reo_setup,
+ .mpdu_info_get_peerid = ath11k_hw_wcn6855_mpdu_info_get_peerid,
+ .rx_desc_mac_addr2_valid = ath11k_hw_wcn6855_rx_desc_mac_addr2_valid,
+ .rx_desc_mpdu_start_addr2 = ath11k_hw_wcn6855_rx_desc_mpdu_start_addr2,
+ .get_ring_selector = ath11k_hw_ipq8074_get_tcl_ring_selector,
+};
+
+const struct ath11k_hw_ops wcn6750_ops = {
+ .get_hw_mac_from_pdev_id = ath11k_hw_ipq8074_mac_from_pdev_id,
+ .wmi_init_config = ath11k_init_wmi_config_qca6390,
+ .mac_id_to_pdev_id = ath11k_hw_mac_id_to_pdev_id_qca6390,
+ .mac_id_to_srng_id = ath11k_hw_mac_id_to_srng_id_qca6390,
+ .tx_mesh_enable = ath11k_hw_qcn9074_tx_mesh_enable,
+ .rx_desc_get_first_msdu = ath11k_hw_qcn9074_rx_desc_get_first_msdu,
+ .rx_desc_get_last_msdu = ath11k_hw_qcn9074_rx_desc_get_last_msdu,
+ .rx_desc_get_l3_pad_bytes = ath11k_hw_qcn9074_rx_desc_get_l3_pad_bytes,
+ .rx_desc_get_hdr_status = ath11k_hw_qcn9074_rx_desc_get_hdr_status,
+ .rx_desc_encrypt_valid = ath11k_hw_qcn9074_rx_desc_encrypt_valid,
+ .rx_desc_get_encrypt_type = ath11k_hw_qcn9074_rx_desc_get_encrypt_type,
+ .rx_desc_get_decap_type = ath11k_hw_qcn9074_rx_desc_get_decap_type,
+ .rx_desc_get_mesh_ctl = ath11k_hw_qcn9074_rx_desc_get_mesh_ctl,
+ .rx_desc_get_ldpc_support = ath11k_hw_qcn9074_rx_desc_get_ldpc_support,
+ .rx_desc_get_mpdu_seq_ctl_vld = ath11k_hw_qcn9074_rx_desc_get_mpdu_seq_ctl_vld,
+ .rx_desc_get_mpdu_fc_valid = ath11k_hw_qcn9074_rx_desc_get_mpdu_fc_valid,
+ .rx_desc_get_mpdu_start_seq_no = ath11k_hw_qcn9074_rx_desc_get_mpdu_start_seq_no,
+ .rx_desc_get_msdu_len = ath11k_hw_qcn9074_rx_desc_get_msdu_len,
+ .rx_desc_get_msdu_sgi = ath11k_hw_qcn9074_rx_desc_get_msdu_sgi,
+ .rx_desc_get_msdu_rate_mcs = ath11k_hw_qcn9074_rx_desc_get_msdu_rate_mcs,
+ .rx_desc_get_msdu_rx_bw = ath11k_hw_qcn9074_rx_desc_get_msdu_rx_bw,
+ .rx_desc_get_msdu_freq = ath11k_hw_qcn9074_rx_desc_get_msdu_freq,
+ .rx_desc_get_msdu_pkt_type = ath11k_hw_qcn9074_rx_desc_get_msdu_pkt_type,
+ .rx_desc_get_msdu_nss = ath11k_hw_qcn9074_rx_desc_get_msdu_nss,
+ .rx_desc_get_mpdu_tid = ath11k_hw_qcn9074_rx_desc_get_mpdu_tid,
+ .rx_desc_get_mpdu_peer_id = ath11k_hw_qcn9074_rx_desc_get_mpdu_peer_id,
+ .rx_desc_copy_attn_end_tlv = ath11k_hw_qcn9074_rx_desc_copy_attn_end,
+ .rx_desc_get_mpdu_start_tag = ath11k_hw_qcn9074_rx_desc_get_mpdu_start_tag,
+ .rx_desc_get_mpdu_ppdu_id = ath11k_hw_qcn9074_rx_desc_get_mpdu_ppdu_id,
+ .rx_desc_set_msdu_len = ath11k_hw_qcn9074_rx_desc_set_msdu_len,
+ .rx_desc_get_attention = ath11k_hw_qcn9074_rx_desc_get_attention,
+ .rx_desc_get_msdu_payload = ath11k_hw_qcn9074_rx_desc_get_msdu_payload,
+ .reo_setup = ath11k_hw_wcn6855_reo_setup,
+ .mpdu_info_get_peerid = ath11k_hw_ipq8074_mpdu_info_get_peerid,
+ .rx_desc_mac_addr2_valid = ath11k_hw_ipq9074_rx_desc_mac_addr2_valid,
+ .rx_desc_mpdu_start_addr2 = ath11k_hw_ipq9074_rx_desc_mpdu_start_addr2,
+ .get_ring_selector = ath11k_hw_wcn6750_get_tcl_ring_selector,
+};
+
+/* IPQ5018 hw ops are similar to QCN9074's except for the dest ring remap */
+const struct ath11k_hw_ops ipq5018_ops = {
+ .get_hw_mac_from_pdev_id = ath11k_hw_ipq6018_mac_from_pdev_id,
+ .wmi_init_config = ath11k_init_wmi_config_ipq8074,
+ .mac_id_to_pdev_id = ath11k_hw_mac_id_to_pdev_id_ipq8074,
+ .mac_id_to_srng_id = ath11k_hw_mac_id_to_srng_id_ipq8074,
+ .tx_mesh_enable = ath11k_hw_qcn9074_tx_mesh_enable,
+ .rx_desc_get_first_msdu = ath11k_hw_qcn9074_rx_desc_get_first_msdu,
+ .rx_desc_get_last_msdu = ath11k_hw_qcn9074_rx_desc_get_last_msdu,
+ .rx_desc_get_l3_pad_bytes = ath11k_hw_qcn9074_rx_desc_get_l3_pad_bytes,
+ .rx_desc_get_hdr_status = ath11k_hw_qcn9074_rx_desc_get_hdr_status,
+ .rx_desc_encrypt_valid = ath11k_hw_qcn9074_rx_desc_encrypt_valid,
+ .rx_desc_get_encrypt_type = ath11k_hw_qcn9074_rx_desc_get_encrypt_type,
+ .rx_desc_get_decap_type = ath11k_hw_qcn9074_rx_desc_get_decap_type,
+ .rx_desc_get_mesh_ctl = ath11k_hw_qcn9074_rx_desc_get_mesh_ctl,
+ .rx_desc_get_ldpc_support = ath11k_hw_qcn9074_rx_desc_get_ldpc_support,
+ .rx_desc_get_mpdu_seq_ctl_vld = ath11k_hw_qcn9074_rx_desc_get_mpdu_seq_ctl_vld,
+ .rx_desc_get_mpdu_fc_valid = ath11k_hw_qcn9074_rx_desc_get_mpdu_fc_valid,
+ .rx_desc_get_mpdu_start_seq_no = ath11k_hw_qcn9074_rx_desc_get_mpdu_start_seq_no,
+ .rx_desc_get_msdu_len = ath11k_hw_qcn9074_rx_desc_get_msdu_len,
+ .rx_desc_get_msdu_sgi = ath11k_hw_qcn9074_rx_desc_get_msdu_sgi,
+ .rx_desc_get_msdu_rate_mcs = ath11k_hw_qcn9074_rx_desc_get_msdu_rate_mcs,
+ .rx_desc_get_msdu_rx_bw = ath11k_hw_qcn9074_rx_desc_get_msdu_rx_bw,
+ .rx_desc_get_msdu_freq = ath11k_hw_qcn9074_rx_desc_get_msdu_freq,
+ .rx_desc_get_msdu_pkt_type = ath11k_hw_qcn9074_rx_desc_get_msdu_pkt_type,
+ .rx_desc_get_msdu_nss = ath11k_hw_qcn9074_rx_desc_get_msdu_nss,
+ .rx_desc_get_mpdu_tid = ath11k_hw_qcn9074_rx_desc_get_mpdu_tid,
+ .rx_desc_get_mpdu_peer_id = ath11k_hw_qcn9074_rx_desc_get_mpdu_peer_id,
+ .rx_desc_copy_attn_end_tlv = ath11k_hw_qcn9074_rx_desc_copy_attn_end,
+ .rx_desc_get_mpdu_start_tag = ath11k_hw_qcn9074_rx_desc_get_mpdu_start_tag,
+ .rx_desc_get_mpdu_ppdu_id = ath11k_hw_qcn9074_rx_desc_get_mpdu_ppdu_id,
+ .rx_desc_set_msdu_len = ath11k_hw_qcn9074_rx_desc_set_msdu_len,
+ .rx_desc_get_attention = ath11k_hw_qcn9074_rx_desc_get_attention,
+ .reo_setup = ath11k_hw_ipq5018_reo_setup,
+ .rx_desc_get_msdu_payload = ath11k_hw_qcn9074_rx_desc_get_msdu_payload,
+ .mpdu_info_get_peerid = ath11k_hw_ipq8074_mpdu_info_get_peerid,
+ .rx_desc_mac_addr2_valid = ath11k_hw_ipq9074_rx_desc_mac_addr2_valid,
+ .rx_desc_mpdu_start_addr2 = ath11k_hw_ipq9074_rx_desc_mpdu_start_addr2,
+ .get_ring_selector = ath11k_hw_ipq8074_get_tcl_ring_selector,
+};
+
+#define ATH11K_TX_RING_MASK_0 BIT(0)
+#define ATH11K_TX_RING_MASK_1 BIT(1)
+#define ATH11K_TX_RING_MASK_2 BIT(2)
+#define ATH11K_TX_RING_MASK_3 BIT(3)
+#define ATH11K_TX_RING_MASK_4 BIT(4)
+
+#define ATH11K_RX_RING_MASK_0 0x1
+#define ATH11K_RX_RING_MASK_1 0x2
+#define ATH11K_RX_RING_MASK_2 0x4
+#define ATH11K_RX_RING_MASK_3 0x8
+
+#define ATH11K_RX_ERR_RING_MASK_0 0x1
+
+#define ATH11K_RX_WBM_REL_RING_MASK_0 0x1
+
+#define ATH11K_REO_STATUS_RING_MASK_0 0x1
+
+#define ATH11K_RXDMA2HOST_RING_MASK_0 0x1
+#define ATH11K_RXDMA2HOST_RING_MASK_1 0x2
+#define ATH11K_RXDMA2HOST_RING_MASK_2 0x4
+
+#define ATH11K_HOST2RXDMA_RING_MASK_0 0x1
+#define ATH11K_HOST2RXDMA_RING_MASK_1 0x2
+#define ATH11K_HOST2RXDMA_RING_MASK_2 0x4
+
+#define ATH11K_RX_MON_STATUS_RING_MASK_0 0x1
+#define ATH11K_RX_MON_STATUS_RING_MASK_1 0x2
+#define ATH11K_RX_MON_STATUS_RING_MASK_2 0x4
+
+const struct ath11k_hw_ring_mask ath11k_hw_ring_mask_ipq8074 = {
+ .tx = {
+ ATH11K_TX_RING_MASK_0,
+ ATH11K_TX_RING_MASK_1,
+ ATH11K_TX_RING_MASK_2,
+ },
+ .rx_mon_status = {
+ 0, 0, 0, 0,
+ ATH11K_RX_MON_STATUS_RING_MASK_0,
+ ATH11K_RX_MON_STATUS_RING_MASK_1,
+ ATH11K_RX_MON_STATUS_RING_MASK_2,
+ },
+ .rx = {
+ 0, 0, 0, 0, 0, 0, 0,
+ ATH11K_RX_RING_MASK_0,
+ ATH11K_RX_RING_MASK_1,
+ ATH11K_RX_RING_MASK_2,
+ ATH11K_RX_RING_MASK_3,
+ },
+ .rx_err = {
+ ATH11K_RX_ERR_RING_MASK_0,
+ },
+ .rx_wbm_rel = {
+ ATH11K_RX_WBM_REL_RING_MASK_0,
+ },
+ .reo_status = {
+ 0, 0, 0,
+ ATH11K_REO_STATUS_RING_MASK_0,
+ },
+ .rxdma2host = {
+ ATH11K_RXDMA2HOST_RING_MASK_0,
+ ATH11K_RXDMA2HOST_RING_MASK_1,
+ ATH11K_RXDMA2HOST_RING_MASK_2,
+ },
+ .host2rxdma = {
+ ATH11K_HOST2RXDMA_RING_MASK_0,
+ ATH11K_HOST2RXDMA_RING_MASK_1,
+ ATH11K_HOST2RXDMA_RING_MASK_2,
+ },
+};
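+
+/* Each array in a ring mask is indexed by EXT interrupt group, and bit
+ * i of an entry marks ring i of that type as serviced by that group.
+ * A minimal membership test sketch (illustration only, compiled out;
+ * the helper name is hypothetical):
+ */
+#if 0
+static bool example_group_services_ring(const u8 *mask, int grp_id,
+					int ring_num)
+{
+	return mask[grp_id] & BIT(ring_num);
+}
+#endif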
+
+const struct ath11k_hw_ring_mask ath11k_hw_ring_mask_qca6390 = {
+ .tx = {
+ ATH11K_TX_RING_MASK_0,
+ },
+ .rx_mon_status = {
+ 0, 0, 0, 0,
+ ATH11K_RX_MON_STATUS_RING_MASK_0,
+ ATH11K_RX_MON_STATUS_RING_MASK_1,
+ ATH11K_RX_MON_STATUS_RING_MASK_2,
+ },
+ .rx = {
+ 0, 0, 0, 0, 0, 0, 0,
+ ATH11K_RX_RING_MASK_0,
+ ATH11K_RX_RING_MASK_1,
+ ATH11K_RX_RING_MASK_2,
+ ATH11K_RX_RING_MASK_3,
+ },
+ .rx_err = {
+ ATH11K_RX_ERR_RING_MASK_0,
+ },
+ .rx_wbm_rel = {
+ ATH11K_RX_WBM_REL_RING_MASK_0,
+ },
+ .reo_status = {
+ ATH11K_REO_STATUS_RING_MASK_0,
+ },
+ .rxdma2host = {
+ ATH11K_RXDMA2HOST_RING_MASK_0,
+ ATH11K_RXDMA2HOST_RING_MASK_1,
+ ATH11K_RXDMA2HOST_RING_MASK_2,
+ },
+ .host2rxdma = {
+ },
+};
+
+/* Target firmware's Copy Engine configuration. */
+const struct ce_pipe_config ath11k_target_ce_config_wlan_ipq8074[] = {
+ /* CE0: host->target HTC control and raw streams */
+ {
+ .pipenum = __cpu_to_le32(0),
+ .pipedir = __cpu_to_le32(PIPEDIR_OUT),
+ .nentries = __cpu_to_le32(32),
+ .nbytes_max = __cpu_to_le32(2048),
+ .flags = __cpu_to_le32(CE_ATTR_FLAGS),
+ .reserved = __cpu_to_le32(0),
+ },
+
+ /* CE1: target->host HTT + HTC control */
+ {
+ .pipenum = __cpu_to_le32(1),
+ .pipedir = __cpu_to_le32(PIPEDIR_IN),
+ .nentries = __cpu_to_le32(32),
+ .nbytes_max = __cpu_to_le32(2048),
+ .flags = __cpu_to_le32(CE_ATTR_FLAGS),
+ .reserved = __cpu_to_le32(0),
+ },
+
+ /* CE2: target->host WMI */
+ {
+ .pipenum = __cpu_to_le32(2),
+ .pipedir = __cpu_to_le32(PIPEDIR_IN),
+ .nentries = __cpu_to_le32(32),
+ .nbytes_max = __cpu_to_le32(2048),
+ .flags = __cpu_to_le32(CE_ATTR_FLAGS),
+ .reserved = __cpu_to_le32(0),
+ },
+
+ /* CE3: host->target WMI */
+ {
+ .pipenum = __cpu_to_le32(3),
+ .pipedir = __cpu_to_le32(PIPEDIR_OUT),
+ .nentries = __cpu_to_le32(32),
+ .nbytes_max = __cpu_to_le32(2048),
+ .flags = __cpu_to_le32(CE_ATTR_FLAGS),
+ .reserved = __cpu_to_le32(0),
+ },
+
+ /* CE4: host->target HTT */
+ {
+ .pipenum = __cpu_to_le32(4),
+ .pipedir = __cpu_to_le32(PIPEDIR_OUT),
+ .nentries = __cpu_to_le32(256),
+ .nbytes_max = __cpu_to_le32(256),
+ .flags = __cpu_to_le32(CE_ATTR_FLAGS | CE_ATTR_DIS_INTR),
+ .reserved = __cpu_to_le32(0),
+ },
+
+ /* CE5: target->host Pktlog */
+ {
+ .pipenum = __cpu_to_le32(5),
+ .pipedir = __cpu_to_le32(PIPEDIR_IN),
+ .nentries = __cpu_to_le32(32),
+ .nbytes_max = __cpu_to_le32(2048),
+ .flags = __cpu_to_le32(0),
+ .reserved = __cpu_to_le32(0),
+ },
+
+ /* CE6: Reserved for target autonomous hif_memcpy */
+ {
+ .pipenum = __cpu_to_le32(6),
+ .pipedir = __cpu_to_le32(PIPEDIR_INOUT),
+ .nentries = __cpu_to_le32(32),
+ .nbytes_max = __cpu_to_le32(65535),
+ .flags = __cpu_to_le32(CE_ATTR_FLAGS),
+ .reserved = __cpu_to_le32(0),
+ },
+
+ /* CE7 used only by Host */
+ {
+ .pipenum = __cpu_to_le32(7),
+ .pipedir = __cpu_to_le32(PIPEDIR_OUT),
+ .nentries = __cpu_to_le32(32),
+ .nbytes_max = __cpu_to_le32(2048),
+ .flags = __cpu_to_le32(CE_ATTR_FLAGS),
+ .reserved = __cpu_to_le32(0),
+ },
+
+ /* CE8 target->host used only by IPA */
+ {
+ .pipenum = __cpu_to_le32(8),
+ .pipedir = __cpu_to_le32(PIPEDIR_INOUT),
+ .nentries = __cpu_to_le32(32),
+ .nbytes_max = __cpu_to_le32(65535),
+ .flags = __cpu_to_le32(CE_ATTR_FLAGS),
+ .reserved = __cpu_to_le32(0),
+ },
+
+ /* CE9 host->target HTT */
+ {
+ .pipenum = __cpu_to_le32(9),
+ .pipedir = __cpu_to_le32(PIPEDIR_OUT),
+ .nentries = __cpu_to_le32(32),
+ .nbytes_max = __cpu_to_le32(2048),
+ .flags = __cpu_to_le32(CE_ATTR_FLAGS),
+ .reserved = __cpu_to_le32(0),
+ },
+
+ /* CE10 target->host HTT */
+ {
+ .pipenum = __cpu_to_le32(10),
+ .pipedir = __cpu_to_le32(PIPEDIR_INOUT_H2H),
+ .nentries = __cpu_to_le32(0),
+ .nbytes_max = __cpu_to_le32(0),
+ .flags = __cpu_to_le32(CE_ATTR_FLAGS),
+ .reserved = __cpu_to_le32(0),
+ },
+
+ /* CE11 Not used */
+};
+
+/* Map from service/endpoint to Copy Engine.
+ * This table is derived from the CE_PCI TABLE, above.
+ * It is passed to the Target at startup for use by firmware.
+ */
+const struct service_to_pipe ath11k_target_service_to_ce_map_wlan_ipq8074[] = {
+ {
+ .service_id = __cpu_to_le32(ATH11K_HTC_SVC_ID_WMI_DATA_VO),
+ .pipedir = __cpu_to_le32(PIPEDIR_OUT), /* out = UL = host -> target */
+ .pipenum = __cpu_to_le32(3),
+ },
+ {
+ .service_id = __cpu_to_le32(ATH11K_HTC_SVC_ID_WMI_DATA_VO),
+ .pipedir = __cpu_to_le32(PIPEDIR_IN), /* in = DL = target -> host */
+ .pipenum = __cpu_to_le32(2),
+ },
+ {
+ .service_id = __cpu_to_le32(ATH11K_HTC_SVC_ID_WMI_DATA_BK),
+ .pipedir = __cpu_to_le32(PIPEDIR_OUT), /* out = UL = host -> target */
+ .pipenum = __cpu_to_le32(3),
+ },
+ {
+ .service_id = __cpu_to_le32(ATH11K_HTC_SVC_ID_WMI_DATA_BK),
+ .pipedir = __cpu_to_le32(PIPEDIR_IN), /* in = DL = target -> host */
+ .pipenum = __cpu_to_le32(2),
+ },
+ {
+ .service_id = __cpu_to_le32(ATH11K_HTC_SVC_ID_WMI_DATA_BE),
+ .pipedir = __cpu_to_le32(PIPEDIR_OUT), /* out = UL = host -> target */
+ .pipenum = __cpu_to_le32(3),
+ },
+ {
+ .service_id = __cpu_to_le32(ATH11K_HTC_SVC_ID_WMI_DATA_BE),
+ .pipedir = __cpu_to_le32(PIPEDIR_IN), /* in = DL = target -> host */
+ .pipenum = __cpu_to_le32(2),
+ },
+ {
+ .service_id = __cpu_to_le32(ATH11K_HTC_SVC_ID_WMI_DATA_VI),
+ .pipedir = __cpu_to_le32(PIPEDIR_OUT), /* out = UL = host -> target */
+ .pipenum = __cpu_to_le32(3),
+ },
+ {
+ .service_id = __cpu_to_le32(ATH11K_HTC_SVC_ID_WMI_DATA_VI),
+ .pipedir = __cpu_to_le32(PIPEDIR_IN), /* in = DL = target -> host */
+ .pipenum = __cpu_to_le32(2),
+ },
+ {
+ .service_id = __cpu_to_le32(ATH11K_HTC_SVC_ID_WMI_CONTROL),
+ .pipedir = __cpu_to_le32(PIPEDIR_OUT), /* out = UL = host -> target */
+ .pipenum = __cpu_to_le32(3),
+ },
+ {
+ .service_id = __cpu_to_le32(ATH11K_HTC_SVC_ID_WMI_CONTROL),
+ .pipedir = __cpu_to_le32(PIPEDIR_IN), /* in = DL = target -> host */
+ .pipenum = __cpu_to_le32(2),
+ },
+ {
+ .service_id = __cpu_to_le32(ATH11K_HTC_SVC_ID_WMI_CONTROL_MAC1),
+ .pipedir = __cpu_to_le32(PIPEDIR_OUT), /* out = UL = host -> target */
+ .pipenum = __cpu_to_le32(7),
+ },
+ {
+ .service_id = __cpu_to_le32(ATH11K_HTC_SVC_ID_WMI_CONTROL_MAC1),
+ .pipedir = __cpu_to_le32(PIPEDIR_IN), /* in = DL = target -> host */
+ .pipenum = __cpu_to_le32(2),
+ },
+ {
+ .service_id = __cpu_to_le32(ATH11K_HTC_SVC_ID_WMI_CONTROL_MAC2),
+ .pipedir = __cpu_to_le32(PIPEDIR_OUT), /* out = UL = host -> target */
+ .pipenum = __cpu_to_le32(9),
+ },
+ {
+ .service_id = __cpu_to_le32(ATH11K_HTC_SVC_ID_WMI_CONTROL_MAC2),
+ .pipedir = __cpu_to_le32(PIPEDIR_IN), /* in = DL = target -> host */
+ .pipenum = __cpu_to_le32(2),
+ },
+ {
+ .service_id = __cpu_to_le32(ATH11K_HTC_SVC_ID_RSVD_CTRL),
+ .pipedir = __cpu_to_le32(PIPEDIR_OUT), /* out = UL = host -> target */
+ .pipenum = __cpu_to_le32(0),
+ },
+ {
+ .service_id = __cpu_to_le32(ATH11K_HTC_SVC_ID_RSVD_CTRL),
+ .pipedir = __cpu_to_le32(PIPEDIR_IN), /* in = DL = target -> host */
+ .pipenum = __cpu_to_le32(1),
+ },
+ { /* not used */
+ .service_id = __cpu_to_le32(ATH11K_HTC_SVC_ID_TEST_RAW_STREAMS),
+ .pipedir = __cpu_to_le32(PIPEDIR_OUT), /* out = UL = host -> target */
+ .pipenum = __cpu_to_le32(0),
+ },
+ { /* not used */
+ .service_id = __cpu_to_le32(ATH11K_HTC_SVC_ID_TEST_RAW_STREAMS),
+ .pipedir = __cpu_to_le32(PIPEDIR_IN), /* in = DL = target -> host */
+ .pipenum = __cpu_to_le32(1),
+ },
+ {
+ .service_id = __cpu_to_le32(ATH11K_HTC_SVC_ID_HTT_DATA_MSG),
+ .pipedir = __cpu_to_le32(PIPEDIR_OUT), /* out = UL = host -> target */
+ .pipenum = __cpu_to_le32(4),
+ },
+ {
+ .service_id = __cpu_to_le32(ATH11K_HTC_SVC_ID_HTT_DATA_MSG),
+ .pipedir = __cpu_to_le32(PIPEDIR_IN), /* in = DL = target -> host */
+ .pipenum = __cpu_to_le32(1),
+ },
+ {
+ .service_id = __cpu_to_le32(ATH11K_HTC_SVC_ID_PKT_LOG),
+ .pipedir = __cpu_to_le32(PIPEDIR_IN), /* in = DL = target -> host */
+ .pipenum = __cpu_to_le32(5),
+ },
+
+ /* (Additions here) */
+
+ { /* terminator entry */ }
+};
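+
+/* Lookup sketch over a map like the one above (illustration only,
+ * compiled out; the helper is hypothetical and assumes the all-zero
+ * terminator entry ends the table):
+ */
+#if 0
+static int example_service_to_pipe(const struct service_to_pipe *map,
+				   u32 service_id, u32 pipedir)
+{
+	for (; __le32_to_cpu(map->service_id); map++)
+		if (__le32_to_cpu(map->service_id) == service_id &&
+		    __le32_to_cpu(map->pipedir) == pipedir)
+			return __le32_to_cpu(map->pipenum);
+
+	return -ENOENT;
+}
+#endif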
+
+const struct service_to_pipe ath11k_target_service_to_ce_map_wlan_ipq6018[] = {
+ {
+ .service_id = __cpu_to_le32(ATH11K_HTC_SVC_ID_WMI_DATA_VO),
+ .pipedir = __cpu_to_le32(PIPEDIR_OUT), /* out = UL = host -> target */
+ .pipenum = __cpu_to_le32(3),
+ },
+ {
+ .service_id = __cpu_to_le32(ATH11K_HTC_SVC_ID_WMI_DATA_VO),
+ .pipedir = __cpu_to_le32(PIPEDIR_IN), /* in = DL = target -> host */
+ .pipenum = __cpu_to_le32(2),
+ },
+ {
+ .service_id = __cpu_to_le32(ATH11K_HTC_SVC_ID_WMI_DATA_BK),
+ .pipedir = __cpu_to_le32(PIPEDIR_OUT), /* out = UL = host -> target */
+ .pipenum = __cpu_to_le32(3),
+ },
+ {
+ .service_id = __cpu_to_le32(ATH11K_HTC_SVC_ID_WMI_DATA_BK),
+ .pipedir = __cpu_to_le32(PIPEDIR_IN), /* in = DL = target -> host */
+ .pipenum = __cpu_to_le32(2),
+ },
+ {
+ .service_id = __cpu_to_le32(ATH11K_HTC_SVC_ID_WMI_DATA_BE),
+ .pipedir = __cpu_to_le32(PIPEDIR_OUT), /* out = UL = host -> target */
+ .pipenum = __cpu_to_le32(3),
+ },
+ {
+ .service_id = __cpu_to_le32(ATH11K_HTC_SVC_ID_WMI_DATA_BE),
+ .pipedir = __cpu_to_le32(PIPEDIR_IN), /* in = DL = target -> host */
+ .pipenum = __cpu_to_le32(2),
+ },
+ {
+ .service_id = __cpu_to_le32(ATH11K_HTC_SVC_ID_WMI_DATA_VI),
+ .pipedir = __cpu_to_le32(PIPEDIR_OUT), /* out = UL = host -> target */
+ .pipenum = __cpu_to_le32(3),
+ },
+ {
+ .service_id = __cpu_to_le32(ATH11K_HTC_SVC_ID_WMI_DATA_VI),
+ .pipedir = __cpu_to_le32(PIPEDIR_IN), /* in = DL = target -> host */
+ .pipenum = __cpu_to_le32(2),
+ },
+ {
+ .service_id = __cpu_to_le32(ATH11K_HTC_SVC_ID_WMI_CONTROL),
+ .pipedir = __cpu_to_le32(PIPEDIR_OUT), /* out = UL = host -> target */
+ .pipenum = __cpu_to_le32(3),
+ },
+ {
+ .service_id = __cpu_to_le32(ATH11K_HTC_SVC_ID_WMI_CONTROL),
+ .pipedir = __cpu_to_le32(PIPEDIR_IN), /* in = DL = target -> host */
+ .pipenum = __cpu_to_le32(2),
+ },
+ {
+ .service_id = __cpu_to_le32(ATH11K_HTC_SVC_ID_WMI_CONTROL_MAC1),
+ .pipedir = __cpu_to_le32(PIPEDIR_OUT), /* out = UL = host -> target */
+ .pipenum = __cpu_to_le32(7),
+ },
+ {
+ .service_id = __cpu_to_le32(ATH11K_HTC_SVC_ID_WMI_CONTROL_MAC1),
+ .pipedir = __cpu_to_le32(PIPEDIR_IN), /* in = DL = target -> host */
+ .pipenum = __cpu_to_le32(2),
+ },
+ {
+ .service_id = __cpu_to_le32(ATH11K_HTC_SVC_ID_RSVD_CTRL),
+ .pipedir = __cpu_to_le32(PIPEDIR_OUT), /* out = UL = host -> target */
+ .pipenum = __cpu_to_le32(0),
+ },
+ {
+ .service_id = __cpu_to_le32(ATH11K_HTC_SVC_ID_RSVD_CTRL),
+ .pipedir = __cpu_to_le32(PIPEDIR_IN), /* in = DL = target -> host */
+ .pipenum = __cpu_to_le32(1),
+ },
+ { /* not used */
+ .service_id = __cpu_to_le32(ATH11K_HTC_SVC_ID_TEST_RAW_STREAMS),
+ .pipedir = __cpu_to_le32(PIPEDIR_OUT), /* out = UL = host -> target */
+ .pipenum = __cpu_to_le32(0),
+ },
+ { /* not used */
+ .service_id = __cpu_to_le32(ATH11K_HTC_SVC_ID_TEST_RAW_STREAMS),
+ .pipedir = __cpu_to_le32(PIPEDIR_IN), /* in = DL = target -> host */
+ .pipenum = __cpu_to_le32(1),
+ },
+ {
+ .service_id = __cpu_to_le32(ATH11K_HTC_SVC_ID_HTT_DATA_MSG),
+ .pipedir = __cpu_to_le32(PIPEDIR_OUT), /* out = UL = host -> target */
+ .pipenum = __cpu_to_le32(4),
+ },
+ {
+ .service_id = __cpu_to_le32(ATH11K_HTC_SVC_ID_HTT_DATA_MSG),
+ .pipedir = __cpu_to_le32(PIPEDIR_IN), /* in = DL = target -> host */
+ .pipenum = __cpu_to_le32(1),
+ },
+ {
+ .service_id = __cpu_to_le32(ATH11K_HTC_SVC_ID_PKT_LOG),
+ .pipedir = __cpu_to_le32(PIPEDIR_IN), /* in = DL = target -> host */
+ .pipenum = __cpu_to_le32(5),
+ },
+
+ /* (Additions here) */
+
+ { /* terminator entry */ }
+};
+
+/* Target firmware's Copy Engine configuration. */
+const struct ce_pipe_config ath11k_target_ce_config_wlan_qca6390[] = {
+ /* CE0: host->target HTC control and raw streams */
+ {
+ .pipenum = __cpu_to_le32(0),
+ .pipedir = __cpu_to_le32(PIPEDIR_OUT),
+ .nentries = __cpu_to_le32(32),
+ .nbytes_max = __cpu_to_le32(2048),
+ .flags = __cpu_to_le32(CE_ATTR_FLAGS),
+ .reserved = __cpu_to_le32(0),
+ },
+
+ /* CE1: target->host HTT + HTC control */
+ {
+ .pipenum = __cpu_to_le32(1),
+ .pipedir = __cpu_to_le32(PIPEDIR_IN),
+ .nentries = __cpu_to_le32(32),
+ .nbytes_max = __cpu_to_le32(2048),
+ .flags = __cpu_to_le32(CE_ATTR_FLAGS),
+ .reserved = __cpu_to_le32(0),
+ },
+
+ /* CE2: target->host WMI */
+ {
+ .pipenum = __cpu_to_le32(2),
+ .pipedir = __cpu_to_le32(PIPEDIR_IN),
+ .nentries = __cpu_to_le32(32),
+ .nbytes_max = __cpu_to_le32(2048),
+ .flags = __cpu_to_le32(CE_ATTR_FLAGS),
+ .reserved = __cpu_to_le32(0),
+ },
+
+ /* CE3: host->target WMI */
+ {
+ .pipenum = __cpu_to_le32(3),
+ .pipedir = __cpu_to_le32(PIPEDIR_OUT),
+ .nentries = __cpu_to_le32(32),
+ .nbytes_max = __cpu_to_le32(2048),
+ .flags = __cpu_to_le32(CE_ATTR_FLAGS),
+ .reserved = __cpu_to_le32(0),
+ },
+
+ /* CE4: host->target HTT */
+ {
+ .pipenum = __cpu_to_le32(4),
+ .pipedir = __cpu_to_le32(PIPEDIR_OUT),
+ .nentries = __cpu_to_le32(256),
+ .nbytes_max = __cpu_to_le32(256),
+ .flags = __cpu_to_le32(CE_ATTR_FLAGS | CE_ATTR_DIS_INTR),
+ .reserved = __cpu_to_le32(0),
+ },
+
+ /* CE5: target->host Pktlog */
+ {
+ .pipenum = __cpu_to_le32(5),
+ .pipedir = __cpu_to_le32(PIPEDIR_IN),
+ .nentries = __cpu_to_le32(32),
+ .nbytes_max = __cpu_to_le32(2048),
+ .flags = __cpu_to_le32(CE_ATTR_FLAGS),
+ .reserved = __cpu_to_le32(0),
+ },
+
+ /* CE6: Reserved for target autonomous hif_memcpy */
+ {
+ .pipenum = __cpu_to_le32(6),
+ .pipedir = __cpu_to_le32(PIPEDIR_INOUT),
+ .nentries = __cpu_to_le32(32),
+ .nbytes_max = __cpu_to_le32(16384),
+ .flags = __cpu_to_le32(CE_ATTR_FLAGS),
+ .reserved = __cpu_to_le32(0),
+ },
+
+ /* CE7 used only by Host */
+ {
+ .pipenum = __cpu_to_le32(7),
+ .pipedir = __cpu_to_le32(PIPEDIR_INOUT_H2H),
+ .nentries = __cpu_to_le32(0),
+ .nbytes_max = __cpu_to_le32(0),
+ .flags = __cpu_to_le32(CE_ATTR_FLAGS | CE_ATTR_DIS_INTR),
+ .reserved = __cpu_to_le32(0),
+ },
+
+ /* CE8 target->host used only by IPA */
+ {
+ .pipenum = __cpu_to_le32(8),
+ .pipedir = __cpu_to_le32(PIPEDIR_INOUT),
+ .nentries = __cpu_to_le32(32),
+ .nbytes_max = __cpu_to_le32(16384),
+ .flags = __cpu_to_le32(CE_ATTR_FLAGS),
+ .reserved = __cpu_to_le32(0),
+ },
+	/* CE 9, 10, 11 are used by the MHI driver */
+};
+
+/* Map from service/endpoint to Copy Engine.
+ * This table is derived from the CE_PCI TABLE, above.
+ * It is passed to the Target at startup for use by firmware.
+ */
+const struct service_to_pipe ath11k_target_service_to_ce_map_wlan_qca6390[] = {
+ {
+ __cpu_to_le32(ATH11K_HTC_SVC_ID_WMI_DATA_VO),
+ __cpu_to_le32(PIPEDIR_OUT), /* out = UL = host -> target */
+ __cpu_to_le32(3),
+ },
+ {
+ __cpu_to_le32(ATH11K_HTC_SVC_ID_WMI_DATA_VO),
+ __cpu_to_le32(PIPEDIR_IN), /* in = DL = target -> host */
+ __cpu_to_le32(2),
+ },
+ {
+ __cpu_to_le32(ATH11K_HTC_SVC_ID_WMI_DATA_BK),
+ __cpu_to_le32(PIPEDIR_OUT), /* out = UL = host -> target */
+ __cpu_to_le32(3),
+ },
+ {
+ __cpu_to_le32(ATH11K_HTC_SVC_ID_WMI_DATA_BK),
+ __cpu_to_le32(PIPEDIR_IN), /* in = DL = target -> host */
+ __cpu_to_le32(2),
+ },
+ {
+ __cpu_to_le32(ATH11K_HTC_SVC_ID_WMI_DATA_BE),
+ __cpu_to_le32(PIPEDIR_OUT), /* out = UL = host -> target */
+ __cpu_to_le32(3),
+ },
+ {
+ __cpu_to_le32(ATH11K_HTC_SVC_ID_WMI_DATA_BE),
+ __cpu_to_le32(PIPEDIR_IN), /* in = DL = target -> host */
+ __cpu_to_le32(2),
+ },
+ {
+ __cpu_to_le32(ATH11K_HTC_SVC_ID_WMI_DATA_VI),
+ __cpu_to_le32(PIPEDIR_OUT), /* out = UL = host -> target */
+ __cpu_to_le32(3),
+ },
+ {
+ __cpu_to_le32(ATH11K_HTC_SVC_ID_WMI_DATA_VI),
+ __cpu_to_le32(PIPEDIR_IN), /* in = DL = target -> host */
+ __cpu_to_le32(2),
+ },
+ {
+ __cpu_to_le32(ATH11K_HTC_SVC_ID_WMI_CONTROL),
+ __cpu_to_le32(PIPEDIR_OUT), /* out = UL = host -> target */
+ __cpu_to_le32(3),
+ },
+ {
+ __cpu_to_le32(ATH11K_HTC_SVC_ID_WMI_CONTROL),
+ __cpu_to_le32(PIPEDIR_IN), /* in = DL = target -> host */
+ __cpu_to_le32(2),
+ },
+ {
+ __cpu_to_le32(ATH11K_HTC_SVC_ID_RSVD_CTRL),
+ __cpu_to_le32(PIPEDIR_OUT), /* out = UL = host -> target */
+ __cpu_to_le32(0),
+ },
+ {
+ __cpu_to_le32(ATH11K_HTC_SVC_ID_RSVD_CTRL),
+ __cpu_to_le32(PIPEDIR_IN), /* in = DL = target -> host */
+ __cpu_to_le32(2),
+ },
+ {
+ __cpu_to_le32(ATH11K_HTC_SVC_ID_HTT_DATA_MSG),
+ __cpu_to_le32(PIPEDIR_OUT), /* out = UL = host -> target */
+ __cpu_to_le32(4),
+ },
+ {
+ __cpu_to_le32(ATH11K_HTC_SVC_ID_HTT_DATA_MSG),
+ __cpu_to_le32(PIPEDIR_IN), /* in = DL = target -> host */
+ __cpu_to_le32(1),
+ },
+
+ /* (Additions here) */
+
+ { /* must be last */
+ __cpu_to_le32(0),
+ __cpu_to_le32(0),
+ __cpu_to_le32(0),
+ },
+};
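+
+/* Illustrative sketch, loosely modeled on the driver's
+ * map_service_to_pipe helpers (names and exact control flow may
+ * differ): consumers resolve a service to its UL/DL pipes by scanning
+ * a table like the one above, bounded by svc_to_ce_map_len:
+ *
+ *	for (i = 0; i < ab->hw_params.svc_to_ce_map_len; i++) {
+ *		entry = &ab->hw_params.svc_to_ce_map[i];
+ *		if (__le32_to_cpu(entry->service_id) != service_id)
+ *			continue;
+ *		if (__le32_to_cpu(entry->pipedir) == PIPEDIR_OUT)
+ *			*ul_pipe = __le32_to_cpu(entry->pipenum);
+ *		else if (__le32_to_cpu(entry->pipedir) == PIPEDIR_IN)
+ *			*dl_pipe = __le32_to_cpu(entry->pipenum);
+ *	}
+ */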
+
+/* Target firmware's Copy Engine configuration. */
+const struct ce_pipe_config ath11k_target_ce_config_wlan_qcn9074[] = {
+ /* CE0: host->target HTC control and raw streams */
+ {
+ .pipenum = __cpu_to_le32(0),
+ .pipedir = __cpu_to_le32(PIPEDIR_OUT),
+ .nentries = __cpu_to_le32(32),
+ .nbytes_max = __cpu_to_le32(2048),
+ .flags = __cpu_to_le32(CE_ATTR_FLAGS),
+ .reserved = __cpu_to_le32(0),
+ },
+
+ /* CE1: target->host HTT + HTC control */
+ {
+ .pipenum = __cpu_to_le32(1),
+ .pipedir = __cpu_to_le32(PIPEDIR_IN),
+ .nentries = __cpu_to_le32(32),
+ .nbytes_max = __cpu_to_le32(2048),
+ .flags = __cpu_to_le32(CE_ATTR_FLAGS),
+ .reserved = __cpu_to_le32(0),
+ },
+
+ /* CE2: target->host WMI */
+ {
+ .pipenum = __cpu_to_le32(2),
+ .pipedir = __cpu_to_le32(PIPEDIR_IN),
+ .nentries = __cpu_to_le32(32),
+ .nbytes_max = __cpu_to_le32(2048),
+ .flags = __cpu_to_le32(CE_ATTR_FLAGS),
+ .reserved = __cpu_to_le32(0),
+ },
+
+ /* CE3: host->target WMI */
+ {
+ .pipenum = __cpu_to_le32(3),
+ .pipedir = __cpu_to_le32(PIPEDIR_OUT),
+ .nentries = __cpu_to_le32(32),
+ .nbytes_max = __cpu_to_le32(2048),
+ .flags = __cpu_to_le32(CE_ATTR_FLAGS),
+ .reserved = __cpu_to_le32(0),
+ },
+
+ /* CE4: host->target HTT */
+ {
+ .pipenum = __cpu_to_le32(4),
+ .pipedir = __cpu_to_le32(PIPEDIR_OUT),
+ .nentries = __cpu_to_le32(256),
+ .nbytes_max = __cpu_to_le32(256),
+ .flags = __cpu_to_le32(CE_ATTR_FLAGS | CE_ATTR_DIS_INTR),
+ .reserved = __cpu_to_le32(0),
+ },
+
+ /* CE5: target->host Pktlog */
+ {
+ .pipenum = __cpu_to_le32(5),
+ .pipedir = __cpu_to_le32(PIPEDIR_IN),
+ .nentries = __cpu_to_le32(32),
+ .nbytes_max = __cpu_to_le32(2048),
+ .flags = __cpu_to_le32(CE_ATTR_FLAGS),
+ .reserved = __cpu_to_le32(0),
+ },
+
+ /* CE6: Reserved for target autonomous hif_memcpy */
+ {
+ .pipenum = __cpu_to_le32(6),
+ .pipedir = __cpu_to_le32(PIPEDIR_INOUT),
+ .nentries = __cpu_to_le32(32),
+ .nbytes_max = __cpu_to_le32(16384),
+ .flags = __cpu_to_le32(CE_ATTR_FLAGS),
+ .reserved = __cpu_to_le32(0),
+ },
+
+ /* CE7 used only by Host */
+ {
+ .pipenum = __cpu_to_le32(7),
+ .pipedir = __cpu_to_le32(PIPEDIR_INOUT_H2H),
+ .nentries = __cpu_to_le32(0),
+ .nbytes_max = __cpu_to_le32(0),
+ .flags = __cpu_to_le32(CE_ATTR_FLAGS | CE_ATTR_DIS_INTR),
+ .reserved = __cpu_to_le32(0),
+ },
+
+ /* CE8 target->host used only by IPA */
+ {
+ .pipenum = __cpu_to_le32(8),
+ .pipedir = __cpu_to_le32(PIPEDIR_INOUT),
+ .nentries = __cpu_to_le32(32),
+ .nbytes_max = __cpu_to_le32(16384),
+ .flags = __cpu_to_le32(CE_ATTR_FLAGS),
+ .reserved = __cpu_to_le32(0),
+ },
+	/* CE 9, 10, 11 are used by the MHI driver */
+};
+
+/* Map from service/endpoint to Copy Engine.
+ * This table is derived from the CE_PCI TABLE, above.
+ * It is passed to the Target at startup for use by firmware.
+ */
+const struct service_to_pipe ath11k_target_service_to_ce_map_wlan_qcn9074[] = {
+ {
+ __cpu_to_le32(ATH11K_HTC_SVC_ID_WMI_DATA_VO),
+ __cpu_to_le32(PIPEDIR_OUT), /* out = UL = host -> target */
+ __cpu_to_le32(3),
+ },
+ {
+ __cpu_to_le32(ATH11K_HTC_SVC_ID_WMI_DATA_VO),
+ __cpu_to_le32(PIPEDIR_IN), /* in = DL = target -> host */
+ __cpu_to_le32(2),
+ },
+ {
+ __cpu_to_le32(ATH11K_HTC_SVC_ID_WMI_DATA_BK),
+ __cpu_to_le32(PIPEDIR_OUT), /* out = UL = host -> target */
+ __cpu_to_le32(3),
+ },
+ {
+ __cpu_to_le32(ATH11K_HTC_SVC_ID_WMI_DATA_BK),
+ __cpu_to_le32(PIPEDIR_IN), /* in = DL = target -> host */
+ __cpu_to_le32(2),
+ },
+ {
+ __cpu_to_le32(ATH11K_HTC_SVC_ID_WMI_DATA_BE),
+ __cpu_to_le32(PIPEDIR_OUT), /* out = UL = host -> target */
+ __cpu_to_le32(3),
+ },
+ {
+ __cpu_to_le32(ATH11K_HTC_SVC_ID_WMI_DATA_BE),
+ __cpu_to_le32(PIPEDIR_IN), /* in = DL = target -> host */
+ __cpu_to_le32(2),
+ },
+ {
+ __cpu_to_le32(ATH11K_HTC_SVC_ID_WMI_DATA_VI),
+ __cpu_to_le32(PIPEDIR_OUT), /* out = UL = host -> target */
+ __cpu_to_le32(3),
+ },
+ {
+ __cpu_to_le32(ATH11K_HTC_SVC_ID_WMI_DATA_VI),
+ __cpu_to_le32(PIPEDIR_IN), /* in = DL = target -> host */
+ __cpu_to_le32(2),
+ },
+ {
+ __cpu_to_le32(ATH11K_HTC_SVC_ID_WMI_CONTROL),
+ __cpu_to_le32(PIPEDIR_OUT), /* out = UL = host -> target */
+ __cpu_to_le32(3),
+ },
+ {
+ __cpu_to_le32(ATH11K_HTC_SVC_ID_WMI_CONTROL),
+ __cpu_to_le32(PIPEDIR_IN), /* in = DL = target -> host */
+ __cpu_to_le32(2),
+ },
+ {
+ __cpu_to_le32(ATH11K_HTC_SVC_ID_RSVD_CTRL),
+ __cpu_to_le32(PIPEDIR_OUT), /* out = UL = host -> target */
+ __cpu_to_le32(0),
+ },
+ {
+ __cpu_to_le32(ATH11K_HTC_SVC_ID_RSVD_CTRL),
+ __cpu_to_le32(PIPEDIR_IN), /* in = DL = target -> host */
+ __cpu_to_le32(1),
+ },
+ {
+ __cpu_to_le32(ATH11K_HTC_SVC_ID_TEST_RAW_STREAMS),
+ __cpu_to_le32(PIPEDIR_OUT), /* out = UL = host -> target */
+ __cpu_to_le32(0),
+ },
+ {
+ __cpu_to_le32(ATH11K_HTC_SVC_ID_TEST_RAW_STREAMS),
+ __cpu_to_le32(PIPEDIR_IN), /* in = DL = target -> host */
+ __cpu_to_le32(1),
+ },
+ {
+ __cpu_to_le32(ATH11K_HTC_SVC_ID_HTT_DATA_MSG),
+ __cpu_to_le32(PIPEDIR_OUT), /* out = UL = host -> target */
+ __cpu_to_le32(4),
+ },
+ {
+ __cpu_to_le32(ATH11K_HTC_SVC_ID_HTT_DATA_MSG),
+ __cpu_to_le32(PIPEDIR_IN), /* in = DL = target -> host */
+ __cpu_to_le32(1),
+ },
+ {
+ __cpu_to_le32(ATH11K_HTC_SVC_ID_PKT_LOG),
+ __cpu_to_le32(PIPEDIR_IN), /* in = DL = target -> host */
+ __cpu_to_le32(5),
+ },
+
+ /* (Additions here) */
+
+ { /* must be last */
+ __cpu_to_le32(0),
+ __cpu_to_le32(0),
+ __cpu_to_le32(0),
+ },
+};
+
+const struct ath11k_hw_ring_mask ath11k_hw_ring_mask_qcn9074 = {
+ .tx = {
+ ATH11K_TX_RING_MASK_0,
+ ATH11K_TX_RING_MASK_1,
+ ATH11K_TX_RING_MASK_2,
+ },
+ .rx_mon_status = {
+ 0, 0, 0,
+ ATH11K_RX_MON_STATUS_RING_MASK_0,
+ ATH11K_RX_MON_STATUS_RING_MASK_1,
+ ATH11K_RX_MON_STATUS_RING_MASK_2,
+ },
+ .rx = {
+ 0, 0, 0, 0,
+ ATH11K_RX_RING_MASK_0,
+ ATH11K_RX_RING_MASK_1,
+ ATH11K_RX_RING_MASK_2,
+ ATH11K_RX_RING_MASK_3,
+ },
+ .rx_err = {
+ 0, 0, 0,
+ ATH11K_RX_ERR_RING_MASK_0,
+ },
+ .rx_wbm_rel = {
+ 0, 0, 0,
+ ATH11K_RX_WBM_REL_RING_MASK_0,
+ },
+ .reo_status = {
+ 0, 0, 0,
+ ATH11K_REO_STATUS_RING_MASK_0,
+ },
+ .rxdma2host = {
+ 0, 0, 0,
+ ATH11K_RXDMA2HOST_RING_MASK_0,
+ },
+ .host2rxdma = {
+ 0, 0, 0,
+ ATH11K_HOST2RXDMA_RING_MASK_0,
+ },
+};
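+
+/* Each mask array above is indexed by external IRQ group (bounded by
+ * ATH11K_EXT_IRQ_GRP_NUM_MAX); a zero slot means no ring of that type
+ * is serviced from that group. The per-chip layout therefore encodes
+ * how the rings are distributed across that chip's interrupt vectors.
+ */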
+
+const struct ath11k_hw_ring_mask ath11k_hw_ring_mask_wcn6750 = {
+ .tx = {
+ ATH11K_TX_RING_MASK_0,
+ 0,
+ ATH11K_TX_RING_MASK_2,
+ 0,
+ ATH11K_TX_RING_MASK_4,
+ },
+ .rx_mon_status = {
+ 0, 0, 0, 0, 0, 0,
+ ATH11K_RX_MON_STATUS_RING_MASK_0,
+ },
+ .rx = {
+ 0, 0, 0, 0, 0, 0, 0,
+ ATH11K_RX_RING_MASK_0,
+ ATH11K_RX_RING_MASK_1,
+ ATH11K_RX_RING_MASK_2,
+ ATH11K_RX_RING_MASK_3,
+ },
+ .rx_err = {
+ 0, ATH11K_RX_ERR_RING_MASK_0,
+ },
+ .rx_wbm_rel = {
+ 0, ATH11K_RX_WBM_REL_RING_MASK_0,
+ },
+ .reo_status = {
+ 0, ATH11K_REO_STATUS_RING_MASK_0,
+ },
+ .rxdma2host = {
+ ATH11K_RXDMA2HOST_RING_MASK_0,
+ ATH11K_RXDMA2HOST_RING_MASK_1,
+ ATH11K_RXDMA2HOST_RING_MASK_2,
+ },
+ .host2rxdma = {
+ },
+};
+
+/* Target firmware's Copy Engine configuration for IPQ5018 */
+const struct ce_pipe_config ath11k_target_ce_config_wlan_ipq5018[] = {
+ /* CE0: host->target HTC control and raw streams */
+ {
+ .pipenum = __cpu_to_le32(0),
+ .pipedir = __cpu_to_le32(PIPEDIR_OUT),
+ .nentries = __cpu_to_le32(32),
+ .nbytes_max = __cpu_to_le32(2048),
+ .flags = __cpu_to_le32(CE_ATTR_FLAGS),
+ .reserved = __cpu_to_le32(0),
+ },
+
+ /* CE1: target->host HTT + HTC control */
+ {
+ .pipenum = __cpu_to_le32(1),
+ .pipedir = __cpu_to_le32(PIPEDIR_IN),
+ .nentries = __cpu_to_le32(32),
+ .nbytes_max = __cpu_to_le32(2048),
+ .flags = __cpu_to_le32(CE_ATTR_FLAGS),
+ .reserved = __cpu_to_le32(0),
+ },
+
+ /* CE2: target->host WMI */
+ {
+ .pipenum = __cpu_to_le32(2),
+ .pipedir = __cpu_to_le32(PIPEDIR_IN),
+ .nentries = __cpu_to_le32(32),
+ .nbytes_max = __cpu_to_le32(2048),
+ .flags = __cpu_to_le32(CE_ATTR_FLAGS),
+ .reserved = __cpu_to_le32(0),
+ },
+
+ /* CE3: host->target WMI */
+ {
+ .pipenum = __cpu_to_le32(3),
+ .pipedir = __cpu_to_le32(PIPEDIR_OUT),
+ .nentries = __cpu_to_le32(32),
+ .nbytes_max = __cpu_to_le32(2048),
+ .flags = __cpu_to_le32(CE_ATTR_FLAGS),
+ .reserved = __cpu_to_le32(0),
+ },
+
+ /* CE4: host->target HTT */
+ {
+ .pipenum = __cpu_to_le32(4),
+ .pipedir = __cpu_to_le32(PIPEDIR_OUT),
+ .nentries = __cpu_to_le32(256),
+ .nbytes_max = __cpu_to_le32(256),
+ .flags = __cpu_to_le32(CE_ATTR_FLAGS | CE_ATTR_DIS_INTR),
+ .reserved = __cpu_to_le32(0),
+ },
+
+ /* CE5: target->host Pktlog */
+ {
+ .pipenum = __cpu_to_le32(5),
+ .pipedir = __cpu_to_le32(PIPEDIR_IN),
+ .nentries = __cpu_to_le32(32),
+ .nbytes_max = __cpu_to_le32(2048),
+ .flags = __cpu_to_le32(CE_ATTR_FLAGS),
+ .reserved = __cpu_to_le32(0),
+ },
+
+ /* CE6: Reserved for target autonomous hif_memcpy */
+ {
+ .pipenum = __cpu_to_le32(6),
+ .pipedir = __cpu_to_le32(PIPEDIR_INOUT),
+ .nentries = __cpu_to_le32(32),
+ .nbytes_max = __cpu_to_le32(16384),
+ .flags = __cpu_to_le32(CE_ATTR_FLAGS),
+ .reserved = __cpu_to_le32(0),
+ },
+
+ /* CE7 used only by Host */
+ {
+ .pipenum = __cpu_to_le32(7),
+ .pipedir = __cpu_to_le32(PIPEDIR_OUT),
+ .nentries = __cpu_to_le32(32),
+ .nbytes_max = __cpu_to_le32(2048),
+ .flags = __cpu_to_le32(0x2000),
+ .reserved = __cpu_to_le32(0),
+ },
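+	/* Note: unlike its siblings, the CE7 entry above uses the raw
+	 * attribute value 0x2000 rather than a named CE_ATTR_* flag.
+	 */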
+
+ /* CE8 target->host used only by IPA */
+ {
+ .pipenum = __cpu_to_le32(8),
+ .pipedir = __cpu_to_le32(PIPEDIR_INOUT),
+ .nentries = __cpu_to_le32(32),
+ .nbytes_max = __cpu_to_le32(16384),
+ .flags = __cpu_to_le32(CE_ATTR_FLAGS),
+ .reserved = __cpu_to_le32(0),
+ },
+};
+
+/* Map from service/endpoint to Copy Engine for IPQ5018.
+ * This table is derived from the CE configuration table above.
+ * It is passed to the Target at startup for use by firmware.
+ */
+const struct service_to_pipe ath11k_target_service_to_ce_map_wlan_ipq5018[] = {
+ {
+ .service_id = __cpu_to_le32(ATH11K_HTC_SVC_ID_WMI_DATA_VO),
+ .pipedir = __cpu_to_le32(PIPEDIR_OUT), /* out = UL = host -> target */
+ .pipenum = __cpu_to_le32(3),
+ },
+ {
+ .service_id = __cpu_to_le32(ATH11K_HTC_SVC_ID_WMI_DATA_VO),
+ .pipedir = __cpu_to_le32(PIPEDIR_IN), /* in = DL = target -> host */
+ .pipenum = __cpu_to_le32(2),
+ },
+ {
+ .service_id = __cpu_to_le32(ATH11K_HTC_SVC_ID_WMI_DATA_BK),
+ .pipedir = __cpu_to_le32(PIPEDIR_OUT), /* out = UL = host -> target */
+ .pipenum = __cpu_to_le32(3),
+ },
+ {
+ .service_id = __cpu_to_le32(ATH11K_HTC_SVC_ID_WMI_DATA_BK),
+ .pipedir = __cpu_to_le32(PIPEDIR_IN), /* in = DL = target -> host */
+ .pipenum = __cpu_to_le32(2),
+ },
+ {
+ .service_id = __cpu_to_le32(ATH11K_HTC_SVC_ID_WMI_DATA_BE),
+ .pipedir = __cpu_to_le32(PIPEDIR_OUT), /* out = UL = host -> target */
+ .pipenum = __cpu_to_le32(3),
+ },
+ {
+ .service_id = __cpu_to_le32(ATH11K_HTC_SVC_ID_WMI_DATA_BE),
+ .pipedir = __cpu_to_le32(PIPEDIR_IN), /* in = DL = target -> host */
+ .pipenum = __cpu_to_le32(2),
+ },
+ {
+ .service_id = __cpu_to_le32(ATH11K_HTC_SVC_ID_WMI_DATA_VI),
+ .pipedir = __cpu_to_le32(PIPEDIR_OUT), /* out = UL = host -> target */
+ .pipenum = __cpu_to_le32(3),
+ },
+ {
+ .service_id = __cpu_to_le32(ATH11K_HTC_SVC_ID_WMI_DATA_VI),
+ .pipedir = __cpu_to_le32(PIPEDIR_IN), /* in = DL = target -> host */
+ .pipenum = __cpu_to_le32(2),
+ },
+ {
+ .service_id = __cpu_to_le32(ATH11K_HTC_SVC_ID_WMI_CONTROL),
+ .pipedir = __cpu_to_le32(PIPEDIR_OUT), /* out = UL = host -> target */
+ .pipenum = __cpu_to_le32(3),
+ },
+ {
+ .service_id = __cpu_to_le32(ATH11K_HTC_SVC_ID_WMI_CONTROL),
+ .pipedir = __cpu_to_le32(PIPEDIR_IN), /* in = DL = target -> host */
+ .pipenum = __cpu_to_le32(2),
+ },
+
+ {
+ .service_id = __cpu_to_le32(ATH11K_HTC_SVC_ID_RSVD_CTRL),
+ .pipedir = __cpu_to_le32(PIPEDIR_OUT), /* out = UL = host -> target */
+ .pipenum = __cpu_to_le32(0),
+ },
+ {
+ .service_id = __cpu_to_le32(ATH11K_HTC_SVC_ID_RSVD_CTRL),
+ .pipedir = __cpu_to_le32(PIPEDIR_IN), /* in = DL = target -> host */
+ .pipenum = __cpu_to_le32(1),
+ },
+
+ {
+ .service_id = __cpu_to_le32(ATH11K_HTC_SVC_ID_TEST_RAW_STREAMS),
+ .pipedir = __cpu_to_le32(PIPEDIR_OUT), /* out = UL = host -> target */
+ .pipenum = __cpu_to_le32(0),
+ },
+ {
+ .service_id = __cpu_to_le32(ATH11K_HTC_SVC_ID_TEST_RAW_STREAMS),
+ .pipedir = __cpu_to_le32(PIPEDIR_IN), /* in = DL = target -> host */
+ .pipenum = __cpu_to_le32(1),
+ },
+ {
+ .service_id = __cpu_to_le32(ATH11K_HTC_SVC_ID_HTT_DATA_MSG),
+ .pipedir = __cpu_to_le32(PIPEDIR_OUT), /* out = UL = host -> target */
+ .pipenum = __cpu_to_le32(4),
+ },
+ {
+ .service_id = __cpu_to_le32(ATH11K_HTC_SVC_ID_HTT_DATA_MSG),
+ .pipedir = __cpu_to_le32(PIPEDIR_IN), /* in = DL = target -> host */
+ .pipenum = __cpu_to_le32(1),
+ },
+ {
+ .service_id = __cpu_to_le32(ATH11K_HTC_SVC_ID_PKT_LOG),
+ .pipedir = __cpu_to_le32(PIPEDIR_IN), /* in = DL = target -> host */
+ .pipenum = __cpu_to_le32(5),
+ },
+
+ /* (Additions here) */
+
+ { /* terminator entry */ }
+};
+
+const struct ce_ie_addr ath11k_ce_ie_addr_ipq8074 = {
+ .ie1_reg_addr = CE_HOST_IE_ADDRESS,
+ .ie2_reg_addr = CE_HOST_IE_2_ADDRESS,
+ .ie3_reg_addr = CE_HOST_IE_3_ADDRESS,
+};
+
+const struct ce_ie_addr ath11k_ce_ie_addr_ipq5018 = {
+ .ie1_reg_addr = CE_HOST_IPQ5018_IE_ADDRESS - HAL_IPQ5018_CE_WFSS_REG_BASE,
+ .ie2_reg_addr = CE_HOST_IPQ5018_IE_2_ADDRESS - HAL_IPQ5018_CE_WFSS_REG_BASE,
+ .ie3_reg_addr = CE_HOST_IPQ5018_IE_3_ADDRESS - HAL_IPQ5018_CE_WFSS_REG_BASE,
+};
+
+const struct ce_remap ath11k_ce_remap_ipq5018 = {
+ .base = HAL_IPQ5018_CE_WFSS_REG_BASE,
+ .size = HAL_IPQ5018_CE_SIZE,
+};
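+
+/* IPQ5018 exposes its CE registers through a separately mapped WFSS
+ * window, which is why the interrupt-enable addresses above are stored
+ * relative to HAL_IPQ5018_CE_WFSS_REG_BASE; the ce_remap entry above
+ * supplies the base and size of that window.
+ */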
+
+const struct ath11k_hw_regs ipq8074_regs = {
+ /* SW2TCL(x) R0 ring configuration address */
+ .hal_tcl1_ring_base_lsb = 0x00000510,
+ .hal_tcl1_ring_base_msb = 0x00000514,
+ .hal_tcl1_ring_id = 0x00000518,
+ .hal_tcl1_ring_misc = 0x00000520,
+ .hal_tcl1_ring_tp_addr_lsb = 0x0000052c,
+ .hal_tcl1_ring_tp_addr_msb = 0x00000530,
+ .hal_tcl1_ring_consumer_int_setup_ix0 = 0x00000540,
+ .hal_tcl1_ring_consumer_int_setup_ix1 = 0x00000544,
+ .hal_tcl1_ring_msi1_base_lsb = 0x00000558,
+ .hal_tcl1_ring_msi1_base_msb = 0x0000055c,
+ .hal_tcl1_ring_msi1_data = 0x00000560,
+ .hal_tcl2_ring_base_lsb = 0x00000568,
+ .hal_tcl_ring_base_lsb = 0x00000618,
+
+ /* TCL STATUS ring address */
+ .hal_tcl_status_ring_base_lsb = 0x00000720,
+
+ /* REO2SW(x) R0 ring configuration address */
+ .hal_reo1_ring_base_lsb = 0x0000029c,
+ .hal_reo1_ring_base_msb = 0x000002a0,
+ .hal_reo1_ring_id = 0x000002a4,
+ .hal_reo1_ring_misc = 0x000002ac,
+ .hal_reo1_ring_hp_addr_lsb = 0x000002b0,
+ .hal_reo1_ring_hp_addr_msb = 0x000002b4,
+ .hal_reo1_ring_producer_int_setup = 0x000002c0,
+ .hal_reo1_ring_msi1_base_lsb = 0x000002e4,
+ .hal_reo1_ring_msi1_base_msb = 0x000002e8,
+ .hal_reo1_ring_msi1_data = 0x000002ec,
+ .hal_reo2_ring_base_lsb = 0x000002f4,
+ .hal_reo1_aging_thresh_ix_0 = 0x00000564,
+ .hal_reo1_aging_thresh_ix_1 = 0x00000568,
+ .hal_reo1_aging_thresh_ix_2 = 0x0000056c,
+ .hal_reo1_aging_thresh_ix_3 = 0x00000570,
+
+ /* REO2SW(x) R2 ring pointers (head/tail) address */
+ .hal_reo1_ring_hp = 0x00003038,
+ .hal_reo1_ring_tp = 0x0000303c,
+ .hal_reo2_ring_hp = 0x00003040,
+
+ /* REO2TCL R0 ring configuration address */
+ .hal_reo_tcl_ring_base_lsb = 0x000003fc,
+ .hal_reo_tcl_ring_hp = 0x00003058,
+
+ /* REO CMD ring address */
+ .hal_reo_cmd_ring_base_lsb = 0x00000194,
+ .hal_reo_cmd_ring_hp = 0x00003020,
+
+ /* REO status address */
+ .hal_reo_status_ring_base_lsb = 0x00000504,
+ .hal_reo_status_hp = 0x00003070,
+
+ /* SW2REO ring address */
+ .hal_sw2reo_ring_base_lsb = 0x000001ec,
+ .hal_sw2reo_ring_hp = 0x00003028,
+
+ /* WCSS relative address */
+ .hal_seq_wcss_umac_ce0_src_reg = 0x00a00000,
+ .hal_seq_wcss_umac_ce0_dst_reg = 0x00a01000,
+ .hal_seq_wcss_umac_ce1_src_reg = 0x00a02000,
+ .hal_seq_wcss_umac_ce1_dst_reg = 0x00a03000,
+
+ /* WBM Idle address */
+ .hal_wbm_idle_link_ring_base_lsb = 0x00000860,
+ .hal_wbm_idle_link_ring_misc = 0x00000870,
+
+ /* SW2WBM release address */
+ .hal_wbm_release_ring_base_lsb = 0x000001d8,
+
+ /* WBM2SW release address */
+ .hal_wbm0_release_ring_base_lsb = 0x00000910,
+ .hal_wbm1_release_ring_base_lsb = 0x00000968,
+
+ /* PCIe base address */
+ .pcie_qserdes_sysclk_en_sel = 0x0,
+ .pcie_pcs_osc_dtct_config_base = 0x0,
+
+ /* Shadow register area */
+ .hal_shadow_base_addr = 0x0,
+
+ /* REO misc control register, not used in IPQ8074 */
+ .hal_reo1_misc_ctl = 0x0,
+};
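+
+/* Access pattern (illustrative sketch; the register block macro is
+ * assumed from hal.h): the HAL never hardcodes these offsets but
+ * dereferences the per-chip table, e.g.
+ *
+ *	ath11k_hif_write32(ab, HAL_SEQ_WCSS_UMAC_TCL_REG +
+ *			   ab->hw_params.regs->hal_tcl1_ring_base_lsb, val);
+ */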
+
+const struct ath11k_hw_regs qca6390_regs = {
+ /* SW2TCL(x) R0 ring configuration address */
+ .hal_tcl1_ring_base_lsb = 0x00000684,
+ .hal_tcl1_ring_base_msb = 0x00000688,
+ .hal_tcl1_ring_id = 0x0000068c,
+ .hal_tcl1_ring_misc = 0x00000694,
+ .hal_tcl1_ring_tp_addr_lsb = 0x000006a0,
+ .hal_tcl1_ring_tp_addr_msb = 0x000006a4,
+ .hal_tcl1_ring_consumer_int_setup_ix0 = 0x000006b4,
+ .hal_tcl1_ring_consumer_int_setup_ix1 = 0x000006b8,
+ .hal_tcl1_ring_msi1_base_lsb = 0x000006cc,
+ .hal_tcl1_ring_msi1_base_msb = 0x000006d0,
+ .hal_tcl1_ring_msi1_data = 0x000006d4,
+ .hal_tcl2_ring_base_lsb = 0x000006dc,
+ .hal_tcl_ring_base_lsb = 0x0000078c,
+
+ /* TCL STATUS ring address */
+ .hal_tcl_status_ring_base_lsb = 0x00000894,
+
+ /* REO2SW(x) R0 ring configuration address */
+ .hal_reo1_ring_base_lsb = 0x00000244,
+ .hal_reo1_ring_base_msb = 0x00000248,
+ .hal_reo1_ring_id = 0x0000024c,
+ .hal_reo1_ring_misc = 0x00000254,
+ .hal_reo1_ring_hp_addr_lsb = 0x00000258,
+ .hal_reo1_ring_hp_addr_msb = 0x0000025c,
+ .hal_reo1_ring_producer_int_setup = 0x00000268,
+ .hal_reo1_ring_msi1_base_lsb = 0x0000028c,
+ .hal_reo1_ring_msi1_base_msb = 0x00000290,
+ .hal_reo1_ring_msi1_data = 0x00000294,
+ .hal_reo2_ring_base_lsb = 0x0000029c,
+ .hal_reo1_aging_thresh_ix_0 = 0x0000050c,
+ .hal_reo1_aging_thresh_ix_1 = 0x00000510,
+ .hal_reo1_aging_thresh_ix_2 = 0x00000514,
+ .hal_reo1_aging_thresh_ix_3 = 0x00000518,
+
+ /* REO2SW(x) R2 ring pointers (head/tail) address */
+ .hal_reo1_ring_hp = 0x00003030,
+ .hal_reo1_ring_tp = 0x00003034,
+ .hal_reo2_ring_hp = 0x00003038,
+
+ /* REO2TCL R0 ring configuration address */
+ .hal_reo_tcl_ring_base_lsb = 0x000003a4,
+ .hal_reo_tcl_ring_hp = 0x00003050,
+
+ /* REO CMD ring address */
+ .hal_reo_cmd_ring_base_lsb = 0x00000194,
+ .hal_reo_cmd_ring_hp = 0x00003020,
+
+ /* REO status address */
+ .hal_reo_status_ring_base_lsb = 0x000004ac,
+ .hal_reo_status_hp = 0x00003068,
+
+ /* SW2REO ring address */
+ .hal_sw2reo_ring_base_lsb = 0x000001ec,
+ .hal_sw2reo_ring_hp = 0x00003028,
+
+ /* WCSS relative address */
+ .hal_seq_wcss_umac_ce0_src_reg = 0x00a00000,
+ .hal_seq_wcss_umac_ce0_dst_reg = 0x00a01000,
+ .hal_seq_wcss_umac_ce1_src_reg = 0x00a02000,
+ .hal_seq_wcss_umac_ce1_dst_reg = 0x00a03000,
+
+ /* WBM Idle address */
+ .hal_wbm_idle_link_ring_base_lsb = 0x00000860,
+ .hal_wbm_idle_link_ring_misc = 0x00000870,
+
+ /* SW2WBM release address */
+ .hal_wbm_release_ring_base_lsb = 0x000001d8,
+
+ /* WBM2SW release address */
+ .hal_wbm0_release_ring_base_lsb = 0x00000910,
+ .hal_wbm1_release_ring_base_lsb = 0x00000968,
+
+ /* PCIe base address */
+ .pcie_qserdes_sysclk_en_sel = 0x01e0c0ac,
+ .pcie_pcs_osc_dtct_config_base = 0x01e0c628,
+
+ /* Shadow register area */
+ .hal_shadow_base_addr = 0x000008fc,
+
+ /* REO misc control register, not used in QCA6390 */
+ .hal_reo1_misc_ctl = 0x0,
+};
+
+const struct ath11k_hw_regs qcn9074_regs = {
+ /* SW2TCL(x) R0 ring configuration address */
+ .hal_tcl1_ring_base_lsb = 0x000004f0,
+ .hal_tcl1_ring_base_msb = 0x000004f4,
+ .hal_tcl1_ring_id = 0x000004f8,
+ .hal_tcl1_ring_misc = 0x00000500,
+ .hal_tcl1_ring_tp_addr_lsb = 0x0000050c,
+ .hal_tcl1_ring_tp_addr_msb = 0x00000510,
+ .hal_tcl1_ring_consumer_int_setup_ix0 = 0x00000520,
+ .hal_tcl1_ring_consumer_int_setup_ix1 = 0x00000524,
+ .hal_tcl1_ring_msi1_base_lsb = 0x00000538,
+ .hal_tcl1_ring_msi1_base_msb = 0x0000053c,
+ .hal_tcl1_ring_msi1_data = 0x00000540,
+ .hal_tcl2_ring_base_lsb = 0x00000548,
+ .hal_tcl_ring_base_lsb = 0x000005f8,
+
+ /* TCL STATUS ring address */
+ .hal_tcl_status_ring_base_lsb = 0x00000700,
+
+ /* REO2SW(x) R0 ring configuration address */
+ .hal_reo1_ring_base_lsb = 0x0000029c,
+ .hal_reo1_ring_base_msb = 0x000002a0,
+ .hal_reo1_ring_id = 0x000002a4,
+ .hal_reo1_ring_misc = 0x000002ac,
+ .hal_reo1_ring_hp_addr_lsb = 0x000002b0,
+ .hal_reo1_ring_hp_addr_msb = 0x000002b4,
+ .hal_reo1_ring_producer_int_setup = 0x000002c0,
+ .hal_reo1_ring_msi1_base_lsb = 0x000002e4,
+ .hal_reo1_ring_msi1_base_msb = 0x000002e8,
+ .hal_reo1_ring_msi1_data = 0x000002ec,
+ .hal_reo2_ring_base_lsb = 0x000002f4,
+ .hal_reo1_aging_thresh_ix_0 = 0x00000564,
+ .hal_reo1_aging_thresh_ix_1 = 0x00000568,
+ .hal_reo1_aging_thresh_ix_2 = 0x0000056c,
+ .hal_reo1_aging_thresh_ix_3 = 0x00000570,
+
+ /* REO2SW(x) R2 ring pointers (head/tail) address */
+ .hal_reo1_ring_hp = 0x00003038,
+ .hal_reo1_ring_tp = 0x0000303c,
+ .hal_reo2_ring_hp = 0x00003040,
+
+ /* REO2TCL R0 ring configuration address */
+ .hal_reo_tcl_ring_base_lsb = 0x000003fc,
+ .hal_reo_tcl_ring_hp = 0x00003058,
+
+ /* REO CMD ring address */
+ .hal_reo_cmd_ring_base_lsb = 0x00000194,
+ .hal_reo_cmd_ring_hp = 0x00003020,
+
+ /* REO status address */
+ .hal_reo_status_ring_base_lsb = 0x00000504,
+ .hal_reo_status_hp = 0x00003070,
+
+ /* SW2REO ring address */
+ .hal_sw2reo_ring_base_lsb = 0x000001ec,
+ .hal_sw2reo_ring_hp = 0x00003028,
+
+ /* WCSS relative address */
+ .hal_seq_wcss_umac_ce0_src_reg = 0x01b80000,
+ .hal_seq_wcss_umac_ce0_dst_reg = 0x01b81000,
+ .hal_seq_wcss_umac_ce1_src_reg = 0x01b82000,
+ .hal_seq_wcss_umac_ce1_dst_reg = 0x01b83000,
+
+ /* WBM Idle address */
+ .hal_wbm_idle_link_ring_base_lsb = 0x00000874,
+ .hal_wbm_idle_link_ring_misc = 0x00000884,
+
+ /* SW2WBM release address */
+ .hal_wbm_release_ring_base_lsb = 0x000001ec,
+
+ /* WBM2SW release address */
+ .hal_wbm0_release_ring_base_lsb = 0x00000924,
+ .hal_wbm1_release_ring_base_lsb = 0x0000097c,
+
+ /* PCIe base address */
+ .pcie_qserdes_sysclk_en_sel = 0x01e0e0a8,
+ .pcie_pcs_osc_dtct_config_base = 0x01e0f45c,
+
+ /* Shadow register area */
+ .hal_shadow_base_addr = 0x0,
+
+ /* REO misc control register, not used in QCN9074 */
+ .hal_reo1_misc_ctl = 0x0,
+};
+
+const struct ath11k_hw_regs wcn6855_regs = {
+ /* SW2TCL(x) R0 ring configuration address */
+ .hal_tcl1_ring_base_lsb = 0x00000690,
+ .hal_tcl1_ring_base_msb = 0x00000694,
+ .hal_tcl1_ring_id = 0x00000698,
+ .hal_tcl1_ring_misc = 0x000006a0,
+ .hal_tcl1_ring_tp_addr_lsb = 0x000006ac,
+ .hal_tcl1_ring_tp_addr_msb = 0x000006b0,
+ .hal_tcl1_ring_consumer_int_setup_ix0 = 0x000006c0,
+ .hal_tcl1_ring_consumer_int_setup_ix1 = 0x000006c4,
+ .hal_tcl1_ring_msi1_base_lsb = 0x000006d8,
+ .hal_tcl1_ring_msi1_base_msb = 0x000006dc,
+ .hal_tcl1_ring_msi1_data = 0x000006e0,
+ .hal_tcl2_ring_base_lsb = 0x000006e8,
+ .hal_tcl_ring_base_lsb = 0x00000798,
+
+ /* TCL STATUS ring address */
+ .hal_tcl_status_ring_base_lsb = 0x000008a0,
+
+ /* REO2SW(x) R0 ring configuration address */
+ .hal_reo1_ring_base_lsb = 0x00000244,
+ .hal_reo1_ring_base_msb = 0x00000248,
+ .hal_reo1_ring_id = 0x0000024c,
+ .hal_reo1_ring_misc = 0x00000254,
+ .hal_reo1_ring_hp_addr_lsb = 0x00000258,
+ .hal_reo1_ring_hp_addr_msb = 0x0000025c,
+ .hal_reo1_ring_producer_int_setup = 0x00000268,
+ .hal_reo1_ring_msi1_base_lsb = 0x0000028c,
+ .hal_reo1_ring_msi1_base_msb = 0x00000290,
+ .hal_reo1_ring_msi1_data = 0x00000294,
+ .hal_reo2_ring_base_lsb = 0x0000029c,
+ .hal_reo1_aging_thresh_ix_0 = 0x000005bc,
+ .hal_reo1_aging_thresh_ix_1 = 0x000005c0,
+ .hal_reo1_aging_thresh_ix_2 = 0x000005c4,
+ .hal_reo1_aging_thresh_ix_3 = 0x000005c8,
+
+ /* REO2SW(x) R2 ring pointers (head/tail) address */
+ .hal_reo1_ring_hp = 0x00003030,
+ .hal_reo1_ring_tp = 0x00003034,
+ .hal_reo2_ring_hp = 0x00003038,
+
+ /* REO2TCL R0 ring configuration address */
+ .hal_reo_tcl_ring_base_lsb = 0x00000454,
+ .hal_reo_tcl_ring_hp = 0x00003060,
+
+ /* REO CMD ring address */
+ .hal_reo_cmd_ring_base_lsb = 0x00000194,
+ .hal_reo_cmd_ring_hp = 0x00003020,
+
+ /* REO status address */
+ .hal_reo_status_ring_base_lsb = 0x0000055c,
+ .hal_reo_status_hp = 0x00003078,
+
+ /* SW2REO ring address */
+ .hal_sw2reo_ring_base_lsb = 0x000001ec,
+ .hal_sw2reo_ring_hp = 0x00003028,
+
+ /* WCSS relative address */
+ .hal_seq_wcss_umac_ce0_src_reg = 0x1b80000,
+ .hal_seq_wcss_umac_ce0_dst_reg = 0x1b81000,
+ .hal_seq_wcss_umac_ce1_src_reg = 0x1b82000,
+ .hal_seq_wcss_umac_ce1_dst_reg = 0x1b83000,
+
+ /* WBM Idle address */
+ .hal_wbm_idle_link_ring_base_lsb = 0x00000870,
+ .hal_wbm_idle_link_ring_misc = 0x00000880,
+
+ /* SW2WBM release address */
+ .hal_wbm_release_ring_base_lsb = 0x000001e8,
+
+ /* WBM2SW release address */
+ .hal_wbm0_release_ring_base_lsb = 0x00000920,
+ .hal_wbm1_release_ring_base_lsb = 0x00000978,
+
+ /* PCIe base address */
+ .pcie_qserdes_sysclk_en_sel = 0x01e0c0ac,
+ .pcie_pcs_osc_dtct_config_base = 0x01e0c628,
+
+ /* Shadow register area */
+ .hal_shadow_base_addr = 0x000008fc,
+
+ /* REO misc control register, used for fragment
+ * destination ring config in WCN6855.
+ */
+ .hal_reo1_misc_ctl = 0x00000630,
+};
+
+const struct ath11k_hw_regs wcn6750_regs = {
+ /* SW2TCL(x) R0 ring configuration address */
+ .hal_tcl1_ring_base_lsb = 0x00000694,
+ .hal_tcl1_ring_base_msb = 0x00000698,
+ .hal_tcl1_ring_id = 0x0000069c,
+ .hal_tcl1_ring_misc = 0x000006a4,
+ .hal_tcl1_ring_tp_addr_lsb = 0x000006b0,
+ .hal_tcl1_ring_tp_addr_msb = 0x000006b4,
+ .hal_tcl1_ring_consumer_int_setup_ix0 = 0x000006c4,
+ .hal_tcl1_ring_consumer_int_setup_ix1 = 0x000006c8,
+ .hal_tcl1_ring_msi1_base_lsb = 0x000006dc,
+ .hal_tcl1_ring_msi1_base_msb = 0x000006e0,
+ .hal_tcl1_ring_msi1_data = 0x000006e4,
+ .hal_tcl2_ring_base_lsb = 0x000006ec,
+ .hal_tcl_ring_base_lsb = 0x0000079c,
+
+ /* TCL STATUS ring address */
+ .hal_tcl_status_ring_base_lsb = 0x000008a4,
+
+ /* REO2SW(x) R0 ring configuration address */
+ .hal_reo1_ring_base_lsb = 0x000001ec,
+ .hal_reo1_ring_base_msb = 0x000001f0,
+ .hal_reo1_ring_id = 0x000001f4,
+ .hal_reo1_ring_misc = 0x000001fc,
+ .hal_reo1_ring_hp_addr_lsb = 0x00000200,
+ .hal_reo1_ring_hp_addr_msb = 0x00000204,
+ .hal_reo1_ring_producer_int_setup = 0x00000210,
+ .hal_reo1_ring_msi1_base_lsb = 0x00000234,
+ .hal_reo1_ring_msi1_base_msb = 0x00000238,
+ .hal_reo1_ring_msi1_data = 0x0000023c,
+ .hal_reo2_ring_base_lsb = 0x00000244,
+ .hal_reo1_aging_thresh_ix_0 = 0x00000564,
+ .hal_reo1_aging_thresh_ix_1 = 0x00000568,
+ .hal_reo1_aging_thresh_ix_2 = 0x0000056c,
+ .hal_reo1_aging_thresh_ix_3 = 0x00000570,
+
+ /* REO2SW(x) R2 ring pointers (head/tail) address */
+ .hal_reo1_ring_hp = 0x00003028,
+ .hal_reo1_ring_tp = 0x0000302c,
+ .hal_reo2_ring_hp = 0x00003030,
+
+ /* REO2TCL R0 ring configuration address */
+ .hal_reo_tcl_ring_base_lsb = 0x000003fc,
+ .hal_reo_tcl_ring_hp = 0x00003058,
+
+ /* REO CMD ring address */
+ .hal_reo_cmd_ring_base_lsb = 0x000000e4,
+ .hal_reo_cmd_ring_hp = 0x00003010,
+
+ /* REO status address */
+ .hal_reo_status_ring_base_lsb = 0x00000504,
+ .hal_reo_status_hp = 0x00003070,
+
+ /* SW2REO ring address */
+ .hal_sw2reo_ring_base_lsb = 0x0000013c,
+ .hal_sw2reo_ring_hp = 0x00003018,
+
+ /* WCSS relative address */
+ .hal_seq_wcss_umac_ce0_src_reg = 0x01b80000,
+ .hal_seq_wcss_umac_ce0_dst_reg = 0x01b81000,
+ .hal_seq_wcss_umac_ce1_src_reg = 0x01b82000,
+ .hal_seq_wcss_umac_ce1_dst_reg = 0x01b83000,
+
+ /* WBM Idle address */
+ .hal_wbm_idle_link_ring_base_lsb = 0x00000874,
+ .hal_wbm_idle_link_ring_misc = 0x00000884,
+
+ /* SW2WBM release address */
+ .hal_wbm_release_ring_base_lsb = 0x000001ec,
+
+ /* WBM2SW release address */
+ .hal_wbm0_release_ring_base_lsb = 0x00000924,
+ .hal_wbm1_release_ring_base_lsb = 0x0000097c,
+
+ /* PCIe base address */
+ .pcie_qserdes_sysclk_en_sel = 0x0,
+ .pcie_pcs_osc_dtct_config_base = 0x0,
+
+ /* Shadow register area */
+ .hal_shadow_base_addr = 0x00000504,
+
+ /* REO misc control register, used for fragment
+ * destination ring config in WCN6750.
+ */
+ .hal_reo1_misc_ctl = 0x000005d8,
+};
+
+static const struct ath11k_hw_tcl2wbm_rbm_map ath11k_hw_tcl2wbm_rbm_map_ipq8074[] = {
+ {
+ .tcl_ring_num = 0,
+ .wbm_ring_num = 0,
+ .rbm_id = HAL_RX_BUF_RBM_SW0_BM,
+ },
+ {
+ .tcl_ring_num = 1,
+ .wbm_ring_num = 1,
+ .rbm_id = HAL_RX_BUF_RBM_SW1_BM,
+ },
+ {
+ .tcl_ring_num = 2,
+ .wbm_ring_num = 2,
+ .rbm_id = HAL_RX_BUF_RBM_SW2_BM,
+ },
+};
+
+static const struct ath11k_hw_tcl2wbm_rbm_map ath11k_hw_tcl2wbm_rbm_map_wcn6750[] = {
+ {
+ .tcl_ring_num = 0,
+ .wbm_ring_num = 0,
+ .rbm_id = HAL_RX_BUF_RBM_SW0_BM,
+ },
+ {
+ .tcl_ring_num = 1,
+ .wbm_ring_num = 4,
+ .rbm_id = HAL_RX_BUF_RBM_SW4_BM,
+ },
+ {
+ .tcl_ring_num = 2,
+ .wbm_ring_num = 2,
+ .rbm_id = HAL_RX_BUF_RBM_SW2_BM,
+ },
+};
+
+const struct ath11k_hw_regs ipq5018_regs = {
+ /* SW2TCL(x) R0 ring configuration address */
+ .hal_tcl1_ring_base_lsb = 0x00000694,
+ .hal_tcl1_ring_base_msb = 0x00000698,
+ .hal_tcl1_ring_id = 0x0000069c,
+ .hal_tcl1_ring_misc = 0x000006a4,
+ .hal_tcl1_ring_tp_addr_lsb = 0x000006b0,
+ .hal_tcl1_ring_tp_addr_msb = 0x000006b4,
+ .hal_tcl1_ring_consumer_int_setup_ix0 = 0x000006c4,
+ .hal_tcl1_ring_consumer_int_setup_ix1 = 0x000006c8,
+ .hal_tcl1_ring_msi1_base_lsb = 0x000006dc,
+ .hal_tcl1_ring_msi1_base_msb = 0x000006e0,
+ .hal_tcl1_ring_msi1_data = 0x000006e4,
+ .hal_tcl2_ring_base_lsb = 0x000006ec,
+ .hal_tcl_ring_base_lsb = 0x0000079c,
+
+ /* TCL STATUS ring address */
+ .hal_tcl_status_ring_base_lsb = 0x000008a4,
+
+ /* REO2SW(x) R0 ring configuration address */
+ .hal_reo1_ring_base_lsb = 0x000001ec,
+ .hal_reo1_ring_base_msb = 0x000001f0,
+ .hal_reo1_ring_id = 0x000001f4,
+ .hal_reo1_ring_misc = 0x000001fc,
+ .hal_reo1_ring_hp_addr_lsb = 0x00000200,
+ .hal_reo1_ring_hp_addr_msb = 0x00000204,
+ .hal_reo1_ring_producer_int_setup = 0x00000210,
+ .hal_reo1_ring_msi1_base_lsb = 0x00000234,
+ .hal_reo1_ring_msi1_base_msb = 0x00000238,
+ .hal_reo1_ring_msi1_data = 0x0000023c,
+ .hal_reo2_ring_base_lsb = 0x00000244,
+ .hal_reo1_aging_thresh_ix_0 = 0x00000564,
+ .hal_reo1_aging_thresh_ix_1 = 0x00000568,
+ .hal_reo1_aging_thresh_ix_2 = 0x0000056c,
+ .hal_reo1_aging_thresh_ix_3 = 0x00000570,
+
+ /* REO2SW(x) R2 ring pointers (head/tail) address */
+ .hal_reo1_ring_hp = 0x00003028,
+ .hal_reo1_ring_tp = 0x0000302c,
+ .hal_reo2_ring_hp = 0x00003030,
+
+ /* REO2TCL R0 ring configuration address */
+ .hal_reo_tcl_ring_base_lsb = 0x000003fc,
+ .hal_reo_tcl_ring_hp = 0x00003058,
+
+ /* SW2REO ring address */
+ .hal_sw2reo_ring_base_lsb = 0x0000013c,
+ .hal_sw2reo_ring_hp = 0x00003018,
+
+ /* REO CMD ring address */
+ .hal_reo_cmd_ring_base_lsb = 0x000000e4,
+ .hal_reo_cmd_ring_hp = 0x00003010,
+
+ /* REO status address */
+ .hal_reo_status_ring_base_lsb = 0x00000504,
+ .hal_reo_status_hp = 0x00003070,
+
+ /* WCSS relative address */
+ .hal_seq_wcss_umac_ce0_src_reg = 0x08400000
+ - HAL_IPQ5018_CE_WFSS_REG_BASE,
+ .hal_seq_wcss_umac_ce0_dst_reg = 0x08401000
+ - HAL_IPQ5018_CE_WFSS_REG_BASE,
+ .hal_seq_wcss_umac_ce1_src_reg = 0x08402000
+ - HAL_IPQ5018_CE_WFSS_REG_BASE,
+ .hal_seq_wcss_umac_ce1_dst_reg = 0x08403000
+ - HAL_IPQ5018_CE_WFSS_REG_BASE,
+
+ /* WBM Idle address */
+ .hal_wbm_idle_link_ring_base_lsb = 0x00000874,
+ .hal_wbm_idle_link_ring_misc = 0x00000884,
+
+ /* SW2WBM release address */
+ .hal_wbm_release_ring_base_lsb = 0x000001ec,
+
+ /* WBM2SW release address */
+ .hal_wbm0_release_ring_base_lsb = 0x00000924,
+ .hal_wbm1_release_ring_base_lsb = 0x0000097c,
+};
+
+const struct ath11k_hw_hal_params ath11k_hw_hal_params_ipq8074 = {
+ .rx_buf_rbm = HAL_RX_BUF_RBM_SW3_BM,
+ .tcl2wbm_rbm_map = ath11k_hw_tcl2wbm_rbm_map_ipq8074,
+};
+
+const struct ath11k_hw_hal_params ath11k_hw_hal_params_qca6390 = {
+ .rx_buf_rbm = HAL_RX_BUF_RBM_SW1_BM,
+ .tcl2wbm_rbm_map = ath11k_hw_tcl2wbm_rbm_map_ipq8074,
+};
+
+const struct ath11k_hw_hal_params ath11k_hw_hal_params_wcn6750 = {
+ .rx_buf_rbm = HAL_RX_BUF_RBM_SW1_BM,
+ .tcl2wbm_rbm_map = ath11k_hw_tcl2wbm_rbm_map_wcn6750,
+};
+
+static const struct cfg80211_sar_freq_ranges ath11k_hw_sar_freq_ranges_wcn6855[] = {
+	{ .start_freq = 2402, .end_freq = 2482 }, /* 2G ch1~ch13 */
+	{ .start_freq = 5150, .end_freq = 5250 }, /* 5G UNII-1 ch32~ch48 */
+	{ .start_freq = 5250, .end_freq = 5725 }, /* 5G UNII-2 ch50~ch144 */
+	{ .start_freq = 5725, .end_freq = 5810 }, /* 5G UNII-3 ch149~ch161 */
+	{ .start_freq = 5815, .end_freq = 5895 }, /* 5G UNII-4 ch163~ch177 */
+	{ .start_freq = 5925, .end_freq = 6165 }, /* 6G UNII-5 ch1, ch2~ch41 */
+	{ .start_freq = 6165, .end_freq = 6425 }, /* 6G UNII-5 ch45~ch93 */
+	{ .start_freq = 6425, .end_freq = 6525 }, /* 6G UNII-6 ch97~ch113 */
+	{ .start_freq = 6525, .end_freq = 6705 }, /* 6G UNII-7 ch117~ch149 */
+	{ .start_freq = 6705, .end_freq = 6875 }, /* 6G UNII-7 ch153~ch185 */
+	{ .start_freq = 6875, .end_freq = 7125 }, /* 6G UNII-8 ch189~ch233 */
+};
+
+const struct cfg80211_sar_capa ath11k_hw_sar_capa_wcn6855 = {
+ .type = NL80211_SAR_TYPE_POWER,
+	.num_freq_ranges = ARRAY_SIZE(ath11k_hw_sar_freq_ranges_wcn6855),
+ .freq_ranges = ath11k_hw_sar_freq_ranges_wcn6855,
+};
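+
+/* Usage sketch (assumed wiring; the exact assignment lives in mac.c):
+ * chips with BIOS SAR support publish this table to cfg80211 via
+ *
+ *	hw->wiphy->sar_capa = ab->hw_params.bios_sar_capa;
+ *
+ * so user-supplied SAR power limits are validated against the ranges
+ * above.
+ */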
diff --git a/drivers/net/wireless/ath/ath11k/hw.h b/drivers/net/wireless/ath/ath11k/hw.h
new file mode 100644
index 0000000000..d51a99669d
--- /dev/null
+++ b/drivers/net/wireless/ath/ath11k/hw.h
@@ -0,0 +1,437 @@
+/* SPDX-License-Identifier: BSD-3-Clause-Clear */
+/*
+ * Copyright (c) 2018-2019 The Linux Foundation. All rights reserved.
+ * Copyright (c) 2021-2022, Qualcomm Innovation Center, Inc. All rights reserved.
+ */
+
+#ifndef ATH11K_HW_H
+#define ATH11K_HW_H
+
+#include "hal.h"
+#include "wmi.h"
+
+/* Target configuration defines */
+
+/* Num VDEVS per radio */
+#define TARGET_NUM_VDEVS(ab) (ab->hw_params.num_vdevs)
+
+#define TARGET_NUM_PEERS_PDEV(ab) (ab->hw_params.num_peers + TARGET_NUM_VDEVS(ab))
+
+/* Num of peers for Single Radio mode */
+#define TARGET_NUM_PEERS_SINGLE(ab) (TARGET_NUM_PEERS_PDEV(ab))
+
+/* Num of peers for DBS */
+#define TARGET_NUM_PEERS_DBS(ab) (2 * TARGET_NUM_PEERS_PDEV(ab))
+
+/* Num of peers for DBS_SBS */
+#define TARGET_NUM_PEERS_DBS_SBS(ab) (3 * TARGET_NUM_PEERS_PDEV(ab))
+
+/* Max num of stations (per radio) */
+#define TARGET_NUM_STATIONS(ab) (ab->hw_params.num_peers)
+
+#define TARGET_NUM_PEERS(ab, x) TARGET_NUM_PEERS_##x(ab)
+#define TARGET_NUM_PEER_KEYS 2
+#define TARGET_NUM_TIDS(ab, x) (2 * TARGET_NUM_PEERS(ab, x) + \
+ 4 * TARGET_NUM_VDEVS(ab) + 8)
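+
+/* Worked example with hypothetical values (not taken from any real
+ * hw_params): with num_vdevs = 17 and num_peers = 512,
+ *	TARGET_NUM_PEERS_PDEV = 512 + 17 = 529
+ *	TARGET_NUM_PEERS_DBS  = 2 * 529 = 1058
+ *	TARGET_NUM_TIDS(DBS)  = 2 * 1058 + 4 * 17 + 8 = 2192
+ */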
+
+#define TARGET_AST_SKID_LIMIT 16
+#define TARGET_NUM_OFFLD_PEERS 4
+#define TARGET_NUM_OFFLD_REORDER_BUFFS 4
+
+#define TARGET_TX_CHAIN_MASK (BIT(0) | BIT(1) | BIT(2) | BIT(4))
+#define TARGET_RX_CHAIN_MASK (BIT(0) | BIT(1) | BIT(2) | BIT(4))
+#define TARGET_RX_TIMEOUT_LO_PRI 100
+#define TARGET_RX_TIMEOUT_HI_PRI 40
+
+#define TARGET_DECAP_MODE_RAW 0
+#define TARGET_DECAP_MODE_NATIVE_WIFI 1
+#define TARGET_DECAP_MODE_ETH 2
+
+#define TARGET_SCAN_MAX_PENDING_REQS 4
+#define TARGET_BMISS_OFFLOAD_MAX_VDEV 3
+#define TARGET_ROAM_OFFLOAD_MAX_VDEV 3
+#define TARGET_ROAM_OFFLOAD_MAX_AP_PROFILES 8
+#define TARGET_GTK_OFFLOAD_MAX_VDEV 3
+#define TARGET_NUM_MCAST_GROUPS 12
+#define TARGET_NUM_MCAST_TABLE_ELEMS 64
+#define TARGET_MCAST2UCAST_MODE 2
+#define TARGET_TX_DBG_LOG_SIZE 1024
+#define TARGET_RX_SKIP_DEFRAG_TIMEOUT_DUP_DETECTION_CHECK 1
+#define TARGET_VOW_CONFIG 0
+#define TARGET_NUM_MSDU_DESC (2500)
+#define TARGET_MAX_FRAG_ENTRIES 6
+#define TARGET_MAX_BCN_OFFLD 16
+#define TARGET_NUM_WDS_ENTRIES 32
+#define TARGET_DMA_BURST_SIZE 1
+#define TARGET_RX_BATCHMODE 1
+#define TARGET_EMA_MAX_PROFILE_PERIOD 8
+
+#define ATH11K_HW_MAX_QUEUES 4
+#define ATH11K_QUEUE_LEN 4096
+
+#define ATH11k_HW_RATECODE_CCK_SHORT_PREAM_MASK 0x4
+
+#define ATH11K_FW_DIR "ath11k"
+
+#define ATH11K_BOARD_MAGIC "QCA-ATH11K-BOARD"
+#define ATH11K_BOARD_API2_FILE "board-2.bin"
+#define ATH11K_DEFAULT_BOARD_FILE "board.bin"
+#define ATH11K_DEFAULT_CAL_FILE "caldata.bin"
+#define ATH11K_AMSS_FILE "amss.bin"
+#define ATH11K_M3_FILE "m3.bin"
+#define ATH11K_REGDB_FILE_NAME "regdb.bin"
+
+#define ATH11K_CE_OFFSET(ab) (ab->mem_ce - ab->mem)
+
+enum ath11k_hw_rate_cck {
+ ATH11K_HW_RATE_CCK_LP_11M = 0,
+ ATH11K_HW_RATE_CCK_LP_5_5M,
+ ATH11K_HW_RATE_CCK_LP_2M,
+ ATH11K_HW_RATE_CCK_LP_1M,
+ ATH11K_HW_RATE_CCK_SP_11M,
+ ATH11K_HW_RATE_CCK_SP_5_5M,
+ ATH11K_HW_RATE_CCK_SP_2M,
+};
+
+enum ath11k_hw_rate_ofdm {
+ ATH11K_HW_RATE_OFDM_48M = 0,
+ ATH11K_HW_RATE_OFDM_24M,
+ ATH11K_HW_RATE_OFDM_12M,
+ ATH11K_HW_RATE_OFDM_6M,
+ ATH11K_HW_RATE_OFDM_54M,
+ ATH11K_HW_RATE_OFDM_36M,
+ ATH11K_HW_RATE_OFDM_18M,
+ ATH11K_HW_RATE_OFDM_9M,
+};
+
+enum ath11k_bus {
+ ATH11K_BUS_AHB,
+ ATH11K_BUS_PCI,
+};
+
+#define ATH11K_EXT_IRQ_GRP_NUM_MAX 11
+
+struct hal_rx_desc;
+struct hal_tcl_data_cmd;
+
+struct ath11k_hw_ring_mask {
+ u8 tx[ATH11K_EXT_IRQ_GRP_NUM_MAX];
+ u8 rx_mon_status[ATH11K_EXT_IRQ_GRP_NUM_MAX];
+ u8 rx[ATH11K_EXT_IRQ_GRP_NUM_MAX];
+ u8 rx_err[ATH11K_EXT_IRQ_GRP_NUM_MAX];
+ u8 rx_wbm_rel[ATH11K_EXT_IRQ_GRP_NUM_MAX];
+ u8 reo_status[ATH11K_EXT_IRQ_GRP_NUM_MAX];
+ u8 rxdma2host[ATH11K_EXT_IRQ_GRP_NUM_MAX];
+ u8 host2rxdma[ATH11K_EXT_IRQ_GRP_NUM_MAX];
+};
+
+struct ath11k_hw_tcl2wbm_rbm_map {
+ u8 tcl_ring_num;
+ u8 wbm_ring_num;
+ u8 rbm_id;
+};
+
+struct ath11k_hw_hal_params {
+ enum hal_rx_buf_return_buf_manager rx_buf_rbm;
+ const struct ath11k_hw_tcl2wbm_rbm_map *tcl2wbm_rbm_map;
+};
+
+struct ath11k_hw_params {
+ const char *name;
+ u16 hw_rev;
+ u8 max_radios;
+ u32 bdf_addr;
+
+ struct {
+ const char *dir;
+ size_t board_size;
+ size_t cal_offset;
+ } fw;
+
+ const struct ath11k_hw_ops *hw_ops;
+ const struct ath11k_hw_ring_mask *ring_mask;
+
+ bool internal_sleep_clock;
+
+ const struct ath11k_hw_regs *regs;
+ u32 qmi_service_ins_id;
+ const struct ce_attr *host_ce_config;
+ u32 ce_count;
+ const struct ce_pipe_config *target_ce_config;
+ u32 target_ce_count;
+ const struct service_to_pipe *svc_to_ce_map;
+ u32 svc_to_ce_map_len;
+ const struct ce_ie_addr *ce_ie_addr;
+ const struct ce_remap *ce_remap;
+
+ bool single_pdev_only;
+
+ bool rxdma1_enable;
+ int num_rxmda_per_pdev;
+ bool rx_mac_buf_ring;
+ bool vdev_start_delay;
+ bool htt_peer_map_v2;
+
+ struct {
+ u8 fft_sz;
+ u8 fft_pad_sz;
+ u8 summary_pad_sz;
+ u8 fft_hdr_len;
+ u16 max_fft_bins;
+ bool fragment_160mhz;
+ } spectral;
+
+ u16 interface_modes;
+ bool supports_monitor;
+ bool full_monitor_mode;
+ bool supports_shadow_regs;
+ bool idle_ps;
+ bool supports_sta_ps;
+ bool coldboot_cal_mm;
+ bool coldboot_cal_ftm;
+ bool cbcal_restart_fw;
+ int fw_mem_mode;
+ u32 num_vdevs;
+ u32 num_peers;
+ bool supports_suspend;
+ u32 hal_desc_sz;
+ bool supports_regdb;
+ bool fix_l1ss;
+ bool credit_flow;
+ u8 max_tx_ring;
+ const struct ath11k_hw_hal_params *hal_params;
+ bool supports_dynamic_smps_6ghz;
+ bool alloc_cacheable_memory;
+ bool supports_rssi_stats;
+ bool fw_wmi_diag_event;
+ bool current_cc_support;
+ bool dbr_debug_support;
+ bool global_reset;
+ const struct cfg80211_sar_capa *bios_sar_capa;
+ bool m3_fw_support;
+ bool fixed_bdf_addr;
+ bool fixed_mem_region;
+ bool static_window_map;
+ bool hybrid_bus_type;
+ bool fixed_fw_mem;
+ bool support_off_channel_tx;
+ bool supports_multi_bssid;
+
+ struct {
+ u32 start;
+ u32 end;
+ } sram_dump;
+
+ bool tcl_ring_retry;
+ u32 tx_ring_size;
+ bool smp2p_wow_exit;
+ bool support_fw_mac_sequence;
+};
+
+struct ath11k_hw_ops {
+ u8 (*get_hw_mac_from_pdev_id)(int pdev_id);
+ void (*wmi_init_config)(struct ath11k_base *ab,
+ struct target_resource_config *config);
+ int (*mac_id_to_pdev_id)(struct ath11k_hw_params *hw, int mac_id);
+ int (*mac_id_to_srng_id)(struct ath11k_hw_params *hw, int mac_id);
+ void (*tx_mesh_enable)(struct ath11k_base *ab,
+ struct hal_tcl_data_cmd *tcl_cmd);
+ bool (*rx_desc_get_first_msdu)(struct hal_rx_desc *desc);
+ bool (*rx_desc_get_last_msdu)(struct hal_rx_desc *desc);
+ u8 (*rx_desc_get_l3_pad_bytes)(struct hal_rx_desc *desc);
+ u8 *(*rx_desc_get_hdr_status)(struct hal_rx_desc *desc);
+ bool (*rx_desc_encrypt_valid)(struct hal_rx_desc *desc);
+ u32 (*rx_desc_get_encrypt_type)(struct hal_rx_desc *desc);
+ u8 (*rx_desc_get_decap_type)(struct hal_rx_desc *desc);
+ u8 (*rx_desc_get_mesh_ctl)(struct hal_rx_desc *desc);
+ bool (*rx_desc_get_ldpc_support)(struct hal_rx_desc *desc);
+ bool (*rx_desc_get_mpdu_seq_ctl_vld)(struct hal_rx_desc *desc);
+ bool (*rx_desc_get_mpdu_fc_valid)(struct hal_rx_desc *desc);
+ u16 (*rx_desc_get_mpdu_start_seq_no)(struct hal_rx_desc *desc);
+ u16 (*rx_desc_get_msdu_len)(struct hal_rx_desc *desc);
+ u8 (*rx_desc_get_msdu_sgi)(struct hal_rx_desc *desc);
+ u8 (*rx_desc_get_msdu_rate_mcs)(struct hal_rx_desc *desc);
+ u8 (*rx_desc_get_msdu_rx_bw)(struct hal_rx_desc *desc);
+ u32 (*rx_desc_get_msdu_freq)(struct hal_rx_desc *desc);
+ u8 (*rx_desc_get_msdu_pkt_type)(struct hal_rx_desc *desc);
+ u8 (*rx_desc_get_msdu_nss)(struct hal_rx_desc *desc);
+ u8 (*rx_desc_get_mpdu_tid)(struct hal_rx_desc *desc);
+ u16 (*rx_desc_get_mpdu_peer_id)(struct hal_rx_desc *desc);
+ void (*rx_desc_copy_attn_end_tlv)(struct hal_rx_desc *fdesc,
+ struct hal_rx_desc *ldesc);
+ u32 (*rx_desc_get_mpdu_start_tag)(struct hal_rx_desc *desc);
+ u32 (*rx_desc_get_mpdu_ppdu_id)(struct hal_rx_desc *desc);
+ void (*rx_desc_set_msdu_len)(struct hal_rx_desc *desc, u16 len);
+ struct rx_attention *(*rx_desc_get_attention)(struct hal_rx_desc *desc);
+ u8 *(*rx_desc_get_msdu_payload)(struct hal_rx_desc *desc);
+ void (*reo_setup)(struct ath11k_base *ab);
+ u16 (*mpdu_info_get_peerid)(struct hal_rx_mpdu_info *mpdu_info);
+ bool (*rx_desc_mac_addr2_valid)(struct hal_rx_desc *desc);
+ u8* (*rx_desc_mpdu_start_addr2)(struct hal_rx_desc *desc);
+ u32 (*get_ring_selector)(struct sk_buff *skb);
+};
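+
+/* The hal_rx_desc layout differs per chip, so the rx/tx paths reach
+ * these accessors through ab->hw_params.hw_ops instead of touching the
+ * descriptor directly; a NULL op is only safe where one of the inline
+ * wrappers below supplies a default.
+ */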
+
+extern const struct ath11k_hw_ops ipq8074_ops;
+extern const struct ath11k_hw_ops ipq6018_ops;
+extern const struct ath11k_hw_ops qca6390_ops;
+extern const struct ath11k_hw_ops qcn9074_ops;
+extern const struct ath11k_hw_ops wcn6855_ops;
+extern const struct ath11k_hw_ops wcn6750_ops;
+extern const struct ath11k_hw_ops ipq5018_ops;
+
+extern const struct ath11k_hw_ring_mask ath11k_hw_ring_mask_ipq8074;
+extern const struct ath11k_hw_ring_mask ath11k_hw_ring_mask_qca6390;
+extern const struct ath11k_hw_ring_mask ath11k_hw_ring_mask_qcn9074;
+extern const struct ath11k_hw_ring_mask ath11k_hw_ring_mask_wcn6750;
+
+extern const struct ce_ie_addr ath11k_ce_ie_addr_ipq8074;
+extern const struct ce_ie_addr ath11k_ce_ie_addr_ipq5018;
+
+extern const struct ce_remap ath11k_ce_remap_ipq5018;
+
+extern const struct ath11k_hw_hal_params ath11k_hw_hal_params_ipq8074;
+extern const struct ath11k_hw_hal_params ath11k_hw_hal_params_qca6390;
+extern const struct ath11k_hw_hal_params ath11k_hw_hal_params_wcn6750;
+
+static inline
+int ath11k_hw_get_mac_from_pdev_id(struct ath11k_hw_params *hw,
+ int pdev_idx)
+{
+ if (hw->hw_ops->get_hw_mac_from_pdev_id)
+ return hw->hw_ops->get_hw_mac_from_pdev_id(pdev_idx);
+
+ return 0;
+}
+
+static inline int ath11k_hw_mac_id_to_pdev_id(struct ath11k_hw_params *hw,
+ int mac_id)
+{
+ if (hw->hw_ops->mac_id_to_pdev_id)
+ return hw->hw_ops->mac_id_to_pdev_id(hw, mac_id);
+
+ return 0;
+}
+
+static inline int ath11k_hw_mac_id_to_srng_id(struct ath11k_hw_params *hw,
+ int mac_id)
+{
+ if (hw->hw_ops->mac_id_to_srng_id)
+ return hw->hw_ops->mac_id_to_srng_id(hw, mac_id);
+
+ return 0;
+}
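+
+/* The wrappers above give chips a safe default: when an op is left
+ * unset the lookup falls back to 0 (the first pdev/srng), so callers
+ * never need a per-chip branch, e.g.
+ *
+ *	int pdev_id = ath11k_hw_mac_id_to_pdev_id(&ab->hw_params, mac_id);
+ */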
+
+struct ath11k_fw_ie {
+ __le32 id;
+ __le32 len;
+ u8 data[];
+};
+
+enum ath11k_bd_ie_board_type {
+ ATH11K_BD_IE_BOARD_NAME = 0,
+ ATH11K_BD_IE_BOARD_DATA = 1,
+};
+
+enum ath11k_bd_ie_regdb_type {
+ ATH11K_BD_IE_REGDB_NAME = 0,
+ ATH11K_BD_IE_REGDB_DATA = 1,
+};
+
+enum ath11k_bd_ie_type {
+ /* contains sub IEs of enum ath11k_bd_ie_board_type */
+ ATH11K_BD_IE_BOARD = 0,
+ /* contains sub IEs of enum ath11k_bd_ie_regdb_type */
+ ATH11K_BD_IE_REGDB = 1,
+};
+
+struct ath11k_hw_regs {
+ u32 hal_tcl1_ring_base_lsb;
+ u32 hal_tcl1_ring_base_msb;
+ u32 hal_tcl1_ring_id;
+ u32 hal_tcl1_ring_misc;
+ u32 hal_tcl1_ring_tp_addr_lsb;
+ u32 hal_tcl1_ring_tp_addr_msb;
+ u32 hal_tcl1_ring_consumer_int_setup_ix0;
+ u32 hal_tcl1_ring_consumer_int_setup_ix1;
+ u32 hal_tcl1_ring_msi1_base_lsb;
+ u32 hal_tcl1_ring_msi1_base_msb;
+ u32 hal_tcl1_ring_msi1_data;
+ u32 hal_tcl2_ring_base_lsb;
+ u32 hal_tcl_ring_base_lsb;
+
+ u32 hal_tcl_status_ring_base_lsb;
+
+ u32 hal_reo1_ring_base_lsb;
+ u32 hal_reo1_ring_base_msb;
+ u32 hal_reo1_ring_id;
+ u32 hal_reo1_ring_misc;
+ u32 hal_reo1_ring_hp_addr_lsb;
+ u32 hal_reo1_ring_hp_addr_msb;
+ u32 hal_reo1_ring_producer_int_setup;
+ u32 hal_reo1_ring_msi1_base_lsb;
+ u32 hal_reo1_ring_msi1_base_msb;
+ u32 hal_reo1_ring_msi1_data;
+ u32 hal_reo2_ring_base_lsb;
+ u32 hal_reo1_aging_thresh_ix_0;
+ u32 hal_reo1_aging_thresh_ix_1;
+ u32 hal_reo1_aging_thresh_ix_2;
+ u32 hal_reo1_aging_thresh_ix_3;
+
+ u32 hal_reo1_ring_hp;
+ u32 hal_reo1_ring_tp;
+ u32 hal_reo2_ring_hp;
+
+ u32 hal_reo_tcl_ring_base_lsb;
+ u32 hal_reo_tcl_ring_hp;
+
+ u32 hal_reo_status_ring_base_lsb;
+ u32 hal_reo_status_hp;
+
+ u32 hal_reo_cmd_ring_base_lsb;
+ u32 hal_reo_cmd_ring_hp;
+
+ u32 hal_sw2reo_ring_base_lsb;
+ u32 hal_sw2reo_ring_hp;
+
+ u32 hal_seq_wcss_umac_ce0_src_reg;
+ u32 hal_seq_wcss_umac_ce0_dst_reg;
+ u32 hal_seq_wcss_umac_ce1_src_reg;
+ u32 hal_seq_wcss_umac_ce1_dst_reg;
+
+ u32 hal_wbm_idle_link_ring_base_lsb;
+ u32 hal_wbm_idle_link_ring_misc;
+
+ u32 hal_wbm_release_ring_base_lsb;
+
+ u32 hal_wbm0_release_ring_base_lsb;
+ u32 hal_wbm1_release_ring_base_lsb;
+
+ u32 pcie_qserdes_sysclk_en_sel;
+ u32 pcie_pcs_osc_dtct_config_base;
+
+ u32 hal_shadow_base_addr;
+ u32 hal_reo1_misc_ctl;
+};
+
+extern const struct ath11k_hw_regs ipq8074_regs;
+extern const struct ath11k_hw_regs qca6390_regs;
+extern const struct ath11k_hw_regs qcn9074_regs;
+extern const struct ath11k_hw_regs wcn6855_regs;
+extern const struct ath11k_hw_regs wcn6750_regs;
+extern const struct ath11k_hw_regs ipq5018_regs;
+
+static inline const char *ath11k_bd_ie_type_str(enum ath11k_bd_ie_type type)
+{
+ switch (type) {
+ case ATH11K_BD_IE_BOARD:
+ return "board data";
+ case ATH11K_BD_IE_REGDB:
+ return "regdb data";
+ }
+
+ return "unknown";
+}
+
+extern const struct cfg80211_sar_capa ath11k_hw_sar_capa_wcn6855;
+
+#endif
diff --git a/drivers/net/wireless/ath/ath11k/mac.c b/drivers/net/wireless/ath/ath11k/mac.c
new file mode 100644
index 0000000000..b328a05998
--- /dev/null
+++ b/drivers/net/wireless/ath/ath11k/mac.c
@@ -0,0 +1,9838 @@
+// SPDX-License-Identifier: BSD-3-Clause-Clear
+/*
+ * Copyright (c) 2018-2019 The Linux Foundation. All rights reserved.
+ * Copyright (c) 2021-2023 Qualcomm Innovation Center, Inc. All rights reserved.
+ */
+
+#include <net/mac80211.h>
+#include <linux/etherdevice.h>
+#include <linux/bitfield.h>
+#include <linux/inetdevice.h>
+#include <net/if_inet6.h>
+#include <net/ipv6.h>
+
+#include "mac.h"
+#include "core.h"
+#include "debug.h"
+#include "wmi.h"
+#include "hw.h"
+#include "dp_tx.h"
+#include "dp_rx.h"
+#include "testmode.h"
+#include "peer.h"
+#include "debugfs_sta.h"
+#include "hif.h"
+#include "wow.h"
+
+#define CHAN2G(_channel, _freq, _flags) { \
+ .band = NL80211_BAND_2GHZ, \
+ .hw_value = (_channel), \
+ .center_freq = (_freq), \
+ .flags = (_flags), \
+ .max_antenna_gain = 0, \
+ .max_power = 30, \
+}
+
+#define CHAN5G(_channel, _freq, _flags) { \
+ .band = NL80211_BAND_5GHZ, \
+ .hw_value = (_channel), \
+ .center_freq = (_freq), \
+ .flags = (_flags), \
+ .max_antenna_gain = 0, \
+ .max_power = 30, \
+}
+
+#define CHAN6G(_channel, _freq, _flags) { \
+ .band = NL80211_BAND_6GHZ, \
+ .hw_value = (_channel), \
+ .center_freq = (_freq), \
+ .flags = (_flags), \
+ .max_antenna_gain = 0, \
+ .max_power = 30, \
+}
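+
+/* max_power = 30 dBm in these templates is a liberal default only; the
+ * effective limit is clamped later when regulatory rules are applied
+ * (see reg.c).
+ */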
+
+static const struct ieee80211_channel ath11k_2ghz_channels[] = {
+ CHAN2G(1, 2412, 0),
+ CHAN2G(2, 2417, 0),
+ CHAN2G(3, 2422, 0),
+ CHAN2G(4, 2427, 0),
+ CHAN2G(5, 2432, 0),
+ CHAN2G(6, 2437, 0),
+ CHAN2G(7, 2442, 0),
+ CHAN2G(8, 2447, 0),
+ CHAN2G(9, 2452, 0),
+ CHAN2G(10, 2457, 0),
+ CHAN2G(11, 2462, 0),
+ CHAN2G(12, 2467, 0),
+ CHAN2G(13, 2472, 0),
+ CHAN2G(14, 2484, 0),
+};
+
+static const struct ieee80211_channel ath11k_5ghz_channels[] = {
+ CHAN5G(36, 5180, 0),
+ CHAN5G(40, 5200, 0),
+ CHAN5G(44, 5220, 0),
+ CHAN5G(48, 5240, 0),
+ CHAN5G(52, 5260, 0),
+ CHAN5G(56, 5280, 0),
+ CHAN5G(60, 5300, 0),
+ CHAN5G(64, 5320, 0),
+ CHAN5G(100, 5500, 0),
+ CHAN5G(104, 5520, 0),
+ CHAN5G(108, 5540, 0),
+ CHAN5G(112, 5560, 0),
+ CHAN5G(116, 5580, 0),
+ CHAN5G(120, 5600, 0),
+ CHAN5G(124, 5620, 0),
+ CHAN5G(128, 5640, 0),
+ CHAN5G(132, 5660, 0),
+ CHAN5G(136, 5680, 0),
+ CHAN5G(140, 5700, 0),
+ CHAN5G(144, 5720, 0),
+ CHAN5G(149, 5745, 0),
+ CHAN5G(153, 5765, 0),
+ CHAN5G(157, 5785, 0),
+ CHAN5G(161, 5805, 0),
+ CHAN5G(165, 5825, 0),
+ CHAN5G(169, 5845, 0),
+ CHAN5G(173, 5865, 0),
+ CHAN5G(177, 5885, 0),
+};
+
+static const struct ieee80211_channel ath11k_6ghz_channels[] = {
+ CHAN6G(1, 5955, 0),
+ CHAN6G(5, 5975, 0),
+ CHAN6G(9, 5995, 0),
+ CHAN6G(13, 6015, 0),
+ CHAN6G(17, 6035, 0),
+ CHAN6G(21, 6055, 0),
+ CHAN6G(25, 6075, 0),
+ CHAN6G(29, 6095, 0),
+ CHAN6G(33, 6115, 0),
+ CHAN6G(37, 6135, 0),
+ CHAN6G(41, 6155, 0),
+ CHAN6G(45, 6175, 0),
+ CHAN6G(49, 6195, 0),
+ CHAN6G(53, 6215, 0),
+ CHAN6G(57, 6235, 0),
+ CHAN6G(61, 6255, 0),
+ CHAN6G(65, 6275, 0),
+ CHAN6G(69, 6295, 0),
+ CHAN6G(73, 6315, 0),
+ CHAN6G(77, 6335, 0),
+ CHAN6G(81, 6355, 0),
+ CHAN6G(85, 6375, 0),
+ CHAN6G(89, 6395, 0),
+ CHAN6G(93, 6415, 0),
+ CHAN6G(97, 6435, 0),
+ CHAN6G(101, 6455, 0),
+ CHAN6G(105, 6475, 0),
+ CHAN6G(109, 6495, 0),
+ CHAN6G(113, 6515, 0),
+ CHAN6G(117, 6535, 0),
+ CHAN6G(121, 6555, 0),
+ CHAN6G(125, 6575, 0),
+ CHAN6G(129, 6595, 0),
+ CHAN6G(133, 6615, 0),
+ CHAN6G(137, 6635, 0),
+ CHAN6G(141, 6655, 0),
+ CHAN6G(145, 6675, 0),
+ CHAN6G(149, 6695, 0),
+ CHAN6G(153, 6715, 0),
+ CHAN6G(157, 6735, 0),
+ CHAN6G(161, 6755, 0),
+ CHAN6G(165, 6775, 0),
+ CHAN6G(169, 6795, 0),
+ CHAN6G(173, 6815, 0),
+ CHAN6G(177, 6835, 0),
+ CHAN6G(181, 6855, 0),
+ CHAN6G(185, 6875, 0),
+ CHAN6G(189, 6895, 0),
+ CHAN6G(193, 6915, 0),
+ CHAN6G(197, 6935, 0),
+ CHAN6G(201, 6955, 0),
+ CHAN6G(205, 6975, 0),
+ CHAN6G(209, 6995, 0),
+ CHAN6G(213, 7015, 0),
+ CHAN6G(217, 7035, 0),
+ CHAN6G(221, 7055, 0),
+ CHAN6G(225, 7075, 0),
+ CHAN6G(229, 7095, 0),
+ CHAN6G(233, 7115, 0),
+
+ /* new addition in IEEE Std 802.11ax-2021 */
+ CHAN6G(2, 5935, 0),
+};
+
+static struct ieee80211_rate ath11k_legacy_rates[] = {
+ { .bitrate = 10,
+ .hw_value = ATH11K_HW_RATE_CCK_LP_1M },
+ { .bitrate = 20,
+ .hw_value = ATH11K_HW_RATE_CCK_LP_2M,
+ .hw_value_short = ATH11K_HW_RATE_CCK_SP_2M,
+ .flags = IEEE80211_RATE_SHORT_PREAMBLE },
+ { .bitrate = 55,
+ .hw_value = ATH11K_HW_RATE_CCK_LP_5_5M,
+ .hw_value_short = ATH11K_HW_RATE_CCK_SP_5_5M,
+ .flags = IEEE80211_RATE_SHORT_PREAMBLE },
+ { .bitrate = 110,
+ .hw_value = ATH11K_HW_RATE_CCK_LP_11M,
+ .hw_value_short = ATH11K_HW_RATE_CCK_SP_11M,
+ .flags = IEEE80211_RATE_SHORT_PREAMBLE },
+
+ { .bitrate = 60, .hw_value = ATH11K_HW_RATE_OFDM_6M },
+ { .bitrate = 90, .hw_value = ATH11K_HW_RATE_OFDM_9M },
+ { .bitrate = 120, .hw_value = ATH11K_HW_RATE_OFDM_12M },
+ { .bitrate = 180, .hw_value = ATH11K_HW_RATE_OFDM_18M },
+ { .bitrate = 240, .hw_value = ATH11K_HW_RATE_OFDM_24M },
+ { .bitrate = 360, .hw_value = ATH11K_HW_RATE_OFDM_36M },
+ { .bitrate = 480, .hw_value = ATH11K_HW_RATE_OFDM_48M },
+ { .bitrate = 540, .hw_value = ATH11K_HW_RATE_OFDM_54M },
+};
+
+static const int
+ath11k_phymodes[NUM_NL80211_BANDS][ATH11K_CHAN_WIDTH_NUM] = {
+ [NL80211_BAND_2GHZ] = {
+ [NL80211_CHAN_WIDTH_5] = MODE_UNKNOWN,
+ [NL80211_CHAN_WIDTH_10] = MODE_UNKNOWN,
+ [NL80211_CHAN_WIDTH_20_NOHT] = MODE_11AX_HE20_2G,
+ [NL80211_CHAN_WIDTH_20] = MODE_11AX_HE20_2G,
+ [NL80211_CHAN_WIDTH_40] = MODE_11AX_HE40_2G,
+ [NL80211_CHAN_WIDTH_80] = MODE_11AX_HE80_2G,
+ [NL80211_CHAN_WIDTH_80P80] = MODE_UNKNOWN,
+ [NL80211_CHAN_WIDTH_160] = MODE_UNKNOWN,
+ },
+ [NL80211_BAND_5GHZ] = {
+ [NL80211_CHAN_WIDTH_5] = MODE_UNKNOWN,
+ [NL80211_CHAN_WIDTH_10] = MODE_UNKNOWN,
+ [NL80211_CHAN_WIDTH_20_NOHT] = MODE_11AX_HE20,
+ [NL80211_CHAN_WIDTH_20] = MODE_11AX_HE20,
+ [NL80211_CHAN_WIDTH_40] = MODE_11AX_HE40,
+ [NL80211_CHAN_WIDTH_80] = MODE_11AX_HE80,
+ [NL80211_CHAN_WIDTH_160] = MODE_11AX_HE160,
+ [NL80211_CHAN_WIDTH_80P80] = MODE_11AX_HE80_80,
+ },
+ [NL80211_BAND_6GHZ] = {
+ [NL80211_CHAN_WIDTH_5] = MODE_UNKNOWN,
+ [NL80211_CHAN_WIDTH_10] = MODE_UNKNOWN,
+ [NL80211_CHAN_WIDTH_20_NOHT] = MODE_11AX_HE20,
+ [NL80211_CHAN_WIDTH_20] = MODE_11AX_HE20,
+ [NL80211_CHAN_WIDTH_40] = MODE_11AX_HE40,
+ [NL80211_CHAN_WIDTH_80] = MODE_11AX_HE80,
+ [NL80211_CHAN_WIDTH_160] = MODE_11AX_HE160,
+ [NL80211_CHAN_WIDTH_80P80] = MODE_11AX_HE80_80,
+ },
+};
+
+const struct htt_rx_ring_tlv_filter ath11k_mac_mon_status_filter_default = {
+ .rx_filter = HTT_RX_FILTER_TLV_FLAGS_MPDU_START |
+ HTT_RX_FILTER_TLV_FLAGS_PPDU_END |
+ HTT_RX_FILTER_TLV_FLAGS_PPDU_END_STATUS_DONE,
+ .pkt_filter_flags0 = HTT_RX_FP_MGMT_FILTER_FLAGS0,
+ .pkt_filter_flags1 = HTT_RX_FP_MGMT_FILTER_FLAGS1,
+ .pkt_filter_flags2 = HTT_RX_FP_CTRL_FILTER_FLASG2,
+ .pkt_filter_flags3 = HTT_RX_FP_DATA_FILTER_FLASG3 |
+ HTT_RX_FP_CTRL_FILTER_FLASG3
+};
+
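+/* The legacy rate table above lists the four CCK rates first, followed
+ * by the eight OFDM rates; the a-band (5/6 GHz) helpers below simply
+ * skip the CCK entries.
+ */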
+#define ATH11K_MAC_FIRST_OFDM_RATE_IDX 4
+#define ath11k_g_rates ath11k_legacy_rates
+#define ath11k_g_rates_size (ARRAY_SIZE(ath11k_legacy_rates))
+#define ath11k_a_rates (ath11k_legacy_rates + 4)
+#define ath11k_a_rates_size (ARRAY_SIZE(ath11k_legacy_rates) - 4)
+
+#define ATH11K_MAC_SCAN_CMD_EVT_OVERHEAD 200 /* in msecs */
+
+/* Overhead due to the processing of channel switch events from FW */
+#define ATH11K_SCAN_CHANNEL_SWITCH_WMI_EVT_OVERHEAD 10 /* in msecs */
+
+static const u32 ath11k_smps_map[] = {
+ [WLAN_HT_CAP_SM_PS_STATIC] = WMI_PEER_SMPS_STATIC,
+ [WLAN_HT_CAP_SM_PS_DYNAMIC] = WMI_PEER_SMPS_DYNAMIC,
+ [WLAN_HT_CAP_SM_PS_INVALID] = WMI_PEER_SMPS_PS_NONE,
+ [WLAN_HT_CAP_SM_PS_DISABLED] = WMI_PEER_SMPS_PS_NONE,
+};
+
+static int ath11k_start_vdev_delay(struct ieee80211_hw *hw,
+ struct ieee80211_vif *vif);
+
+enum nl80211_he_ru_alloc ath11k_mac_phy_he_ru_to_nl80211_he_ru_alloc(u16 ru_phy)
+{
+ enum nl80211_he_ru_alloc ret;
+
+ switch (ru_phy) {
+ case RU_26:
+ ret = NL80211_RATE_INFO_HE_RU_ALLOC_26;
+ break;
+ case RU_52:
+ ret = NL80211_RATE_INFO_HE_RU_ALLOC_52;
+ break;
+ case RU_106:
+ ret = NL80211_RATE_INFO_HE_RU_ALLOC_106;
+ break;
+ case RU_242:
+ ret = NL80211_RATE_INFO_HE_RU_ALLOC_242;
+ break;
+ case RU_484:
+ ret = NL80211_RATE_INFO_HE_RU_ALLOC_484;
+ break;
+ case RU_996:
+ ret = NL80211_RATE_INFO_HE_RU_ALLOC_996;
+ break;
+ default:
+ ret = NL80211_RATE_INFO_HE_RU_ALLOC_26;
+ break;
+ }
+
+ return ret;
+}
+
+enum nl80211_he_ru_alloc ath11k_mac_he_ru_tones_to_nl80211_he_ru_alloc(u16 ru_tones)
+{
+ enum nl80211_he_ru_alloc ret;
+
+ switch (ru_tones) {
+ case 26:
+ ret = NL80211_RATE_INFO_HE_RU_ALLOC_26;
+ break;
+ case 52:
+ ret = NL80211_RATE_INFO_HE_RU_ALLOC_52;
+ break;
+ case 106:
+ ret = NL80211_RATE_INFO_HE_RU_ALLOC_106;
+ break;
+ case 242:
+ ret = NL80211_RATE_INFO_HE_RU_ALLOC_242;
+ break;
+ case 484:
+ ret = NL80211_RATE_INFO_HE_RU_ALLOC_484;
+ break;
+ case 996:
+ ret = NL80211_RATE_INFO_HE_RU_ALLOC_996;
+ break;
+ case (996 * 2):
+ ret = NL80211_RATE_INFO_HE_RU_ALLOC_2x996;
+ break;
+ default:
+ ret = NL80211_RATE_INFO_HE_RU_ALLOC_26;
+ break;
+ }
+
+ return ret;
+}
+
+enum nl80211_he_gi ath11k_mac_he_gi_to_nl80211_he_gi(u8 sgi)
+{
+ enum nl80211_he_gi ret;
+
+ switch (sgi) {
+ case RX_MSDU_START_SGI_0_8_US:
+ ret = NL80211_RATE_INFO_HE_GI_0_8;
+ break;
+ case RX_MSDU_START_SGI_1_6_US:
+ ret = NL80211_RATE_INFO_HE_GI_1_6;
+ break;
+ case RX_MSDU_START_SGI_3_2_US:
+ ret = NL80211_RATE_INFO_HE_GI_3_2;
+ break;
+ default:
+ ret = NL80211_RATE_INFO_HE_GI_0_8;
+ break;
+ }
+
+ return ret;
+}
+
+u8 ath11k_mac_bw_to_mac80211_bw(u8 bw)
+{
+ u8 ret = 0;
+
+ switch (bw) {
+ case ATH11K_BW_20:
+ ret = RATE_INFO_BW_20;
+ break;
+ case ATH11K_BW_40:
+ ret = RATE_INFO_BW_40;
+ break;
+ case ATH11K_BW_80:
+ ret = RATE_INFO_BW_80;
+ break;
+ case ATH11K_BW_160:
+ ret = RATE_INFO_BW_160;
+ break;
+ }
+
+ return ret;
+}
+
+enum ath11k_supported_bw ath11k_mac_mac80211_bw_to_ath11k_bw(enum rate_info_bw bw)
+{
+ switch (bw) {
+ case RATE_INFO_BW_20:
+ return ATH11K_BW_20;
+ case RATE_INFO_BW_40:
+ return ATH11K_BW_40;
+ case RATE_INFO_BW_80:
+ return ATH11K_BW_80;
+ case RATE_INFO_BW_160:
+ return ATH11K_BW_160;
+ default:
+ return ATH11K_BW_20;
+ }
+}
+
+int ath11k_mac_hw_ratecode_to_legacy_rate(u8 hw_rc, u8 preamble, u8 *rateidx,
+ u16 *rate)
+{
+	/* Default to the OFDM rate table */
+ int i = ATH11K_MAC_FIRST_OFDM_RATE_IDX;
+ int max_rates_idx = ath11k_g_rates_size;
+
+ if (preamble == WMI_RATE_PREAMBLE_CCK) {
+ hw_rc &= ~ATH11k_HW_RATECODE_CCK_SHORT_PREAM_MASK;
+ i = 0;
+ max_rates_idx = ATH11K_MAC_FIRST_OFDM_RATE_IDX;
+ }
+
+ while (i < max_rates_idx) {
+ if (hw_rc == ath11k_legacy_rates[i].hw_value) {
+ *rateidx = i;
+ *rate = ath11k_legacy_rates[i].bitrate;
+ return 0;
+ }
+ i++;
+ }
+
+ return -EINVAL;
+}
+
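+/* Count the set bits in a chainmask (equivalent to hweight32()) */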
+static int get_num_chains(u32 mask)
+{
+ int num_chains = 0;
+
+ while (mask) {
+ if (mask & BIT(0))
+ num_chains++;
+ mask >>= 1;
+ }
+
+ return num_chains;
+}
+
+u8 ath11k_mac_bitrate_to_idx(const struct ieee80211_supported_band *sband,
+ u32 bitrate)
+{
+ int i;
+
+ for (i = 0; i < sband->n_bitrates; i++)
+ if (sband->bitrates[i].bitrate == bitrate)
+ return i;
+
+ return 0;
+}
+
+static u32
+ath11k_mac_max_ht_nss(const u8 *ht_mcs_mask)
+{
+ int nss;
+
+ for (nss = IEEE80211_HT_MCS_MASK_LEN - 1; nss >= 0; nss--)
+ if (ht_mcs_mask[nss])
+ return nss + 1;
+
+ return 1;
+}
+
+static u32
+ath11k_mac_max_vht_nss(const u16 *vht_mcs_mask)
+{
+ int nss;
+
+ for (nss = NL80211_VHT_NSS_MAX - 1; nss >= 0; nss--)
+ if (vht_mcs_mask[nss])
+ return nss + 1;
+
+ return 1;
+}
+
+static u32
+ath11k_mac_max_he_nss(const u16 *he_mcs_mask)
+{
+ int nss;
+
+ for (nss = NL80211_HE_NSS_MAX - 1; nss >= 0; nss--)
+ if (he_mcs_mask[nss])
+ return nss + 1;
+
+ return 1;
+}
+
+static u8 ath11k_parse_mpdudensity(u8 mpdudensity)
+{
+/* 802.11n D2.0 defined values for "Minimum MPDU Start Spacing":
+ * 0 for no restriction
+ * 1 for 1/4 us
+ * 2 for 1/2 us
+ * 3 for 1 us
+ * 4 for 2 us
+ * 5 for 4 us
+ * 6 for 8 us
+ * 7 for 16 us
+ */
+ switch (mpdudensity) {
+ case 0:
+ return 0;
+ case 1:
+ case 2:
+ case 3:
+ /* Our lower layer calculations limit our precision to
+ * 1 microsecond
+ */
+ return 1;
+ case 4:
+ return 2;
+ case 5:
+ return 4;
+ case 6:
+ return 8;
+ case 7:
+ return 16;
+ default:
+ return 0;
+ }
+}
+
+static int ath11k_mac_vif_chan(struct ieee80211_vif *vif,
+ struct cfg80211_chan_def *def)
+{
+ struct ieee80211_chanctx_conf *conf;
+
+ rcu_read_lock();
+ conf = rcu_dereference(vif->bss_conf.chanctx_conf);
+ if (!conf) {
+ rcu_read_unlock();
+ return -ENOENT;
+ }
+
+ *def = conf->def;
+ rcu_read_unlock();
+
+ return 0;
+}
+
+static bool ath11k_mac_bitrate_is_cck(int bitrate)
+{
+ switch (bitrate) {
+ case 10:
+ case 20:
+ case 55:
+ case 110:
+ return true;
+ }
+
+ return false;
+}
+
+u8 ath11k_mac_hw_rate_to_idx(const struct ieee80211_supported_band *sband,
+ u8 hw_rate, bool cck)
+{
+ const struct ieee80211_rate *rate;
+ int i;
+
+ for (i = 0; i < sband->n_bitrates; i++) {
+ rate = &sband->bitrates[i];
+
+ if (ath11k_mac_bitrate_is_cck(rate->bitrate) != cck)
+ continue;
+
+ if (rate->hw_value == hw_rate)
+ return i;
+ else if (rate->flags & IEEE80211_RATE_SHORT_PREAMBLE &&
+ rate->hw_value_short == hw_rate)
+ return i;
+ }
+
+ return 0;
+}
+
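+/* Convert a bitrate in 100 kbps units to the 0.5 Mbps units used in
+ * rate codes, with BIT(7) marking the rate as CCK.
+ */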
+static u8 ath11k_mac_bitrate_to_rate(int bitrate)
+{
+ return DIV_ROUND_UP(bitrate, 5) |
+ (ath11k_mac_bitrate_is_cck(bitrate) ? BIT(7) : 0);
+}
+
+static void ath11k_get_arvif_iter(void *data, u8 *mac,
+ struct ieee80211_vif *vif)
+{
+ struct ath11k_vif_iter *arvif_iter = data;
+ struct ath11k_vif *arvif = ath11k_vif_to_arvif(vif);
+
+ if (arvif->vdev_id == arvif_iter->vdev_id)
+ arvif_iter->arvif = arvif;
+}
+
+struct ath11k_vif *ath11k_mac_get_arvif(struct ath11k *ar, u32 vdev_id)
+{
+ struct ath11k_vif_iter arvif_iter;
+ u32 flags;
+
+ memset(&arvif_iter, 0, sizeof(struct ath11k_vif_iter));
+ arvif_iter.vdev_id = vdev_id;
+
+ flags = IEEE80211_IFACE_ITER_RESUME_ALL;
+ ieee80211_iterate_active_interfaces_atomic(ar->hw,
+ flags,
+ ath11k_get_arvif_iter,
+ &arvif_iter);
+ if (!arvif_iter.arvif) {
+ ath11k_warn(ar->ab, "No VIF found for vdev %d\n", vdev_id);
+ return NULL;
+ }
+
+ return arvif_iter.arvif;
+}
+
+struct ath11k_vif *ath11k_mac_get_arvif_by_vdev_id(struct ath11k_base *ab,
+ u32 vdev_id)
+{
+ int i;
+ struct ath11k_pdev *pdev;
+ struct ath11k_vif *arvif;
+
+ for (i = 0; i < ab->num_radios; i++) {
+ pdev = rcu_dereference(ab->pdevs_active[i]);
+ if (pdev && pdev->ar &&
+ (pdev->ar->allocated_vdev_map & (1LL << vdev_id))) {
+ arvif = ath11k_mac_get_arvif(pdev->ar, vdev_id);
+ if (arvif)
+ return arvif;
+ }
+ }
+
+ return NULL;
+}
+
+struct ath11k *ath11k_mac_get_ar_by_vdev_id(struct ath11k_base *ab, u32 vdev_id)
+{
+ int i;
+ struct ath11k_pdev *pdev;
+
+ for (i = 0; i < ab->num_radios; i++) {
+ pdev = rcu_dereference(ab->pdevs_active[i]);
+ if (pdev && pdev->ar) {
+ if (pdev->ar->allocated_vdev_map & (1LL << vdev_id))
+ return pdev->ar;
+ }
+ }
+
+ return NULL;
+}
+
+struct ath11k *ath11k_mac_get_ar_by_pdev_id(struct ath11k_base *ab, u32 pdev_id)
+{
+ int i;
+ struct ath11k_pdev *pdev;
+
+ if (ab->hw_params.single_pdev_only) {
+ pdev = rcu_dereference(ab->pdevs_active[0]);
+ return pdev ? pdev->ar : NULL;
+ }
+
+ if (WARN_ON(pdev_id > ab->num_radios))
+ return NULL;
+
+ for (i = 0; i < ab->num_radios; i++) {
+ if (ab->fw_mode == ATH11K_FIRMWARE_MODE_FTM)
+ pdev = &ab->pdevs[i];
+ else
+ pdev = rcu_dereference(ab->pdevs_active[i]);
+
+ if (pdev && pdev->pdev_id == pdev_id)
+			return pdev->ar;
+ }
+
+ return NULL;
+}
+
+struct ath11k_vif *ath11k_mac_get_vif_up(struct ath11k_base *ab)
+{
+ struct ath11k *ar;
+ struct ath11k_pdev *pdev;
+ struct ath11k_vif *arvif;
+ int i;
+
+ for (i = 0; i < ab->num_radios; i++) {
+ pdev = &ab->pdevs[i];
+ ar = pdev->ar;
+ list_for_each_entry(arvif, &ar->arvifs, list) {
+ if (arvif->is_up)
+ return arvif;
+ }
+ }
+
+ return NULL;
+}
+
+static bool ath11k_mac_band_match(enum nl80211_band band1, enum WMI_HOST_WLAN_BAND band2)
+{
+ return (((band1 == NL80211_BAND_2GHZ) && (band2 & WMI_HOST_WLAN_2G_CAP)) ||
+ (((band1 == NL80211_BAND_5GHZ) || (band1 == NL80211_BAND_6GHZ)) &&
+ (band2 & WMI_HOST_WLAN_5G_CAP)));
+}
+
+u8 ath11k_mac_get_target_pdev_id_from_vif(struct ath11k_vif *arvif)
+{
+ struct ath11k *ar = arvif->ar;
+ struct ath11k_base *ab = ar->ab;
+ struct ieee80211_vif *vif = arvif->vif;
+ struct cfg80211_chan_def def;
+ enum nl80211_band band;
+ u8 pdev_id = ab->target_pdev_ids[0].pdev_id;
+ int i;
+
+ if (WARN_ON(ath11k_mac_vif_chan(vif, &def)))
+ return pdev_id;
+
+ band = def.chan->band;
+
+ for (i = 0; i < ab->target_pdev_count; i++) {
+ if (ath11k_mac_band_match(band, ab->target_pdev_ids[i].supported_bands))
+ return ab->target_pdev_ids[i].pdev_id;
+ }
+
+ return pdev_id;
+}
+
+u8 ath11k_mac_get_target_pdev_id(struct ath11k *ar)
+{
+ struct ath11k_vif *arvif;
+
+ arvif = ath11k_mac_get_vif_up(ar->ab);
+
+ if (arvif)
+ return ath11k_mac_get_target_pdev_id_from_vif(arvif);
+ else
+ return ar->ab->target_pdev_ids[0].pdev_id;
+}
+
+static void ath11k_pdev_caps_update(struct ath11k *ar)
+{
+ struct ath11k_base *ab = ar->ab;
+
+ ar->max_tx_power = ab->target_caps.hw_max_tx_power;
+
+ /* FIXME Set min_tx_power to ab->target_caps.hw_min_tx_power.
+	 * But since the value received in svcrdy is the same as hw_max_tx_power,
+	 * keep ar->min_tx_power at 0 until this is fixed in firmware.
+ */
+ ar->min_tx_power = 0;
+
+ ar->txpower_limit_2g = ar->max_tx_power;
+ ar->txpower_limit_5g = ar->max_tx_power;
+ ar->txpower_scale = WMI_HOST_TP_SCALE_MAX;
+}
+
+static int ath11k_mac_txpower_recalc(struct ath11k *ar)
+{
+ struct ath11k_pdev *pdev = ar->pdev;
+ struct ath11k_vif *arvif;
+ int ret, txpower = -1;
+ u32 param;
+
+ lockdep_assert_held(&ar->conf_mutex);
+
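+	/* Use the lowest positive txpower configured across all vifs */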
+ list_for_each_entry(arvif, &ar->arvifs, list) {
+ if (arvif->txpower <= 0)
+ continue;
+
+ if (txpower == -1)
+ txpower = arvif->txpower;
+ else
+ txpower = min(txpower, arvif->txpower);
+ }
+
+ if (txpower == -1)
+ return 0;
+
+	/* txpower is set in units of 0.5 dBm (2 units per dBm) in firmware */
+ txpower = min_t(u32, max_t(u32, ar->min_tx_power, txpower),
+ ar->max_tx_power) * 2;
+
+ ath11k_dbg(ar->ab, ATH11K_DBG_MAC, "txpower to set in hw %d\n",
+ txpower / 2);
+
+ if ((pdev->cap.supported_bands & WMI_HOST_WLAN_2G_CAP) &&
+ ar->txpower_limit_2g != txpower) {
+ param = WMI_PDEV_PARAM_TXPOWER_LIMIT2G;
+ ret = ath11k_wmi_pdev_set_param(ar, param,
+ txpower, ar->pdev->pdev_id);
+ if (ret)
+ goto fail;
+ ar->txpower_limit_2g = txpower;
+ }
+
+ if ((pdev->cap.supported_bands & WMI_HOST_WLAN_5G_CAP) &&
+ ar->txpower_limit_5g != txpower) {
+ param = WMI_PDEV_PARAM_TXPOWER_LIMIT5G;
+ ret = ath11k_wmi_pdev_set_param(ar, param,
+ txpower, ar->pdev->pdev_id);
+ if (ret)
+ goto fail;
+ ar->txpower_limit_5g = txpower;
+ }
+
+ return 0;
+
+fail:
+ ath11k_warn(ar->ab, "failed to recalc txpower limit %d using pdev param %d: %d\n",
+ txpower / 2, param, ret);
+ return ret;
+}
+
+static int ath11k_recalc_rtscts_prot(struct ath11k_vif *arvif)
+{
+ struct ath11k *ar = arvif->ar;
+ u32 vdev_param, rts_cts = 0;
+ int ret;
+
+ lockdep_assert_held(&ar->conf_mutex);
+
+ vdev_param = WMI_VDEV_PARAM_ENABLE_RTSCTS;
+
+ /* Enable RTS/CTS protection for sw retries (when legacy stations
+ * are in BSS) or by default only for second rate series.
+ * TODO: Check if we need to enable CTS 2 Self in any case
+ */
+ rts_cts = WMI_USE_RTS_CTS;
+
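+	/* The enable mode lives in the low four bits; the rate-series
+	 * profile is packed into the next four, hence the shift by 4.
+	 */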
+ if (arvif->num_legacy_stations > 0)
+ rts_cts |= WMI_RTSCTS_ACROSS_SW_RETRIES << 4;
+ else
+ rts_cts |= WMI_RTSCTS_FOR_SECOND_RATESERIES << 4;
+
+ /* Need not send duplicate param value to firmware */
+ if (arvif->rtscts_prot_mode == rts_cts)
+ return 0;
+
+ arvif->rtscts_prot_mode = rts_cts;
+
+ ath11k_dbg(ar->ab, ATH11K_DBG_MAC, "vdev %d recalc rts/cts prot %d\n",
+ arvif->vdev_id, rts_cts);
+
+ ret = ath11k_wmi_vdev_set_param_cmd(ar, arvif->vdev_id,
+ vdev_param, rts_cts);
+ if (ret)
+ ath11k_warn(ar->ab, "failed to recalculate rts/cts prot for vdev %d: %d\n",
+ arvif->vdev_id, ret);
+
+ return ret;
+}
+
+static int ath11k_mac_set_kickout(struct ath11k_vif *arvif)
+{
+ struct ath11k *ar = arvif->ar;
+ u32 param;
+ int ret;
+
+ ret = ath11k_wmi_pdev_set_param(ar, WMI_PDEV_PARAM_STA_KICKOUT_TH,
+ ATH11K_KICKOUT_THRESHOLD,
+ ar->pdev->pdev_id);
+ if (ret) {
+ ath11k_warn(ar->ab, "failed to set kickout threshold on vdev %i: %d\n",
+ arvif->vdev_id, ret);
+ return ret;
+ }
+
+ param = WMI_VDEV_PARAM_AP_KEEPALIVE_MIN_IDLE_INACTIVE_TIME_SECS;
+ ret = ath11k_wmi_vdev_set_param_cmd(ar, arvif->vdev_id, param,
+ ATH11K_KEEPALIVE_MIN_IDLE);
+ if (ret) {
+ ath11k_warn(ar->ab, "failed to set keepalive minimum idle time on vdev %i: %d\n",
+ arvif->vdev_id, ret);
+ return ret;
+ }
+
+ param = WMI_VDEV_PARAM_AP_KEEPALIVE_MAX_IDLE_INACTIVE_TIME_SECS;
+ ret = ath11k_wmi_vdev_set_param_cmd(ar, arvif->vdev_id, param,
+ ATH11K_KEEPALIVE_MAX_IDLE);
+ if (ret) {
+ ath11k_warn(ar->ab, "failed to set keepalive maximum idle time on vdev %i: %d\n",
+ arvif->vdev_id, ret);
+ return ret;
+ }
+
+ param = WMI_VDEV_PARAM_AP_KEEPALIVE_MAX_UNRESPONSIVE_TIME_SECS;
+ ret = ath11k_wmi_vdev_set_param_cmd(ar, arvif->vdev_id, param,
+ ATH11K_KEEPALIVE_MAX_UNRESPONSIVE);
+ if (ret) {
+ ath11k_warn(ar->ab, "failed to set keepalive maximum unresponsive time on vdev %i: %d\n",
+ arvif->vdev_id, ret);
+ return ret;
+ }
+
+ return 0;
+}
+
+void ath11k_mac_peer_cleanup_all(struct ath11k *ar)
+{
+ struct ath11k_peer *peer, *tmp;
+ struct ath11k_base *ab = ar->ab;
+
+ lockdep_assert_held(&ar->conf_mutex);
+
+ mutex_lock(&ab->tbl_mtx_lock);
+ spin_lock_bh(&ab->base_lock);
+ list_for_each_entry_safe(peer, tmp, &ab->peers, list) {
+ ath11k_peer_rx_tid_cleanup(ar, peer);
+ ath11k_peer_rhash_delete(ab, peer);
+ list_del(&peer->list);
+ kfree(peer);
+ }
+ spin_unlock_bh(&ab->base_lock);
+ mutex_unlock(&ab->tbl_mtx_lock);
+
+ ar->num_peers = 0;
+ ar->num_stations = 0;
+}
+
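+/* Wait for the firmware response to a vdev start/restart/stop request;
+ * vdev_setup_done is completed from the corresponding WMI event handler.
+ */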
+static inline int ath11k_mac_vdev_setup_sync(struct ath11k *ar)
+{
+ lockdep_assert_held(&ar->conf_mutex);
+
+ if (test_bit(ATH11K_FLAG_CRASH_FLUSH, &ar->ab->dev_flags))
+ return -ESHUTDOWN;
+
+ if (!wait_for_completion_timeout(&ar->vdev_setup_done,
+ ATH11K_VDEV_SETUP_TIMEOUT_HZ))
+ return -ETIMEDOUT;
+
+ return ar->last_wmi_vdev_start_status ? -EINVAL : 0;
+}
+
+static void
+ath11k_mac_get_any_chandef_iter(struct ieee80211_hw *hw,
+ struct ieee80211_chanctx_conf *conf,
+ void *data)
+{
+ struct cfg80211_chan_def **def = data;
+
+ *def = &conf->def;
+}
+
+static int ath11k_mac_monitor_vdev_start(struct ath11k *ar, int vdev_id,
+ struct cfg80211_chan_def *chandef)
+{
+ struct ieee80211_channel *channel;
+ struct wmi_vdev_start_req_arg arg = {};
+ int ret;
+
+ lockdep_assert_held(&ar->conf_mutex);
+
+ channel = chandef->chan;
+
+ arg.vdev_id = vdev_id;
+ arg.channel.freq = channel->center_freq;
+ arg.channel.band_center_freq1 = chandef->center_freq1;
+ arg.channel.band_center_freq2 = chandef->center_freq2;
+
+ arg.channel.mode = ath11k_phymodes[chandef->chan->band][chandef->width];
+ arg.channel.chan_radar = !!(channel->flags & IEEE80211_CHAN_RADAR);
+
+ arg.channel.min_power = 0;
+ arg.channel.max_power = channel->max_power;
+ arg.channel.max_reg_power = channel->max_reg_power;
+ arg.channel.max_antenna_gain = channel->max_antenna_gain;
+
+ arg.pref_tx_streams = ar->num_tx_chains;
+ arg.pref_rx_streams = ar->num_rx_chains;
+
+ arg.channel.passive = !!(chandef->chan->flags & IEEE80211_CHAN_NO_IR);
+
+ reinit_completion(&ar->vdev_setup_done);
+ reinit_completion(&ar->vdev_delete_done);
+
+ ret = ath11k_wmi_vdev_start(ar, &arg, false);
+ if (ret) {
+ ath11k_warn(ar->ab, "failed to request monitor vdev %i start: %d\n",
+ vdev_id, ret);
+ return ret;
+ }
+
+ ret = ath11k_mac_vdev_setup_sync(ar);
+ if (ret) {
+ ath11k_warn(ar->ab, "failed to synchronize setup for monitor vdev %i start: %d\n",
+ vdev_id, ret);
+ return ret;
+ }
+
+ ret = ath11k_wmi_vdev_up(ar, vdev_id, 0, ar->mac_addr, NULL, 0, 0);
+ if (ret) {
+ ath11k_warn(ar->ab, "failed to put up monitor vdev %i: %d\n",
+ vdev_id, ret);
+ goto vdev_stop;
+ }
+
+ ath11k_dbg(ar->ab, ATH11K_DBG_MAC, "monitor vdev %i started\n",
+ vdev_id);
+
+ return 0;
+
+vdev_stop:
+ reinit_completion(&ar->vdev_setup_done);
+
+ ret = ath11k_wmi_vdev_stop(ar, vdev_id);
+ if (ret) {
+ ath11k_warn(ar->ab, "failed to stop monitor vdev %i after start failure: %d\n",
+ vdev_id, ret);
+ return ret;
+ }
+
+ ret = ath11k_mac_vdev_setup_sync(ar);
+ if (ret) {
+ ath11k_warn(ar->ab, "failed to synchronize setup for vdev %i stop: %d\n",
+ vdev_id, ret);
+ return ret;
+ }
+
+ return -EIO;
+}
+
+static int ath11k_mac_monitor_vdev_stop(struct ath11k *ar)
+{
+ int ret;
+
+ lockdep_assert_held(&ar->conf_mutex);
+
+ reinit_completion(&ar->vdev_setup_done);
+
+ ret = ath11k_wmi_vdev_stop(ar, ar->monitor_vdev_id);
+ if (ret) {
+ ath11k_warn(ar->ab, "failed to request monitor vdev %i stop: %d\n",
+ ar->monitor_vdev_id, ret);
+ return ret;
+ }
+
+ ret = ath11k_mac_vdev_setup_sync(ar);
+ if (ret) {
+ ath11k_warn(ar->ab, "failed to synchronize monitor vdev %i stop: %d\n",
+ ar->monitor_vdev_id, ret);
+ return ret;
+ }
+
+ ret = ath11k_wmi_vdev_down(ar, ar->monitor_vdev_id);
+ if (ret) {
+ ath11k_warn(ar->ab, "failed to put down monitor vdev %i: %d\n",
+ ar->monitor_vdev_id, ret);
+ return ret;
+ }
+
+ ath11k_dbg(ar->ab, ATH11K_DBG_MAC, "monitor vdev %i stopped\n",
+ ar->monitor_vdev_id);
+
+ return 0;
+}
+
+static int ath11k_mac_monitor_vdev_create(struct ath11k *ar)
+{
+ struct ath11k_pdev *pdev = ar->pdev;
+ struct vdev_create_params param = {};
+ int bit, ret;
+ u8 tmp_addr[6] = {0};
+ u16 nss;
+
+ lockdep_assert_held(&ar->conf_mutex);
+
+ if (test_bit(ATH11K_FLAG_MONITOR_VDEV_CREATED, &ar->monitor_flags))
+ return 0;
+
+ if (ar->ab->free_vdev_map == 0) {
+ ath11k_warn(ar->ab, "failed to find free vdev id for monitor vdev\n");
+ return -ENOMEM;
+ }
+
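+	/* Grab the lowest free vdev id for the monitor vdev */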
+ bit = __ffs64(ar->ab->free_vdev_map);
+
+ ar->monitor_vdev_id = bit;
+
+ param.if_id = ar->monitor_vdev_id;
+ param.type = WMI_VDEV_TYPE_MONITOR;
+ param.subtype = WMI_VDEV_SUBTYPE_NONE;
+ param.pdev_id = pdev->pdev_id;
+
+ if (pdev->cap.supported_bands & WMI_HOST_WLAN_2G_CAP) {
+ param.chains[NL80211_BAND_2GHZ].tx = ar->num_tx_chains;
+ param.chains[NL80211_BAND_2GHZ].rx = ar->num_rx_chains;
+ }
+ if (pdev->cap.supported_bands & WMI_HOST_WLAN_5G_CAP) {
+ param.chains[NL80211_BAND_5GHZ].tx = ar->num_tx_chains;
+ param.chains[NL80211_BAND_5GHZ].rx = ar->num_rx_chains;
+ }
+
+ ret = ath11k_wmi_vdev_create(ar, tmp_addr, &param);
+ if (ret) {
+ ath11k_warn(ar->ab, "failed to request monitor vdev %i creation: %d\n",
+ ar->monitor_vdev_id, ret);
+ ar->monitor_vdev_id = -1;
+ return ret;
+ }
+
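+	/* Fall back to a single spatial stream if no TX chain is configured */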
+ nss = get_num_chains(ar->cfg_tx_chainmask) ? : 1;
+ ret = ath11k_wmi_vdev_set_param_cmd(ar, ar->monitor_vdev_id,
+ WMI_VDEV_PARAM_NSS, nss);
+ if (ret) {
+ ath11k_warn(ar->ab, "failed to set vdev %d chainmask 0x%x, nss %d :%d\n",
+ ar->monitor_vdev_id, ar->cfg_tx_chainmask, nss, ret);
+ goto err_vdev_del;
+ }
+
+ ret = ath11k_mac_txpower_recalc(ar);
+ if (ret) {
+ ath11k_warn(ar->ab, "failed to recalc txpower for monitor vdev %d: %d\n",
+ ar->monitor_vdev_id, ret);
+ goto err_vdev_del;
+ }
+
+ ar->allocated_vdev_map |= 1LL << ar->monitor_vdev_id;
+ ar->ab->free_vdev_map &= ~(1LL << ar->monitor_vdev_id);
+ ar->num_created_vdevs++;
+ set_bit(ATH11K_FLAG_MONITOR_VDEV_CREATED, &ar->monitor_flags);
+
+ ath11k_dbg(ar->ab, ATH11K_DBG_MAC, "monitor vdev %d created\n",
+ ar->monitor_vdev_id);
+
+ return 0;
+
+err_vdev_del:
+ ath11k_wmi_vdev_delete(ar, ar->monitor_vdev_id);
+ ar->monitor_vdev_id = -1;
+ return ret;
+}
+
+static int ath11k_mac_monitor_vdev_delete(struct ath11k *ar)
+{
+ int ret;
+ unsigned long time_left;
+
+ lockdep_assert_held(&ar->conf_mutex);
+
+ if (!test_bit(ATH11K_FLAG_MONITOR_VDEV_CREATED, &ar->monitor_flags))
+ return 0;
+
+ reinit_completion(&ar->vdev_delete_done);
+
+ ret = ath11k_wmi_vdev_delete(ar, ar->monitor_vdev_id);
+ if (ret) {
+ ath11k_warn(ar->ab, "failed to request wmi monitor vdev %i removal: %d\n",
+ ar->monitor_vdev_id, ret);
+ return ret;
+ }
+
+ time_left = wait_for_completion_timeout(&ar->vdev_delete_done,
+ ATH11K_VDEV_DELETE_TIMEOUT_HZ);
+ if (time_left == 0) {
+ ath11k_warn(ar->ab, "Timeout in receiving vdev delete response\n");
+ } else {
+ ath11k_dbg(ar->ab, ATH11K_DBG_MAC, "monitor vdev %d deleted\n",
+ ar->monitor_vdev_id);
+
+ ar->allocated_vdev_map &= ~(1LL << ar->monitor_vdev_id);
+ ar->ab->free_vdev_map |= 1LL << (ar->monitor_vdev_id);
+ ar->num_created_vdevs--;
+ ar->monitor_vdev_id = -1;
+ clear_bit(ATH11K_FLAG_MONITOR_VDEV_CREATED, &ar->monitor_flags);
+ }
+
+ return ret;
+}
+
+static int ath11k_mac_monitor_start(struct ath11k *ar)
+{
+ struct cfg80211_chan_def *chandef = NULL;
+ int ret;
+
+ lockdep_assert_held(&ar->conf_mutex);
+
+ if (test_bit(ATH11K_FLAG_MONITOR_STARTED, &ar->monitor_flags))
+ return 0;
+
+ ieee80211_iter_chan_contexts_atomic(ar->hw,
+ ath11k_mac_get_any_chandef_iter,
+ &chandef);
+ if (!chandef)
+ return 0;
+
+ ret = ath11k_mac_monitor_vdev_start(ar, ar->monitor_vdev_id, chandef);
+ if (ret) {
+ ath11k_warn(ar->ab, "failed to start monitor vdev: %d\n", ret);
+ ath11k_mac_monitor_vdev_delete(ar);
+ return ret;
+ }
+
+ set_bit(ATH11K_FLAG_MONITOR_STARTED, &ar->monitor_flags);
+
+ ar->num_started_vdevs++;
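+	/* false here configures (rather than resets) the monitor mode rings */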
+ ret = ath11k_dp_tx_htt_monitor_mode_ring_config(ar, false);
+ if (ret) {
+ ath11k_warn(ar->ab, "failed to configure htt monitor mode ring during start: %d",
+ ret);
+ return ret;
+ }
+
+ ath11k_dbg(ar->ab, ATH11K_DBG_MAC, "monitor started\n");
+
+ return 0;
+}
+
+static int ath11k_mac_monitor_stop(struct ath11k *ar)
+{
+ int ret;
+
+ lockdep_assert_held(&ar->conf_mutex);
+
+ if (!test_bit(ATH11K_FLAG_MONITOR_STARTED, &ar->monitor_flags))
+ return 0;
+
+ ret = ath11k_mac_monitor_vdev_stop(ar);
+ if (ret) {
+ ath11k_warn(ar->ab, "failed to stop monitor vdev: %d\n", ret);
+ return ret;
+ }
+
+ clear_bit(ATH11K_FLAG_MONITOR_STARTED, &ar->monitor_flags);
+ ar->num_started_vdevs--;
+
+ ret = ath11k_dp_tx_htt_monitor_mode_ring_config(ar, true);
+ if (ret) {
+ ath11k_warn(ar->ab, "failed to configure htt monitor mode ring during stop: %d",
+ ret);
+ return ret;
+ }
+
+ ath11k_dbg(ar->ab, ATH11K_DBG_MAC, "monitor stopped ret %d\n", ret);
+
+ return 0;
+}
+
+static int ath11k_mac_vif_setup_ps(struct ath11k_vif *arvif)
+{
+ struct ath11k *ar = arvif->ar;
+ struct ieee80211_vif *vif = arvif->vif;
+ struct ieee80211_conf *conf = &ar->hw->conf;
+ enum wmi_sta_powersave_param param;
+ enum wmi_sta_ps_mode psmode;
+ int ret;
+ int timeout;
+ bool enable_ps;
+
+ lockdep_assert_held(&arvif->ar->conf_mutex);
+
+ if (arvif->vif->type != NL80211_IFTYPE_STATION)
+ return 0;
+
+ enable_ps = arvif->ps;
+
+ if (!arvif->is_started) {
+ /* mac80211 can update vif powersave state while disconnected.
+ * Firmware doesn't behave nicely and consumes more power than
+ * necessary if PS is disabled on a non-started vdev. Hence
+ * force-enable PS for non-running vdevs.
+ */
+ psmode = WMI_STA_PS_MODE_ENABLED;
+ } else if (enable_ps) {
+ psmode = WMI_STA_PS_MODE_ENABLED;
+ param = WMI_STA_PS_PARAM_INACTIVITY_TIME;
+
+ timeout = conf->dynamic_ps_timeout;
+ if (timeout == 0) {
+ /* firmware doesn't like 0 */
+ timeout = ieee80211_tu_to_usec(vif->bss_conf.beacon_int) / 1000;
+ }
+
+ ret = ath11k_wmi_set_sta_ps_param(ar, arvif->vdev_id, param,
+ timeout);
+ if (ret) {
+ ath11k_warn(ar->ab, "failed to set inactivity time for vdev %d: %i\n",
+ arvif->vdev_id, ret);
+ return ret;
+ }
+ } else {
+ psmode = WMI_STA_PS_MODE_DISABLED;
+ }
+
+ ath11k_dbg(ar->ab, ATH11K_DBG_MAC, "vdev %d psmode %s\n",
+ arvif->vdev_id, psmode ? "enable" : "disable");
+
+ ret = ath11k_wmi_pdev_set_ps_mode(ar, arvif->vdev_id, psmode);
+ if (ret) {
+ ath11k_warn(ar->ab, "failed to set sta power save mode %d for vdev %d: %d\n",
+ psmode, arvif->vdev_id, ret);
+ return ret;
+ }
+
+ return 0;
+}
+
+static int ath11k_mac_config_ps(struct ath11k *ar)
+{
+ struct ath11k_vif *arvif;
+ int ret = 0;
+
+ lockdep_assert_held(&ar->conf_mutex);
+
+ list_for_each_entry(arvif, &ar->arvifs, list) {
+ ret = ath11k_mac_vif_setup_ps(arvif);
+ if (ret) {
+ ath11k_warn(ar->ab, "failed to setup powersave: %d\n", ret);
+ break;
+ }
+ }
+
+ return ret;
+}
+
+static int ath11k_mac_op_config(struct ieee80211_hw *hw, u32 changed)
+{
+ struct ath11k *ar = hw->priv;
+ struct ieee80211_conf *conf = &hw->conf;
+ int ret = 0;
+
+ mutex_lock(&ar->conf_mutex);
+
+ if (changed & IEEE80211_CONF_CHANGE_MONITOR) {
+ if (conf->flags & IEEE80211_CONF_MONITOR) {
+ set_bit(ATH11K_FLAG_MONITOR_CONF_ENABLED, &ar->monitor_flags);
+
+ if (test_bit(ATH11K_FLAG_MONITOR_VDEV_CREATED,
+ &ar->monitor_flags))
+ goto out;
+
+ ret = ath11k_mac_monitor_vdev_create(ar);
+ if (ret) {
+ ath11k_warn(ar->ab, "failed to create monitor vdev: %d",
+ ret);
+ goto out;
+ }
+
+ ret = ath11k_mac_monitor_start(ar);
+ if (ret) {
+ ath11k_warn(ar->ab, "failed to start monitor: %d",
+ ret);
+ goto err_mon_del;
+ }
+ } else {
+ clear_bit(ATH11K_FLAG_MONITOR_CONF_ENABLED, &ar->monitor_flags);
+
+ if (!test_bit(ATH11K_FLAG_MONITOR_VDEV_CREATED,
+ &ar->monitor_flags))
+ goto out;
+
+ ret = ath11k_mac_monitor_stop(ar);
+ if (ret) {
+ ath11k_warn(ar->ab, "failed to stop monitor: %d",
+ ret);
+ goto out;
+ }
+
+ ret = ath11k_mac_monitor_vdev_delete(ar);
+ if (ret) {
+ ath11k_warn(ar->ab, "failed to delete monitor vdev: %d",
+ ret);
+ goto out;
+ }
+ }
+ }
+
+out:
+ mutex_unlock(&ar->conf_mutex);
+ return ret;
+
+err_mon_del:
+ ath11k_mac_monitor_vdev_delete(ar);
+ mutex_unlock(&ar->conf_mutex);
+ return ret;
+}
+
+static void ath11k_mac_setup_nontx_vif_rsnie(struct ath11k_vif *arvif,
+ bool tx_arvif_rsnie_present,
+ const u8 *profile, u8 profile_len)
+{
+ if (cfg80211_find_ie(WLAN_EID_RSN, profile, profile_len)) {
+ arvif->rsnie_present = true;
+ } else if (tx_arvif_rsnie_present) {
+ int i;
+ u8 nie_len;
+ const u8 *nie = cfg80211_find_ext_ie(WLAN_EID_EXT_NON_INHERITANCE,
+ profile, profile_len);
+ if (!nie)
+ return;
+
+ nie_len = nie[1];
+ nie += 2;
+ for (i = 0; i < nie_len; i++) {
+ if (nie[i] == WLAN_EID_RSN) {
+ arvif->rsnie_present = false;
+ break;
+ }
+ }
+ }
+}
+
+static bool ath11k_mac_set_nontx_vif_params(struct ath11k_vif *tx_arvif,
+ struct ath11k_vif *arvif,
+ struct sk_buff *bcn)
+{
+ struct ieee80211_mgmt *mgmt;
+ const u8 *ies, *profile, *next_profile;
+ int ies_len;
+
+ ies = bcn->data + ieee80211_get_hdrlen_from_skb(bcn);
+ mgmt = (struct ieee80211_mgmt *)bcn->data;
+ ies += sizeof(mgmt->u.beacon);
+ ies_len = skb_tail_pointer(bcn) - ies;
+
+ ies = cfg80211_find_ie(WLAN_EID_MULTIPLE_BSSID, ies, ies_len);
+ arvif->rsnie_present = tx_arvif->rsnie_present;
+
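+	/* Walk each Multiple BSSID element and the nontransmitted profiles
+	 * inside it until the profile whose BSSID index matches this vif is
+	 * found.
+	 */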
+ while (ies) {
+ u8 mbssid_len;
+
+ ies_len -= (2 + ies[1]);
+ mbssid_len = ies[1] - 1;
+ profile = &ies[3];
+
+ while (mbssid_len) {
+ u8 profile_len;
+
+ profile_len = profile[1];
+ next_profile = profile + (2 + profile_len);
+ mbssid_len -= (2 + profile_len);
+
+ profile += 2;
+ profile_len -= (2 + profile[1]);
+ profile += (2 + profile[1]); /* nontx capabilities */
+ profile_len -= (2 + profile[1]);
+ profile += (2 + profile[1]); /* SSID */
+ if (profile[2] == arvif->vif->bss_conf.bssid_index) {
+ profile_len -= 5;
+ profile = profile + 5;
+ ath11k_mac_setup_nontx_vif_rsnie(arvif,
+ tx_arvif->rsnie_present,
+ profile,
+ profile_len);
+ return true;
+ }
+ profile = next_profile;
+ }
+ ies = cfg80211_find_ie(WLAN_EID_MULTIPLE_BSSID, profile,
+ ies_len);
+ }
+
+ return false;
+}
+
+static void ath11k_mac_set_vif_params(struct ath11k_vif *arvif,
+ struct sk_buff *bcn)
+{
+ struct ieee80211_mgmt *mgmt;
+ u8 *ies;
+
+ ies = bcn->data + ieee80211_get_hdrlen_from_skb(bcn);
+ mgmt = (struct ieee80211_mgmt *)bcn->data;
+ ies += sizeof(mgmt->u.beacon);
+
+ if (cfg80211_find_ie(WLAN_EID_RSN, ies, (skb_tail_pointer(bcn) - ies)))
+ arvif->rsnie_present = true;
+ else
+ arvif->rsnie_present = false;
+
+ if (cfg80211_find_vendor_ie(WLAN_OUI_MICROSOFT,
+ WLAN_OUI_TYPE_MICROSOFT_WPA,
+ ies, (skb_tail_pointer(bcn) - ies)))
+ arvif->wpaie_present = true;
+ else
+ arvif->wpaie_present = false;
+}
+
+static int ath11k_mac_setup_bcn_tmpl_ema(struct ath11k_vif *arvif)
+{
+ struct ath11k_vif *tx_arvif;
+ struct ieee80211_ema_beacons *beacons;
+ int ret = 0;
+ bool nontx_vif_params_set = false;
+ u32 params = 0;
+ u8 i = 0;
+
+ tx_arvif = ath11k_vif_to_arvif(arvif->vif->mbssid_tx_vif);
+
+ beacons = ieee80211_beacon_get_template_ema_list(tx_arvif->ar->hw,
+ tx_arvif->vif, 0);
+ if (!beacons || !beacons->cnt) {
+ ath11k_warn(arvif->ar->ab,
+ "failed to get ema beacon templates from mac80211\n");
+ return -EPERM;
+ }
+
+ if (tx_arvif == arvif)
+ ath11k_mac_set_vif_params(tx_arvif, beacons->bcn[0].skb);
+ else
+ arvif->wpaie_present = tx_arvif->wpaie_present;
+
+ for (i = 0; i < beacons->cnt; i++) {
+ if (tx_arvif != arvif && !nontx_vif_params_set)
+ nontx_vif_params_set =
+ ath11k_mac_set_nontx_vif_params(tx_arvif, arvif,
+ beacons->bcn[i].skb);
+
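+		/* Pack the EMA template descriptor: total template count in
+		 * the low bits, then the template index and first/last
+		 * template flags at the WMI-defined shifts.
+		 */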
+ params = beacons->cnt;
+ params |= (i << WMI_EMA_TMPL_IDX_SHIFT);
+ params |= ((!i ? 1 : 0) << WMI_EMA_FIRST_TMPL_SHIFT);
+ params |= ((i + 1 == beacons->cnt ? 1 : 0) << WMI_EMA_LAST_TMPL_SHIFT);
+
+ ret = ath11k_wmi_bcn_tmpl(tx_arvif->ar, tx_arvif->vdev_id,
+ &beacons->bcn[i].offs,
+ beacons->bcn[i].skb, params);
+ if (ret) {
+ ath11k_warn(tx_arvif->ar->ab,
+ "failed to set ema beacon template id %i error %d\n",
+ i, ret);
+ break;
+ }
+ }
+
+ ieee80211_beacon_free_ema_list(beacons);
+
+ if (tx_arvif != arvif && !nontx_vif_params_set)
+ return -EINVAL; /* Profile not found in the beacons */
+
+ return ret;
+}
+
+static int ath11k_mac_setup_bcn_tmpl_mbssid(struct ath11k_vif *arvif)
+{
+ struct ath11k *ar = arvif->ar;
+ struct ath11k_base *ab = ar->ab;
+ struct ath11k_vif *tx_arvif = arvif;
+ struct ieee80211_hw *hw = ar->hw;
+ struct ieee80211_vif *vif = arvif->vif;
+ struct ieee80211_mutable_offsets offs = {};
+ struct sk_buff *bcn;
+ int ret;
+
+ if (vif->mbssid_tx_vif) {
+ tx_arvif = ath11k_vif_to_arvif(vif->mbssid_tx_vif);
+ if (tx_arvif != arvif) {
+ ar = tx_arvif->ar;
+ ab = ar->ab;
+ hw = ar->hw;
+ vif = tx_arvif->vif;
+ }
+ }
+
+ bcn = ieee80211_beacon_get_template(hw, vif, &offs, 0);
+ if (!bcn) {
+ ath11k_warn(ab, "failed to get beacon template from mac80211\n");
+ return -EPERM;
+ }
+
+ if (tx_arvif == arvif)
+ ath11k_mac_set_vif_params(tx_arvif, bcn);
+ else if (!ath11k_mac_set_nontx_vif_params(tx_arvif, arvif, bcn))
+ return -EINVAL;
+
+ ret = ath11k_wmi_bcn_tmpl(ar, arvif->vdev_id, &offs, bcn, 0);
+ kfree_skb(bcn);
+
+ if (ret)
+ ath11k_warn(ab, "failed to submit beacon template command: %d\n",
+ ret);
+
+ return ret;
+}
+
+static int ath11k_mac_setup_bcn_tmpl(struct ath11k_vif *arvif)
+{
+ struct ieee80211_vif *vif = arvif->vif;
+
+ if (arvif->vdev_type != WMI_VDEV_TYPE_AP)
+ return 0;
+
+	/* The target does not expect beacon templates for non-transmitting
+	 * interfaces that are already up, and crashes if one is sent.
+	 */
+ if (vif->mbssid_tx_vif &&
+ arvif != ath11k_vif_to_arvif(vif->mbssid_tx_vif) && arvif->is_up)
+ return 0;
+
+ if (vif->bss_conf.ema_ap && vif->mbssid_tx_vif)
+ return ath11k_mac_setup_bcn_tmpl_ema(arvif);
+
+ return ath11k_mac_setup_bcn_tmpl_mbssid(arvif);
+}
+
+void ath11k_mac_bcn_tx_event(struct ath11k_vif *arvif)
+{
+ struct ieee80211_vif *vif = arvif->vif;
+
+ if (!vif->bss_conf.color_change_active && !arvif->bcca_zero_sent)
+ return;
+
+ if (vif->bss_conf.color_change_active &&
+ ieee80211_beacon_cntdwn_is_complete(vif)) {
+ arvif->bcca_zero_sent = true;
+ ieee80211_color_change_finish(vif);
+ return;
+ }
+
+ arvif->bcca_zero_sent = false;
+
+ if (vif->bss_conf.color_change_active)
+ ieee80211_beacon_update_cntdwn(vif);
+ ath11k_mac_setup_bcn_tmpl(arvif);
+}
+
+static void ath11k_control_beaconing(struct ath11k_vif *arvif,
+ struct ieee80211_bss_conf *info)
+{
+ struct ath11k *ar = arvif->ar;
+ struct ath11k_vif *tx_arvif = NULL;
+ int ret = 0;
+
+ lockdep_assert_held(&arvif->ar->conf_mutex);
+
+ if (!info->enable_beacon) {
+ ret = ath11k_wmi_vdev_down(ar, arvif->vdev_id);
+ if (ret)
+ ath11k_warn(ar->ab, "failed to down vdev_id %i: %d\n",
+ arvif->vdev_id, ret);
+
+ arvif->is_up = false;
+ return;
+ }
+
+ /* Install the beacon template to the FW */
+ ret = ath11k_mac_setup_bcn_tmpl(arvif);
+ if (ret) {
+ ath11k_warn(ar->ab, "failed to update bcn tmpl during vdev up: %d\n",
+ ret);
+ return;
+ }
+
+ arvif->tx_seq_no = 0x1000;
+
+ arvif->aid = 0;
+
+ ether_addr_copy(arvif->bssid, info->bssid);
+
+ if (arvif->vif->mbssid_tx_vif)
+ tx_arvif = ath11k_vif_to_arvif(arvif->vif->mbssid_tx_vif);
+
+ ret = ath11k_wmi_vdev_up(arvif->ar, arvif->vdev_id, arvif->aid,
+ arvif->bssid,
+ tx_arvif ? tx_arvif->bssid : NULL,
+ info->bssid_index,
+ 1 << info->bssid_indicator);
+ if (ret) {
+ ath11k_warn(ar->ab, "failed to bring up vdev %d: %i\n",
+ arvif->vdev_id, ret);
+ return;
+ }
+
+ arvif->is_up = true;
+
+ ath11k_dbg(ar->ab, ATH11K_DBG_MAC, "vdev %d up\n", arvif->vdev_id);
+}
+
+static void ath11k_mac_handle_beacon_iter(void *data, u8 *mac,
+ struct ieee80211_vif *vif)
+{
+ struct sk_buff *skb = data;
+ struct ieee80211_mgmt *mgmt = (void *)skb->data;
+ struct ath11k_vif *arvif = ath11k_vif_to_arvif(vif);
+
+ if (vif->type != NL80211_IFTYPE_STATION)
+ return;
+
+ if (!ether_addr_equal(mgmt->bssid, vif->bss_conf.bssid))
+ return;
+
+ cancel_delayed_work(&arvif->connection_loss_work);
+}
+
+void ath11k_mac_handle_beacon(struct ath11k *ar, struct sk_buff *skb)
+{
+ ieee80211_iterate_active_interfaces_atomic(ar->hw,
+ IEEE80211_IFACE_ITER_NORMAL,
+ ath11k_mac_handle_beacon_iter,
+ skb);
+}
+
+static void ath11k_mac_handle_beacon_miss_iter(void *data, u8 *mac,
+ struct ieee80211_vif *vif)
+{
+ u32 *vdev_id = data;
+ struct ath11k_vif *arvif = ath11k_vif_to_arvif(vif);
+ struct ath11k *ar = arvif->ar;
+ struct ieee80211_hw *hw = ar->hw;
+
+ if (arvif->vdev_id != *vdev_id)
+ return;
+
+ if (!arvif->is_up)
+ return;
+
+ ieee80211_beacon_loss(vif);
+
+ /* Firmware doesn't report beacon loss events repeatedly. If AP probe
+ * (done by mac80211) succeeds but beacons do not resume then it
+ * doesn't make sense to continue operation. Queue connection loss work
+ * which can be cancelled when beacon is received.
+ */
+ ieee80211_queue_delayed_work(hw, &arvif->connection_loss_work,
+ ATH11K_CONNECTION_LOSS_HZ);
+}
+
+void ath11k_mac_handle_beacon_miss(struct ath11k *ar, u32 vdev_id)
+{
+ ieee80211_iterate_active_interfaces_atomic(ar->hw,
+ IEEE80211_IFACE_ITER_NORMAL,
+ ath11k_mac_handle_beacon_miss_iter,
+ &vdev_id);
+}
+
+static void ath11k_mac_vif_sta_connection_loss_work(struct work_struct *work)
+{
+ struct ath11k_vif *arvif = container_of(work, struct ath11k_vif,
+ connection_loss_work.work);
+ struct ieee80211_vif *vif = arvif->vif;
+
+ if (!arvif->is_up)
+ return;
+
+ ieee80211_connection_loss(vif);
+}
+
+static void ath11k_peer_assoc_h_basic(struct ath11k *ar,
+ struct ieee80211_vif *vif,
+ struct ieee80211_sta *sta,
+ struct peer_assoc_params *arg)
+{
+ struct ath11k_vif *arvif = ath11k_vif_to_arvif(vif);
+ u32 aid;
+
+ lockdep_assert_held(&ar->conf_mutex);
+
+ if (vif->type == NL80211_IFTYPE_STATION)
+ aid = vif->cfg.aid;
+ else
+ aid = sta->aid;
+
+ ether_addr_copy(arg->peer_mac, sta->addr);
+ arg->vdev_id = arvif->vdev_id;
+ arg->peer_associd = aid;
+ arg->auth_flag = true;
+ /* TODO: STA WAR in ath10k for listen interval required? */
+ arg->peer_listen_intval = ar->hw->conf.listen_interval;
+ arg->peer_nss = 1;
+ arg->peer_caps = vif->bss_conf.assoc_capability;
+}
+
+static void ath11k_peer_assoc_h_crypto(struct ath11k *ar,
+ struct ieee80211_vif *vif,
+ struct ieee80211_sta *sta,
+ struct peer_assoc_params *arg)
+{
+ struct ieee80211_bss_conf *info = &vif->bss_conf;
+ struct cfg80211_chan_def def;
+ struct cfg80211_bss *bss;
+ struct ath11k_vif *arvif = ath11k_vif_to_arvif(vif);
+ const u8 *rsnie = NULL;
+ const u8 *wpaie = NULL;
+
+ lockdep_assert_held(&ar->conf_mutex);
+
+ if (WARN_ON(ath11k_mac_vif_chan(vif, &def)))
+ return;
+
+ bss = cfg80211_get_bss(ar->hw->wiphy, def.chan, info->bssid, NULL, 0,
+ IEEE80211_BSS_TYPE_ANY, IEEE80211_PRIVACY_ANY);
+
+ if (arvif->rsnie_present || arvif->wpaie_present) {
+ arg->need_ptk_4_way = true;
+ if (arvif->wpaie_present)
+ arg->need_gtk_2_way = true;
+ } else if (bss) {
+ const struct cfg80211_bss_ies *ies;
+
+ rcu_read_lock();
+ rsnie = ieee80211_bss_get_ie(bss, WLAN_EID_RSN);
+
+ ies = rcu_dereference(bss->ies);
+
+ wpaie = cfg80211_find_vendor_ie(WLAN_OUI_MICROSOFT,
+ WLAN_OUI_TYPE_MICROSOFT_WPA,
+ ies->data,
+ ies->len);
+ rcu_read_unlock();
+ cfg80211_put_bss(ar->hw->wiphy, bss);
+ }
+
+	/* FIXME: is basing this on the RSN/WPA IEs the right approach? */
+ if (rsnie || wpaie) {
+ ath11k_dbg(ar->ab, ATH11K_DBG_WMI,
+ "%s: rsn ie found\n", __func__);
+ arg->need_ptk_4_way = true;
+ }
+
+ if (wpaie) {
+ ath11k_dbg(ar->ab, ATH11K_DBG_WMI,
+ "%s: wpa ie found\n", __func__);
+ arg->need_gtk_2_way = true;
+ }
+
+ if (sta->mfp) {
+ /* TODO: Need to check if FW supports PMF? */
+ arg->is_pmf_enabled = true;
+ }
+
+ /* TODO: safe_mode_enabled (bypass 4-way handshake) flag req? */
+}
+
+static void ath11k_peer_assoc_h_rates(struct ath11k *ar,
+ struct ieee80211_vif *vif,
+ struct ieee80211_sta *sta,
+ struct peer_assoc_params *arg)
+{
+ struct ath11k_vif *arvif = ath11k_vif_to_arvif(vif);
+ struct wmi_rate_set_arg *rateset = &arg->peer_legacy_rates;
+ struct cfg80211_chan_def def;
+ const struct ieee80211_supported_band *sband;
+ const struct ieee80211_rate *rates;
+ enum nl80211_band band;
+ u32 ratemask;
+ u8 rate;
+ int i;
+
+ lockdep_assert_held(&ar->conf_mutex);
+
+ if (WARN_ON(ath11k_mac_vif_chan(vif, &def)))
+ return;
+
+ band = def.chan->band;
+ sband = ar->hw->wiphy->bands[band];
+ ratemask = sta->deflink.supp_rates[band];
+ ratemask &= arvif->bitrate_mask.control[band].legacy;
+ rates = sband->bitrates;
+
+ rateset->num_rates = 0;
+
+ for (i = 0; i < 32; i++, ratemask >>= 1, rates++) {
+ if (!(ratemask & 1))
+ continue;
+
+ rate = ath11k_mac_bitrate_to_rate(rates->bitrate);
+ rateset->rates[rateset->num_rates] = rate;
+ rateset->num_rates++;
+ }
+}
+
+static bool
+ath11k_peer_assoc_h_ht_masked(const u8 *ht_mcs_mask)
+{
+ int nss;
+
+ for (nss = 0; nss < IEEE80211_HT_MCS_MASK_LEN; nss++)
+ if (ht_mcs_mask[nss])
+ return false;
+
+ return true;
+}
+
+static bool
+ath11k_peer_assoc_h_vht_masked(const u16 *vht_mcs_mask)
+{
+ int nss;
+
+ for (nss = 0; nss < NL80211_VHT_NSS_MAX; nss++)
+ if (vht_mcs_mask[nss])
+ return false;
+
+ return true;
+}
+
+static void ath11k_peer_assoc_h_ht(struct ath11k *ar,
+ struct ieee80211_vif *vif,
+ struct ieee80211_sta *sta,
+ struct peer_assoc_params *arg)
+{
+ const struct ieee80211_sta_ht_cap *ht_cap = &sta->deflink.ht_cap;
+ struct ath11k_vif *arvif = ath11k_vif_to_arvif(vif);
+ struct cfg80211_chan_def def;
+ enum nl80211_band band;
+ const u8 *ht_mcs_mask;
+ int i, n;
+ u8 max_nss;
+ u32 stbc;
+
+ lockdep_assert_held(&ar->conf_mutex);
+
+ if (WARN_ON(ath11k_mac_vif_chan(vif, &def)))
+ return;
+
+ if (!ht_cap->ht_supported)
+ return;
+
+ band = def.chan->band;
+ ht_mcs_mask = arvif->bitrate_mask.control[band].ht_mcs;
+
+ if (ath11k_peer_assoc_h_ht_masked(ht_mcs_mask))
+ return;
+
+ arg->ht_flag = true;
+
+ arg->peer_max_mpdu = (1 << (IEEE80211_HT_MAX_AMPDU_FACTOR +
+ ht_cap->ampdu_factor)) - 1;
+
+ arg->peer_mpdu_density =
+ ath11k_parse_mpdudensity(ht_cap->ampdu_density);
+
+ arg->peer_ht_caps = ht_cap->cap;
+ arg->peer_rate_caps |= WMI_HOST_RC_HT_FLAG;
+
+ if (ht_cap->cap & IEEE80211_HT_CAP_LDPC_CODING)
+ arg->ldpc_flag = true;
+
+ if (sta->deflink.bandwidth >= IEEE80211_STA_RX_BW_40) {
+ arg->bw_40 = true;
+ arg->peer_rate_caps |= WMI_HOST_RC_CW40_FLAG;
+ }
+
+	/* As firmware handles these two flags (IEEE80211_HT_CAP_SGI_20
+	 * and IEEE80211_HT_CAP_SGI_40) for enabling SGI, clear both
+	 * flags if the guard interval is the default GI.
+	 */
+ if (arvif->bitrate_mask.control[band].gi == NL80211_TXRATE_DEFAULT_GI)
+ arg->peer_ht_caps &= ~(IEEE80211_HT_CAP_SGI_20 |
+ IEEE80211_HT_CAP_SGI_40);
+
+ if (arvif->bitrate_mask.control[band].gi != NL80211_TXRATE_FORCE_LGI) {
+ if (ht_cap->cap & (IEEE80211_HT_CAP_SGI_20 |
+ IEEE80211_HT_CAP_SGI_40))
+ arg->peer_rate_caps |= WMI_HOST_RC_SGI_FLAG;
+ }
+
+ if (ht_cap->cap & IEEE80211_HT_CAP_TX_STBC) {
+ arg->peer_rate_caps |= WMI_HOST_RC_TX_STBC_FLAG;
+ arg->stbc_flag = true;
+ }
+
+ if (ht_cap->cap & IEEE80211_HT_CAP_RX_STBC) {
+ stbc = ht_cap->cap & IEEE80211_HT_CAP_RX_STBC;
+ stbc = stbc >> IEEE80211_HT_CAP_RX_STBC_SHIFT;
+ stbc = stbc << WMI_HOST_RC_RX_STBC_FLAG_S;
+ arg->peer_rate_caps |= stbc;
+ arg->stbc_flag = true;
+ }
+
+ if (ht_cap->mcs.rx_mask[1] && ht_cap->mcs.rx_mask[2])
+ arg->peer_rate_caps |= WMI_HOST_RC_TS_FLAG;
+ else if (ht_cap->mcs.rx_mask[1])
+ arg->peer_rate_caps |= WMI_HOST_RC_DS_FLAG;
+
+ for (i = 0, n = 0, max_nss = 0; i < IEEE80211_HT_MCS_MASK_LEN * 8; i++)
+ if ((ht_cap->mcs.rx_mask[i / 8] & BIT(i % 8)) &&
+ (ht_mcs_mask[i / 8] & BIT(i % 8))) {
+ max_nss = (i / 8) + 1;
+ arg->peer_ht_rates.rates[n++] = i;
+ }
+
+ /* This is a workaround for HT-enabled STAs which break the spec
+ * and have no HT capabilities RX mask (no HT RX MCS map).
+ *
+ * As per spec, in section 20.3.5 Modulation and coding scheme (MCS),
+ * MCS 0 through 7 are mandatory in 20MHz with 800 ns GI at all STAs.
+ *
+ * Firmware asserts if such situation occurs.
+ */
+ if (n == 0) {
+ arg->peer_ht_rates.num_rates = 8;
+ for (i = 0; i < arg->peer_ht_rates.num_rates; i++)
+ arg->peer_ht_rates.rates[i] = i;
+ } else {
+ arg->peer_ht_rates.num_rates = n;
+ arg->peer_nss = min(sta->deflink.rx_nss, max_nss);
+ }
+
+ ath11k_dbg(ar->ab, ATH11K_DBG_MAC, "ht peer %pM mcs cnt %d nss %d\n",
+ arg->peer_mac,
+ arg->peer_ht_rates.num_rates,
+ arg->peer_nss);
+}
+
+static int ath11k_mac_get_max_vht_mcs_map(u16 mcs_map, int nss)
+{
+ switch ((mcs_map >> (2 * nss)) & 0x3) {
+ case IEEE80211_VHT_MCS_SUPPORT_0_7: return BIT(8) - 1;
+ case IEEE80211_VHT_MCS_SUPPORT_0_8: return BIT(9) - 1;
+ case IEEE80211_VHT_MCS_SUPPORT_0_9: return BIT(10) - 1;
+ }
+ return 0;
+}
+
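+/* Intersect the peer's VHT TX MCS map with the user-configured per-NSS
+ * MCS limits, collapsing each 2-bit field to the highest allowed range.
+ */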
+static u16
+ath11k_peer_assoc_h_vht_limit(u16 tx_mcs_set,
+ const u16 vht_mcs_limit[NL80211_VHT_NSS_MAX])
+{
+ int idx_limit;
+ int nss;
+ u16 mcs_map;
+ u16 mcs;
+
+ for (nss = 0; nss < NL80211_VHT_NSS_MAX; nss++) {
+ mcs_map = ath11k_mac_get_max_vht_mcs_map(tx_mcs_set, nss) &
+ vht_mcs_limit[nss];
+
+ if (mcs_map)
+ idx_limit = fls(mcs_map) - 1;
+ else
+ idx_limit = -1;
+
+ switch (idx_limit) {
+ case 0:
+ case 1:
+ case 2:
+ case 3:
+ case 4:
+ case 5:
+ case 6:
+ case 7:
+ mcs = IEEE80211_VHT_MCS_SUPPORT_0_7;
+ break;
+ case 8:
+ mcs = IEEE80211_VHT_MCS_SUPPORT_0_8;
+ break;
+ case 9:
+ mcs = IEEE80211_VHT_MCS_SUPPORT_0_9;
+ break;
+ default:
+ WARN_ON(1);
+ fallthrough;
+ case -1:
+ mcs = IEEE80211_VHT_MCS_NOT_SUPPORTED;
+ break;
+ }
+
+ tx_mcs_set &= ~(0x3 << (nss * 2));
+ tx_mcs_set |= mcs << (nss * 2);
+ }
+
+ return tx_mcs_set;
+}
+
+static u8 ath11k_get_nss_160mhz(struct ath11k *ar,
+ u8 max_nss)
+{
+ u8 nss_ratio_info = ar->pdev->cap.nss_ratio_info;
+ u8 max_sup_nss = 0;
+
+ switch (nss_ratio_info) {
+ case WMI_NSS_RATIO_1BY2_NSS:
+ max_sup_nss = max_nss >> 1;
+ break;
+ case WMI_NSS_RATIO_3BY4_NSS:
+ ath11k_warn(ar->ab, "WMI_NSS_RATIO_3BY4_NSS not supported\n");
+ break;
+ case WMI_NSS_RATIO_1_NSS:
+ max_sup_nss = max_nss;
+ break;
+ case WMI_NSS_RATIO_2_NSS:
+ ath11k_warn(ar->ab, "WMI_NSS_RATIO_2_NSS not supported\n");
+ break;
+ default:
+ ath11k_warn(ar->ab, "invalid nss ratio received from firmware: %d\n",
+ nss_ratio_info);
+ break;
+ }
+
+ return max_sup_nss;
+}
+
+static void ath11k_peer_assoc_h_vht(struct ath11k *ar,
+ struct ieee80211_vif *vif,
+ struct ieee80211_sta *sta,
+ struct peer_assoc_params *arg)
+{
+ const struct ieee80211_sta_vht_cap *vht_cap = &sta->deflink.vht_cap;
+ struct ath11k_vif *arvif = ath11k_vif_to_arvif(vif);
+ struct cfg80211_chan_def def;
+ enum nl80211_band band;
+ u16 *vht_mcs_mask;
+ u8 ampdu_factor;
+ u8 max_nss, vht_mcs;
+ int i, vht_nss, nss_idx;
+ bool user_rate_valid = true;
+ u32 rx_nss, tx_nss, nss_160;
+
+ if (WARN_ON(ath11k_mac_vif_chan(vif, &def)))
+ return;
+
+ if (!vht_cap->vht_supported)
+ return;
+
+ band = def.chan->band;
+ vht_mcs_mask = arvif->bitrate_mask.control[band].vht_mcs;
+
+ if (ath11k_peer_assoc_h_vht_masked(vht_mcs_mask))
+ return;
+
+ arg->vht_flag = true;
+
+ /* TODO: similar flags required? */
+ arg->vht_capable = true;
+
+ if (def.chan->band == NL80211_BAND_2GHZ)
+ arg->vht_ng_flag = true;
+
+ arg->peer_vht_caps = vht_cap->cap;
+
+ ampdu_factor = (vht_cap->cap &
+ IEEE80211_VHT_CAP_MAX_A_MPDU_LENGTH_EXPONENT_MASK) >>
+ IEEE80211_VHT_CAP_MAX_A_MPDU_LENGTH_EXPONENT_SHIFT;
+
+ /* Workaround: Some Netgear/Linksys 11ac APs set Rx A-MPDU factor to
+ * zero in VHT IE. Using it would result in degraded throughput.
+ * arg->peer_max_mpdu at this point contains HT max_mpdu so keep
+ * it if VHT max_mpdu is smaller.
+ */
+ arg->peer_max_mpdu = max(arg->peer_max_mpdu,
+ (1U << (IEEE80211_HT_MAX_AMPDU_FACTOR +
+ ampdu_factor)) - 1);
+
+ if (sta->deflink.bandwidth == IEEE80211_STA_RX_BW_80)
+ arg->bw_80 = true;
+
+ if (sta->deflink.bandwidth == IEEE80211_STA_RX_BW_160)
+ arg->bw_160 = true;
+
+ vht_nss = ath11k_mac_max_vht_nss(vht_mcs_mask);
+
+ if (vht_nss > sta->deflink.rx_nss) {
+ user_rate_valid = false;
+ for (nss_idx = sta->deflink.rx_nss - 1; nss_idx >= 0; nss_idx--) {
+ if (vht_mcs_mask[nss_idx]) {
+ user_rate_valid = true;
+ break;
+ }
+ }
+ }
+
+ if (!user_rate_valid) {
+ ath11k_dbg(ar->ab, ATH11K_DBG_MAC, "setting vht range mcs value to peer supported nss %d for peer %pM\n",
+ sta->deflink.rx_nss, sta->addr);
+ vht_mcs_mask[sta->deflink.rx_nss - 1] = vht_mcs_mask[vht_nss - 1];
+ }
+
+ /* Calculate peer NSS capability from VHT capabilities if STA
+ * supports VHT.
+ */
+ for (i = 0, max_nss = 0; i < NL80211_VHT_NSS_MAX; i++) {
+ vht_mcs = __le16_to_cpu(vht_cap->vht_mcs.rx_mcs_map) >>
+ (2 * i) & 3;
+
+ if (vht_mcs != IEEE80211_VHT_MCS_NOT_SUPPORTED &&
+ vht_mcs_mask[i])
+ max_nss = i + 1;
+ }
+ arg->peer_nss = min(sta->deflink.rx_nss, max_nss);
+ arg->rx_max_rate = __le16_to_cpu(vht_cap->vht_mcs.rx_highest);
+ arg->rx_mcs_set = __le16_to_cpu(vht_cap->vht_mcs.rx_mcs_map);
+ arg->tx_max_rate = __le16_to_cpu(vht_cap->vht_mcs.tx_highest);
+ arg->tx_mcs_set = ath11k_peer_assoc_h_vht_limit(
+ __le16_to_cpu(vht_cap->vht_mcs.tx_mcs_map), vht_mcs_mask);
+
+	/* On the IPQ8074 platform, VHT MCS rates 10 and 11 are enabled by
+	 * default, but they are not part of the 11ac standard, so explicitly
+	 * disable them in 11ac mode.
+	 */
+ arg->tx_mcs_set &= ~IEEE80211_VHT_MCS_SUPPORT_0_11_MASK;
+ arg->tx_mcs_set |= IEEE80211_DISABLE_VHT_MCS_SUPPORT_0_11;
+
+ if ((arg->tx_mcs_set & IEEE80211_VHT_MCS_NOT_SUPPORTED) ==
+ IEEE80211_VHT_MCS_NOT_SUPPORTED)
+ arg->peer_vht_caps &= ~IEEE80211_VHT_CAP_MU_BEAMFORMEE_CAPABLE;
+
+ /* TODO: Check */
+ arg->tx_max_mcs_nss = 0xFF;
+
+ if (arg->peer_phymode == MODE_11AC_VHT160 ||
+ arg->peer_phymode == MODE_11AC_VHT80_80) {
+ tx_nss = ath11k_get_nss_160mhz(ar, max_nss);
+ rx_nss = min(arg->peer_nss, tx_nss);
+ arg->peer_bw_rxnss_override = ATH11K_BW_NSS_MAP_ENABLE;
+
+ if (!rx_nss) {
+ ath11k_warn(ar->ab, "invalid max_nss\n");
+ return;
+ }
+
+ if (arg->peer_phymode == MODE_11AC_VHT160)
+ nss_160 = FIELD_PREP(ATH11K_PEER_RX_NSS_160MHZ, rx_nss - 1);
+ else
+ nss_160 = FIELD_PREP(ATH11K_PEER_RX_NSS_80_80MHZ, rx_nss - 1);
+
+ arg->peer_bw_rxnss_override |= nss_160;
+ }
+
+ ath11k_dbg(ar->ab, ATH11K_DBG_MAC,
+ "vht peer %pM max_mpdu %d flags 0x%x nss_override 0x%x\n",
+ sta->addr, arg->peer_max_mpdu, arg->peer_flags,
+ arg->peer_bw_rxnss_override);
+}
+
+static int ath11k_mac_get_max_he_mcs_map(u16 mcs_map, int nss)
+{
+ switch ((mcs_map >> (2 * nss)) & 0x3) {
+ case IEEE80211_HE_MCS_SUPPORT_0_7: return BIT(8) - 1;
+ case IEEE80211_HE_MCS_SUPPORT_0_9: return BIT(10) - 1;
+ case IEEE80211_HE_MCS_SUPPORT_0_11: return BIT(12) - 1;
+ }
+ return 0;
+}
+
+static u16 ath11k_peer_assoc_h_he_limit(u16 tx_mcs_set,
+ const u16 he_mcs_limit[NL80211_HE_NSS_MAX])
+{
+ int idx_limit;
+ int nss;
+ u16 mcs_map;
+ u16 mcs;
+
+ for (nss = 0; nss < NL80211_HE_NSS_MAX; nss++) {
+ mcs_map = ath11k_mac_get_max_he_mcs_map(tx_mcs_set, nss) &
+ he_mcs_limit[nss];
+
+ if (mcs_map)
+ idx_limit = fls(mcs_map) - 1;
+ else
+ idx_limit = -1;
+
+ switch (idx_limit) {
+ case 0 ... 7:
+ mcs = IEEE80211_HE_MCS_SUPPORT_0_7;
+ break;
+ case 8:
+ case 9:
+ mcs = IEEE80211_HE_MCS_SUPPORT_0_9;
+ break;
+ case 10:
+ case 11:
+ mcs = IEEE80211_HE_MCS_SUPPORT_0_11;
+ break;
+ default:
+ WARN_ON(1);
+ fallthrough;
+ case -1:
+ mcs = IEEE80211_HE_MCS_NOT_SUPPORTED;
+ break;
+ }
+
+ tx_mcs_set &= ~(0x3 << (nss * 2));
+ tx_mcs_set |= mcs << (nss * 2);
+ }
+
+ return tx_mcs_set;
+}
+
+static bool
+ath11k_peer_assoc_h_he_masked(const u16 *he_mcs_mask)
+{
+ int nss;
+
+ for (nss = 0; nss < NL80211_HE_NSS_MAX; nss++)
+ if (he_mcs_mask[nss])
+ return false;
+
+ return true;
+}
+
+static void ath11k_peer_assoc_h_he(struct ath11k *ar,
+ struct ieee80211_vif *vif,
+ struct ieee80211_sta *sta,
+ struct peer_assoc_params *arg)
+{
+ struct ath11k_vif *arvif = ath11k_vif_to_arvif(vif);
+ struct cfg80211_chan_def def;
+ const struct ieee80211_sta_he_cap *he_cap = &sta->deflink.he_cap;
+ enum nl80211_band band;
+ u16 he_mcs_mask[NL80211_HE_NSS_MAX];
+ u8 max_nss, he_mcs;
+ u16 he_tx_mcs = 0, v = 0;
+ int i, he_nss, nss_idx;
+ bool user_rate_valid = true;
+ u32 rx_nss, tx_nss, nss_160;
+ u8 ampdu_factor, rx_mcs_80, rx_mcs_160;
+ u16 mcs_160_map, mcs_80_map;
+ bool support_160;
+
+ if (WARN_ON(ath11k_mac_vif_chan(vif, &def)))
+ return;
+
+ if (!he_cap->has_he)
+ return;
+
+ band = def.chan->band;
+ memcpy(he_mcs_mask, arvif->bitrate_mask.control[band].he_mcs,
+ sizeof(he_mcs_mask));
+
+ if (ath11k_peer_assoc_h_he_masked(he_mcs_mask))
+ return;
+
+ arg->he_flag = true;
+ support_160 = !!(he_cap->he_cap_elem.phy_cap_info[0] &
+ IEEE80211_HE_PHY_CAP0_CHANNEL_WIDTH_SET_160MHZ_IN_5G);
+
+	/* The supported HE-MCS and NSS set of the peer is the intersection
+	 * of the peer's he_cap with our own he_cap.
+	 */
+ mcs_160_map = le16_to_cpu(he_cap->he_mcs_nss_supp.rx_mcs_160);
+ mcs_80_map = le16_to_cpu(he_cap->he_mcs_nss_supp.rx_mcs_80);
+
+ if (support_160) {
+ for (i = 7; i >= 0; i--) {
+ u8 mcs_160 = (mcs_160_map >> (2 * i)) & 3;
+
+ if (mcs_160 != IEEE80211_VHT_MCS_NOT_SUPPORTED) {
+ rx_mcs_160 = i + 1;
+ break;
+ }
+ }
+ }
+
+ for (i = 7; i >= 0; i--) {
+ u8 mcs_80 = (mcs_80_map >> (2 * i)) & 3;
+
+ if (mcs_80 != IEEE80211_VHT_MCS_NOT_SUPPORTED) {
+ rx_mcs_80 = i + 1;
+ break;
+ }
+ }
+
+ if (support_160)
+ max_nss = min(rx_mcs_80, rx_mcs_160);
+ else
+ max_nss = rx_mcs_80;
+
+ arg->peer_nss = min(sta->deflink.rx_nss, max_nss);
+
+ memcpy_and_pad(&arg->peer_he_cap_macinfo,
+ sizeof(arg->peer_he_cap_macinfo),
+ he_cap->he_cap_elem.mac_cap_info,
+ sizeof(he_cap->he_cap_elem.mac_cap_info),
+ 0);
+ memcpy_and_pad(&arg->peer_he_cap_phyinfo,
+ sizeof(arg->peer_he_cap_phyinfo),
+ he_cap->he_cap_elem.phy_cap_info,
+ sizeof(he_cap->he_cap_elem.phy_cap_info),
+ 0);
+ arg->peer_he_ops = vif->bss_conf.he_oper.params;
+
+	/* the topmost byte is used to indicate BSS color info */
+ arg->peer_he_ops &= 0xffffff;
+
+	/* As per section 26.6.1 of 11ax Draft 5.0, if the Max AMPDU Exponent
+	 * Extension in the HE cap is zero, use arg->peer_max_mpdu as calculated
+	 * while parsing VHT caps (if present) or HT caps (if VHT caps are not
+	 * present).
+	 *
+	 * For a non-zero Max AMPDU Exponent Extension in the HE MAC caps:
+	 * if a HE STA sends both VHT and HE cap IEs in the assoc request, use
+	 * MAX_AMPDU_LEN_FACTOR of 20 to calculate the max_ampdu length; if it
+	 * sends HE and HT caps but no VHT cap, use MAX_AMPDU_LEN_FACTOR of 16.
+	 */
+ ampdu_factor = u8_get_bits(he_cap->he_cap_elem.mac_cap_info[3],
+ IEEE80211_HE_MAC_CAP3_MAX_AMPDU_LEN_EXP_MASK);
+
+ if (ampdu_factor) {
+ if (sta->deflink.vht_cap.vht_supported)
+ arg->peer_max_mpdu = (1 << (IEEE80211_HE_VHT_MAX_AMPDU_FACTOR +
+ ampdu_factor)) - 1;
+ else if (sta->deflink.ht_cap.ht_supported)
+ arg->peer_max_mpdu = (1 << (IEEE80211_HE_HT_MAX_AMPDU_FACTOR +
+ ampdu_factor)) - 1;
+ }
+
+ if (he_cap->he_cap_elem.phy_cap_info[6] &
+ IEEE80211_HE_PHY_CAP6_PPE_THRESHOLD_PRESENT) {
+ int bit = 7;
+ int nss, ru;
+
+ arg->peer_ppet.numss_m1 = he_cap->ppe_thres[0] &
+ IEEE80211_PPE_THRES_NSS_MASK;
+ arg->peer_ppet.ru_bit_mask =
+ (he_cap->ppe_thres[0] &
+ IEEE80211_PPE_THRES_RU_INDEX_BITMASK_MASK) >>
+ IEEE80211_PPE_THRES_RU_INDEX_BITMASK_POS;
+
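+		/* Extract the packed 6-bit PPET16/PPET8 pair for each
+		 * (NSS, RU) combination from the PPE thresholds field.
+		 */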
+ for (nss = 0; nss <= arg->peer_ppet.numss_m1; nss++) {
+ for (ru = 0; ru < 4; ru++) {
+ u32 val = 0;
+ int i;
+
+ if ((arg->peer_ppet.ru_bit_mask & BIT(ru)) == 0)
+ continue;
+ for (i = 0; i < 6; i++) {
+ val >>= 1;
+ val |= ((he_cap->ppe_thres[bit / 8] >>
+ (bit % 8)) & 0x1) << 5;
+ bit++;
+ }
+ arg->peer_ppet.ppet16_ppet8_ru3_ru0[nss] |=
+ val << (ru * 6);
+ }
+ }
+ }
+
+ if (he_cap->he_cap_elem.mac_cap_info[0] & IEEE80211_HE_MAC_CAP0_TWT_RES)
+ arg->twt_responder = true;
+ if (he_cap->he_cap_elem.mac_cap_info[0] & IEEE80211_HE_MAC_CAP0_TWT_REQ)
+ arg->twt_requester = true;
+
+ he_nss = ath11k_mac_max_he_nss(he_mcs_mask);
+
+ if (he_nss > sta->deflink.rx_nss) {
+ user_rate_valid = false;
+ for (nss_idx = sta->deflink.rx_nss - 1; nss_idx >= 0; nss_idx--) {
+ if (he_mcs_mask[nss_idx]) {
+ user_rate_valid = true;
+ break;
+ }
+ }
+ }
+
+ if (!user_rate_valid) {
+ ath11k_dbg(ar->ab, ATH11K_DBG_MAC, "setting he range mcs value to peer supported nss %d for peer %pM\n",
+ sta->deflink.rx_nss, sta->addr);
+ he_mcs_mask[sta->deflink.rx_nss - 1] = he_mcs_mask[he_nss - 1];
+ }
+
+ switch (sta->deflink.bandwidth) {
+ case IEEE80211_STA_RX_BW_160:
+ if (he_cap->he_cap_elem.phy_cap_info[0] &
+ IEEE80211_HE_PHY_CAP0_CHANNEL_WIDTH_SET_80PLUS80_MHZ_IN_5G) {
+ v = le16_to_cpu(he_cap->he_mcs_nss_supp.rx_mcs_80p80);
+ v = ath11k_peer_assoc_h_he_limit(v, he_mcs_mask);
+ arg->peer_he_rx_mcs_set[WMI_HECAP_TXRX_MCS_NSS_IDX_80_80] = v;
+
+ v = le16_to_cpu(he_cap->he_mcs_nss_supp.tx_mcs_80p80);
+ arg->peer_he_tx_mcs_set[WMI_HECAP_TXRX_MCS_NSS_IDX_80_80] = v;
+
+ arg->peer_he_mcs_count++;
+ he_tx_mcs = v;
+ }
+ v = le16_to_cpu(he_cap->he_mcs_nss_supp.rx_mcs_160);
+ arg->peer_he_rx_mcs_set[WMI_HECAP_TXRX_MCS_NSS_IDX_160] = v;
+
+ v = le16_to_cpu(he_cap->he_mcs_nss_supp.tx_mcs_160);
+ v = ath11k_peer_assoc_h_he_limit(v, he_mcs_mask);
+ arg->peer_he_tx_mcs_set[WMI_HECAP_TXRX_MCS_NSS_IDX_160] = v;
+
+ arg->peer_he_mcs_count++;
+ if (!he_tx_mcs)
+ he_tx_mcs = v;
+ fallthrough;
+
+ default:
+ v = le16_to_cpu(he_cap->he_mcs_nss_supp.rx_mcs_80);
+ arg->peer_he_rx_mcs_set[WMI_HECAP_TXRX_MCS_NSS_IDX_80] = v;
+
+ v = le16_to_cpu(he_cap->he_mcs_nss_supp.tx_mcs_80);
+ v = ath11k_peer_assoc_h_he_limit(v, he_mcs_mask);
+ arg->peer_he_tx_mcs_set[WMI_HECAP_TXRX_MCS_NSS_IDX_80] = v;
+
+ arg->peer_he_mcs_count++;
+ if (!he_tx_mcs)
+ he_tx_mcs = v;
+ break;
+ }
+
+ /* Calculate peer NSS capability from HE capabilities if STA
+ * supports HE.
+ */
+ for (i = 0, max_nss = 0; i < NL80211_HE_NSS_MAX; i++) {
+ he_mcs = he_tx_mcs >> (2 * i) & 3;
+
+		/* With fixed rates, he_tx_mcs may report an unsupported MCS
+		 * range while he_mcs_mask is set, so check either of them to
+		 * find the nss.
+		 */
+ if (he_mcs != IEEE80211_HE_MCS_NOT_SUPPORTED ||
+ he_mcs_mask[i])
+ max_nss = i + 1;
+ }
+ arg->peer_nss = min(sta->deflink.rx_nss, max_nss);
+
+ if (arg->peer_phymode == MODE_11AX_HE160 ||
+ arg->peer_phymode == MODE_11AX_HE80_80) {
+ tx_nss = ath11k_get_nss_160mhz(ar, max_nss);
+ rx_nss = min(arg->peer_nss, tx_nss);
+ arg->peer_bw_rxnss_override = ATH11K_BW_NSS_MAP_ENABLE;
+
+ if (!rx_nss) {
+ ath11k_warn(ar->ab, "invalid max_nss\n");
+ return;
+ }
+
+ if (arg->peer_phymode == MODE_11AX_HE160)
+ nss_160 = FIELD_PREP(ATH11K_PEER_RX_NSS_160MHZ, rx_nss - 1);
+ else
+ nss_160 = FIELD_PREP(ATH11K_PEER_RX_NSS_80_80MHZ, rx_nss - 1);
+
+ arg->peer_bw_rxnss_override |= nss_160;
+ }
+
+ ath11k_dbg(ar->ab, ATH11K_DBG_MAC,
+ "he peer %pM nss %d mcs cnt %d nss_override 0x%x\n",
+ sta->addr, arg->peer_nss,
+ arg->peer_he_mcs_count,
+ arg->peer_bw_rxnss_override);
+}
+
+static void ath11k_peer_assoc_h_he_6ghz(struct ath11k *ar,
+ struct ieee80211_vif *vif,
+ struct ieee80211_sta *sta,
+ struct peer_assoc_params *arg)
+{
+ const struct ieee80211_sta_he_cap *he_cap = &sta->deflink.he_cap;
+ struct cfg80211_chan_def def;
+ enum nl80211_band band;
+ u8 ampdu_factor;
+
+ if (WARN_ON(ath11k_mac_vif_chan(vif, &def)))
+ return;
+
+ band = def.chan->band;
+
+ if (!arg->he_flag || band != NL80211_BAND_6GHZ || !sta->deflink.he_6ghz_capa.capa)
+ return;
+
+ if (sta->deflink.bandwidth == IEEE80211_STA_RX_BW_40)
+ arg->bw_40 = true;
+
+ if (sta->deflink.bandwidth == IEEE80211_STA_RX_BW_80)
+ arg->bw_80 = true;
+
+ if (sta->deflink.bandwidth == IEEE80211_STA_RX_BW_160)
+ arg->bw_160 = true;
+
+ arg->peer_he_caps_6ghz = le16_to_cpu(sta->deflink.he_6ghz_capa.capa);
+ arg->peer_mpdu_density =
+ ath11k_parse_mpdudensity(FIELD_GET(IEEE80211_HE_6GHZ_CAP_MIN_MPDU_START,
+ arg->peer_he_caps_6ghz));
+
+ /* From IEEE Std 802.11ax-2021 - Section 10.12.2: An HE STA shall be capable of
+ * receiving A-MPDU where the A-MPDU pre-EOF padding length is up to the value
+ * indicated by the Maximum A-MPDU Length Exponent Extension field in the HE
+ * Capabilities element and the Maximum A-MPDU Length Exponent field in HE 6 GHz
+ * Band Capabilities element in the 6 GHz band.
+ *
+ * Here we extract the Maximum A-MPDU Length Exponent Extension from the
+ * HE caps and add the Maximum A-MPDU Length Exponent from the HE 6 GHz
+ * Band Capabilities element.
+ */
+ ampdu_factor = FIELD_GET(IEEE80211_HE_MAC_CAP3_MAX_AMPDU_LEN_EXP_MASK,
+ he_cap->he_cap_elem.mac_cap_info[3]) +
+ FIELD_GET(IEEE80211_HE_6GHZ_CAP_MAX_AMPDU_LEN_EXP,
+ arg->peer_he_caps_6ghz);
+
+ arg->peer_max_mpdu = (1u << (IEEE80211_HE_6GHZ_MAX_AMPDU_FACTOR +
+ ampdu_factor)) - 1;
+}
+
+static void ath11k_peer_assoc_h_smps(struct ieee80211_sta *sta,
+ struct peer_assoc_params *arg)
+{
+ const struct ieee80211_sta_ht_cap *ht_cap = &sta->deflink.ht_cap;
+ int smps;
+
+ if (!ht_cap->ht_supported && !sta->deflink.he_6ghz_capa.capa)
+ return;
+
+ if (ht_cap->ht_supported) {
+ smps = ht_cap->cap & IEEE80211_HT_CAP_SM_PS;
+ smps >>= IEEE80211_HT_CAP_SM_PS_SHIFT;
+ } else {
+ smps = le16_get_bits(sta->deflink.he_6ghz_capa.capa,
+ IEEE80211_HE_6GHZ_CAP_SM_PS);
+ }
+
+ switch (smps) {
+ case WLAN_HT_CAP_SM_PS_STATIC:
+ arg->static_mimops_flag = true;
+ break;
+ case WLAN_HT_CAP_SM_PS_DYNAMIC:
+ arg->dynamic_mimops_flag = true;
+ break;
+ case WLAN_HT_CAP_SM_PS_DISABLED:
+ arg->spatial_mux_flag = true;
+ break;
+ default:
+ break;
+ }
+}
+
+static void ath11k_peer_assoc_h_qos(struct ath11k *ar,
+ struct ieee80211_vif *vif,
+ struct ieee80211_sta *sta,
+ struct peer_assoc_params *arg)
+{
+ struct ath11k_vif *arvif = ath11k_vif_to_arvif(vif);
+
+ switch (arvif->vdev_type) {
+ case WMI_VDEV_TYPE_AP:
+ if (sta->wme) {
+ /* TODO: Check WME vs QoS */
+ arg->is_wme_set = true;
+ arg->qos_flag = true;
+ }
+
+ if (sta->wme && sta->uapsd_queues) {
+ /* TODO: Check WME vs QoS */
+ arg->is_wme_set = true;
+ arg->apsd_flag = true;
+ arg->peer_rate_caps |= WMI_HOST_RC_UAPSD_FLAG;
+ }
+ break;
+ case WMI_VDEV_TYPE_STA:
+ if (sta->wme) {
+ arg->is_wme_set = true;
+ arg->qos_flag = true;
+ }
+ break;
+ default:
+ break;
+ }
+
+ ath11k_dbg(ar->ab, ATH11K_DBG_MAC, "peer %pM qos %d\n",
+ sta->addr, arg->qos_flag);
+}
+
+static int ath11k_peer_assoc_qos_ap(struct ath11k *ar,
+ struct ath11k_vif *arvif,
+ struct ieee80211_sta *sta)
+{
+ struct ap_ps_params params;
+ u32 max_sp;
+ u32 uapsd;
+ int ret;
+
+ lockdep_assert_held(&ar->conf_mutex);
+
+ params.vdev_id = arvif->vdev_id;
+
+ ath11k_dbg(ar->ab, ATH11K_DBG_MAC, "uapsd_queues 0x%x max_sp %d\n",
+ sta->uapsd_queues, sta->max_sp);
+
+ uapsd = 0;
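+ /* WMI U-APSD AC numbering differs from mac80211: AC0 is BE, AC1 is
+ * BK, AC2 is VI and AC3 is VO, hence the cross mapping below.
+ */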
+ if (sta->uapsd_queues & IEEE80211_WMM_IE_STA_QOSINFO_AC_VO)
+ uapsd |= WMI_AP_PS_UAPSD_AC3_DELIVERY_EN |
+ WMI_AP_PS_UAPSD_AC3_TRIGGER_EN;
+ if (sta->uapsd_queues & IEEE80211_WMM_IE_STA_QOSINFO_AC_VI)
+ uapsd |= WMI_AP_PS_UAPSD_AC2_DELIVERY_EN |
+ WMI_AP_PS_UAPSD_AC2_TRIGGER_EN;
+ if (sta->uapsd_queues & IEEE80211_WMM_IE_STA_QOSINFO_AC_BK)
+ uapsd |= WMI_AP_PS_UAPSD_AC1_DELIVERY_EN |
+ WMI_AP_PS_UAPSD_AC1_TRIGGER_EN;
+ if (sta->uapsd_queues & IEEE80211_WMM_IE_STA_QOSINFO_AC_BE)
+ uapsd |= WMI_AP_PS_UAPSD_AC0_DELIVERY_EN |
+ WMI_AP_PS_UAPSD_AC0_TRIGGER_EN;
+
+ max_sp = 0;
+ if (sta->max_sp < MAX_WMI_AP_PS_PEER_PARAM_MAX_SP)
+ max_sp = sta->max_sp;
+
+ params.param = WMI_AP_PS_PEER_PARAM_UAPSD;
+ params.value = uapsd;
+ ret = ath11k_wmi_send_set_ap_ps_param_cmd(ar, sta->addr, &params);
+ if (ret)
+ goto err;
+
+ params.param = WMI_AP_PS_PEER_PARAM_MAX_SP;
+ params.value = max_sp;
+ ret = ath11k_wmi_send_set_ap_ps_param_cmd(ar, sta->addr, &params);
+ if (ret)
+ goto err;
+
+ /* TODO revisit during testing */
+ params.param = WMI_AP_PS_PEER_PARAM_SIFS_RESP_FRMTYPE;
+ params.value = DISABLE_SIFS_RESPONSE_TRIGGER;
+ ret = ath11k_wmi_send_set_ap_ps_param_cmd(ar, sta->addr, &params);
+ if (ret)
+ goto err;
+
+ params.param = WMI_AP_PS_PEER_PARAM_SIFS_RESP_UAPSD;
+ params.value = DISABLE_SIFS_RESPONSE_TRIGGER;
+ ret = ath11k_wmi_send_set_ap_ps_param_cmd(ar, sta->addr, &params);
+ if (ret)
+ goto err;
+
+ return 0;
+
+err:
+ ath11k_warn(ar->ab, "failed to set ap ps peer param %d for vdev %i: %d\n",
+ params.param, arvif->vdev_id, ret);
+ return ret;
+}
+
+static bool ath11k_mac_sta_has_ofdm_only(struct ieee80211_sta *sta)
+{
+ return sta->deflink.supp_rates[NL80211_BAND_2GHZ] >>
+ ATH11K_MAC_FIRST_OFDM_RATE_IDX;
+}
+
+static enum wmi_phy_mode ath11k_mac_get_phymode_vht(struct ath11k *ar,
+ struct ieee80211_sta *sta)
+{
+ if (sta->deflink.bandwidth == IEEE80211_STA_RX_BW_160) {
+ switch (sta->deflink.vht_cap.cap &
+ IEEE80211_VHT_CAP_SUPP_CHAN_WIDTH_MASK) {
+ case IEEE80211_VHT_CAP_SUPP_CHAN_WIDTH_160MHZ:
+ return MODE_11AC_VHT160;
+ case IEEE80211_VHT_CAP_SUPP_CHAN_WIDTH_160_80PLUS80MHZ:
+ return MODE_11AC_VHT80_80;
+ default:
+ /* not sure if this is a valid case? */
+ return MODE_11AC_VHT160;
+ }
+ }
+
+ if (sta->deflink.bandwidth == IEEE80211_STA_RX_BW_80)
+ return MODE_11AC_VHT80;
+
+ if (sta->deflink.bandwidth == IEEE80211_STA_RX_BW_40)
+ return MODE_11AC_VHT40;
+
+ if (sta->deflink.bandwidth == IEEE80211_STA_RX_BW_20)
+ return MODE_11AC_VHT20;
+
+ return MODE_UNKNOWN;
+}
+
+static enum wmi_phy_mode ath11k_mac_get_phymode_he(struct ath11k *ar,
+ struct ieee80211_sta *sta)
+{
+ if (sta->deflink.bandwidth == IEEE80211_STA_RX_BW_160) {
+ if (sta->deflink.he_cap.he_cap_elem.phy_cap_info[0] &
+ IEEE80211_HE_PHY_CAP0_CHANNEL_WIDTH_SET_160MHZ_IN_5G)
+ return MODE_11AX_HE160;
+ else if (sta->deflink.he_cap.he_cap_elem.phy_cap_info[0] &
+ IEEE80211_HE_PHY_CAP0_CHANNEL_WIDTH_SET_80PLUS80_MHZ_IN_5G)
+ return MODE_11AX_HE80_80;
+ /* not sure if this is a valid case? */
+ return MODE_11AX_HE160;
+ }
+
+ if (sta->deflink.bandwidth == IEEE80211_STA_RX_BW_80)
+ return MODE_11AX_HE80;
+
+ if (sta->deflink.bandwidth == IEEE80211_STA_RX_BW_40)
+ return MODE_11AX_HE40;
+
+ if (sta->deflink.bandwidth == IEEE80211_STA_RX_BW_20)
+ return MODE_11AX_HE20;
+
+ return MODE_UNKNOWN;
+}
+
+static void ath11k_peer_assoc_h_phymode(struct ath11k *ar,
+ struct ieee80211_vif *vif,
+ struct ieee80211_sta *sta,
+ struct peer_assoc_params *arg)
+{
+ struct ath11k_vif *arvif = ath11k_vif_to_arvif(vif);
+ struct cfg80211_chan_def def;
+ enum nl80211_band band;
+ const u8 *ht_mcs_mask;
+ const u16 *vht_mcs_mask;
+ const u16 *he_mcs_mask;
+ enum wmi_phy_mode phymode = MODE_UNKNOWN;
+
+ if (WARN_ON(ath11k_mac_vif_chan(vif, &def)))
+ return;
+
+ band = def.chan->band;
+ ht_mcs_mask = arvif->bitrate_mask.control[band].ht_mcs;
+ vht_mcs_mask = arvif->bitrate_mask.control[band].vht_mcs;
+ he_mcs_mask = arvif->bitrate_mask.control[band].he_mcs;
+
+ switch (band) {
+ case NL80211_BAND_2GHZ:
+ if (sta->deflink.he_cap.has_he &&
+ !ath11k_peer_assoc_h_he_masked(he_mcs_mask)) {
+ if (sta->deflink.bandwidth == IEEE80211_STA_RX_BW_80)
+ phymode = MODE_11AX_HE80_2G;
+ else if (sta->deflink.bandwidth == IEEE80211_STA_RX_BW_40)
+ phymode = MODE_11AX_HE40_2G;
+ else
+ phymode = MODE_11AX_HE20_2G;
+ } else if (sta->deflink.vht_cap.vht_supported &&
+ !ath11k_peer_assoc_h_vht_masked(vht_mcs_mask)) {
+ if (sta->deflink.bandwidth == IEEE80211_STA_RX_BW_40)
+ phymode = MODE_11AC_VHT40;
+ else
+ phymode = MODE_11AC_VHT20;
+ } else if (sta->deflink.ht_cap.ht_supported &&
+ !ath11k_peer_assoc_h_ht_masked(ht_mcs_mask)) {
+ if (sta->deflink.bandwidth == IEEE80211_STA_RX_BW_40)
+ phymode = MODE_11NG_HT40;
+ else
+ phymode = MODE_11NG_HT20;
+ } else if (ath11k_mac_sta_has_ofdm_only(sta)) {
+ phymode = MODE_11G;
+ } else {
+ phymode = MODE_11B;
+ }
+ break;
+ case NL80211_BAND_5GHZ:
+ case NL80211_BAND_6GHZ:
+ /* Check HE first */
+ if (sta->deflink.he_cap.has_he &&
+ !ath11k_peer_assoc_h_he_masked(he_mcs_mask)) {
+ phymode = ath11k_mac_get_phymode_he(ar, sta);
+ } else if (sta->deflink.vht_cap.vht_supported &&
+ !ath11k_peer_assoc_h_vht_masked(vht_mcs_mask)) {
+ phymode = ath11k_mac_get_phymode_vht(ar, sta);
+ } else if (sta->deflink.ht_cap.ht_supported &&
+ !ath11k_peer_assoc_h_ht_masked(ht_mcs_mask)) {
+ if (sta->deflink.bandwidth >= IEEE80211_STA_RX_BW_40)
+ phymode = MODE_11NA_HT40;
+ else
+ phymode = MODE_11NA_HT20;
+ } else {
+ phymode = MODE_11A;
+ }
+ break;
+ default:
+ break;
+ }
+
+ ath11k_dbg(ar->ab, ATH11K_DBG_MAC, "peer %pM phymode %s\n",
+ sta->addr, ath11k_wmi_phymode_str(phymode));
+
+ arg->peer_phymode = phymode;
+ WARN_ON(phymode == MODE_UNKNOWN);
+}
+
+static void ath11k_peer_assoc_prepare(struct ath11k *ar,
+ struct ieee80211_vif *vif,
+ struct ieee80211_sta *sta,
+ struct peer_assoc_params *arg,
+ bool reassoc)
+{
+ struct ath11k_sta *arsta;
+
+ lockdep_assert_held(&ar->conf_mutex);
+
+ arsta = (struct ath11k_sta *)sta->drv_priv;
+
+ memset(arg, 0, sizeof(*arg));
+
+ reinit_completion(&ar->peer_assoc_done);
+
+ arg->peer_new_assoc = !reassoc;
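+ /* Order matters here: ath11k_peer_assoc_h_he() consults
+ * arg->peer_phymode, so ath11k_peer_assoc_h_phymode() must run
+ * before it.
+ */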
+ ath11k_peer_assoc_h_basic(ar, vif, sta, arg);
+ ath11k_peer_assoc_h_crypto(ar, vif, sta, arg);
+ ath11k_peer_assoc_h_rates(ar, vif, sta, arg);
+ ath11k_peer_assoc_h_phymode(ar, vif, sta, arg);
+ ath11k_peer_assoc_h_ht(ar, vif, sta, arg);
+ ath11k_peer_assoc_h_vht(ar, vif, sta, arg);
+ ath11k_peer_assoc_h_he(ar, vif, sta, arg);
+ ath11k_peer_assoc_h_he_6ghz(ar, vif, sta, arg);
+ ath11k_peer_assoc_h_qos(ar, vif, sta, arg);
+ ath11k_peer_assoc_h_smps(sta, arg);
+
+ arsta->peer_nss = arg->peer_nss;
+
+ /* TODO: amsdu_disable req? */
+}
+
+static int ath11k_setup_peer_smps(struct ath11k *ar, struct ath11k_vif *arvif,
+ const u8 *addr,
+ const struct ieee80211_sta_ht_cap *ht_cap,
+ u16 he_6ghz_capa)
+{
+ int smps;
+
+ if (!ht_cap->ht_supported && !he_6ghz_capa)
+ return 0;
+
+ if (ht_cap->ht_supported) {
+ smps = ht_cap->cap & IEEE80211_HT_CAP_SM_PS;
+ smps >>= IEEE80211_HT_CAP_SM_PS_SHIFT;
+ } else {
+ smps = FIELD_GET(IEEE80211_HE_6GHZ_CAP_SM_PS, he_6ghz_capa);
+ }
+
+ if (smps >= ARRAY_SIZE(ath11k_smps_map))
+ return -EINVAL;
+
+ return ath11k_wmi_set_peer_param(ar, addr, arvif->vdev_id,
+ WMI_PEER_MIMO_PS_STATE,
+ ath11k_smps_map[smps]);
+}
+
+static bool ath11k_mac_set_he_txbf_conf(struct ath11k_vif *arvif)
+{
+ struct ath11k *ar = arvif->ar;
+ u32 param, value;
+ int ret;
+
+ if (!arvif->vif->bss_conf.he_support)
+ return true;
+
+ param = WMI_VDEV_PARAM_SET_HEMU_MODE;
+ value = 0;
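+ /* The HEMU mode value packs the SU/MU beamformer, beamformee and
+ * UL/DL OFDMA enables into a single bitfield built up below.
+ */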
+ if (arvif->vif->bss_conf.he_su_beamformer) {
+ value |= FIELD_PREP(HE_MODE_SU_TX_BFER, HE_SU_BFER_ENABLE);
+ if (arvif->vif->bss_conf.he_mu_beamformer &&
+ arvif->vdev_type == WMI_VDEV_TYPE_AP)
+ value |= FIELD_PREP(HE_MODE_MU_TX_BFER, HE_MU_BFER_ENABLE);
+ }
+
+ if (arvif->vif->type != NL80211_IFTYPE_MESH_POINT) {
+ value |= FIELD_PREP(HE_MODE_DL_OFDMA, HE_DL_MUOFDMA_ENABLE) |
+ FIELD_PREP(HE_MODE_UL_OFDMA, HE_UL_MUOFDMA_ENABLE);
+
+ if (arvif->vif->bss_conf.he_full_ul_mumimo)
+ value |= FIELD_PREP(HE_MODE_UL_MUMIMO, HE_UL_MUMIMO_ENABLE);
+
+ if (arvif->vif->bss_conf.he_su_beamformee)
+ value |= FIELD_PREP(HE_MODE_SU_TX_BFEE, HE_SU_BFEE_ENABLE);
+ }
+
+ ret = ath11k_wmi_vdev_set_param_cmd(ar, arvif->vdev_id, param, value);
+ if (ret) {
+ ath11k_warn(ar->ab, "failed to set vdev %d HE MU mode: %d\n",
+ arvif->vdev_id, ret);
+ return false;
+ }
+
+ param = WMI_VDEV_PARAM_SET_HE_SOUNDING_MODE;
+ value = FIELD_PREP(HE_VHT_SOUNDING_MODE, HE_VHT_SOUNDING_MODE_ENABLE) |
+ FIELD_PREP(HE_TRIG_NONTRIG_SOUNDING_MODE,
+ HE_TRIG_NONTRIG_SOUNDING_MODE_ENABLE);
+ ret = ath11k_wmi_vdev_set_param_cmd(ar, arvif->vdev_id,
+ param, value);
+ if (ret) {
+ ath11k_warn(ar->ab, "failed to set vdev %d sounding mode: %d\n",
+ arvif->vdev_id, ret);
+ return false;
+ }
+ return true;
+}
+
+static bool ath11k_mac_vif_recalc_sta_he_txbf(struct ath11k *ar,
+ struct ieee80211_vif *vif,
+ struct ieee80211_sta_he_cap *he_cap)
+{
+ struct ath11k_vif *arvif = ath11k_vif_to_arvif(vif);
+ struct ieee80211_he_cap_elem he_cap_elem = {0};
+ struct ieee80211_sta_he_cap *cap_band = NULL;
+ struct cfg80211_chan_def def;
+ u32 param = WMI_VDEV_PARAM_SET_HEMU_MODE;
+ u32 hemode = 0;
+ int ret;
+
+ if (!vif->bss_conf.he_support)
+ return true;
+
+ if (vif->type != NL80211_IFTYPE_STATION)
+ return false;
+
+ if (WARN_ON(ath11k_mac_vif_chan(vif, &def)))
+ return false;
+
+ if (def.chan->band == NL80211_BAND_2GHZ)
+ cap_band = &ar->mac.iftype[NL80211_BAND_2GHZ][vif->type].he_cap;
+ else
+ cap_band = &ar->mac.iftype[NL80211_BAND_5GHZ][vif->type].he_cap;
+
+ memcpy(&he_cap_elem, &cap_band->he_cap_elem, sizeof(he_cap_elem));
+
+ if (HECAP_PHY_SUBFME_GET(he_cap_elem.phy_cap_info)) {
+ if (HECAP_PHY_SUBFMR_GET(he_cap->he_cap_elem.phy_cap_info))
+ hemode |= FIELD_PREP(HE_MODE_SU_TX_BFEE, HE_SU_BFEE_ENABLE);
+ if (HECAP_PHY_MUBFMR_GET(he_cap->he_cap_elem.phy_cap_info))
+ hemode |= FIELD_PREP(HE_MODE_MU_TX_BFEE, HE_MU_BFEE_ENABLE);
+ }
+
+ if (vif->type != NL80211_IFTYPE_MESH_POINT) {
+ hemode |= FIELD_PREP(HE_MODE_DL_OFDMA, HE_DL_MUOFDMA_ENABLE) |
+ FIELD_PREP(HE_MODE_UL_OFDMA, HE_UL_MUOFDMA_ENABLE);
+
+ if (HECAP_PHY_ULMUMIMO_GET(he_cap_elem.phy_cap_info) &&
+ HECAP_PHY_ULMUMIMO_GET(he_cap->he_cap_elem.phy_cap_info))
+ hemode |= FIELD_PREP(HE_MODE_UL_MUMIMO,
+ HE_UL_MUMIMO_ENABLE);
+
+ if (FIELD_GET(HE_MODE_MU_TX_BFEE, hemode))
+ hemode |= FIELD_PREP(HE_MODE_SU_TX_BFEE, HE_SU_BFEE_ENABLE);
+
+ if (FIELD_GET(HE_MODE_MU_TX_BFER, hemode))
+ hemode |= FIELD_PREP(HE_MODE_SU_TX_BFER, HE_SU_BFER_ENABLE);
+ }
+
+ ret = ath11k_wmi_vdev_set_param_cmd(ar, arvif->vdev_id, param, hemode);
+ if (ret) {
+ ath11k_warn(ar->ab, "failed to submit vdev param txbf 0x%x: %d\n",
+ hemode, ret);
+ return false;
+ }
+
+ return true;
+}
+
+static void ath11k_bss_assoc(struct ieee80211_hw *hw,
+ struct ieee80211_vif *vif,
+ struct ieee80211_bss_conf *bss_conf)
+{
+ struct ath11k *ar = hw->priv;
+ struct ath11k_vif *arvif = ath11k_vif_to_arvif(vif);
+ struct peer_assoc_params peer_arg;
+ struct ieee80211_sta *ap_sta;
+ struct ath11k_peer *peer;
+ bool is_auth = false;
+ struct ieee80211_sta_he_cap he_cap;
+ int ret;
+
+ lockdep_assert_held(&ar->conf_mutex);
+
+ ath11k_dbg(ar->ab, ATH11K_DBG_MAC, "vdev %i assoc bssid %pM aid %d\n",
+ arvif->vdev_id, arvif->bssid, arvif->aid);
+
+ rcu_read_lock();
+
+ ap_sta = ieee80211_find_sta(vif, bss_conf->bssid);
+ if (!ap_sta) {
+ ath11k_warn(ar->ab, "failed to find station entry for bss %pM vdev %i\n",
+ bss_conf->bssid, arvif->vdev_id);
+ rcu_read_unlock();
+ return;
+ }
+
+ /* he_cap here is updated at assoc success for sta mode only */
+ he_cap = ap_sta->deflink.he_cap;
+
+ ath11k_peer_assoc_prepare(ar, vif, ap_sta, &peer_arg, false);
+
+ rcu_read_unlock();
+
+ peer_arg.is_assoc = true;
+ ret = ath11k_wmi_send_peer_assoc_cmd(ar, &peer_arg);
+ if (ret) {
+ ath11k_warn(ar->ab, "failed to run peer assoc for %pM vdev %i: %d\n",
+ bss_conf->bssid, arvif->vdev_id, ret);
+ return;
+ }
+
+ if (!wait_for_completion_timeout(&ar->peer_assoc_done, 1 * HZ)) {
+ ath11k_warn(ar->ab, "failed to get peer assoc conf event for %pM vdev %i\n",
+ bss_conf->bssid, arvif->vdev_id);
+ return;
+ }
+
+ ret = ath11k_setup_peer_smps(ar, arvif, bss_conf->bssid,
+ &ap_sta->deflink.ht_cap,
+ le16_to_cpu(ap_sta->deflink.he_6ghz_capa.capa));
+ if (ret) {
+ ath11k_warn(ar->ab, "failed to setup peer SMPS for vdev %d: %d\n",
+ arvif->vdev_id, ret);
+ return;
+ }
+
+ if (!ath11k_mac_vif_recalc_sta_he_txbf(ar, vif, &he_cap)) {
+ ath11k_warn(ar->ab, "failed to recalc he txbf for vdev %i on bss %pM\n",
+ arvif->vdev_id, bss_conf->bssid);
+ return;
+ }
+
+ WARN_ON(arvif->is_up);
+
+ arvif->aid = vif->cfg.aid;
+ ether_addr_copy(arvif->bssid, bss_conf->bssid);
+
+ ret = ath11k_wmi_vdev_up(ar, arvif->vdev_id, arvif->aid, arvif->bssid,
+ NULL, 0, 0);
+ if (ret) {
+ ath11k_warn(ar->ab, "failed to set vdev %d up: %d\n",
+ arvif->vdev_id, ret);
+ return;
+ }
+
+ arvif->is_up = true;
+ arvif->rekey_data.enable_offload = false;
+
+ ath11k_dbg(ar->ab, ATH11K_DBG_MAC,
+ "vdev %d up (associated) bssid %pM aid %d\n",
+ arvif->vdev_id, bss_conf->bssid, vif->cfg.aid);
+
+ spin_lock_bh(&ar->ab->base_lock);
+
+ peer = ath11k_peer_find(ar->ab, arvif->vdev_id, arvif->bssid);
+ if (peer && peer->is_authorized)
+ is_auth = true;
+
+ spin_unlock_bh(&ar->ab->base_lock);
+
+ if (is_auth) {
+ ret = ath11k_wmi_set_peer_param(ar, arvif->bssid,
+ arvif->vdev_id,
+ WMI_PEER_AUTHORIZE,
+ 1);
+ if (ret)
+ ath11k_warn(ar->ab, "Unable to authorize BSS peer: %d\n", ret);
+ }
+
+ ret = ath11k_wmi_send_obss_spr_cmd(ar, arvif->vdev_id,
+ &bss_conf->he_obss_pd);
+ if (ret)
+ ath11k_warn(ar->ab, "failed to set vdev %i OBSS PD parameters: %d\n",
+ arvif->vdev_id, ret);
+
+ ret = ath11k_wmi_vdev_set_param_cmd(ar, arvif->vdev_id,
+ WMI_VDEV_PARAM_DTIM_POLICY,
+ WMI_DTIM_POLICY_STICK);
+ if (ret)
+ ath11k_warn(ar->ab, "failed to set vdev %d dtim policy: %d\n",
+ arvif->vdev_id, ret);
+
+ ath11k_mac_11d_scan_stop_all(ar->ab);
+}
+
+static void ath11k_bss_disassoc(struct ieee80211_hw *hw,
+ struct ieee80211_vif *vif)
+{
+ struct ath11k *ar = hw->priv;
+ struct ath11k_vif *arvif = ath11k_vif_to_arvif(vif);
+ int ret;
+
+ lockdep_assert_held(&ar->conf_mutex);
+
+ ath11k_dbg(ar->ab, ATH11K_DBG_MAC, "vdev %i disassoc bssid %pM\n",
+ arvif->vdev_id, arvif->bssid);
+
+ ret = ath11k_wmi_vdev_down(ar, arvif->vdev_id);
+ if (ret)
+ ath11k_warn(ar->ab, "failed to down vdev %i: %d\n",
+ arvif->vdev_id, ret);
+
+ arvif->is_up = false;
+
+ memset(&arvif->rekey_data, 0, sizeof(arvif->rekey_data));
+
+ cancel_delayed_work_sync(&arvif->connection_loss_work);
+}
+
+static int ath11k_mac_get_rate_hw_value(int bitrate)
+{
+ u32 preamble;
+ u16 hw_value;
+ int rate;
+ size_t i;
+
+ if (ath11k_mac_bitrate_is_cck(bitrate))
+ preamble = WMI_RATE_PREAMBLE_CCK;
+ else
+ preamble = WMI_RATE_PREAMBLE_OFDM;
+
+ for (i = 0; i < ARRAY_SIZE(ath11k_legacy_rates); i++) {
+ if (ath11k_legacy_rates[i].bitrate != bitrate)
+ continue;
+
+ hw_value = ath11k_legacy_rates[i].hw_value;
+ rate = ATH11K_HW_RATE_CODE(hw_value, 0, preamble);
+
+ return rate;
+ }
+
+ return -EINVAL;
+}
+
+static void ath11k_recalculate_mgmt_rate(struct ath11k *ar,
+ struct ieee80211_vif *vif,
+ struct cfg80211_chan_def *def)
+{
+ struct ath11k_vif *arvif = ath11k_vif_to_arvif(vif);
+ const struct ieee80211_supported_band *sband;
+ u8 basic_rate_idx;
+ int hw_rate_code;
+ u32 vdev_param;
+ u16 bitrate;
+ int ret;
+
+ lockdep_assert_held(&ar->conf_mutex);
+
+ sband = ar->hw->wiphy->bands[def->chan->band];
+ basic_rate_idx = ffs(vif->bss_conf.basic_rates) - 1;
+ bitrate = sband->bitrates[basic_rate_idx].bitrate;
+
+ hw_rate_code = ath11k_mac_get_rate_hw_value(bitrate);
+ if (hw_rate_code < 0) {
+ ath11k_warn(ar->ab, "bitrate not supported %d\n", bitrate);
+ return;
+ }
+
+ vdev_param = WMI_VDEV_PARAM_MGMT_RATE;
+ ret = ath11k_wmi_vdev_set_param_cmd(ar, arvif->vdev_id, vdev_param,
+ hw_rate_code);
+ if (ret)
+ ath11k_warn(ar->ab, "failed to set mgmt tx rate %d\n", ret);
+
+ /* For WCN6855 the firmware clears this param when the vdev starts,
+ * so cache it here and reconfigure it after vdev start.
+ */
+ ar->hw_rate_code = hw_rate_code;
+
+ vdev_param = WMI_VDEV_PARAM_BEACON_RATE;
+ ret = ath11k_wmi_vdev_set_param_cmd(ar, arvif->vdev_id, vdev_param,
+ hw_rate_code);
+ if (ret)
+ ath11k_warn(ar->ab, "failed to set beacon tx rate %d\n", ret);
+}
+
+static int ath11k_mac_fils_discovery(struct ath11k_vif *arvif,
+ struct ieee80211_bss_conf *info)
+{
+ struct ath11k *ar = arvif->ar;
+ struct sk_buff *tmpl;
+ int ret;
+ u32 interval;
+ bool unsol_bcast_probe_resp_enabled = false;
+
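+ /* FILS discovery and unsolicited broadcast probe response are
+ * alternative in-band discovery mechanisms; whichever interval is
+ * non-zero gets enabled, preferring FILS discovery, and with
+ * neither set both are disabled.
+ */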
+ if (info->fils_discovery.max_interval) {
+ interval = info->fils_discovery.max_interval;
+
+ tmpl = ieee80211_get_fils_discovery_tmpl(ar->hw, arvif->vif);
+ if (tmpl)
+ ret = ath11k_wmi_fils_discovery_tmpl(ar, arvif->vdev_id,
+ tmpl);
+ } else if (info->unsol_bcast_probe_resp_interval) {
+ unsol_bcast_probe_resp_enabled = true;
+ interval = info->unsol_bcast_probe_resp_interval;
+
+ tmpl = ieee80211_get_unsol_bcast_probe_resp_tmpl(ar->hw,
+ arvif->vif);
+ if (tmpl)
+ ret = ath11k_wmi_probe_resp_tmpl(ar, arvif->vdev_id,
+ tmpl);
+ } else { /* Disable */
+ return ath11k_wmi_fils_discovery(ar, arvif->vdev_id, 0, false);
+ }
+
+ if (!tmpl) {
+ ath11k_warn(ar->ab,
+ "mac vdev %i failed to retrieve %s template\n",
+ arvif->vdev_id, (unsol_bcast_probe_resp_enabled ?
+ "unsolicited broadcast probe response" :
+ "FILS discovery"));
+ return -EPERM;
+ }
+ kfree_skb(tmpl);
+
+ if (!ret)
+ ret = ath11k_wmi_fils_discovery(ar, arvif->vdev_id, interval,
+ unsol_bcast_probe_resp_enabled);
+
+ return ret;
+}
+
+static int ath11k_mac_config_obss_pd(struct ath11k *ar,
+ struct ieee80211_he_obss_pd *he_obss_pd)
+{
+ u32 bitmap[2], param_id, param_val, pdev_id;
+ int ret;
+ s8 non_srg_th = 0, srg_th = 0;
+
+ pdev_id = ar->pdev->pdev_id;
+
+ /* Set and enable SRG/non-SRG OBSS PD Threshold */
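+ /* Both thresholds share a single WMI parameter: bits 7:0 carry the
+ * non-SRG threshold and bits 15:8 the SRG threshold, alongside the
+ * respective enable flags.
+ */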
+ param_id = WMI_PDEV_PARAM_SET_CMD_OBSS_PD_THRESHOLD;
+ if (test_bit(ATH11K_FLAG_MONITOR_STARTED, &ar->monitor_flags)) {
+ ret = ath11k_wmi_pdev_set_param(ar, param_id, 0, pdev_id);
+ if (ret)
+ ath11k_warn(ar->ab,
+ "failed to set obss_pd_threshold for pdev: %u\n",
+ pdev_id);
+ return ret;
+ }
+
+ ath11k_dbg(ar->ab, ATH11K_DBG_MAC,
+ "obss pd sr_ctrl %x non_srg_thres %u srg_max %u\n",
+ he_obss_pd->sr_ctrl, he_obss_pd->non_srg_max_offset,
+ he_obss_pd->max_offset);
+
+ param_val = 0;
+
+ if (he_obss_pd->sr_ctrl &
+ IEEE80211_HE_SPR_NON_SRG_OBSS_PD_SR_DISALLOWED) {
+ non_srg_th = ATH11K_OBSS_PD_MAX_THRESHOLD;
+ } else {
+ if (he_obss_pd->sr_ctrl & IEEE80211_HE_SPR_NON_SRG_OFFSET_PRESENT)
+ non_srg_th = (ATH11K_OBSS_PD_MAX_THRESHOLD +
+ he_obss_pd->non_srg_max_offset);
+ else
+ non_srg_th = ATH11K_OBSS_PD_NON_SRG_MAX_THRESHOLD;
+
+ param_val |= ATH11K_OBSS_PD_NON_SRG_EN;
+ }
+
+ if (he_obss_pd->sr_ctrl & IEEE80211_HE_SPR_SRG_INFORMATION_PRESENT) {
+ srg_th = ATH11K_OBSS_PD_MAX_THRESHOLD + he_obss_pd->max_offset;
+ param_val |= ATH11K_OBSS_PD_SRG_EN;
+ }
+
+ if (test_bit(WMI_TLV_SERVICE_SRG_SRP_SPATIAL_REUSE_SUPPORT,
+ ar->ab->wmi_ab.svc_map)) {
+ param_val |= ATH11K_OBSS_PD_THRESHOLD_IN_DBM;
+ param_val |= FIELD_PREP(GENMASK(15, 8), srg_th);
+ } else {
+ non_srg_th -= ATH11K_DEFAULT_NOISE_FLOOR;
+ /* SRG not supported and threshold in dB */
+ param_val &= ~(ATH11K_OBSS_PD_SRG_EN |
+ ATH11K_OBSS_PD_THRESHOLD_IN_DBM);
+ }
+
+ param_val |= (non_srg_th & GENMASK(7, 0));
+ ret = ath11k_wmi_pdev_set_param(ar, param_id, param_val, pdev_id);
+ if (ret) {
+ ath11k_warn(ar->ab,
+ "failed to set obss_pd_threshold for pdev: %u\n",
+ pdev_id);
+ return ret;
+ }
+
+ /* Enable OBSS PD for all access categories */
+ param_id = WMI_PDEV_PARAM_SET_CMD_OBSS_PD_PER_AC;
+ param_val = 0xf;
+ ret = ath11k_wmi_pdev_set_param(ar, param_id, param_val, pdev_id);
+ if (ret) {
+ ath11k_warn(ar->ab,
+ "failed to set obss_pd_per_ac for pdev: %u\n",
+ pdev_id);
+ return ret;
+ }
+
+ /* Set SR Prohibit */
+ param_id = WMI_PDEV_PARAM_ENABLE_SR_PROHIBIT;
+ param_val = !!(he_obss_pd->sr_ctrl &
+ IEEE80211_HE_SPR_HESIGA_SR_VAL15_ALLOWED);
+ ret = ath11k_wmi_pdev_set_param(ar, param_id, param_val, pdev_id);
+ if (ret) {
+ ath11k_warn(ar->ab, "failed to set sr_prohibit for pdev: %u\n",
+ pdev_id);
+ return ret;
+ }
+
+ if (!test_bit(WMI_TLV_SERVICE_SRG_SRP_SPATIAL_REUSE_SUPPORT,
+ ar->ab->wmi_ab.svc_map))
+ return 0;
+
+ /* Set SRG BSS Color Bitmap */
+ memcpy(bitmap, he_obss_pd->bss_color_bitmap, sizeof(bitmap));
+ ret = ath11k_wmi_pdev_set_srg_bss_color_bitmap(ar, bitmap);
+ if (ret) {
+ ath11k_warn(ar->ab,
+ "failed to set bss_color_bitmap for pdev: %u\n",
+ pdev_id);
+ return ret;
+ }
+
+ /* Set SRG Partial BSSID Bitmap */
+ memcpy(bitmap, he_obss_pd->partial_bssid_bitmap, sizeof(bitmap));
+ ret = ath11k_wmi_pdev_set_srg_patial_bssid_bitmap(ar, bitmap);
+ if (ret) {
+ ath11k_warn(ar->ab,
+ "failed to set partial_bssid_bitmap for pdev: %u\n",
+ pdev_id);
+ return ret;
+ }
+
+ memset(bitmap, 0xff, sizeof(bitmap));
+
+ /* Enable all BSS Colors for SRG */
+ ret = ath11k_wmi_pdev_srg_obss_color_enable_bitmap(ar, bitmap);
+ if (ret) {
+ ath11k_warn(ar->ab,
+ "failed to set srg_color_en_bitmap pdev: %u\n",
+ pdev_id);
+ return ret;
+ }
+
+ /* Enable all partial BSSID mask for SRG */
+ ret = ath11k_wmi_pdev_srg_obss_bssid_enable_bitmap(ar, bitmap);
+ if (ret) {
+ ath11k_warn(ar->ab,
+ "failed to set srg_bssid_en_bitmap pdev: %u\n",
+ pdev_id);
+ return ret;
+ }
+
+ /* Enable all BSS Colors for non-SRG */
+ ret = ath11k_wmi_pdev_non_srg_obss_color_enable_bitmap(ar, bitmap);
+ if (ret) {
+ ath11k_warn(ar->ab,
+ "failed to set non_srg_color_en_bitmap pdev: %u\n",
+ pdev_id);
+ return ret;
+ }
+
+ /* Enable all partial BSSID mask for non-SRG */
+ ret = ath11k_wmi_pdev_non_srg_obss_bssid_enable_bitmap(ar, bitmap);
+ if (ret) {
+ ath11k_warn(ar->ab,
+ "failed to set non_srg_bssid_en_bitmap pdev: %u\n",
+ pdev_id);
+ return ret;
+ }
+
+ return 0;
+}
+
+static void ath11k_mac_op_bss_info_changed(struct ieee80211_hw *hw,
+ struct ieee80211_vif *vif,
+ struct ieee80211_bss_conf *info,
+ u64 changed)
+{
+ struct ath11k *ar = hw->priv;
+ struct ath11k_vif *arvif = ath11k_vif_to_arvif(vif);
+ struct cfg80211_chan_def def;
+ u32 param_id, param_value;
+ enum nl80211_band band;
+ u32 vdev_param;
+ int mcast_rate;
+ u32 preamble;
+ u16 hw_value;
+ u16 bitrate;
+ int ret = 0;
+ u8 rateidx;
+ u32 rate, param;
+ u32 ipv4_cnt;
+
+ mutex_lock(&ar->conf_mutex);
+
+ if (changed & BSS_CHANGED_BEACON_INT) {
+ arvif->beacon_interval = info->beacon_int;
+
+ param_id = WMI_VDEV_PARAM_BEACON_INTERVAL;
+ ret = ath11k_wmi_vdev_set_param_cmd(ar, arvif->vdev_id,
+ param_id,
+ arvif->beacon_interval);
+ if (ret)
+ ath11k_warn(ar->ab, "Failed to set beacon interval for VDEV: %d\n",
+ arvif->vdev_id);
+ else
+ ath11k_dbg(ar->ab, ATH11K_DBG_MAC,
+ "Beacon interval: %d set for VDEV: %d\n",
+ arvif->beacon_interval, arvif->vdev_id);
+ }
+
+ if (changed & BSS_CHANGED_BEACON) {
+ param_id = WMI_PDEV_PARAM_BEACON_TX_MODE;
+ param_value = WMI_BEACON_STAGGERED_MODE;
+ ret = ath11k_wmi_pdev_set_param(ar, param_id,
+ param_value, ar->pdev->pdev_id);
+ if (ret)
+ ath11k_warn(ar->ab, "Failed to set beacon mode for VDEV: %d\n",
+ arvif->vdev_id);
+ else
+ ath11k_dbg(ar->ab, ATH11K_DBG_MAC,
+ "Set staggered beacon mode for VDEV: %d\n",
+ arvif->vdev_id);
+
+ if (!arvif->do_not_send_tmpl || !arvif->bcca_zero_sent) {
+ ret = ath11k_mac_setup_bcn_tmpl(arvif);
+ if (ret)
+ ath11k_warn(ar->ab, "failed to update bcn template: %d\n",
+ ret);
+ }
+
+ if (arvif->bcca_zero_sent)
+ arvif->do_not_send_tmpl = true;
+ else
+ arvif->do_not_send_tmpl = false;
+
+ if (vif->bss_conf.he_support) {
+ ret = ath11k_wmi_vdev_set_param_cmd(ar, arvif->vdev_id,
+ WMI_VDEV_PARAM_BA_MODE,
+ WMI_BA_MODE_BUFFER_SIZE_256);
+ if (ret)
+ ath11k_warn(ar->ab,
+ "failed to set BA BUFFER SIZE 256 for vdev: %d\n",
+ arvif->vdev_id);
+ else
+ ath11k_dbg(ar->ab, ATH11K_DBG_MAC,
+ "Set BA BUFFER SIZE 256 for VDEV: %d\n",
+ arvif->vdev_id);
+ }
+ }
+
+ if (changed & (BSS_CHANGED_BEACON_INFO | BSS_CHANGED_BEACON)) {
+ arvif->dtim_period = info->dtim_period;
+
+ param_id = WMI_VDEV_PARAM_DTIM_PERIOD;
+ ret = ath11k_wmi_vdev_set_param_cmd(ar, arvif->vdev_id,
+ param_id,
+ arvif->dtim_period);
+
+ if (ret)
+ ath11k_warn(ar->ab, "Failed to set dtim period for VDEV %d: %i\n",
+ arvif->vdev_id, ret);
+ else
+ ath11k_dbg(ar->ab, ATH11K_DBG_MAC,
+ "DTIM period: %d set for VDEV: %d\n",
+ arvif->dtim_period, arvif->vdev_id);
+ }
+
+ if (changed & BSS_CHANGED_SSID &&
+ vif->type == NL80211_IFTYPE_AP) {
+ arvif->u.ap.ssid_len = vif->cfg.ssid_len;
+ if (vif->cfg.ssid_len)
+ memcpy(arvif->u.ap.ssid, vif->cfg.ssid,
+ vif->cfg.ssid_len);
+ arvif->u.ap.hidden_ssid = info->hidden_ssid;
+ }
+
+ if (changed & BSS_CHANGED_BSSID && !is_zero_ether_addr(info->bssid))
+ ether_addr_copy(arvif->bssid, info->bssid);
+
+ if (changed & BSS_CHANGED_BEACON_ENABLED) {
+ if (info->enable_beacon)
+ ath11k_mac_set_he_txbf_conf(arvif);
+ ath11k_control_beaconing(arvif, info);
+
+ if (arvif->is_up && vif->bss_conf.he_support &&
+ vif->bss_conf.he_oper.params) {
+ param_id = WMI_VDEV_PARAM_HEOPS_0_31;
+ param_value = vif->bss_conf.he_oper.params;
+ ret = ath11k_wmi_vdev_set_param_cmd(ar, arvif->vdev_id,
+ param_id, param_value);
+ ath11k_dbg(ar->ab, ATH11K_DBG_MAC,
+ "he oper param: %x set for VDEV: %d\n",
+ param_value, arvif->vdev_id);
+
+ if (ret)
+ ath11k_warn(ar->ab, "Failed to set he oper params %x for VDEV %d: %i\n",
+ param_value, arvif->vdev_id, ret);
+ }
+ }
+
+ if (changed & BSS_CHANGED_ERP_CTS_PROT) {
+ u32 cts_prot;
+
+ cts_prot = !!(info->use_cts_prot);
+ param_id = WMI_VDEV_PARAM_PROTECTION_MODE;
+
+ if (arvif->is_started) {
+ ret = ath11k_wmi_vdev_set_param_cmd(ar, arvif->vdev_id,
+ param_id, cts_prot);
+ if (ret)
+ ath11k_warn(ar->ab, "Failed to set CTS prot for VDEV: %d\n",
+ arvif->vdev_id);
+ else
+ ath11k_dbg(ar->ab, ATH11K_DBG_MAC, "Set CTS prot: %d for VDEV: %d\n",
+ cts_prot, arvif->vdev_id);
+ } else {
+ ath11k_dbg(ar->ab, ATH11K_DBG_MAC, "defer protection mode setup, vdev is not ready yet\n");
+ }
+ }
+
+ if (changed & BSS_CHANGED_ERP_SLOT) {
+ u32 slottime;
+
+ if (info->use_short_slot)
+ slottime = WMI_VDEV_SLOT_TIME_SHORT; /* 9us */
+ else
+ slottime = WMI_VDEV_SLOT_TIME_LONG; /* 20us */
+
+ param_id = WMI_VDEV_PARAM_SLOT_TIME;
+ ret = ath11k_wmi_vdev_set_param_cmd(ar, arvif->vdev_id,
+ param_id, slottime);
+ if (ret)
+ ath11k_warn(ar->ab, "Failed to set erp slot for VDEV: %d\n",
+ arvif->vdev_id);
+ else
+ ath11k_dbg(ar->ab, ATH11K_DBG_MAC,
+ "Set slottime: %d for VDEV: %d\n",
+ slottime, arvif->vdev_id);
+ }
+
+ if (changed & BSS_CHANGED_ERP_PREAMBLE) {
+ u32 preamble;
+
+ if (info->use_short_preamble)
+ preamble = WMI_VDEV_PREAMBLE_SHORT;
+ else
+ preamble = WMI_VDEV_PREAMBLE_LONG;
+
+ param_id = WMI_VDEV_PARAM_PREAMBLE;
+ ret = ath11k_wmi_vdev_set_param_cmd(ar, arvif->vdev_id,
+ param_id, preamble);
+ if (ret)
+ ath11k_warn(ar->ab, "Failed to set preamble for VDEV: %d\n",
+ arvif->vdev_id);
+ else
+ ath11k_dbg(ar->ab, ATH11K_DBG_MAC,
+ "Set preamble: %d for VDEV: %d\n",
+ preamble, arvif->vdev_id);
+ }
+
+ if (changed & BSS_CHANGED_ASSOC) {
+ if (vif->cfg.assoc)
+ ath11k_bss_assoc(hw, vif, info);
+ else
+ ath11k_bss_disassoc(hw, vif);
+ }
+
+ if (changed & BSS_CHANGED_TXPOWER) {
+ ath11k_dbg(ar->ab, ATH11K_DBG_MAC, "vdev_id %i txpower %d\n",
+ arvif->vdev_id, info->txpower);
+
+ arvif->txpower = info->txpower;
+ ath11k_mac_txpower_recalc(ar);
+ }
+
+ if (changed & BSS_CHANGED_PS &&
+ ar->ab->hw_params.supports_sta_ps) {
+ arvif->ps = vif->cfg.ps;
+
+ ret = ath11k_mac_config_ps(ar);
+ if (ret)
+ ath11k_warn(ar->ab, "failed to setup ps on vdev %i: %d\n",
+ arvif->vdev_id, ret);
+ }
+
+ if (changed & BSS_CHANGED_MCAST_RATE &&
+ !ath11k_mac_vif_chan(arvif->vif, &def)) {
+ band = def.chan->band;
+ mcast_rate = vif->bss_conf.mcast_rate[band];
+
+ if (mcast_rate > 0)
+ rateidx = mcast_rate - 1;
+ else
+ rateidx = ffs(vif->bss_conf.basic_rates) - 1;
+
+ if (ar->pdev->cap.supported_bands & WMI_HOST_WLAN_5G_CAP)
+ rateidx += ATH11K_MAC_FIRST_OFDM_RATE_IDX;
+
+ bitrate = ath11k_legacy_rates[rateidx].bitrate;
+ hw_value = ath11k_legacy_rates[rateidx].hw_value;
+
+ if (ath11k_mac_bitrate_is_cck(bitrate))
+ preamble = WMI_RATE_PREAMBLE_CCK;
+ else
+ preamble = WMI_RATE_PREAMBLE_OFDM;
+
+ rate = ATH11K_HW_RATE_CODE(hw_value, 0, preamble);
+
+ ath11k_dbg(ar->ab, ATH11K_DBG_MAC,
+ "vdev %d mcast_rate %x\n",
+ arvif->vdev_id, rate);
+
+ vdev_param = WMI_VDEV_PARAM_MCAST_DATA_RATE;
+ ret = ath11k_wmi_vdev_set_param_cmd(ar, arvif->vdev_id,
+ vdev_param, rate);
+ if (ret)
+ ath11k_warn(ar->ab,
+ "failed to set mcast rate on vdev %i: %d\n",
+ arvif->vdev_id, ret);
+
+ vdev_param = WMI_VDEV_PARAM_BCAST_DATA_RATE;
+ ret = ath11k_wmi_vdev_set_param_cmd(ar, arvif->vdev_id,
+ vdev_param, rate);
+ if (ret)
+ ath11k_warn(ar->ab,
+ "failed to set bcast rate on vdev %i: %d\n",
+ arvif->vdev_id, ret);
+ }
+
+ if (changed & BSS_CHANGED_BASIC_RATES &&
+ !ath11k_mac_vif_chan(arvif->vif, &def))
+ ath11k_recalculate_mgmt_rate(ar, vif, &def);
+
+ if (changed & BSS_CHANGED_TWT) {
+ struct wmi_twt_enable_params twt_params = {0};
+
+ if (info->twt_requester || info->twt_responder) {
+ ath11k_wmi_fill_default_twt_params(&twt_params);
+ ath11k_wmi_send_twt_enable_cmd(ar, ar->pdev->pdev_id,
+ &twt_params);
+ } else {
+ ath11k_wmi_send_twt_disable_cmd(ar, ar->pdev->pdev_id);
+ }
+ }
+
+ if (changed & BSS_CHANGED_HE_OBSS_PD)
+ ath11k_mac_config_obss_pd(ar, &info->he_obss_pd);
+
+ if (changed & BSS_CHANGED_HE_BSS_COLOR) {
+ if (vif->type == NL80211_IFTYPE_AP) {
+ ret = ath11k_wmi_send_obss_color_collision_cfg_cmd(
+ ar, arvif->vdev_id, info->he_bss_color.color,
+ ATH11K_BSS_COLOR_COLLISION_DETECTION_AP_PERIOD_MS,
+ info->he_bss_color.enabled);
+ if (ret)
+ ath11k_warn(ar->ab, "failed to set bss color collision on vdev %i: %d\n",
+ arvif->vdev_id, ret);
+
+ param_id = WMI_VDEV_PARAM_BSS_COLOR;
+ if (info->he_bss_color.enabled)
+ param_value = info->he_bss_color.color <<
+ IEEE80211_HE_OPERATION_BSS_COLOR_OFFSET;
+ else
+ param_value = IEEE80211_HE_OPERATION_BSS_COLOR_DISABLED;
+
+ ret = ath11k_wmi_vdev_set_param_cmd(ar, arvif->vdev_id,
+ param_id,
+ param_value);
+ if (ret)
+ ath11k_warn(ar->ab,
+ "failed to set bss color param on vdev %i: %d\n",
+ arvif->vdev_id, ret);
+
+ ath11k_dbg(ar->ab, ATH11K_DBG_MAC,
+ "bss color param 0x%x set on vdev %i\n",
+ param_value, arvif->vdev_id);
+ } else if (vif->type == NL80211_IFTYPE_STATION) {
+ ret = ath11k_wmi_send_bss_color_change_enable_cmd(ar,
+ arvif->vdev_id,
+ 1);
+ if (ret)
+ ath11k_warn(ar->ab, "failed to enable bss color change on vdev %i: %d\n",
+ arvif->vdev_id, ret);
+ ret = ath11k_wmi_send_obss_color_collision_cfg_cmd(
+ ar, arvif->vdev_id, 0,
+ ATH11K_BSS_COLOR_COLLISION_DETECTION_STA_PERIOD_MS, 1);
+ if (ret)
+ ath11k_warn(ar->ab, "failed to set bss color collision on vdev %i: %d\n",
+ arvif->vdev_id, ret);
+ }
+ }
+
+ if (changed & BSS_CHANGED_FTM_RESPONDER &&
+ arvif->ftm_responder != info->ftm_responder &&
+ test_bit(WMI_TLV_SERVICE_RTT, ar->ab->wmi_ab.svc_map) &&
+ (vif->type == NL80211_IFTYPE_AP ||
+ vif->type == NL80211_IFTYPE_MESH_POINT)) {
+ arvif->ftm_responder = info->ftm_responder;
+ param = WMI_VDEV_PARAM_ENABLE_DISABLE_RTT_RESPONDER_ROLE;
+ ret = ath11k_wmi_vdev_set_param_cmd(ar, arvif->vdev_id, param,
+ arvif->ftm_responder);
+ if (ret)
+ ath11k_warn(ar->ab, "Failed to set ftm responder %i: %d\n",
+ arvif->vdev_id, ret);
+ }
+
+ if (changed & BSS_CHANGED_FILS_DISCOVERY ||
+ changed & BSS_CHANGED_UNSOL_BCAST_PROBE_RESP)
+ ath11k_mac_fils_discovery(arvif, info);
+
+ if (changed & BSS_CHANGED_ARP_FILTER) {
+ ipv4_cnt = min(vif->cfg.arp_addr_cnt, ATH11K_IPV4_MAX_COUNT);
+ memcpy(arvif->arp_ns_offload.ipv4_addr,
+ vif->cfg.arp_addr_list,
+ ipv4_cnt * sizeof(u32));
+ memcpy(arvif->arp_ns_offload.mac_addr, vif->addr, ETH_ALEN);
+ arvif->arp_ns_offload.ipv4_count = ipv4_cnt;
+
+ ath11k_dbg(ar->ab, ATH11K_DBG_MAC, "arp_addr_cnt %d vif->addr %pM, offload_addr %pI4\n",
+ vif->cfg.arp_addr_cnt,
+ vif->addr, arvif->arp_ns_offload.ipv4_addr);
+ }
+
+ mutex_unlock(&ar->conf_mutex);
+}
+
+void __ath11k_mac_scan_finish(struct ath11k *ar)
+{
+ lockdep_assert_held(&ar->data_lock);
+
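+ /* RUNNING and ABORTING notify an expired remain-on-channel first,
+ * then fall through to the same cleanup that STARTING uses.
+ */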
+ switch (ar->scan.state) {
+ case ATH11K_SCAN_IDLE:
+ break;
+ case ATH11K_SCAN_RUNNING:
+ case ATH11K_SCAN_ABORTING:
+ if (ar->scan.is_roc && ar->scan.roc_notify)
+ ieee80211_remain_on_channel_expired(ar->hw);
+ fallthrough;
+ case ATH11K_SCAN_STARTING:
+ if (!ar->scan.is_roc) {
+ struct cfg80211_scan_info info = {
+ .aborted = ((ar->scan.state ==
+ ATH11K_SCAN_ABORTING) ||
+ (ar->scan.state ==
+ ATH11K_SCAN_STARTING)),
+ };
+
+ ieee80211_scan_completed(ar->hw, &info);
+ }
+
+ ar->scan.state = ATH11K_SCAN_IDLE;
+ ar->scan_channel = NULL;
+ ar->scan.roc_freq = 0;
+ cancel_delayed_work(&ar->scan.timeout);
+ complete_all(&ar->scan.completed);
+ break;
+ }
+}
+
+void ath11k_mac_scan_finish(struct ath11k *ar)
+{
+ spin_lock_bh(&ar->data_lock);
+ __ath11k_mac_scan_finish(ar);
+ spin_unlock_bh(&ar->data_lock);
+}
+
+static int ath11k_scan_stop(struct ath11k *ar)
+{
+ struct scan_cancel_param arg = {
+ .req_type = WLAN_SCAN_CANCEL_SINGLE,
+ .scan_id = ATH11K_SCAN_ID,
+ };
+ int ret;
+
+ lockdep_assert_held(&ar->conf_mutex);
+
+ /* TODO: Fill other STOP Params */
+ arg.pdev_id = ar->pdev->pdev_id;
+
+ ret = ath11k_wmi_send_scan_stop_cmd(ar, &arg);
+ if (ret) {
+ ath11k_warn(ar->ab, "failed to stop wmi scan: %d\n", ret);
+ goto out;
+ }
+
+ ret = wait_for_completion_timeout(&ar->scan.completed, 3 * HZ);
+ if (ret == 0) {
+ ath11k_warn(ar->ab,
+ "failed to receive scan abort completion: timed out\n");
+ ret = -ETIMEDOUT;
+ } else if (ret > 0) {
+ ret = 0;
+ }
+
+out:
+ /* Scan state should be updated upon scan completion but in case
+ * firmware fails to deliver the event (for whatever reason) it is
+ * desired to clean up the scan state anyway. Firmware may have just
+ * dropped the scan completion event delivery due to the transport pipe
+ * overflowing with data, and/or it can recover on its own before the
+ * next scan request is submitted.
+ */
+ spin_lock_bh(&ar->data_lock);
+ if (ar->scan.state != ATH11K_SCAN_IDLE)
+ __ath11k_mac_scan_finish(ar);
+ spin_unlock_bh(&ar->data_lock);
+
+ return ret;
+}
+
+static void ath11k_scan_abort(struct ath11k *ar)
+{
+ int ret;
+
+ lockdep_assert_held(&ar->conf_mutex);
+
+ spin_lock_bh(&ar->data_lock);
+
+ switch (ar->scan.state) {
+ case ATH11K_SCAN_IDLE:
+ /* This can happen if the timeout worker kicked in and requested
+ * an abort while scan completion was being processed.
+ */
+ break;
+ case ATH11K_SCAN_STARTING:
+ case ATH11K_SCAN_ABORTING:
+ ath11k_warn(ar->ab, "refusing scan abortion due to invalid scan state: %d\n",
+ ar->scan.state);
+ break;
+ case ATH11K_SCAN_RUNNING:
+ ar->scan.state = ATH11K_SCAN_ABORTING;
+ spin_unlock_bh(&ar->data_lock);
+
+ ret = ath11k_scan_stop(ar);
+ if (ret)
+ ath11k_warn(ar->ab, "failed to abort scan: %d\n", ret);
+
+ spin_lock_bh(&ar->data_lock);
+ break;
+ }
+
+ spin_unlock_bh(&ar->data_lock);
+}
+
+static void ath11k_scan_timeout_work(struct work_struct *work)
+{
+ struct ath11k *ar = container_of(work, struct ath11k,
+ scan.timeout.work);
+
+ mutex_lock(&ar->conf_mutex);
+ ath11k_scan_abort(ar);
+ mutex_unlock(&ar->conf_mutex);
+}
+
+static int ath11k_start_scan(struct ath11k *ar,
+ struct scan_req_params *arg)
+{
+ int ret;
+ unsigned long timeout = 1 * HZ;
+
+ lockdep_assert_held(&ar->conf_mutex);
+
+ if (ath11k_spectral_get_mode(ar) == ATH11K_SPECTRAL_BACKGROUND)
+ ath11k_spectral_reset_buffer(ar);
+
+ ret = ath11k_wmi_send_scan_start_cmd(ar, arg);
+ if (ret)
+ return ret;
+
+ if (test_bit(WMI_TLV_SERVICE_11D_OFFLOAD, ar->ab->wmi_ab.svc_map)) {
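+ /* With 11D offload the firmware may run a regdomain scan before
+ * reporting scan start, so allow extra time.
+ */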
+ timeout = 5 * HZ;
+
+ if (ar->supports_6ghz)
+ timeout += 5 * HZ;
+ }
+
+ ret = wait_for_completion_timeout(&ar->scan.started, timeout);
+ if (ret == 0) {
+ ret = ath11k_scan_stop(ar);
+ if (ret)
+ ath11k_warn(ar->ab, "failed to stop scan: %d\n", ret);
+
+ return -ETIMEDOUT;
+ }
+
+ /* If we failed to start the scan, return error code at
+ * this point. This is probably due to some issue in the
+ * firmware, but no need to wedge the driver due to that...
+ */
+ spin_lock_bh(&ar->data_lock);
+ if (ar->scan.state == ATH11K_SCAN_IDLE) {
+ spin_unlock_bh(&ar->data_lock);
+ return -EINVAL;
+ }
+ spin_unlock_bh(&ar->data_lock);
+
+ return 0;
+}
+
+static int ath11k_mac_op_hw_scan(struct ieee80211_hw *hw,
+ struct ieee80211_vif *vif,
+ struct ieee80211_scan_request *hw_req)
+{
+ struct ath11k *ar = hw->priv;
+ struct ath11k_vif *arvif = ath11k_vif_to_arvif(vif);
+ struct cfg80211_scan_request *req = &hw_req->req;
+ struct scan_req_params *arg = NULL;
+ int ret = 0;
+ int i;
+ u32 scan_timeout;
+
+ /* Firmware advertising support for triggering the 11D algorithm
+ * on the results of a regular scan expects the driver to send
+ * WMI_11D_SCAN_START_CMDID before sending WMI_START_SCAN_CMDID.
+ * With this feature, a separate 11D scan can be avoided since the
+ * regdomain can be determined from the results of the regular
+ * scan.
+ */
+ if (ar->state_11d == ATH11K_11D_PREPARING &&
+ test_bit(WMI_TLV_SERVICE_SUPPORT_11D_FOR_HOST_SCAN,
+ ar->ab->wmi_ab.svc_map))
+ ath11k_mac_11d_scan_start(ar, arvif->vdev_id);
+
+ mutex_lock(&ar->conf_mutex);
+
+ spin_lock_bh(&ar->data_lock);
+ switch (ar->scan.state) {
+ case ATH11K_SCAN_IDLE:
+ reinit_completion(&ar->scan.started);
+ reinit_completion(&ar->scan.completed);
+ ar->scan.state = ATH11K_SCAN_STARTING;
+ ar->scan.is_roc = false;
+ ar->scan.vdev_id = arvif->vdev_id;
+ ret = 0;
+ break;
+ case ATH11K_SCAN_STARTING:
+ case ATH11K_SCAN_RUNNING:
+ case ATH11K_SCAN_ABORTING:
+ ret = -EBUSY;
+ break;
+ }
+ spin_unlock_bh(&ar->data_lock);
+
+ if (ret)
+ goto exit;
+
+ arg = kzalloc(sizeof(*arg), GFP_KERNEL);
+
+ if (!arg) {
+ ret = -ENOMEM;
+ goto exit;
+ }
+
+ ath11k_wmi_start_scan_init(ar, arg);
+ arg->vdev_id = arvif->vdev_id;
+ arg->scan_id = ATH11K_SCAN_ID;
+
+ if (req->ie_len) {
+ arg->extraie.ptr = kmemdup(req->ie, req->ie_len, GFP_KERNEL);
+ if (!arg->extraie.ptr) {
+ ret = -ENOMEM;
+ goto exit;
+ }
+ arg->extraie.len = req->ie_len;
+ }
+
+ if (req->n_ssids) {
+ arg->num_ssids = req->n_ssids;
+ for (i = 0; i < arg->num_ssids; i++) {
+ arg->ssid[i].length = req->ssids[i].ssid_len;
+ memcpy(&arg->ssid[i].ssid, req->ssids[i].ssid,
+ req->ssids[i].ssid_len);
+ }
+ } else {
+ arg->scan_flags |= WMI_SCAN_FLAG_PASSIVE;
+ }
+
+ if (req->n_channels) {
+ arg->num_chan = req->n_channels;
+ arg->chan_list = kcalloc(arg->num_chan, sizeof(*arg->chan_list),
+ GFP_KERNEL);
+
+ if (!arg->chan_list) {
+ ret = -ENOMEM;
+ goto exit;
+ }
+
+ for (i = 0; i < arg->num_chan; i++) {
+ if (test_bit(WMI_TLV_SERVICE_SCAN_CONFIG_PER_CHANNEL,
+ ar->ab->wmi_ab.svc_map)) {
+ arg->chan_list[i] =
+ u32_encode_bits(req->channels[i]->center_freq,
+ WMI_SCAN_CONFIG_PER_CHANNEL_MASK);
+
+ /* If NL80211_SCAN_FLAG_COLOCATED_6GHZ is set in scan
+ * flags, then scan all PSC channels in 6 GHz band and
+ * those non-PSC channels where RNR IE is found during
+ * the legacy 2.4/5 GHz scan.
+ * If NL80211_SCAN_FLAG_COLOCATED_6GHZ is not set,
+ * then all channels in 6 GHz will be scanned.
+ */
+ if (req->channels[i]->band == NL80211_BAND_6GHZ &&
+ req->flags & NL80211_SCAN_FLAG_COLOCATED_6GHZ &&
+ !cfg80211_channel_is_psc(req->channels[i]))
+ arg->chan_list[i] |=
+ WMI_SCAN_CH_FLAG_SCAN_ONLY_IF_RNR_FOUND;
+ } else {
+ arg->chan_list[i] = req->channels[i]->center_freq;
+ }
+ }
+ }
+
+ if (req->flags & NL80211_SCAN_FLAG_RANDOM_ADDR) {
+ arg->scan_f_add_spoofed_mac_in_probe = 1;
+ ether_addr_copy(arg->mac_addr.addr, req->mac_addr);
+ ether_addr_copy(arg->mac_mask.addr, req->mac_addr_mask);
+ }
+
+ /* if duration is set, default dwell times will be overwritten */
+ if (req->duration) {
+ arg->dwell_time_active = req->duration;
+ arg->dwell_time_active_2g = req->duration;
+ arg->dwell_time_active_6g = req->duration;
+ arg->dwell_time_passive = req->duration;
+ arg->dwell_time_passive_6g = req->duration;
+ arg->burst_duration = req->duration;
+
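+ /* Worst case: rest time between the channel hops plus the
+ * requested dwell and WMI event overhead on every channel,
+ * capped at the firmware's max_scan_time.
+ */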
+ scan_timeout = min_t(u32, arg->max_rest_time *
+ (arg->num_chan - 1) + (req->duration +
+ ATH11K_SCAN_CHANNEL_SWITCH_WMI_EVT_OVERHEAD) *
+ arg->num_chan, arg->max_scan_time);
+ } else {
+ scan_timeout = arg->max_scan_time;
+ }
+
+ /* Add a margin to account for event/command processing */
+ scan_timeout += ATH11K_MAC_SCAN_CMD_EVT_OVERHEAD;
+
+ ret = ath11k_start_scan(ar, arg);
+ if (ret) {
+ ath11k_warn(ar->ab, "failed to start hw scan: %d\n", ret);
+ spin_lock_bh(&ar->data_lock);
+ ar->scan.state = ATH11K_SCAN_IDLE;
+ spin_unlock_bh(&ar->data_lock);
+ }
+
+ ieee80211_queue_delayed_work(ar->hw, &ar->scan.timeout,
+ msecs_to_jiffies(scan_timeout));
+
+exit:
+ if (arg) {
+ kfree(arg->chan_list);
+ kfree(arg->extraie.ptr);
+ kfree(arg);
+ }
+
+ mutex_unlock(&ar->conf_mutex);
+
+ if (ar->state_11d == ATH11K_11D_PREPARING)
+ ath11k_mac_11d_scan_start(ar, arvif->vdev_id);
+
+ return ret;
+}
+
+static void ath11k_mac_op_cancel_hw_scan(struct ieee80211_hw *hw,
+ struct ieee80211_vif *vif)
+{
+ struct ath11k *ar = hw->priv;
+
+ mutex_lock(&ar->conf_mutex);
+ ath11k_scan_abort(ar);
+ mutex_unlock(&ar->conf_mutex);
+
+ cancel_delayed_work_sync(&ar->scan.timeout);
+}
+
+static int ath11k_install_key(struct ath11k_vif *arvif,
+ struct ieee80211_key_conf *key,
+ enum set_key_cmd cmd,
+ const u8 *macaddr, u32 flags)
+{
+ int ret;
+ struct ath11k *ar = arvif->ar;
+ struct wmi_vdev_install_key_arg arg = {
+ .vdev_id = arvif->vdev_id,
+ .key_idx = key->keyidx,
+ .key_len = key->keylen,
+ .key_data = key->key,
+ .key_flags = flags,
+ .macaddr = macaddr,
+ };
+
+ lockdep_assert_held(&arvif->ar->conf_mutex);
+
+ reinit_completion(&ar->install_key_done);
+
+ if (test_bit(ATH11K_FLAG_HW_CRYPTO_DISABLED, &ar->ab->dev_flags))
+ return 0;
+
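+ /* A key delete only needs the cipher cleared; the peer MAC and
+ * key index in arg identify which key to remove.
+ */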
+ if (cmd == DISABLE_KEY) {
+ arg.key_cipher = WMI_CIPHER_NONE;
+ arg.key_data = NULL;
+ goto install;
+ }
+
+ switch (key->cipher) {
+ case WLAN_CIPHER_SUITE_CCMP:
+ arg.key_cipher = WMI_CIPHER_AES_CCM;
+ /* TODO: Re-check if flag is valid */
+ key->flags |= IEEE80211_KEY_FLAG_GENERATE_IV_MGMT;
+ break;
+ case WLAN_CIPHER_SUITE_TKIP:
+ arg.key_cipher = WMI_CIPHER_TKIP;
+ arg.key_txmic_len = 8;
+ arg.key_rxmic_len = 8;
+ break;
+ case WLAN_CIPHER_SUITE_CCMP_256:
+ arg.key_cipher = WMI_CIPHER_AES_CCM;
+ break;
+ case WLAN_CIPHER_SUITE_GCMP:
+ case WLAN_CIPHER_SUITE_GCMP_256:
+ arg.key_cipher = WMI_CIPHER_AES_GCM;
+ break;
+ default:
+ ath11k_warn(ar->ab, "cipher %d is not supported\n", key->cipher);
+ return -EOPNOTSUPP;
+ }
+
+ if (test_bit(ATH11K_FLAG_RAW_MODE, &ar->ab->dev_flags))
+ key->flags |= IEEE80211_KEY_FLAG_GENERATE_IV |
+ IEEE80211_KEY_FLAG_RESERVE_TAILROOM;
+
+install:
+ ret = ath11k_wmi_vdev_install_key(arvif->ar, &arg);
+
+ if (ret)
+ return ret;
+
+ if (!wait_for_completion_timeout(&ar->install_key_done, 1 * HZ))
+ return -ETIMEDOUT;
+
+ return ar->install_key_status ? -EINVAL : 0;
+}
+
+static int ath11k_clear_peer_keys(struct ath11k_vif *arvif,
+ const u8 *addr)
+{
+ struct ath11k *ar = arvif->ar;
+ struct ath11k_base *ab = ar->ab;
+ struct ath11k_peer *peer;
+ int first_errno = 0;
+ int ret;
+ int i;
+ u32 flags = 0;
+
+ lockdep_assert_held(&ar->conf_mutex);
+
+ spin_lock_bh(&ab->base_lock);
+ peer = ath11k_peer_find(ab, arvif->vdev_id, addr);
+ spin_unlock_bh(&ab->base_lock);
+
+ if (!peer)
+ return -ENOENT;
+
+ for (i = 0; i < ARRAY_SIZE(peer->keys); i++) {
+ if (!peer->keys[i])
+ continue;
+
+ /* key flags are not required to delete the key */
+ ret = ath11k_install_key(arvif, peer->keys[i],
+ DISABLE_KEY, addr, flags);
+ if (ret < 0 && first_errno == 0)
+ first_errno = ret;
+
+ if (ret < 0)
+ ath11k_warn(ab, "failed to remove peer key %d: %d\n",
+ i, ret);
+
+ spin_lock_bh(&ab->base_lock);
+ peer->keys[i] = NULL;
+ spin_unlock_bh(&ab->base_lock);
+ }
+
+ return first_errno;
+}
+
+static int ath11k_mac_op_set_key(struct ieee80211_hw *hw, enum set_key_cmd cmd,
+ struct ieee80211_vif *vif, struct ieee80211_sta *sta,
+ struct ieee80211_key_conf *key)
+{
+ struct ath11k *ar = hw->priv;
+ struct ath11k_base *ab = ar->ab;
+ struct ath11k_vif *arvif = ath11k_vif_to_arvif(vif);
+ struct ath11k_peer *peer;
+ struct ath11k_sta *arsta;
+ const u8 *peer_addr;
+ int ret = 0;
+ u32 flags = 0;
+
+ /* BIP needs to be done in software */
+ if (key->cipher == WLAN_CIPHER_SUITE_AES_CMAC ||
+ key->cipher == WLAN_CIPHER_SUITE_BIP_GMAC_128 ||
+ key->cipher == WLAN_CIPHER_SUITE_BIP_GMAC_256 ||
+ key->cipher == WLAN_CIPHER_SUITE_BIP_CMAC_256)
+ return 1;
+
+ if (test_bit(ATH11K_FLAG_HW_CRYPTO_DISABLED, &ar->ab->dev_flags))
+ return 1;
+
+ if (key->keyidx > WMI_MAX_KEY_INDEX)
+ return -ENOSPC;
+
+ mutex_lock(&ar->conf_mutex);
+
+ if (sta)
+ peer_addr = sta->addr;
+ else if (arvif->vdev_type == WMI_VDEV_TYPE_STA)
+ peer_addr = vif->bss_conf.bssid;
+ else
+ peer_addr = vif->addr;
+
+ key->hw_key_idx = key->keyidx;
+
+ /* The peer should not disappear midway (unless the FW goes awry) since
+ * we already hold conf_mutex; we just make sure it's there now.
+ */
+ spin_lock_bh(&ab->base_lock);
+ peer = ath11k_peer_find(ab, arvif->vdev_id, peer_addr);
+
+ /* flush the fragments cache during key (re)install to
+ * ensure all frags in the new frag list belong to the same key.
+ */
+ if (peer && sta && cmd == SET_KEY)
+ ath11k_peer_frags_flush(ar, peer);
+ spin_unlock_bh(&ab->base_lock);
+
+ if (!peer) {
+ if (cmd == SET_KEY) {
+ ath11k_warn(ab, "cannot install key for non-existent peer %pM\n",
+ peer_addr);
+ ret = -EOPNOTSUPP;
+ goto exit;
+ } else {
+ /* if the peer doesn't exist there is no key to disable
+ * anymore
+ */
+ goto exit;
+ }
+ }
+
+ if (key->flags & IEEE80211_KEY_FLAG_PAIRWISE)
+ flags |= WMI_KEY_PAIRWISE;
+ else
+ flags |= WMI_KEY_GROUP;
+
+ ret = ath11k_install_key(arvif, key, cmd, peer_addr, flags);
+ if (ret) {
+ ath11k_warn(ab, "ath11k_install_key failed (%d)\n", ret);
+ goto exit;
+ }
+
+ ret = ath11k_dp_peer_rx_pn_replay_config(arvif, peer_addr, cmd, key);
+ if (ret) {
+ ath11k_warn(ab, "failed to offload PN replay detection %d\n", ret);
+ goto exit;
+ }
+
+ spin_lock_bh(&ab->base_lock);
+ peer = ath11k_peer_find(ab, arvif->vdev_id, peer_addr);
+ if (peer && cmd == SET_KEY) {
+ peer->keys[key->keyidx] = key;
+ if (key->flags & IEEE80211_KEY_FLAG_PAIRWISE) {
+ peer->ucast_keyidx = key->keyidx;
+ peer->sec_type = ath11k_dp_tx_get_encrypt_type(key->cipher);
+ } else {
+ peer->mcast_keyidx = key->keyidx;
+ peer->sec_type_grp = ath11k_dp_tx_get_encrypt_type(key->cipher);
+ }
+ } else if (peer && cmd == DISABLE_KEY) {
+ peer->keys[key->keyidx] = NULL;
+ if (key->flags & IEEE80211_KEY_FLAG_PAIRWISE)
+ peer->ucast_keyidx = 0;
+ else
+ peer->mcast_keyidx = 0;
+ } else if (!peer) {
+ /* impossible unless FW goes crazy */
+ ath11k_warn(ab, "peer %pM disappeared!\n", peer_addr);
+ }
+
+ if (sta) {
+ arsta = (struct ath11k_sta *)sta->drv_priv;
+
+ switch (key->cipher) {
+ case WLAN_CIPHER_SUITE_TKIP:
+ case WLAN_CIPHER_SUITE_CCMP:
+ case WLAN_CIPHER_SUITE_CCMP_256:
+ case WLAN_CIPHER_SUITE_GCMP:
+ case WLAN_CIPHER_SUITE_GCMP_256:
+ if (cmd == SET_KEY)
+ arsta->pn_type = HAL_PN_TYPE_WPA;
+ else
+ arsta->pn_type = HAL_PN_TYPE_NONE;
+ break;
+ default:
+ arsta->pn_type = HAL_PN_TYPE_NONE;
+ break;
+ }
+ }
+
+ spin_unlock_bh(&ab->base_lock);
+
+exit:
+ mutex_unlock(&ar->conf_mutex);
+ return ret;
+}
+
+static int
+ath11k_mac_bitrate_mask_num_ht_rates(struct ath11k *ar,
+ enum nl80211_band band,
+ const struct cfg80211_bitrate_mask *mask)
+{
+ int num_rates = 0;
+ int i;
+
+ for (i = 0; i < ARRAY_SIZE(mask->control[band].ht_mcs); i++)
+ num_rates += hweight8(mask->control[band].ht_mcs[i]);
+
+ return num_rates;
+}
+
+static int
+ath11k_mac_bitrate_mask_num_vht_rates(struct ath11k *ar,
+ enum nl80211_band band,
+ const struct cfg80211_bitrate_mask *mask)
+{
+ int num_rates = 0;
+ int i;
+
+ for (i = 0; i < ARRAY_SIZE(mask->control[band].vht_mcs); i++)
+ num_rates += hweight16(mask->control[band].vht_mcs[i]);
+
+ return num_rates;
+}
+
+static int
+ath11k_mac_bitrate_mask_num_he_rates(struct ath11k *ar,
+ enum nl80211_band band,
+ const struct cfg80211_bitrate_mask *mask)
+{
+ int num_rates = 0;
+ int i;
+
+ for (i = 0; i < ARRAY_SIZE(mask->control[band].he_mcs); i++)
+ num_rates += hweight16(mask->control[band].he_mcs[i]);
+
+ return num_rates;
+}
+
+static int
+ath11k_mac_set_peer_vht_fixed_rate(struct ath11k_vif *arvif,
+ struct ieee80211_sta *sta,
+ const struct cfg80211_bitrate_mask *mask,
+ enum nl80211_band band)
+{
+ struct ath11k *ar = arvif->ar;
+ u8 vht_rate, nss;
+ u32 rate_code;
+ int ret, i;
+
+ lockdep_assert_held(&ar->conf_mutex);
+
+ nss = 0;
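+ /* Callers ensure the mask contains exactly one VHT rate, so a
+ * single NSS index below has a single MCS bit set; find it.
+ */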
+
+ for (i = 0; i < ARRAY_SIZE(mask->control[band].vht_mcs); i++) {
+ if (hweight16(mask->control[band].vht_mcs[i]) == 1) {
+ nss = i + 1;
+ vht_rate = ffs(mask->control[band].vht_mcs[i]) - 1;
+ }
+ }
+
+ if (!nss) {
+ ath11k_warn(ar->ab, "No single VHT Fixed rate found to set for %pM",
+ sta->addr);
+ return -EINVAL;
+ }
+
+ /* Avoid updating invalid nss as fixed rate */
+ if (nss > sta->deflink.rx_nss)
+ return -EINVAL;
+
+ ath11k_dbg(ar->ab, ATH11K_DBG_MAC,
+ "Setting Fixed VHT Rate for peer %pM. Device will not switch to any other selected rates",
+ sta->addr);
+
+ rate_code = ATH11K_HW_RATE_CODE(vht_rate, nss - 1,
+ WMI_RATE_PREAMBLE_VHT);
+ ret = ath11k_wmi_set_peer_param(ar, sta->addr,
+ arvif->vdev_id,
+ WMI_PEER_PARAM_FIXED_RATE,
+ rate_code);
+ if (ret)
+ ath11k_warn(ar->ab,
+ "failed to update STA %pM Fixed Rate %d: %d\n",
+ sta->addr, rate_code, ret);
+
+ return ret;
+}
+
+static int
+ath11k_mac_set_peer_he_fixed_rate(struct ath11k_vif *arvif,
+ struct ieee80211_sta *sta,
+ const struct cfg80211_bitrate_mask *mask,
+ enum nl80211_band band)
+{
+ struct ath11k *ar = arvif->ar;
+ u8 he_rate, nss;
+ u32 rate_code;
+ int ret, i;
+
+ lockdep_assert_held(&ar->conf_mutex);
+
+ nss = 0;
+
+ for (i = 0; i < ARRAY_SIZE(mask->control[band].he_mcs); i++) {
+ if (hweight16(mask->control[band].he_mcs[i]) == 1) {
+ nss = i + 1;
+ he_rate = ffs(mask->control[band].he_mcs[i]) - 1;
+ }
+ }
+
+ if (!nss) {
+ ath11k_warn(ar->ab, "No single he fixed rate found to set for %pM",
+ sta->addr);
+ return -EINVAL;
+ }
+
+ /* Avoid updating invalid nss as fixed rate */
+ if (nss > sta->deflink.rx_nss)
+ return -EINVAL;
+
+ ath11k_dbg(ar->ab, ATH11K_DBG_MAC,
+ "setting fixed he rate for peer %pM, device will not switch to any other selected rates",
+ sta->addr);
+
+ rate_code = ATH11K_HW_RATE_CODE(he_rate, nss - 1,
+ WMI_RATE_PREAMBLE_HE);
+
+ ret = ath11k_wmi_set_peer_param(ar, sta->addr,
+ arvif->vdev_id,
+ WMI_PEER_PARAM_FIXED_RATE,
+ rate_code);
+ if (ret)
+ ath11k_warn(ar->ab,
+ "failed to update sta %pM fixed rate %d: %d\n",
+ sta->addr, rate_code, ret);
+
+ return ret;
+}
+
+static int
+ath11k_mac_set_peer_ht_fixed_rate(struct ath11k_vif *arvif,
+ struct ieee80211_sta *sta,
+ const struct cfg80211_bitrate_mask *mask,
+ enum nl80211_band band)
+{
+ struct ath11k *ar = arvif->ar;
+ u8 ht_rate, nss = 0;
+ u32 rate_code;
+ int ret, i;
+
+ lockdep_assert_held(&ar->conf_mutex);
+
+ for (i = 0; i < ARRAY_SIZE(mask->control[band].ht_mcs); i++) {
+ if (hweight8(mask->control[band].ht_mcs[i]) == 1) {
+ nss = i + 1;
+ ht_rate = ffs(mask->control[band].ht_mcs[i]) - 1;
+ }
+ }
+
+ if (!nss) {
+ ath11k_warn(ar->ab, "No single HT Fixed rate found to set for %pM",
+ sta->addr);
+ return -EINVAL;
+ }
+
+ /* Avoid updating invalid nss as fixed rate */
+ if (nss > sta->deflink.rx_nss)
+ return -EINVAL;
+
+ ath11k_dbg(ar->ab, ATH11K_DBG_MAC,
+ "Setting Fixed HT Rate for peer %pM. Device will not switch to any other selected rates",
+ sta->addr);
+
+ rate_code = ATH11K_HW_RATE_CODE(ht_rate, nss - 1,
+ WMI_RATE_PREAMBLE_HT);
+ ret = ath11k_wmi_set_peer_param(ar, sta->addr,
+ arvif->vdev_id,
+ WMI_PEER_PARAM_FIXED_RATE,
+ rate_code);
+ if (ret)
+ ath11k_warn(ar->ab,
+ "failed to update STA %pM HT Fixed Rate %d: %d\n",
+ sta->addr, rate_code, ret);
+
+ return ret;
+}
+
+static int ath11k_station_assoc(struct ath11k *ar,
+ struct ieee80211_vif *vif,
+ struct ieee80211_sta *sta,
+ bool reassoc)
+{
+ struct ath11k_vif *arvif = ath11k_vif_to_arvif(vif);
+ struct peer_assoc_params peer_arg;
+ int ret = 0;
+ struct cfg80211_chan_def def;
+ enum nl80211_band band;
+ struct cfg80211_bitrate_mask *mask;
+ u8 num_ht_rates, num_vht_rates, num_he_rates;
+
+ lockdep_assert_held(&ar->conf_mutex);
+
+ if (WARN_ON(ath11k_mac_vif_chan(vif, &def)))
+ return -EPERM;
+
+ band = def.chan->band;
+ mask = &arvif->bitrate_mask;
+
+ ath11k_peer_assoc_prepare(ar, vif, sta, &peer_arg, reassoc);
+
+ peer_arg.is_assoc = true;
+ ret = ath11k_wmi_send_peer_assoc_cmd(ar, &peer_arg);
+ if (ret) {
+ ath11k_warn(ar->ab, "failed to run peer assoc for STA %pM vdev %i: %d\n",
+ sta->addr, arvif->vdev_id, ret);
+ return ret;
+ }
+
+ if (!wait_for_completion_timeout(&ar->peer_assoc_done, 1 * HZ)) {
+ ath11k_warn(ar->ab, "failed to get peer assoc conf event for %pM vdev %i\n",
+ sta->addr, arvif->vdev_id);
+ return -ETIMEDOUT;
+ }
+
+ num_vht_rates = ath11k_mac_bitrate_mask_num_vht_rates(ar, band, mask);
+ num_he_rates = ath11k_mac_bitrate_mask_num_he_rates(ar, band, mask);
+ num_ht_rates = ath11k_mac_bitrate_mask_num_ht_rates(ar, band, mask);
+
+ /* If a single VHT/HE rate is configured (by set_bitrate_mask()),
+ * peer_assoc will disable VHT/HE; it is then re-enabled here via a
+ * peer-specific fixed rate param.
+ * Note that all other rates and NSS will be disabled for this peer.
+ */
+ if (sta->deflink.vht_cap.vht_supported && num_vht_rates == 1) {
+ ret = ath11k_mac_set_peer_vht_fixed_rate(arvif, sta, mask,
+ band);
+ if (ret)
+ return ret;
+ } else if (sta->deflink.he_cap.has_he && num_he_rates == 1) {
+ ret = ath11k_mac_set_peer_he_fixed_rate(arvif, sta, mask,
+ band);
+ if (ret)
+ return ret;
+ } else if (sta->deflink.ht_cap.ht_supported && num_ht_rates == 1) {
+ ret = ath11k_mac_set_peer_ht_fixed_rate(arvif, sta, mask,
+ band);
+ if (ret)
+ return ret;
+ }
+
+ /* Re-assoc is run only to update the supported rates for the given
+ * station. It doesn't make much sense to reconfigure the peer
+ * completely.
+ */
+ if (reassoc)
+ return 0;
+
+ ret = ath11k_setup_peer_smps(ar, arvif, sta->addr,
+ &sta->deflink.ht_cap,
+ le16_to_cpu(sta->deflink.he_6ghz_capa.capa));
+ if (ret) {
+ ath11k_warn(ar->ab, "failed to setup peer SMPS for vdev %d: %d\n",
+ arvif->vdev_id, ret);
+ return ret;
+ }
+
+ if (!sta->wme) {
+ arvif->num_legacy_stations++;
+ ret = ath11k_recalc_rtscts_prot(arvif);
+ if (ret)
+ return ret;
+ }
+
+ if (sta->wme && sta->uapsd_queues) {
+ ret = ath11k_peer_assoc_qos_ap(ar, arvif, sta);
+ if (ret) {
+ ath11k_warn(ar->ab, "failed to set qos params for STA %pM for vdev %i: %d\n",
+ sta->addr, arvif->vdev_id, ret);
+ return ret;
+ }
+ }
+
+ return 0;
+}
+
+static int ath11k_station_disassoc(struct ath11k *ar,
+ struct ieee80211_vif *vif,
+ struct ieee80211_sta *sta)
+{
+ struct ath11k_vif *arvif = ath11k_vif_to_arvif(vif);
+ int ret = 0;
+
+ lockdep_assert_held(&ar->conf_mutex);
+
+ if (!sta->wme) {
+ arvif->num_legacy_stations--;
+ ret = ath11k_recalc_rtscts_prot(arvif);
+ if (ret)
+ return ret;
+ }
+
+ ret = ath11k_clear_peer_keys(arvif, sta->addr);
+ if (ret) {
+ ath11k_warn(ar->ab, "failed to clear all peer keys for vdev %i: %d\n",
+ arvif->vdev_id, ret);
+ return ret;
+ }
+ return 0;
+}
+
+static void ath11k_sta_rc_update_wk(struct work_struct *wk)
+{
+ struct ath11k *ar;
+ struct ath11k_vif *arvif;
+ struct ath11k_sta *arsta;
+ struct ieee80211_sta *sta;
+ struct cfg80211_chan_def def;
+ enum nl80211_band band;
+ const u8 *ht_mcs_mask;
+ const u16 *vht_mcs_mask;
+ const u16 *he_mcs_mask;
+ u32 changed, bw, nss, smps, bw_prev;
+ int err, num_ht_rates, num_vht_rates, num_he_rates;
+ const struct cfg80211_bitrate_mask *mask;
+ struct peer_assoc_params peer_arg;
+ enum wmi_phy_mode peer_phymode;
+
+ arsta = container_of(wk, struct ath11k_sta, update_wk);
+ sta = container_of((void *)arsta, struct ieee80211_sta, drv_priv);
+ arvif = arsta->arvif;
+ ar = arvif->ar;
+
+ if (WARN_ON(ath11k_mac_vif_chan(arvif->vif, &def)))
+ return;
+
+ band = def.chan->band;
+ ht_mcs_mask = arvif->bitrate_mask.control[band].ht_mcs;
+ vht_mcs_mask = arvif->bitrate_mask.control[band].vht_mcs;
+ he_mcs_mask = arvif->bitrate_mask.control[band].he_mcs;
+
+ spin_lock_bh(&ar->data_lock);
+
+ changed = arsta->changed;
+ arsta->changed = 0;
+
+ bw = arsta->bw;
+ bw_prev = arsta->bw_prev;
+ nss = arsta->nss;
+ smps = arsta->smps;
+
+ spin_unlock_bh(&ar->data_lock);
+
+ mutex_lock(&ar->conf_mutex);
+
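+ /* Clamp the reported NSS to at least 1 and at most the largest
+ * NSS allowed by the HT/VHT/HE MCS masks configured for this vif.
+ */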
+ nss = max_t(u32, 1, nss);
+ nss = min(nss, max(max(ath11k_mac_max_ht_nss(ht_mcs_mask),
+ ath11k_mac_max_vht_nss(vht_mcs_mask)),
+ ath11k_mac_max_he_nss(he_mcs_mask)));
+
+ if (changed & IEEE80211_RC_BW_CHANGED) {
+ /* Get the peer phymode */
+ ath11k_peer_assoc_h_phymode(ar, arvif->vif, sta, &peer_arg);
+ peer_phymode = peer_arg.peer_phymode;
+
+ ath11k_dbg(ar->ab, ATH11K_DBG_MAC, "update sta %pM peer bw %d phymode %d\n",
+ sta->addr, bw, peer_phymode);
+
+ if (bw > bw_prev) {
+ /* BW is upgraded. In this case we send WMI_PEER_PHYMODE
+ * followed by WMI_PEER_CHWIDTH
+ */
+ ath11k_dbg(ar->ab, ATH11K_DBG_MAC, "BW upgrade for sta %pM new BW %d, old BW %d\n",
+ sta->addr, bw, bw_prev);
+
+ err = ath11k_wmi_set_peer_param(ar, sta->addr, arvif->vdev_id,
+ WMI_PEER_PHYMODE, peer_phymode);
+
+ if (err) {
+ ath11k_warn(ar->ab, "failed to update STA %pM peer phymode %d: %d\n",
+ sta->addr, peer_phymode, err);
+ goto err_rc_bw_changed;
+ }
+
+ err = ath11k_wmi_set_peer_param(ar, sta->addr, arvif->vdev_id,
+ WMI_PEER_CHWIDTH, bw);
+
+ if (err)
+ ath11k_warn(ar->ab, "failed to update STA %pM peer bw %d: %d\n",
+ sta->addr, bw, err);
+ } else {
+ /* BW is downgraded. In this case we send WMI_PEER_CHWIDTH
+ * followed by WMI_PEER_PHYMODE
+ */
+ ath11k_dbg(ar->ab, ATH11K_DBG_MAC, "BW downgrade for sta %pM new BW %d,old BW %d\n",
+ sta->addr, bw, bw_prev);
+
+ err = ath11k_wmi_set_peer_param(ar, sta->addr, arvif->vdev_id,
+ WMI_PEER_CHWIDTH, bw);
+
+ if (err) {
+ ath11k_warn(ar->ab, "failed to update STA %pM peer bw %d: %d\n",
+ sta->addr, bw, err);
+ goto err_rc_bw_changed;
+ }
+
+ err = ath11k_wmi_set_peer_param(ar, sta->addr, arvif->vdev_id,
+ WMI_PEER_PHYMODE, peer_phymode);
+
+ if (err)
+ ath11k_warn(ar->ab, "failed to update STA %pM peer phymode %d: %d\n",
+ sta->addr, peer_phymode, err);
+ }
+ }
+
+ if (changed & IEEE80211_RC_NSS_CHANGED) {
+ ath11k_dbg(ar->ab, ATH11K_DBG_MAC, "update sta %pM nss %d\n",
+ sta->addr, nss);
+
+ err = ath11k_wmi_set_peer_param(ar, sta->addr, arvif->vdev_id,
+ WMI_PEER_NSS, nss);
+ if (err)
+ ath11k_warn(ar->ab, "failed to update STA %pM nss %d: %d\n",
+ sta->addr, nss, err);
+ }
+
+ if (changed & IEEE80211_RC_SMPS_CHANGED) {
+ ath11k_dbg(ar->ab, ATH11K_DBG_MAC, "update sta %pM smps %d\n",
+ sta->addr, smps);
+
+ err = ath11k_wmi_set_peer_param(ar, sta->addr, arvif->vdev_id,
+ WMI_PEER_MIMO_PS_STATE, smps);
+ if (err)
+ ath11k_warn(ar->ab, "failed to update STA %pM smps %d: %d\n",
+ sta->addr, smps, err);
+ }
+
+ if (changed & IEEE80211_RC_SUPP_RATES_CHANGED) {
+ mask = &arvif->bitrate_mask;
+ num_ht_rates = ath11k_mac_bitrate_mask_num_ht_rates(ar, band,
+ mask);
+ num_vht_rates = ath11k_mac_bitrate_mask_num_vht_rates(ar, band,
+ mask);
+ num_he_rates = ath11k_mac_bitrate_mask_num_he_rates(ar, band,
+ mask);
+
+ /* Peer_assoc_prepare will reject vht rates in
+ * bitrate_mask if they are not available in range format and
+ * sets vht tx_rateset as unsupported. So setting multiple VHT
+ * MCS values (e.g. MCS 4, 5, 6) per peer is not supported here.
+ * However, a single rate in the VHT mask can be set as a
+ * per-peer fixed rate. But even if any HT rates are configured
+ * in the bitrate mask, the device will not switch to those
+ * rates when a per-peer fixed rate is set.
+ * TODO: Check RATEMASK_CMDID to support auto rate selection
+ * across HT/VHT and for multiple VHT MCS support.
+ */
+ if (sta->deflink.vht_cap.vht_supported && num_vht_rates == 1) {
+ ath11k_mac_set_peer_vht_fixed_rate(arvif, sta, mask,
+ band);
+ } else if (sta->deflink.he_cap.has_he && num_he_rates == 1) {
+ ath11k_mac_set_peer_he_fixed_rate(arvif, sta, mask,
+ band);
+ } else if (sta->deflink.ht_cap.ht_supported && num_ht_rates == 1) {
+ ath11k_mac_set_peer_ht_fixed_rate(arvif, sta, mask,
+ band);
+ } else {
+ /* If the peer is non-VHT/HE or no fixed VHT/HE rate
+ * is provided in the new bitrate mask, we set the
+ * other rates using the peer_assoc command. Also clear
+ * the peer fixed rate settings, as they have higher
+ * priority than peer_assoc.
+ */
+ err = ath11k_wmi_set_peer_param(ar, sta->addr,
+ arvif->vdev_id,
+ WMI_PEER_PARAM_FIXED_RATE,
+ WMI_FIXED_RATE_NONE);
+ if (err)
+ ath11k_warn(ar->ab,
+ "failed to disable peer fixed rate for sta %pM: %d\n",
+ sta->addr, err);
+
+ ath11k_peer_assoc_prepare(ar, arvif->vif, sta,
+ &peer_arg, true);
+
+ peer_arg.is_assoc = false;
+ err = ath11k_wmi_send_peer_assoc_cmd(ar, &peer_arg);
+ if (err)
+ ath11k_warn(ar->ab, "failed to run peer assoc for STA %pM vdev %i: %d\n",
+ sta->addr, arvif->vdev_id, err);
+
+ if (!wait_for_completion_timeout(&ar->peer_assoc_done, 1 * HZ))
+ ath11k_warn(ar->ab, "failed to get peer assoc conf event for %pM vdev %i\n",
+ sta->addr, arvif->vdev_id);
+ }
+ }
+
+err_rc_bw_changed:
+ mutex_unlock(&ar->conf_mutex);
+}
+
+static void ath11k_sta_set_4addr_wk(struct work_struct *wk)
+{
+ struct ath11k *ar;
+ struct ath11k_vif *arvif;
+ struct ath11k_sta *arsta;
+ struct ieee80211_sta *sta;
+ int ret = 0;
+
+ arsta = container_of(wk, struct ath11k_sta, set_4addr_wk);
+ sta = container_of((void *)arsta, struct ieee80211_sta, drv_priv);
+ arvif = arsta->arvif;
+ ar = arvif->ar;
+
+ ath11k_dbg(ar->ab, ATH11K_DBG_MAC,
+ "setting USE_4ADDR for peer %pM\n", sta->addr);
+
+ ret = ath11k_wmi_set_peer_param(ar, sta->addr,
+ arvif->vdev_id,
+ WMI_PEER_USE_4ADDR, 1);
+
+ if (ret)
+ ath11k_warn(ar->ab, "failed to set peer %pM 4addr capability: %d\n",
+ sta->addr, ret);
+}
+
+static int ath11k_mac_inc_num_stations(struct ath11k_vif *arvif,
+ struct ieee80211_sta *sta)
+{
+ struct ath11k *ar = arvif->ar;
+
+ lockdep_assert_held(&ar->conf_mutex);
+
+ if (arvif->vdev_type == WMI_VDEV_TYPE_STA && !sta->tdls)
+ return 0;
+
+ if (ar->num_stations >= ar->max_num_stations)
+ return -ENOBUFS;
+
+ ar->num_stations++;
+
+ return 0;
+}
+
+static void ath11k_mac_dec_num_stations(struct ath11k_vif *arvif,
+ struct ieee80211_sta *sta)
+{
+ struct ath11k *ar = arvif->ar;
+
+ lockdep_assert_held(&ar->conf_mutex);
+
+ if (arvif->vdev_type == WMI_VDEV_TYPE_STA && !sta->tdls)
+ return;
+
+ ar->num_stations--;
+}
+
+static int ath11k_mac_station_add(struct ath11k *ar,
+ struct ieee80211_vif *vif,
+ struct ieee80211_sta *sta)
+{
+ struct ath11k_base *ab = ar->ab;
+ struct ath11k_vif *arvif = ath11k_vif_to_arvif(vif);
+ struct ath11k_sta *arsta = (struct ath11k_sta *)sta->drv_priv;
+ struct peer_create_params peer_param;
+ int ret;
+
+ lockdep_assert_held(&ar->conf_mutex);
+
+ ret = ath11k_mac_inc_num_stations(arvif, sta);
+ if (ret) {
+ ath11k_warn(ab, "refusing to associate station: too many connected already (%d)\n",
+ ar->max_num_stations);
+ goto exit;
+ }
+
+ arsta->rx_stats = kzalloc(sizeof(*arsta->rx_stats), GFP_KERNEL);
+ if (!arsta->rx_stats) {
+ ret = -ENOMEM;
+ goto dec_num_station;
+ }
+
+ peer_param.vdev_id = arvif->vdev_id;
+ peer_param.peer_addr = sta->addr;
+ peer_param.peer_type = WMI_PEER_TYPE_DEFAULT;
+
+ ret = ath11k_peer_create(ar, arvif, sta, &peer_param);
+ if (ret) {
+ ath11k_warn(ab, "Failed to add peer: %pM for VDEV: %d\n",
+ sta->addr, arvif->vdev_id);
+ goto free_rx_stats;
+ }
+
+ ath11k_dbg(ab, ATH11K_DBG_MAC, "Added peer: %pM for VDEV: %d\n",
+ sta->addr, arvif->vdev_id);
+
+ if (ath11k_debugfs_is_extd_tx_stats_enabled(ar)) {
+ arsta->tx_stats = kzalloc(sizeof(*arsta->tx_stats), GFP_KERNEL);
+ if (!arsta->tx_stats) {
+ ret = -ENOMEM;
+ goto free_peer;
+ }
+ }
+
+ if (ieee80211_vif_is_mesh(vif)) {
+ ath11k_dbg(ab, ATH11K_DBG_MAC,
+ "setting USE_4ADDR for mesh STA %pM\n", sta->addr);
+ ret = ath11k_wmi_set_peer_param(ar, sta->addr,
+ arvif->vdev_id,
+ WMI_PEER_USE_4ADDR, 1);
+ if (ret) {
+ ath11k_warn(ab, "failed to set mesh STA %pM 4addr capability: %d\n",
+ sta->addr, ret);
+ goto free_tx_stats;
+ }
+ }
+
+ ret = ath11k_dp_peer_setup(ar, arvif->vdev_id, sta->addr);
+ if (ret) {
+ ath11k_warn(ab, "failed to setup dp for peer %pM on vdev %i (%d)\n",
+ sta->addr, arvif->vdev_id, ret);
+ goto free_tx_stats;
+ }
+
+ if (ab->hw_params.vdev_start_delay &&
+ !arvif->is_started &&
+ arvif->vdev_type != WMI_VDEV_TYPE_AP) {
+ ret = ath11k_start_vdev_delay(ar->hw, vif);
+ if (ret) {
+ ath11k_warn(ab, "failed to delay vdev start: %d\n", ret);
+ goto free_tx_stats;
+ }
+ }
+
+ ewma_avg_rssi_init(&arsta->avg_rssi);
+ return 0;
+
+free_tx_stats:
+ kfree(arsta->tx_stats);
+ arsta->tx_stats = NULL;
+free_peer:
+ ath11k_peer_delete(ar, arvif->vdev_id, sta->addr);
+free_rx_stats:
+ kfree(arsta->rx_stats);
+ arsta->rx_stats = NULL;
+dec_num_station:
+ ath11k_mac_dec_num_stations(arvif, sta);
+exit:
+ return ret;
+}
+
+static u32 ath11k_mac_ieee80211_sta_bw_to_wmi(struct ath11k *ar,
+ struct ieee80211_sta *sta)
+{
+ u32 bw = WMI_PEER_CHWIDTH_20MHZ;
+
+ switch (sta->deflink.bandwidth) {
+ case IEEE80211_STA_RX_BW_20:
+ bw = WMI_PEER_CHWIDTH_20MHZ;
+ break;
+ case IEEE80211_STA_RX_BW_40:
+ bw = WMI_PEER_CHWIDTH_40MHZ;
+ break;
+ case IEEE80211_STA_RX_BW_80:
+ bw = WMI_PEER_CHWIDTH_80MHZ;
+ break;
+ case IEEE80211_STA_RX_BW_160:
+ bw = WMI_PEER_CHWIDTH_160MHZ;
+ break;
+ default:
+ ath11k_warn(ar->ab, "Invalid bandwidth %d for %pM\n",
+ sta->deflink.bandwidth, sta->addr);
+ bw = WMI_PEER_CHWIDTH_20MHZ;
+ break;
+ }
+
+ return bw;
+}
+
+static int ath11k_mac_op_sta_state(struct ieee80211_hw *hw,
+ struct ieee80211_vif *vif,
+ struct ieee80211_sta *sta,
+ enum ieee80211_sta_state old_state,
+ enum ieee80211_sta_state new_state)
+{
+ struct ath11k *ar = hw->priv;
+ struct ath11k_vif *arvif = ath11k_vif_to_arvif(vif);
+ struct ath11k_sta *arsta = (struct ath11k_sta *)sta->drv_priv;
+ struct ath11k_peer *peer;
+ int ret = 0;
+
+ /* cancel must be done outside the mutex to avoid deadlock */
+ if ((old_state == IEEE80211_STA_NONE &&
+ new_state == IEEE80211_STA_NOTEXIST)) {
+ cancel_work_sync(&arsta->update_wk);
+ cancel_work_sync(&arsta->set_4addr_wk);
+ }
+
+ mutex_lock(&ar->conf_mutex);
+
+ if (old_state == IEEE80211_STA_NOTEXIST &&
+ new_state == IEEE80211_STA_NONE) {
+ memset(arsta, 0, sizeof(*arsta));
+ arsta->arvif = arvif;
+ arsta->peer_ps_state = WMI_PEER_PS_STATE_DISABLED;
+ INIT_WORK(&arsta->update_wk, ath11k_sta_rc_update_wk);
+ INIT_WORK(&arsta->set_4addr_wk, ath11k_sta_set_4addr_wk);
+
+ ret = ath11k_mac_station_add(ar, vif, sta);
+ if (ret)
+ ath11k_warn(ar->ab, "Failed to add station: %pM for VDEV: %d\n",
+ sta->addr, arvif->vdev_id);
+ } else if ((old_state == IEEE80211_STA_NONE &&
+ new_state == IEEE80211_STA_NOTEXIST)) {
+ bool skip_peer_delete = ar->ab->hw_params.vdev_start_delay &&
+ vif->type == NL80211_IFTYPE_STATION;
+
+ ath11k_dp_peer_cleanup(ar, arvif->vdev_id, sta->addr);
+
+ if (!skip_peer_delete) {
+ ret = ath11k_peer_delete(ar, arvif->vdev_id, sta->addr);
+ if (ret)
+ ath11k_warn(ar->ab,
+ "Failed to delete peer: %pM for VDEV: %d\n",
+ sta->addr, arvif->vdev_id);
+ else
+ ath11k_dbg(ar->ab,
+ ATH11K_DBG_MAC,
+ "Removed peer: %pM for VDEV: %d\n",
+ sta->addr, arvif->vdev_id);
+ }
+
+ ath11k_mac_dec_num_stations(arvif, sta);
+ mutex_lock(&ar->ab->tbl_mtx_lock);
+ spin_lock_bh(&ar->ab->base_lock);
+ peer = ath11k_peer_find(ar->ab, arvif->vdev_id, sta->addr);
+ if (skip_peer_delete && peer) {
+ peer->sta = NULL;
+ } else if (peer && peer->sta == sta) {
+ ath11k_warn(ar->ab, "Found peer entry %pM n vdev %i after it was supposedly removed\n",
+ vif->addr, arvif->vdev_id);
+ ath11k_peer_rhash_delete(ar->ab, peer);
+ peer->sta = NULL;
+ list_del(&peer->list);
+ kfree(peer);
+ ar->num_peers--;
+ }
+ spin_unlock_bh(&ar->ab->base_lock);
+ mutex_unlock(&ar->ab->tbl_mtx_lock);
+
+ kfree(arsta->tx_stats);
+ arsta->tx_stats = NULL;
+
+ kfree(arsta->rx_stats);
+ arsta->rx_stats = NULL;
+ } else if (old_state == IEEE80211_STA_AUTH &&
+ new_state == IEEE80211_STA_ASSOC &&
+ (vif->type == NL80211_IFTYPE_AP ||
+ vif->type == NL80211_IFTYPE_MESH_POINT ||
+ vif->type == NL80211_IFTYPE_ADHOC)) {
+ ret = ath11k_station_assoc(ar, vif, sta, false);
+ if (ret)
+ ath11k_warn(ar->ab, "Failed to associate station: %pM\n",
+ sta->addr);
+
+ spin_lock_bh(&ar->data_lock);
+ /* Set arsta bw and prev bw */
+ arsta->bw = ath11k_mac_ieee80211_sta_bw_to_wmi(ar, sta);
+ arsta->bw_prev = arsta->bw;
+ spin_unlock_bh(&ar->data_lock);
+ } else if (old_state == IEEE80211_STA_ASSOC &&
+ new_state == IEEE80211_STA_AUTHORIZED) {
+ spin_lock_bh(&ar->ab->base_lock);
+
+ peer = ath11k_peer_find(ar->ab, arvif->vdev_id, sta->addr);
+ if (peer)
+ peer->is_authorized = true;
+
+ spin_unlock_bh(&ar->ab->base_lock);
+
+ if (vif->type == NL80211_IFTYPE_STATION && arvif->is_up) {
+ ret = ath11k_wmi_set_peer_param(ar, sta->addr,
+ arvif->vdev_id,
+ WMI_PEER_AUTHORIZE,
+ 1);
+ if (ret)
+ ath11k_warn(ar->ab, "Unable to authorize peer %pM vdev %d: %d\n",
+ sta->addr, arvif->vdev_id, ret);
+ }
+ } else if (old_state == IEEE80211_STA_AUTHORIZED &&
+ new_state == IEEE80211_STA_ASSOC) {
+ spin_lock_bh(&ar->ab->base_lock);
+
+ peer = ath11k_peer_find(ar->ab, arvif->vdev_id, sta->addr);
+ if (peer)
+ peer->is_authorized = false;
+
+ spin_unlock_bh(&ar->ab->base_lock);
+ } else if (old_state == IEEE80211_STA_ASSOC &&
+ new_state == IEEE80211_STA_AUTH &&
+ (vif->type == NL80211_IFTYPE_AP ||
+ vif->type == NL80211_IFTYPE_MESH_POINT ||
+ vif->type == NL80211_IFTYPE_ADHOC)) {
+ ret = ath11k_station_disassoc(ar, vif, sta);
+ if (ret)
+ ath11k_warn(ar->ab, "Failed to disassociate station: %pM\n",
+ sta->addr);
+ }
+
+ mutex_unlock(&ar->conf_mutex);
+ return ret;
+}
+
+static int ath11k_mac_op_sta_set_txpwr(struct ieee80211_hw *hw,
+ struct ieee80211_vif *vif,
+ struct ieee80211_sta *sta)
+{
+ struct ath11k *ar = hw->priv;
+ struct ath11k_vif *arvif = ath11k_vif_to_arvif(vif);
+ int ret = 0;
+ s16 txpwr;
+
+ if (sta->deflink.txpwr.type == NL80211_TX_POWER_AUTOMATIC) {
+ txpwr = 0;
+ } else {
+ txpwr = sta->deflink.txpwr.power;
+ if (!txpwr)
+ return -EINVAL;
+ }
+
+ if (txpwr > ATH11K_TX_POWER_MAX_VAL || txpwr < ATH11K_TX_POWER_MIN_VAL)
+ return -EINVAL;
+
+ mutex_lock(&ar->conf_mutex);
+
+ ret = ath11k_wmi_set_peer_param(ar, sta->addr, arvif->vdev_id,
+ WMI_PEER_USE_FIXED_PWR, txpwr);
+ if (ret) {
+ ath11k_warn(ar->ab, "failed to set tx power for station ret: %d\n",
+ ret);
+ goto out;
+ }
+
+out:
+ mutex_unlock(&ar->conf_mutex);
+ return ret;
+}
+
+static void ath11k_mac_op_sta_set_4addr(struct ieee80211_hw *hw,
+ struct ieee80211_vif *vif,
+ struct ieee80211_sta *sta, bool enabled)
+{
+ struct ath11k *ar = hw->priv;
+ struct ath11k_sta *arsta = (struct ath11k_sta *)sta->drv_priv;
+
+ if (enabled && !arsta->use_4addr_set) {
+ ieee80211_queue_work(ar->hw, &arsta->set_4addr_wk);
+ arsta->use_4addr_set = true;
+ }
+}
+
+static void ath11k_mac_op_sta_rc_update(struct ieee80211_hw *hw,
+ struct ieee80211_vif *vif,
+ struct ieee80211_sta *sta,
+ u32 changed)
+{
+ struct ath11k *ar = hw->priv;
+ struct ath11k_sta *arsta = (struct ath11k_sta *)sta->drv_priv;
+ struct ath11k_vif *arvif = ath11k_vif_to_arvif(vif);
+ struct ath11k_peer *peer;
+ u32 bw, smps;
+
+ spin_lock_bh(&ar->ab->base_lock);
+
+ peer = ath11k_peer_find(ar->ab, arvif->vdev_id, sta->addr);
+ if (!peer) {
+ spin_unlock_bh(&ar->ab->base_lock);
+ ath11k_warn(ar->ab, "mac sta rc update failed to find peer %pM on vdev %i\n",
+ sta->addr, arvif->vdev_id);
+ return;
+ }
+
+ spin_unlock_bh(&ar->ab->base_lock);
+
+ ath11k_dbg(ar->ab, ATH11K_DBG_MAC,
+ "sta rc update for %pM changed %08x bw %d nss %d smps %d\n",
+ sta->addr, changed, sta->deflink.bandwidth,
+ sta->deflink.rx_nss,
+ sta->deflink.smps_mode);
+
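+ /* This callback must be atomic, so stash the updated parameters
+ * under data_lock and defer the WMI commands to the worker.
+ */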
+ spin_lock_bh(&ar->data_lock);
+
+ if (changed & IEEE80211_RC_BW_CHANGED) {
+ bw = ath11k_mac_ieee80211_sta_bw_to_wmi(ar, sta);
+ arsta->bw_prev = arsta->bw;
+ arsta->bw = bw;
+ }
+
+ if (changed & IEEE80211_RC_NSS_CHANGED)
+ arsta->nss = sta->deflink.rx_nss;
+
+ if (changed & IEEE80211_RC_SMPS_CHANGED) {
+ smps = WMI_PEER_SMPS_PS_NONE;
+
+ switch (sta->deflink.smps_mode) {
+ case IEEE80211_SMPS_AUTOMATIC:
+ case IEEE80211_SMPS_OFF:
+ smps = WMI_PEER_SMPS_PS_NONE;
+ break;
+ case IEEE80211_SMPS_STATIC:
+ smps = WMI_PEER_SMPS_STATIC;
+ break;
+ case IEEE80211_SMPS_DYNAMIC:
+ smps = WMI_PEER_SMPS_DYNAMIC;
+ break;
+ default:
+ ath11k_warn(ar->ab, "Invalid smps %d in sta rc update for %pM\n",
+ sta->deflink.smps_mode, sta->addr);
+ smps = WMI_PEER_SMPS_PS_NONE;
+ break;
+ }
+
+ arsta->smps = smps;
+ }
+
+ arsta->changed |= changed;
+
+ spin_unlock_bh(&ar->data_lock);
+
+ ieee80211_queue_work(hw, &arsta->update_wk);
+}
+
+static int ath11k_conf_tx_uapsd(struct ath11k *ar, struct ieee80211_vif *vif,
+ u16 ac, bool enable)
+{
+ struct ath11k_vif *arvif = ath11k_vif_to_arvif(vif);
+ u32 value = 0;
+ int ret = 0;
+
+ if (arvif->vdev_type != WMI_VDEV_TYPE_STA)
+ return 0;
+
+ switch (ac) {
+ case IEEE80211_AC_VO:
+ value = WMI_STA_PS_UAPSD_AC3_DELIVERY_EN |
+ WMI_STA_PS_UAPSD_AC3_TRIGGER_EN;
+ break;
+ case IEEE80211_AC_VI:
+ value = WMI_STA_PS_UAPSD_AC2_DELIVERY_EN |
+ WMI_STA_PS_UAPSD_AC2_TRIGGER_EN;
+ break;
+ case IEEE80211_AC_BE:
+ value = WMI_STA_PS_UAPSD_AC1_DELIVERY_EN |
+ WMI_STA_PS_UAPSD_AC1_TRIGGER_EN;
+ break;
+ case IEEE80211_AC_BK:
+ value = WMI_STA_PS_UAPSD_AC0_DELIVERY_EN |
+ WMI_STA_PS_UAPSD_AC0_TRIGGER_EN;
+ break;
+ }
+
+ if (enable)
+ arvif->u.sta.uapsd |= value;
+ else
+ arvif->u.sta.uapsd &= ~value;
+
+ ret = ath11k_wmi_set_sta_ps_param(ar, arvif->vdev_id,
+ WMI_STA_PS_PARAM_UAPSD,
+ arvif->u.sta.uapsd);
+ if (ret) {
+ ath11k_warn(ar->ab, "could not set uapsd params %d\n", ret);
+ goto exit;
+ }
+
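+ /* Use the U-APSD poll wake policy whenever any AC has U-APSD
+ * enabled, otherwise fall back to the default wake policy.
+ */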
+ if (arvif->u.sta.uapsd)
+ value = WMI_STA_PS_RX_WAKE_POLICY_POLL_UAPSD;
+ else
+ value = WMI_STA_PS_RX_WAKE_POLICY_WAKE;
+
+ ret = ath11k_wmi_set_sta_ps_param(ar, arvif->vdev_id,
+ WMI_STA_PS_PARAM_RX_WAKE_POLICY,
+ value);
+ if (ret)
+ ath11k_warn(ar->ab, "could not set rx wake param %d\n", ret);
+
+exit:
+ return ret;
+}
+
+static int ath11k_mac_op_conf_tx(struct ieee80211_hw *hw,
+ struct ieee80211_vif *vif,
+ unsigned int link_id, u16 ac,
+ const struct ieee80211_tx_queue_params *params)
+{
+ struct ath11k *ar = hw->priv;
+ struct ath11k_vif *arvif = ath11k_vif_to_arvif(vif);
+ struct wmi_wmm_params_arg *p = NULL;
+ int ret;
+
+ mutex_lock(&ar->conf_mutex);
+
+ switch (ac) {
+ case IEEE80211_AC_VO:
+ p = &arvif->wmm_params.ac_vo;
+ break;
+ case IEEE80211_AC_VI:
+ p = &arvif->wmm_params.ac_vi;
+ break;
+ case IEEE80211_AC_BE:
+ p = &arvif->wmm_params.ac_be;
+ break;
+ case IEEE80211_AC_BK:
+ p = &arvif->wmm_params.ac_bk;
+ break;
+ }
+
+ if (WARN_ON(!p)) {
+ ret = -EINVAL;
+ goto exit;
+ }
+
+ p->cwmin = params->cw_min;
+ p->cwmax = params->cw_max;
+ p->aifs = params->aifs;
+ p->txop = params->txop;
+
+ ret = ath11k_wmi_send_wmm_update_cmd_tlv(ar, arvif->vdev_id,
+ &arvif->wmm_params);
+ if (ret) {
+ ath11k_warn(ar->ab, "failed to set wmm params: %d\n", ret);
+ goto exit;
+ }
+
+ ret = ath11k_conf_tx_uapsd(ar, vif, ac, params->uapsd);
+
+ if (ret)
+ ath11k_warn(ar->ab, "failed to set sta uapsd: %d\n", ret);
+
+exit:
+ mutex_unlock(&ar->conf_mutex);
+ return ret;
+}
+
+static struct ieee80211_sta_ht_cap
+ath11k_create_ht_cap(struct ath11k *ar, u32 ar_ht_cap, u32 rate_cap_rx_chainmask)
+{
+ int i;
+ struct ieee80211_sta_ht_cap ht_cap = {0};
+ u32 ar_vht_cap = ar->pdev->cap.vht_cap;
+
+ if (!(ar_ht_cap & WMI_HT_CAP_ENABLED))
+ return ht_cap;
+
+ ht_cap.ht_supported = 1;
+ ht_cap.ampdu_factor = IEEE80211_HT_MAX_AMPDU_64K;
+ ht_cap.ampdu_density = IEEE80211_HT_MPDU_DENSITY_NONE;
+ ht_cap.cap |= IEEE80211_HT_CAP_SUP_WIDTH_20_40;
+ ht_cap.cap |= IEEE80211_HT_CAP_DSSSCCK40;
+ ht_cap.cap |= WLAN_HT_CAP_SM_PS_STATIC << IEEE80211_HT_CAP_SM_PS_SHIFT;
+
+ if (ar_ht_cap & WMI_HT_CAP_HT20_SGI)
+ ht_cap.cap |= IEEE80211_HT_CAP_SGI_20;
+
+ if (ar_ht_cap & WMI_HT_CAP_HT40_SGI)
+ ht_cap.cap |= IEEE80211_HT_CAP_SGI_40;
+
+ if (ar_ht_cap & WMI_HT_CAP_DYNAMIC_SMPS) {
+ u32 smps;
+
+ smps = WLAN_HT_CAP_SM_PS_DYNAMIC;
+ smps <<= IEEE80211_HT_CAP_SM_PS_SHIFT;
+
+ ht_cap.cap |= smps;
+ }
+
+ if (ar_ht_cap & WMI_HT_CAP_TX_STBC)
+ ht_cap.cap |= IEEE80211_HT_CAP_TX_STBC;
+
+ if (ar_ht_cap & WMI_HT_CAP_RX_STBC) {
+ u32 stbc;
+
+ stbc = ar_ht_cap;
+ stbc &= WMI_HT_CAP_RX_STBC;
+ stbc >>= WMI_HT_CAP_RX_STBC_MASK_SHIFT;
+ stbc <<= IEEE80211_HT_CAP_RX_STBC_SHIFT;
+ stbc &= IEEE80211_HT_CAP_RX_STBC;
+
+ ht_cap.cap |= stbc;
+ }
+
+ if (ar_ht_cap & WMI_HT_CAP_RX_LDPC)
+ ht_cap.cap |= IEEE80211_HT_CAP_LDPC_CODING;
+
+ if (ar_ht_cap & WMI_HT_CAP_L_SIG_TXOP_PROT)
+ ht_cap.cap |= IEEE80211_HT_CAP_LSIG_TXOP_PROT;
+
+ if (ar_vht_cap & WMI_VHT_CAP_MAX_MPDU_LEN_MASK)
+ ht_cap.cap |= IEEE80211_HT_CAP_MAX_AMSDU;
+
+ for (i = 0; i < ar->num_rx_chains; i++) {
+ if (rate_cap_rx_chainmask & BIT(i))
+ ht_cap.mcs.rx_mask[i] = 0xFF;
+ }
+
+ ht_cap.mcs.tx_params |= IEEE80211_HT_MCS_TX_DEFINED;
+
+ return ht_cap;
+}
+
+static int ath11k_mac_set_txbf_conf(struct ath11k_vif *arvif)
+{
+ u32 value = 0;
+ struct ath11k *ar = arvif->ar;
+ int nsts;
+ int sound_dim;
+ u32 vht_cap = ar->pdev->cap.vht_cap;
+ u32 vdev_param = WMI_VDEV_PARAM_TXBF;
+
+ if (vht_cap & (IEEE80211_VHT_CAP_SU_BEAMFORMEE_CAPABLE)) {
+ nsts = vht_cap & IEEE80211_VHT_CAP_BEAMFORMEE_STS_MASK;
+ nsts >>= IEEE80211_VHT_CAP_BEAMFORMEE_STS_SHIFT;
+ if (nsts > (ar->num_rx_chains - 1))
+ nsts = ar->num_rx_chains - 1;
+ value |= SM(nsts, WMI_TXBF_STS_CAP_OFFSET);
+ }
+
+ if (vht_cap & (IEEE80211_VHT_CAP_SU_BEAMFORMER_CAPABLE)) {
+ sound_dim = vht_cap &
+ IEEE80211_VHT_CAP_SOUNDING_DIMENSIONS_MASK;
+ sound_dim >>= IEEE80211_VHT_CAP_SOUNDING_DIMENSIONS_SHIFT;
+ if (sound_dim > (ar->num_tx_chains - 1))
+ sound_dim = ar->num_tx_chains - 1;
+ value |= SM(sound_dim, WMI_BF_SOUND_DIM_OFFSET);
+ }
+
+ if (!value)
+ return 0;
+
+ if (vht_cap & IEEE80211_VHT_CAP_SU_BEAMFORMER_CAPABLE) {
+ value |= WMI_VDEV_PARAM_TXBF_SU_TX_BFER;
+
+ if ((vht_cap & IEEE80211_VHT_CAP_MU_BEAMFORMER_CAPABLE) &&
+ arvif->vdev_type == WMI_VDEV_TYPE_AP)
+ value |= WMI_VDEV_PARAM_TXBF_MU_TX_BFER;
+ }
+
+ /* TODO: SUBFEE not validated in HK, disable here until validated? */
+
+ if (vht_cap & IEEE80211_VHT_CAP_SU_BEAMFORMEE_CAPABLE) {
+ value |= WMI_VDEV_PARAM_TXBF_SU_TX_BFEE;
+
+ if ((vht_cap & IEEE80211_VHT_CAP_MU_BEAMFORMEE_CAPABLE) &&
+ arvif->vdev_type == WMI_VDEV_TYPE_STA)
+ value |= WMI_VDEV_PARAM_TXBF_MU_TX_BFEE;
+ }
+
+ return ath11k_wmi_vdev_set_param_cmd(ar, arvif->vdev_id,
+ vdev_param, value);
+}
+
+static void ath11k_set_vht_txbf_cap(struct ath11k *ar, u32 *vht_cap)
+{
+ bool subfer, subfee;
+ int sound_dim = 0, nsts = 0;
+
+ subfer = !!(*vht_cap & (IEEE80211_VHT_CAP_SU_BEAMFORMER_CAPABLE));
+ subfee = !!(*vht_cap & (IEEE80211_VHT_CAP_SU_BEAMFORMEE_CAPABLE));
+
+ if (ar->num_tx_chains < 2) {
+ *vht_cap &= ~(IEEE80211_VHT_CAP_SU_BEAMFORMER_CAPABLE);
+ subfer = false;
+ }
+
+ if (ar->num_rx_chains < 2) {
+ *vht_cap &= ~(IEEE80211_VHT_CAP_SU_BEAMFORMEE_CAPABLE);
+ subfee = false;
+ }
+
+ /* If SU Beamformer is not set, then disable MU Beamformer Capability */
+ if (!subfer)
+ *vht_cap &= ~(IEEE80211_VHT_CAP_MU_BEAMFORMER_CAPABLE);
+
+ /* If SU Beamformee is not set, then disable MU Beamformee Capability */
+ if (!subfee)
+ *vht_cap &= ~(IEEE80211_VHT_CAP_MU_BEAMFORMEE_CAPABLE);
+
+ sound_dim = (*vht_cap & IEEE80211_VHT_CAP_SOUNDING_DIMENSIONS_MASK);
+ sound_dim >>= IEEE80211_VHT_CAP_SOUNDING_DIMENSIONS_SHIFT;
+ *vht_cap &= ~IEEE80211_VHT_CAP_SOUNDING_DIMENSIONS_MASK;
+
+ nsts = (*vht_cap & IEEE80211_VHT_CAP_BEAMFORMEE_STS_MASK);
+ nsts >>= IEEE80211_VHT_CAP_BEAMFORMEE_STS_SHIFT;
+ *vht_cap &= ~IEEE80211_VHT_CAP_BEAMFORMEE_STS_MASK;
+
+ /* Enable Sounding Dimension Field only if SU BF is enabled */
+ if (subfer) {
+ if (sound_dim > (ar->num_tx_chains - 1))
+ sound_dim = ar->num_tx_chains - 1;
+
+ sound_dim <<= IEEE80211_VHT_CAP_SOUNDING_DIMENSIONS_SHIFT;
+ sound_dim &= IEEE80211_VHT_CAP_SOUNDING_DIMENSIONS_MASK;
+ *vht_cap |= sound_dim;
+ }
+
+ /* Enable Beamformee STS Field only if SU BF is enabled */
+ if (subfee) {
+ if (nsts > (ar->num_rx_chains - 1))
+ nsts = ar->num_rx_chains - 1;
+
+ nsts <<= IEEE80211_VHT_CAP_BEAMFORMEE_STS_SHIFT;
+ nsts &= IEEE80211_VHT_CAP_BEAMFORMEE_STS_MASK;
+ *vht_cap |= nsts;
+ }
+}
+
+static struct ieee80211_sta_vht_cap
+ath11k_create_vht_cap(struct ath11k *ar, u32 rate_cap_tx_chainmask,
+ u32 rate_cap_rx_chainmask)
+{
+ struct ieee80211_sta_vht_cap vht_cap = {0};
+ u16 txmcs_map, rxmcs_map;
+ int i;
+
+ vht_cap.vht_supported = 1;
+ vht_cap.cap = ar->pdev->cap.vht_cap;
+
+ if (ar->pdev->cap.nss_ratio_enabled)
+ vht_cap.vht_mcs.tx_highest |=
+ cpu_to_le16(IEEE80211_VHT_EXT_NSS_BW_CAPABLE);
+
+ ath11k_set_vht_txbf_cap(ar, &vht_cap.cap);
+
+ rxmcs_map = 0;
+ txmcs_map = 0;
+ for (i = 0; i < 8; i++) {
+ if (i < ar->num_tx_chains && rate_cap_tx_chainmask & BIT(i))
+ txmcs_map |= IEEE80211_VHT_MCS_SUPPORT_0_9 << (i * 2);
+ else
+ txmcs_map |= IEEE80211_VHT_MCS_NOT_SUPPORTED << (i * 2);
+
+ if (i < ar->num_rx_chains && rate_cap_rx_chainmask & BIT(i))
+ rxmcs_map |= IEEE80211_VHT_MCS_SUPPORT_0_9 << (i * 2);
+ else
+ rxmcs_map |= IEEE80211_VHT_MCS_NOT_SUPPORTED << (i * 2);
+ }
+
+ if (rate_cap_tx_chainmask <= 1)
+ vht_cap.cap &= ~IEEE80211_VHT_CAP_TXSTBC;
+
+ vht_cap.vht_mcs.rx_mcs_map = cpu_to_le16(rxmcs_map);
+ vht_cap.vht_mcs.tx_mcs_map = cpu_to_le16(txmcs_map);
+
+ return vht_cap;
+}
+
+static void ath11k_mac_setup_ht_vht_cap(struct ath11k *ar,
+ struct ath11k_pdev_cap *cap,
+ u32 *ht_cap_info)
+{
+ struct ieee80211_supported_band *band;
+ u32 rate_cap_tx_chainmask;
+ u32 rate_cap_rx_chainmask;
+ u32 ht_cap;
+
+ rate_cap_tx_chainmask = ar->cfg_tx_chainmask >> cap->tx_chain_mask_shift;
+ rate_cap_rx_chainmask = ar->cfg_rx_chainmask >> cap->rx_chain_mask_shift;
+
+ if (cap->supported_bands & WMI_HOST_WLAN_2G_CAP) {
+ band = &ar->mac.sbands[NL80211_BAND_2GHZ];
+ ht_cap = cap->band[NL80211_BAND_2GHZ].ht_cap_info;
+ if (ht_cap_info)
+ *ht_cap_info = ht_cap;
+ band->ht_cap = ath11k_create_ht_cap(ar, ht_cap,
+ rate_cap_rx_chainmask);
+ }
+
+ if (cap->supported_bands & WMI_HOST_WLAN_5G_CAP &&
+ (ar->ab->hw_params.single_pdev_only ||
+ !ar->supports_6ghz)) {
+ band = &ar->mac.sbands[NL80211_BAND_5GHZ];
+ ht_cap = cap->band[NL80211_BAND_5GHZ].ht_cap_info;
+ if (ht_cap_info)
+ *ht_cap_info = ht_cap;
+ band->ht_cap = ath11k_create_ht_cap(ar, ht_cap,
+ rate_cap_rx_chainmask);
+ band->vht_cap = ath11k_create_vht_cap(ar, rate_cap_tx_chainmask,
+ rate_cap_rx_chainmask);
+ }
+}
+
+static int ath11k_check_chain_mask(struct ath11k *ar, u32 ant, bool is_tx_ant)
+{
+ /* TODO: Check the requested chainmask against the supported
+ * chainmask table which is advertised in the extended service
+ * ready event.
+ */
+
+ return 0;
+}
+
+static void ath11k_gen_ppe_thresh(struct ath11k_ppe_threshold *fw_ppet,
+ u8 *he_ppet)
+{
+ int nss, ru;
+ u8 bit = 7;
+
+ he_ppet[0] = fw_ppet->numss_m1 & IEEE80211_PPE_THRES_NSS_MASK;
+ he_ppet[0] |= (fw_ppet->ru_bit_mask <<
+ IEEE80211_PPE_THRES_RU_INDEX_BITMASK_POS) &
+ IEEE80211_PPE_THRES_RU_INDEX_BITMASK_MASK;
+ for (nss = 0; nss <= fw_ppet->numss_m1; nss++) {
+ for (ru = 0; ru < 4; ru++) {
+ u8 val;
+ int i;
+
+ if ((fw_ppet->ru_bit_mask & BIT(ru)) == 0)
+ continue;
+ val = (fw_ppet->ppet16_ppet8_ru3_ru0[nss] >> (ru * 6)) &
+ 0x3f;
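+ /* The two 3-bit PPET subfields appear to be reported by
+ * firmware in the reverse order of the HE PPE threshold
+ * field layout, so swap them before packing.
+ */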
+ val = ((val >> 3) & 0x7) | ((val & 0x7) << 3);
+ for (i = 5; i >= 0; i--) {
+ he_ppet[bit / 8] |=
+ ((val >> i) & 0x1) << ((bit % 8));
+ bit++;
+ }
+ }
+ }
+}
+
+static void
+ath11k_mac_filter_he_cap_mesh(struct ieee80211_he_cap_elem *he_cap_elem)
+{
+ u8 m;
+
+ m = IEEE80211_HE_MAC_CAP0_TWT_RES |
+ IEEE80211_HE_MAC_CAP0_TWT_REQ;
+ he_cap_elem->mac_cap_info[0] &= ~m;
+
+ m = IEEE80211_HE_MAC_CAP2_TRS |
+ IEEE80211_HE_MAC_CAP2_BCAST_TWT |
+ IEEE80211_HE_MAC_CAP2_MU_CASCADING;
+ he_cap_elem->mac_cap_info[2] &= ~m;
+
+ m = IEEE80211_HE_MAC_CAP3_FLEX_TWT_SCHED |
+ IEEE80211_HE_MAC_CAP2_BCAST_TWT |
+ IEEE80211_HE_MAC_CAP2_MU_CASCADING;
+ he_cap_elem->mac_cap_info[3] &= ~m;
+
+ m = IEEE80211_HE_MAC_CAP4_BSRP_BQRP_A_MPDU_AGG |
+ IEEE80211_HE_MAC_CAP4_BQR;
+ he_cap_elem->mac_cap_info[4] &= ~m;
+
+ m = IEEE80211_HE_MAC_CAP5_SUBCHAN_SELECTIVE_TRANSMISSION |
+ IEEE80211_HE_MAC_CAP5_UL_2x996_TONE_RU |
+ IEEE80211_HE_MAC_CAP5_PUNCTURED_SOUNDING |
+ IEEE80211_HE_MAC_CAP5_HT_VHT_TRIG_FRAME_RX;
+ he_cap_elem->mac_cap_info[5] &= ~m;
+
+ m = IEEE80211_HE_PHY_CAP2_UL_MU_FULL_MU_MIMO |
+ IEEE80211_HE_PHY_CAP2_UL_MU_PARTIAL_MU_MIMO;
+ he_cap_elem->phy_cap_info[2] &= ~m;
+
+ m = IEEE80211_HE_PHY_CAP3_RX_PARTIAL_BW_SU_IN_20MHZ_MU |
+ IEEE80211_HE_PHY_CAP3_DCM_MAX_CONST_TX_MASK |
+ IEEE80211_HE_PHY_CAP3_DCM_MAX_CONST_RX_MASK;
+ he_cap_elem->phy_cap_info[3] &= ~m;
+
+ m = IEEE80211_HE_PHY_CAP4_MU_BEAMFORMER;
+ he_cap_elem->phy_cap_info[4] &= ~m;
+
+ m = IEEE80211_HE_PHY_CAP5_NG16_MU_FEEDBACK;
+ he_cap_elem->phy_cap_info[5] &= ~m;
+
+ m = IEEE80211_HE_PHY_CAP6_CODEBOOK_SIZE_75_MU |
+ IEEE80211_HE_PHY_CAP6_TRIG_MU_BEAMFORMING_PARTIAL_BW_FB |
+ IEEE80211_HE_PHY_CAP6_TRIG_CQI_FB |
+ IEEE80211_HE_PHY_CAP6_PARTIAL_BANDWIDTH_DL_MUMIMO;
+ he_cap_elem->phy_cap_info[6] &= ~m;
+
+ m = IEEE80211_HE_PHY_CAP7_PSR_BASED_SR |
+ IEEE80211_HE_PHY_CAP7_POWER_BOOST_FACTOR_SUPP |
+ IEEE80211_HE_PHY_CAP7_STBC_TX_ABOVE_80MHZ |
+ IEEE80211_HE_PHY_CAP7_STBC_RX_ABOVE_80MHZ;
+ he_cap_elem->phy_cap_info[7] &= ~m;
+
+ m = IEEE80211_HE_PHY_CAP8_HE_ER_SU_PPDU_4XLTF_AND_08_US_GI |
+ IEEE80211_HE_PHY_CAP8_20MHZ_IN_40MHZ_HE_PPDU_IN_2G |
+ IEEE80211_HE_PHY_CAP8_20MHZ_IN_160MHZ_HE_PPDU |
+ IEEE80211_HE_PHY_CAP8_80MHZ_IN_160MHZ_HE_PPDU;
+ he_cap_elem->phy_cap_info[8] &= ~m;
+
+ m = IEEE80211_HE_PHY_CAP9_LONGER_THAN_16_SIGB_OFDM_SYM |
+ IEEE80211_HE_PHY_CAP9_NON_TRIGGERED_CQI_FEEDBACK |
+ IEEE80211_HE_PHY_CAP9_RX_1024_QAM_LESS_THAN_242_TONE_RU |
+ IEEE80211_HE_PHY_CAP9_TX_1024_QAM_LESS_THAN_242_TONE_RU |
+ IEEE80211_HE_PHY_CAP9_RX_FULL_BW_SU_USING_MU_WITH_COMP_SIGB |
+ IEEE80211_HE_PHY_CAP9_RX_FULL_BW_SU_USING_MU_WITH_NON_COMP_SIGB;
+ he_cap_elem->phy_cap_info[9] &= ~m;
+}
+
+static __le16 ath11k_mac_setup_he_6ghz_cap(struct ath11k_pdev_cap *pcap,
+ struct ath11k_band_cap *bcap)
+{
+ u8 val;
+
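+ /* The HE 6 GHz band capabilities carry fields (SM power save, max
+ * A-MPDU length exponent, max MPDU length) that 2/5 GHz devices
+ * advertise via HT/VHT caps, so derive them from those caps here.
+ */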
+ bcap->he_6ghz_capa = IEEE80211_HT_MPDU_DENSITY_NONE;
+ if (bcap->ht_cap_info & WMI_HT_CAP_DYNAMIC_SMPS)
+ bcap->he_6ghz_capa |=
+ FIELD_PREP(IEEE80211_HE_6GHZ_CAP_SM_PS,
+ WLAN_HT_CAP_SM_PS_DYNAMIC);
+ else
+ bcap->he_6ghz_capa |=
+ FIELD_PREP(IEEE80211_HE_6GHZ_CAP_SM_PS,
+ WLAN_HT_CAP_SM_PS_DISABLED);
+ val = FIELD_GET(IEEE80211_VHT_CAP_MAX_A_MPDU_LENGTH_EXPONENT_MASK,
+ pcap->vht_cap);
+ bcap->he_6ghz_capa |=
+ FIELD_PREP(IEEE80211_HE_6GHZ_CAP_MAX_AMPDU_LEN_EXP, val);
+ val = FIELD_GET(IEEE80211_VHT_CAP_MAX_MPDU_MASK, pcap->vht_cap);
+ bcap->he_6ghz_capa |=
+ FIELD_PREP(IEEE80211_HE_6GHZ_CAP_MAX_MPDU_LEN, val);
+ if (pcap->vht_cap & IEEE80211_VHT_CAP_RX_ANTENNA_PATTERN)
+ bcap->he_6ghz_capa |= IEEE80211_HE_6GHZ_CAP_RX_ANTPAT_CONS;
+ if (pcap->vht_cap & IEEE80211_VHT_CAP_TX_ANTENNA_PATTERN)
+ bcap->he_6ghz_capa |= IEEE80211_HE_6GHZ_CAP_TX_ANTPAT_CONS;
+
+ return cpu_to_le16(bcap->he_6ghz_capa);
+}
+
+static void ath11k_mac_set_hemcsmap(struct ath11k *ar,
+ struct ath11k_pdev_cap *cap,
+ struct ieee80211_sta_he_cap *he_cap,
+ int band)
+{
+ u16 txmcs_map, rxmcs_map;
+ u32 i;
+
+ rxmcs_map = 0;
+ txmcs_map = 0;
+ for (i = 0; i < 8; i++) {
+ if (i < ar->num_tx_chains &&
+ (ar->cfg_tx_chainmask >> cap->tx_chain_mask_shift) & BIT(i))
+ txmcs_map |= IEEE80211_HE_MCS_SUPPORT_0_11 << (i * 2);
+ else
+ txmcs_map |= IEEE80211_HE_MCS_NOT_SUPPORTED << (i * 2);
+
+ if (i < ar->num_rx_chains &&
+ (ar->cfg_rx_chainmask >> cap->tx_chain_mask_shift) & BIT(i))
+ rxmcs_map |= IEEE80211_HE_MCS_SUPPORT_0_11 << (i * 2);
+ else
+ rxmcs_map |= IEEE80211_HE_MCS_NOT_SUPPORTED << (i * 2);
+ }
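+ /* The same per-chain MCS map is advertised for 80 MHz, 160 MHz
+ * and 80+80 MHz operation.
+ */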
+ he_cap->he_mcs_nss_supp.rx_mcs_80 =
+ cpu_to_le16(rxmcs_map & 0xffff);
+ he_cap->he_mcs_nss_supp.tx_mcs_80 =
+ cpu_to_le16(txmcs_map & 0xffff);
+ he_cap->he_mcs_nss_supp.rx_mcs_160 =
+ cpu_to_le16(rxmcs_map & 0xffff);
+ he_cap->he_mcs_nss_supp.tx_mcs_160 =
+ cpu_to_le16(txmcs_map & 0xffff);
+ he_cap->he_mcs_nss_supp.rx_mcs_80p80 =
+ cpu_to_le16(rxmcs_map & 0xffff);
+ he_cap->he_mcs_nss_supp.tx_mcs_80p80 =
+ cpu_to_le16(txmcs_map & 0xffff);
+}
+
+static int ath11k_mac_copy_he_cap(struct ath11k *ar,
+ struct ath11k_pdev_cap *cap,
+ struct ieee80211_sband_iftype_data *data,
+ int band)
+{
+ int i, idx = 0;
+
+ for (i = 0; i < NUM_NL80211_IFTYPES; i++) {
+ struct ieee80211_sta_he_cap *he_cap = &data[idx].he_cap;
+ struct ath11k_band_cap *band_cap = &cap->band[band];
+ struct ieee80211_he_cap_elem *he_cap_elem =
+ &he_cap->he_cap_elem;
+
+ switch (i) {
+ case NL80211_IFTYPE_STATION:
+ case NL80211_IFTYPE_AP:
+ case NL80211_IFTYPE_MESH_POINT:
+ break;
+
+ default:
+ continue;
+ }
+
+ data[idx].types_mask = BIT(i);
+ he_cap->has_he = true;
+ memcpy(he_cap_elem->mac_cap_info, band_cap->he_cap_info,
+ sizeof(he_cap_elem->mac_cap_info));
+ memcpy(he_cap_elem->phy_cap_info, band_cap->he_cap_phy_info,
+ sizeof(he_cap_elem->phy_cap_info));
+
+ he_cap_elem->mac_cap_info[1] &=
+ IEEE80211_HE_MAC_CAP1_TF_MAC_PAD_DUR_MASK;
+
+ he_cap_elem->phy_cap_info[5] &=
+ ~IEEE80211_HE_PHY_CAP5_BEAMFORMEE_NUM_SND_DIM_UNDER_80MHZ_MASK;
+ he_cap_elem->phy_cap_info[5] |= ar->num_tx_chains - 1;
+
+ switch (i) {
+ case NL80211_IFTYPE_AP:
+ he_cap_elem->phy_cap_info[3] &=
+ ~IEEE80211_HE_PHY_CAP3_DCM_MAX_CONST_TX_MASK;
+ he_cap_elem->phy_cap_info[9] |=
+ IEEE80211_HE_PHY_CAP9_RX_1024_QAM_LESS_THAN_242_TONE_RU;
+ break;
+ case NL80211_IFTYPE_STATION:
+ he_cap_elem->mac_cap_info[0] &=
+ ~IEEE80211_HE_MAC_CAP0_TWT_RES;
+ he_cap_elem->mac_cap_info[0] |=
+ IEEE80211_HE_MAC_CAP0_TWT_REQ;
+ he_cap_elem->phy_cap_info[9] |=
+ IEEE80211_HE_PHY_CAP9_TX_1024_QAM_LESS_THAN_242_TONE_RU;
+ break;
+ case NL80211_IFTYPE_MESH_POINT:
+ ath11k_mac_filter_he_cap_mesh(he_cap_elem);
+ break;
+ }
+
+ ath11k_mac_set_hemcsmap(ar, cap, he_cap, band);
+
+ memset(he_cap->ppe_thres, 0, sizeof(he_cap->ppe_thres));
+ if (he_cap_elem->phy_cap_info[6] &
+ IEEE80211_HE_PHY_CAP6_PPE_THRESHOLD_PRESENT)
+ ath11k_gen_ppe_thresh(&band_cap->he_ppet,
+ he_cap->ppe_thres);
+
+ if (band == NL80211_BAND_6GHZ) {
+ data[idx].he_6ghz_capa.capa =
+ ath11k_mac_setup_he_6ghz_cap(cap, band_cap);
+ }
+ idx++;
+ }
+
+ return idx;
+}
+
+static void ath11k_mac_setup_he_cap(struct ath11k *ar,
+ struct ath11k_pdev_cap *cap)
+{
+ struct ieee80211_supported_band *band;
+ int count;
+
+ if (cap->supported_bands & WMI_HOST_WLAN_2G_CAP) {
+ count = ath11k_mac_copy_he_cap(ar, cap,
+ ar->mac.iftype[NL80211_BAND_2GHZ],
+ NL80211_BAND_2GHZ);
+ band = &ar->mac.sbands[NL80211_BAND_2GHZ];
+ band->iftype_data = ar->mac.iftype[NL80211_BAND_2GHZ];
+ band->n_iftype_data = count;
+ }
+
+ if (cap->supported_bands & WMI_HOST_WLAN_5G_CAP) {
+ count = ath11k_mac_copy_he_cap(ar, cap,
+ ar->mac.iftype[NL80211_BAND_5GHZ],
+ NL80211_BAND_5GHZ);
+ band = &ar->mac.sbands[NL80211_BAND_5GHZ];
+ band->iftype_data = ar->mac.iftype[NL80211_BAND_5GHZ];
+ band->n_iftype_data = count;
+ }
+
+ if (cap->supported_bands & WMI_HOST_WLAN_5G_CAP &&
+ ar->supports_6ghz) {
+ count = ath11k_mac_copy_he_cap(ar, cap,
+ ar->mac.iftype[NL80211_BAND_6GHZ],
+ NL80211_BAND_6GHZ);
+ band = &ar->mac.sbands[NL80211_BAND_6GHZ];
+ band->iftype_data = ar->mac.iftype[NL80211_BAND_6GHZ];
+ band->n_iftype_data = count;
+ }
+}
+
+static int __ath11k_set_antenna(struct ath11k *ar, u32 tx_ant, u32 rx_ant)
+{
+ int ret;
+
+ lockdep_assert_held(&ar->conf_mutex);
+
+ if (ath11k_check_chain_mask(ar, tx_ant, true))
+ return -EINVAL;
+
+ if (ath11k_check_chain_mask(ar, rx_ant, false))
+ return -EINVAL;
+
+ ar->cfg_tx_chainmask = tx_ant;
+ ar->cfg_rx_chainmask = rx_ant;
+
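+ /* Cache the chainmasks now; they are pushed to firmware below
+ * only once the pdev is running.
+ */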
+ if (ar->state != ATH11K_STATE_ON &&
+ ar->state != ATH11K_STATE_RESTARTED)
+ return 0;
+
+ ret = ath11k_wmi_pdev_set_param(ar, WMI_PDEV_PARAM_TX_CHAIN_MASK,
+ tx_ant, ar->pdev->pdev_id);
+ if (ret) {
+ ath11k_warn(ar->ab, "failed to set tx-chainmask: %d, req 0x%x\n",
+ ret, tx_ant);
+ return ret;
+ }
+
+ ar->num_tx_chains = get_num_chains(tx_ant);
+
+ ret = ath11k_wmi_pdev_set_param(ar, WMI_PDEV_PARAM_RX_CHAIN_MASK,
+ rx_ant, ar->pdev->pdev_id);
+ if (ret) {
+ ath11k_warn(ar->ab, "failed to set rx-chainmask: %d, req 0x%x\n",
+ ret, rx_ant);
+ return ret;
+ }
+
+ ar->num_rx_chains = get_num_chains(rx_ant);
+
+ /* Reload HT/VHT/HE capability */
+ ath11k_mac_setup_ht_vht_cap(ar, &ar->pdev->cap, NULL);
+ ath11k_mac_setup_he_cap(ar, &ar->pdev->cap);
+
+ return 0;
+}
+
+static void ath11k_mgmt_over_wmi_tx_drop(struct ath11k *ar, struct sk_buff *skb)
+{
+ int num_mgmt;
+
+ ieee80211_free_txskb(ar->hw, skb);
+
+ num_mgmt = atomic_dec_if_positive(&ar->num_pending_mgmt_tx);
+
+ WARN_ON_ONCE(num_mgmt < 0);
+
+ if (!num_mgmt)
+ wake_up(&ar->txmgmt_empty_waitq);
+}
+
+static void ath11k_mac_tx_mgmt_free(struct ath11k *ar, int buf_id)
+{
+ struct sk_buff *msdu;
+ struct ieee80211_tx_info *info;
+
+ spin_lock_bh(&ar->txmgmt_idr_lock);
+ msdu = idr_remove(&ar->txmgmt_idr, buf_id);
+ spin_unlock_bh(&ar->txmgmt_idr_lock);
+
+ if (!msdu)
+ return;
+
+ dma_unmap_single(ar->ab->dev, ATH11K_SKB_CB(msdu)->paddr, msdu->len,
+ DMA_TO_DEVICE);
+
+ info = IEEE80211_SKB_CB(msdu);
+ memset(&info->status, 0, sizeof(info->status));
+
+ ath11k_mgmt_over_wmi_tx_drop(ar, msdu);
+}
+
+int ath11k_mac_tx_mgmt_pending_free(int buf_id, void *skb, void *ctx)
+{
+ struct ath11k *ar = ctx;
+
+ ath11k_mac_tx_mgmt_free(ar, buf_id);
+
+ return 0;
+}
+
+static int ath11k_mac_vif_txmgmt_idr_remove(int buf_id, void *skb, void *ctx)
+{
+ struct ieee80211_vif *vif = ctx;
+ struct ath11k_skb_cb *skb_cb = ATH11K_SKB_CB((struct sk_buff *)skb);
+ struct ath11k *ar = skb_cb->ar;
+
+ if (skb_cb->vif == vif)
+ ath11k_mac_tx_mgmt_free(ar, buf_id);
+
+ return 0;
+}
+
+static int ath11k_mac_mgmt_tx_wmi(struct ath11k *ar, struct ath11k_vif *arvif,
+ struct sk_buff *skb)
+{
+ struct ath11k_base *ab = ar->ab;
+ struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data;
+ struct ieee80211_tx_info *info;
+ dma_addr_t paddr;
+ int buf_id;
+ int ret;
+
+ ATH11K_SKB_CB(skb)->ar = ar;
+
+ spin_lock_bh(&ar->txmgmt_idr_lock);
+ buf_id = idr_alloc(&ar->txmgmt_idr, skb, 0,
+ ATH11K_TX_MGMT_NUM_PENDING_MAX, GFP_ATOMIC);
+ spin_unlock_bh(&ar->txmgmt_idr_lock);
+
+ ath11k_dbg(ar->ab, ATH11K_DBG_MAC,
+ "tx mgmt frame, buf id %d\n", buf_id);
+
+ if (buf_id < 0)
+ return -ENOSPC;
+
+ info = IEEE80211_SKB_CB(skb);
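+ /* Protected robust management frames (action/deauth/disassoc) get
+ * their CCMP MIC appended by the hardware, so extend the frame
+ * length to account for it.
+ */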
+ if (!(info->flags & IEEE80211_TX_CTL_HW_80211_ENCAP)) {
+ if ((ieee80211_is_action(hdr->frame_control) ||
+ ieee80211_is_deauth(hdr->frame_control) ||
+ ieee80211_is_disassoc(hdr->frame_control)) &&
+ ieee80211_has_protected(hdr->frame_control)) {
+ skb_put(skb, IEEE80211_CCMP_MIC_LEN);
+ }
+ }
+
+ paddr = dma_map_single(ab->dev, skb->data, skb->len, DMA_TO_DEVICE);
+ if (dma_mapping_error(ab->dev, paddr)) {
+ ath11k_warn(ab, "failed to DMA map mgmt Tx buffer\n");
+ ret = -EIO;
+ goto err_free_idr;
+ }
+
+ ATH11K_SKB_CB(skb)->paddr = paddr;
+
+ ret = ath11k_wmi_mgmt_send(ar, arvif->vdev_id, buf_id, skb);
+ if (ret) {
+ ath11k_warn(ar->ab, "failed to send mgmt frame: %d\n", ret);
+ goto err_unmap_buf;
+ }
+
+ return 0;
+
+err_unmap_buf:
+ dma_unmap_single(ab->dev, ATH11K_SKB_CB(skb)->paddr,
+ skb->len, DMA_TO_DEVICE);
+err_free_idr:
+ spin_lock_bh(&ar->txmgmt_idr_lock);
+ idr_remove(&ar->txmgmt_idr, buf_id);
+ spin_unlock_bh(&ar->txmgmt_idr_lock);
+
+ return ret;
+}
+
+static void ath11k_mgmt_over_wmi_tx_purge(struct ath11k *ar)
+{
+ struct sk_buff *skb;
+
+ while ((skb = skb_dequeue(&ar->wmi_mgmt_tx_queue)) != NULL)
+ ath11k_mgmt_over_wmi_tx_drop(ar, skb);
+}
+
+static void ath11k_mgmt_over_wmi_tx_work(struct work_struct *work)
+{
+ struct ath11k *ar = container_of(work, struct ath11k, wmi_mgmt_tx_work);
+ struct ath11k_skb_cb *skb_cb;
+ struct ath11k_vif *arvif;
+ struct sk_buff *skb;
+ int ret;
+
+ while ((skb = skb_dequeue(&ar->wmi_mgmt_tx_queue)) != NULL) {
+ skb_cb = ATH11K_SKB_CB(skb);
+ if (!skb_cb->vif) {
+ ath11k_warn(ar->ab, "no vif found for mgmt frame\n");
+ ath11k_mgmt_over_wmi_tx_drop(ar, skb);
+ continue;
+ }
+
+ arvif = ath11k_vif_to_arvif(skb_cb->vif);
+ mutex_lock(&ar->conf_mutex);
+ if (ar->allocated_vdev_map & (1LL << arvif->vdev_id)) {
+ ret = ath11k_mac_mgmt_tx_wmi(ar, arvif, skb);
+ if (ret) {
+ ath11k_warn(ar->ab, "failed to tx mgmt frame, vdev_id %d :%d\n",
+ arvif->vdev_id, ret);
+ ath11k_mgmt_over_wmi_tx_drop(ar, skb);
+ } else {
+ ath11k_dbg(ar->ab, ATH11K_DBG_MAC,
+ "tx mgmt frame, vdev_id %d\n",
+ arvif->vdev_id);
+ }
+ } else {
+ ath11k_warn(ar->ab,
+ "dropping mgmt frame for vdev %d, is_started %d\n",
+ arvif->vdev_id,
+ arvif->is_started);
+ ath11k_mgmt_over_wmi_tx_drop(ar, skb);
+ }
+ mutex_unlock(&ar->conf_mutex);
+ }
+}
+
+static int ath11k_mac_mgmt_tx(struct ath11k *ar, struct sk_buff *skb,
+ bool is_prb_rsp)
+{
+ struct sk_buff_head *q = &ar->wmi_mgmt_tx_queue;
+
+ if (test_bit(ATH11K_FLAG_CRASH_FLUSH, &ar->ab->dev_flags))
+ return -ESHUTDOWN;
+
+ /* Drop probe response packets when the pending management tx
+ * count has reached a certain threshold, so as to prioritize
+ * other mgmt packets like auth and assoc to be sent on time
+ * for establishing successful connections.
+ */
+ if (is_prb_rsp &&
+ atomic_read(&ar->num_pending_mgmt_tx) > ATH11K_PRB_RSP_DROP_THRESHOLD) {
+ ath11k_warn(ar->ab,
+ "dropping probe response as pending queue is almost full\n");
+ return -ENOSPC;
+ }
+
+ if (skb_queue_len_lockless(q) >= ATH11K_TX_MGMT_NUM_PENDING_MAX) {
+ ath11k_warn(ar->ab, "mgmt tx queue is full\n");
+ return -ENOSPC;
+ }
+
+ skb_queue_tail(q, skb);
+ atomic_inc(&ar->num_pending_mgmt_tx);
+ queue_work(ar->ab->workqueue_aux, &ar->wmi_mgmt_tx_work);
+
+ return 0;
+}
+
+static void ath11k_mac_op_tx(struct ieee80211_hw *hw,
+ struct ieee80211_tx_control *control,
+ struct sk_buff *skb)
+{
+ struct ath11k_skb_cb *skb_cb = ATH11K_SKB_CB(skb);
+ struct ath11k *ar = hw->priv;
+ struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
+ struct ieee80211_vif *vif = info->control.vif;
+ struct ath11k_vif *arvif = ath11k_vif_to_arvif(vif);
+ struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data;
+ struct ieee80211_key_conf *key = info->control.hw_key;
+ struct ath11k_sta *arsta = NULL;
+ u32 info_flags = info->flags;
+ bool is_prb_rsp;
+ int ret;
+
+ memset(skb_cb, 0, sizeof(*skb_cb));
+ skb_cb->vif = vif;
+
+ if (key) {
+ skb_cb->cipher = key->cipher;
+ skb_cb->flags |= ATH11K_SKB_CIPHER_SET;
+ }
+
+ if (info_flags & IEEE80211_TX_CTL_HW_80211_ENCAP) {
+ skb_cb->flags |= ATH11K_SKB_HW_80211_ENCAP;
+ } else if (ieee80211_is_mgmt(hdr->frame_control)) {
+ is_prb_rsp = ieee80211_is_probe_resp(hdr->frame_control);
+ ret = ath11k_mac_mgmt_tx(ar, skb, is_prb_rsp);
+ if (ret) {
+ ath11k_warn(ar->ab, "failed to queue management frame %d\n",
+ ret);
+ ieee80211_free_txskb(ar->hw, skb);
+ }
+ return;
+ }
+
+ if (control->sta)
+ arsta = (struct ath11k_sta *)control->sta->drv_priv;
+
+ ret = ath11k_dp_tx(ar, arvif, arsta, skb);
+ if (unlikely(ret)) {
+ ath11k_warn(ar->ab, "failed to transmit frame %d\n", ret);
+ ieee80211_free_txskb(ar->hw, skb);
+ }
+}
+
+void ath11k_mac_drain_tx(struct ath11k *ar)
+{
+ /* make sure rcu-protected mac80211 tx path itself is drained */
+ synchronize_net();
+
+ cancel_work_sync(&ar->wmi_mgmt_tx_work);
+ ath11k_mgmt_over_wmi_tx_purge(ar);
+}
+
+static int ath11k_mac_config_mon_status_default(struct ath11k *ar, bool enable)
+{
+ struct htt_rx_ring_tlv_filter tlv_filter = {0};
+ struct ath11k_base *ab = ar->ab;
+ int i, ret = 0;
+ u32 ring_id;
+
+ if (enable) {
+ tlv_filter = ath11k_mac_mon_status_filter_default;
+ if (ath11k_debugfs_rx_filter(ar))
+ tlv_filter.rx_filter = ath11k_debugfs_rx_filter(ar);
+ }
+
+ for (i = 0; i < ab->hw_params.num_rxmda_per_pdev; i++) {
+ ring_id = ar->dp.rx_mon_status_refill_ring[i].refill_buf_ring.ring_id;
+ ret = ath11k_dp_tx_htt_rx_filter_setup(ar->ab, ring_id,
+ ar->dp.mac_id + i,
+ HAL_RXDMA_MONITOR_STATUS,
+ DP_RX_BUFFER_SIZE,
+ &tlv_filter);
+ }
+
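+ /* Targets without a dedicated monitor ring (rxdma1) rely on a
+ * periodic timer to reap the monitor status ring.
+ */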
+ if (enable && !ar->ab->hw_params.rxdma1_enable)
+ mod_timer(&ar->ab->mon_reap_timer, jiffies +
+ msecs_to_jiffies(ATH11K_MON_TIMER_INTERVAL));
+
+ return ret;
+}
+
+static void ath11k_mac_wait_reconfigure(struct ath11k_base *ab)
+{
+ int recovery_start_count;
+
+ if (!ab->is_reset)
+ return;
+
+ recovery_start_count = atomic_inc_return(&ab->recovery_start_count);
+ ath11k_dbg(ab, ATH11K_DBG_MAC, "recovery start count %d\n", recovery_start_count);
+
+ if (recovery_start_count == ab->num_radios) {
+ complete(&ab->recovery_start);
+ ath11k_dbg(ab, ATH11K_DBG_MAC, "recovery started success\n");
+ }
+
+ ath11k_dbg(ab, ATH11K_DBG_MAC, "waiting reconfigure...\n");
+
+ wait_for_completion_timeout(&ab->reconfigure_complete,
+ ATH11K_RECONFIGURE_TIMEOUT_HZ);
+}
+
+static int ath11k_mac_op_start(struct ieee80211_hw *hw)
+{
+ struct ath11k *ar = hw->priv;
+ struct ath11k_base *ab = ar->ab;
+ struct ath11k_pdev *pdev = ar->pdev;
+ int ret;
+
+ if (ath11k_ftm_mode) {
+ ath11k_warn(ab, "mac operations not supported in factory test mode\n");
+ return -EOPNOTSUPP;
+ }
+
+ ath11k_mac_drain_tx(ar);
+ mutex_lock(&ar->conf_mutex);
+
+ switch (ar->state) {
+ case ATH11K_STATE_OFF:
+ ar->state = ATH11K_STATE_ON;
+ break;
+ case ATH11K_STATE_RESTARTING:
+ ar->state = ATH11K_STATE_RESTARTED;
+ ath11k_mac_wait_reconfigure(ab);
+ break;
+ case ATH11K_STATE_RESTARTED:
+ case ATH11K_STATE_WEDGED:
+ case ATH11K_STATE_ON:
+ case ATH11K_STATE_FTM:
+ WARN_ON(1);
+ ret = -EINVAL;
+ goto err;
+ }
+
+ ret = ath11k_wmi_pdev_set_param(ar, WMI_PDEV_PARAM_PMF_QOS,
+ 1, pdev->pdev_id);
+
+ if (ret) {
+ ath11k_err(ar->ab, "failed to enable PMF QOS: (%d\n", ret);
+ goto err;
+ }
+
+ ret = ath11k_wmi_pdev_set_param(ar, WMI_PDEV_PARAM_DYNAMIC_BW, 1,
+ pdev->pdev_id);
+ if (ret) {
+ ath11k_err(ar->ab, "failed to enable dynamic bw: %d\n", ret);
+ goto err;
+ }
+
+ if (test_bit(WMI_TLV_SERVICE_SPOOF_MAC_SUPPORT, ar->wmi->wmi_ab->svc_map)) {
+ ret = ath11k_wmi_scan_prob_req_oui(ar, ar->mac_addr);
+ if (ret) {
+ ath11k_err(ab, "failed to set prob req oui: %i\n", ret);
+ goto err;
+ }
+ }
+
+ ret = ath11k_wmi_pdev_set_param(ar, WMI_PDEV_PARAM_ARP_AC_OVERRIDE,
+ 0, pdev->pdev_id);
+ if (ret) {
+ ath11k_err(ab, "failed to set ac override for ARP: %d\n",
+ ret);
+ goto err;
+ }
+
+ ret = ath11k_wmi_send_dfs_phyerr_offload_enable_cmd(ar, pdev->pdev_id);
+ if (ret) {
+ ath11k_err(ab, "failed to offload radar detection: %d\n",
+ ret);
+ goto err;
+ }
+
+ ret = ath11k_dp_tx_htt_h2t_ppdu_stats_req(ar,
+ HTT_PPDU_STATS_TAG_DEFAULT);
+ if (ret) {
+ ath11k_err(ab, "failed to req ppdu stats: %d\n", ret);
+ goto err;
+ }
+
+ ret = ath11k_wmi_pdev_set_param(ar, WMI_PDEV_PARAM_MESH_MCAST_ENABLE,
+ 1, pdev->pdev_id);
+
+ if (ret) {
+ ath11k_err(ar->ab, "failed to enable MESH MCAST ENABLE: (%d\n", ret);
+ goto err;
+ }
+
+ __ath11k_set_antenna(ar, ar->cfg_tx_chainmask, ar->cfg_rx_chainmask);
+
+ /* TODO: Do we need to enable ANI? */
+
+ ath11k_reg_update_chan_list(ar, false);
+
+ ar->num_started_vdevs = 0;
+ ar->num_created_vdevs = 0;
+ ar->num_peers = 0;
+ ar->allocated_vdev_map = 0;
+
+ /* Configure monitor status ring with default rx_filter to get rx status
+ * such as rssi, rx_duration.
+ */
+ ret = ath11k_mac_config_mon_status_default(ar, true);
+ if (ret) {
+ ath11k_err(ab, "failed to configure monitor status ring with default rx_filter: (%d)\n",
+ ret);
+ goto err;
+ }
+
+ /* Configure the hash seed for hash based reo dest ring selection */
+ ath11k_wmi_pdev_lro_cfg(ar, ar->pdev->pdev_id);
+
+ /* allow device to enter IMPS */
+ if (ab->hw_params.idle_ps) {
+ ret = ath11k_wmi_pdev_set_param(ar, WMI_PDEV_PARAM_IDLE_PS_CONFIG,
+ 1, pdev->pdev_id);
+ if (ret) {
+ ath11k_err(ab, "failed to enable idle ps: %d\n", ret);
+ goto err;
+ }
+ }
+
+ mutex_unlock(&ar->conf_mutex);
+
+ rcu_assign_pointer(ab->pdevs_active[ar->pdev_idx],
+ &ab->pdevs[ar->pdev_idx]);
+
+ return 0;
+
+err:
+ ar->state = ATH11K_STATE_OFF;
+ mutex_unlock(&ar->conf_mutex);
+
+ return ret;
+}
+
+static void ath11k_mac_op_stop(struct ieee80211_hw *hw)
+{
+ struct ath11k *ar = hw->priv;
+ struct htt_ppdu_stats_info *ppdu_stats, *tmp;
+ int ret;
+
+ ath11k_mac_drain_tx(ar);
+
+ mutex_lock(&ar->conf_mutex);
+ ret = ath11k_mac_config_mon_status_default(ar, false);
+ if (ret)
+ ath11k_err(ar->ab, "failed to clear rx_filter for monitor status ring: (%d)\n",
+ ret);
+
+ clear_bit(ATH11K_CAC_RUNNING, &ar->dev_flags);
+ ar->state = ATH11K_STATE_OFF;
+ mutex_unlock(&ar->conf_mutex);
+
+ cancel_delayed_work_sync(&ar->scan.timeout);
+ cancel_work_sync(&ar->regd_update_work);
+ cancel_work_sync(&ar->ab->update_11d_work);
+
+ if (ar->state_11d == ATH11K_11D_PREPARING) {
+ ar->state_11d = ATH11K_11D_IDLE;
+ complete(&ar->completed_11d_scan);
+ }
+
+ spin_lock_bh(&ar->data_lock);
+ list_for_each_entry_safe(ppdu_stats, tmp, &ar->ppdu_stats_info, list) {
+ list_del(&ppdu_stats->list);
+ kfree(ppdu_stats);
+ }
+ spin_unlock_bh(&ar->data_lock);
+
+ rcu_assign_pointer(ar->ab->pdevs_active[ar->pdev_idx], NULL);
+
+ synchronize_rcu();
+
+ atomic_set(&ar->num_pending_mgmt_tx, 0);
+}
+
+static int ath11k_mac_setup_vdev_params_mbssid(struct ath11k_vif *arvif,
+ u32 *flags, u32 *tx_vdev_id)
+{
+ struct ath11k *ar = arvif->ar;
+ struct ath11k_vif *tx_arvif;
+ struct ieee80211_vif *tx_vif;
+
+ *tx_vdev_id = 0;
+ tx_vif = arvif->vif->mbssid_tx_vif;
+ if (!tx_vif) {
+ *flags = WMI_HOST_VDEV_FLAGS_NON_MBSSID_AP;
+ return 0;
+ }
+
+ tx_arvif = ath11k_vif_to_arvif(tx_vif);
+
+ if (arvif->vif->bss_conf.nontransmitted) {
+ if (ar->hw->wiphy != ieee80211_vif_to_wdev(tx_vif)->wiphy)
+ return -EINVAL;
+
+ *flags = WMI_HOST_VDEV_FLAGS_NON_TRANSMIT_AP;
+ *tx_vdev_id = ath11k_vif_to_arvif(tx_vif)->vdev_id;
+ } else if (tx_arvif == arvif) {
+ *flags = WMI_HOST_VDEV_FLAGS_TRANSMIT_AP;
+ } else {
+ return -EINVAL;
+ }
+
+ if (arvif->vif->bss_conf.ema_ap)
+ *flags |= WMI_HOST_VDEV_FLAGS_EMA_MODE;
+
+ return 0;
+}
+
+static int ath11k_mac_setup_vdev_create_params(struct ath11k_vif *arvif,
+ struct vdev_create_params *params)
+{
+ struct ath11k *ar = arvif->ar;
+ struct ath11k_pdev *pdev = ar->pdev;
+ int ret;
+
+ params->if_id = arvif->vdev_id;
+ params->type = arvif->vdev_type;
+ params->subtype = arvif->vdev_subtype;
+ params->pdev_id = pdev->pdev_id;
+ params->mbssid_flags = 0;
+ params->mbssid_tx_vdev_id = 0;
+
+ if (!test_bit(WMI_TLV_SERVICE_MBSS_PARAM_IN_VDEV_START_SUPPORT,
+ ar->ab->wmi_ab.svc_map)) {
+ ret = ath11k_mac_setup_vdev_params_mbssid(arvif,
+ &params->mbssid_flags,
+ &params->mbssid_tx_vdev_id);
+ if (ret)
+ return ret;
+ }
+
+ if (pdev->cap.supported_bands & WMI_HOST_WLAN_2G_CAP) {
+ params->chains[NL80211_BAND_2GHZ].tx = ar->num_tx_chains;
+ params->chains[NL80211_BAND_2GHZ].rx = ar->num_rx_chains;
+ }
+ if (pdev->cap.supported_bands & WMI_HOST_WLAN_5G_CAP) {
+ params->chains[NL80211_BAND_5GHZ].tx = ar->num_tx_chains;
+ params->chains[NL80211_BAND_5GHZ].rx = ar->num_rx_chains;
+ }
+ if (pdev->cap.supported_bands & WMI_HOST_WLAN_5G_CAP &&
+ ar->supports_6ghz) {
+ params->chains[NL80211_BAND_6GHZ].tx = ar->num_tx_chains;
+ params->chains[NL80211_BAND_6GHZ].rx = ar->num_rx_chains;
+ }
+ return 0;
+}
+
+static void ath11k_mac_op_update_vif_offload(struct ieee80211_hw *hw,
+ struct ieee80211_vif *vif)
+{
+ struct ath11k *ar = hw->priv;
+ struct ath11k_base *ab = ar->ab;
+ struct ath11k_vif *arvif = ath11k_vif_to_arvif(vif);
+ u32 param_id, param_value;
+ int ret;
+
+ param_id = WMI_VDEV_PARAM_TX_ENCAP_TYPE;
+ if (ath11k_frame_mode != ATH11K_HW_TXRX_ETHERNET ||
+ (vif->type != NL80211_IFTYPE_STATION &&
+ vif->type != NL80211_IFTYPE_AP))
+ vif->offload_flags &= ~(IEEE80211_OFFLOAD_ENCAP_ENABLED |
+ IEEE80211_OFFLOAD_DECAP_ENABLED);
+
+ if (vif->offload_flags & IEEE80211_OFFLOAD_ENCAP_ENABLED)
+ param_value = ATH11K_HW_TXRX_ETHERNET;
+ else if (test_bit(ATH11K_FLAG_RAW_MODE, &ab->dev_flags))
+ param_value = ATH11K_HW_TXRX_RAW;
+ else
+ param_value = ATH11K_HW_TXRX_NATIVE_WIFI;
+
+ ret = ath11k_wmi_vdev_set_param_cmd(ar, arvif->vdev_id,
+ param_id, param_value);
+ if (ret) {
+ ath11k_warn(ab, "failed to set vdev %d tx encap mode: %d\n",
+ arvif->vdev_id, ret);
+ vif->offload_flags &= ~IEEE80211_OFFLOAD_ENCAP_ENABLED;
+ }
+
+ param_id = WMI_VDEV_PARAM_RX_DECAP_TYPE;
+ if (vif->offload_flags & IEEE80211_OFFLOAD_DECAP_ENABLED)
+ param_value = ATH11K_HW_TXRX_ETHERNET;
+ else if (test_bit(ATH11K_FLAG_RAW_MODE, &ab->dev_flags))
+ param_value = ATH11K_HW_TXRX_RAW;
+ else
+ param_value = ATH11K_HW_TXRX_NATIVE_WIFI;
+
+ ret = ath11k_wmi_vdev_set_param_cmd(ar, arvif->vdev_id,
+ param_id, param_value);
+ if (ret) {
+ ath11k_warn(ab, "failed to set vdev %d rx decap mode: %d\n",
+ arvif->vdev_id, ret);
+ vif->offload_flags &= ~IEEE80211_OFFLOAD_DECAP_ENABLED;
+ }
+}
+
+static bool ath11k_mac_vif_ap_active_any(struct ath11k_base *ab)
+{
+ struct ath11k *ar;
+ struct ath11k_pdev *pdev;
+ struct ath11k_vif *arvif;
+ int i;
+
+ for (i = 0; i < ab->num_radios; i++) {
+ pdev = &ab->pdevs[i];
+ ar = pdev->ar;
+ list_for_each_entry(arvif, &ar->arvifs, list) {
+ if (arvif->is_up && arvif->vdev_type == WMI_VDEV_TYPE_AP)
+ return true;
+ }
+ }
+ return false;
+}
+
+void ath11k_mac_11d_scan_start(struct ath11k *ar, u32 vdev_id)
+{
+ struct wmi_11d_scan_start_params param;
+ int ret;
+
+ mutex_lock(&ar->ab->vdev_id_11d_lock);
+
+ ath11k_dbg(ar->ab, ATH11K_DBG_MAC, "vdev id for 11d scan %d\n",
+ ar->vdev_id_11d_scan);
+
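+ /* Skip the 11d scan if the user has set a regdomain, a scan is
+ * already running, the firmware lacks 11d offload, or any AP vdev
+ * is active.
+ */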
+ if (ar->regdom_set_by_user)
+ goto fin;
+
+ if (ar->vdev_id_11d_scan != ATH11K_11D_INVALID_VDEV_ID)
+ goto fin;
+
+ if (!test_bit(WMI_TLV_SERVICE_11D_OFFLOAD, ar->ab->wmi_ab.svc_map))
+ goto fin;
+
+ if (ath11k_mac_vif_ap_active_any(ar->ab))
+ goto fin;
+
+ param.vdev_id = vdev_id;
+ param.start_interval_msec = 0;
+ param.scan_period_msec = ATH11K_SCAN_11D_INTERVAL;
+
+ ath11k_dbg(ar->ab, ATH11K_DBG_MAC, "start 11d scan\n");
+
+ ret = ath11k_wmi_send_11d_scan_start_cmd(ar, &param);
+ if (ret) {
+ ath11k_warn(ar->ab, "failed to start 11d scan vdev %d ret: %d\n",
+ vdev_id, ret);
+ } else {
+ ar->vdev_id_11d_scan = vdev_id;
+ if (ar->state_11d == ATH11K_11D_PREPARING)
+ ar->state_11d = ATH11K_11D_RUNNING;
+ }
+
+fin:
+ if (ar->state_11d == ATH11K_11D_PREPARING) {
+ ar->state_11d = ATH11K_11D_IDLE;
+ complete(&ar->completed_11d_scan);
+ }
+
+ mutex_unlock(&ar->ab->vdev_id_11d_lock);
+}
+
+void ath11k_mac_11d_scan_stop(struct ath11k *ar)
+{
+ int ret;
+ u32 vdev_id;
+
+ if (!test_bit(WMI_TLV_SERVICE_11D_OFFLOAD, ar->ab->wmi_ab.svc_map))
+ return;
+
+ ath11k_dbg(ar->ab, ATH11K_DBG_MAC, "stop 11d scan\n");
+
+ mutex_lock(&ar->ab->vdev_id_11d_lock);
+
+ ath11k_dbg(ar->ab, ATH11K_DBG_MAC, "stop 11d vdev id %d\n",
+ ar->vdev_id_11d_scan);
+
+ if (ar->state_11d == ATH11K_11D_PREPARING) {
+ ar->state_11d = ATH11K_11D_IDLE;
+ complete(&ar->completed_11d_scan);
+ }
+
+ if (ar->vdev_id_11d_scan != ATH11K_11D_INVALID_VDEV_ID) {
+ vdev_id = ar->vdev_id_11d_scan;
+
+ ret = ath11k_wmi_send_11d_scan_stop_cmd(ar, vdev_id);
+ if (ret) {
+ ath11k_warn(ar->ab,
+ "failed to stopt 11d scan vdev %d ret: %d\n",
+ vdev_id, ret);
+ } else {
+ ar->vdev_id_11d_scan = ATH11K_11D_INVALID_VDEV_ID;
+ ar->state_11d = ATH11K_11D_IDLE;
+ complete(&ar->completed_11d_scan);
+ }
+ }
+ mutex_unlock(&ar->ab->vdev_id_11d_lock);
+}
+
+void ath11k_mac_11d_scan_stop_all(struct ath11k_base *ab)
+{
+ struct ath11k *ar;
+ struct ath11k_pdev *pdev;
+ int i;
+
+ ath11k_dbg(ab, ATH11K_DBG_MAC, "stop soc 11d scan\n");
+
+ for (i = 0; i < ab->num_radios; i++) {
+ pdev = &ab->pdevs[i];
+ ar = pdev->ar;
+
+ ath11k_mac_11d_scan_stop(ar);
+ }
+}
+
+static int ath11k_mac_vdev_delete(struct ath11k *ar, struct ath11k_vif *arvif)
+{
+ unsigned long time_left;
+ struct ieee80211_vif *vif = arvif->vif;
+ int ret = 0;
+
+ lockdep_assert_held(&ar->conf_mutex);
+
+ reinit_completion(&ar->vdev_delete_done);
+
+ ret = ath11k_wmi_vdev_delete(ar, arvif->vdev_id);
+ if (ret) {
+ ath11k_warn(ar->ab, "failed to delete WMI vdev %d: %d\n",
+ arvif->vdev_id, ret);
+ return ret;
+ }
+
+ time_left = wait_for_completion_timeout(&ar->vdev_delete_done,
+ ATH11K_VDEV_DELETE_TIMEOUT_HZ);
+ if (time_left == 0) {
+ ath11k_warn(ar->ab, "Timeout in receiving vdev delete response\n");
+ return -ETIMEDOUT;
+ }
+
+ ar->ab->free_vdev_map |= 1LL << (arvif->vdev_id);
+ ar->allocated_vdev_map &= ~(1LL << arvif->vdev_id);
+ ar->num_created_vdevs--;
+
+ ath11k_dbg(ar->ab, ATH11K_DBG_MAC, "vdev %pM deleted, vdev_id %d\n",
+ vif->addr, arvif->vdev_id);
+
+ return ret;
+}
+
+static int ath11k_mac_op_add_interface(struct ieee80211_hw *hw,
+ struct ieee80211_vif *vif)
+{
+ struct ath11k *ar = hw->priv;
+ struct ath11k_base *ab = ar->ab;
+ struct ath11k_vif *arvif = ath11k_vif_to_arvif(vif);
+ struct vdev_create_params vdev_param = {0};
+ struct peer_create_params peer_param;
+ u32 param_id, param_value;
+ u16 nss;
+ int i;
+ int ret, fbret;
+ int bit;
+
+ vif->driver_flags |= IEEE80211_VIF_SUPPORTS_UAPSD;
+
+ mutex_lock(&ar->conf_mutex);
+
+ if (vif->type == NL80211_IFTYPE_AP &&
+ ar->num_peers > (ar->max_num_peers - 1)) {
+ ath11k_warn(ab, "failed to create vdev due to insufficient peer entry resource in firmware\n");
+ ret = -ENOBUFS;
+ goto err;
+ }
+
+ if (ar->num_created_vdevs > (TARGET_NUM_VDEVS(ab) - 1)) {
+ ath11k_warn(ab, "failed to create vdev %u, reached max vdev limit %d\n",
+ ar->num_created_vdevs, TARGET_NUM_VDEVS(ab));
+ ret = -EBUSY;
+ goto err;
+ }
+
+ /* In the case of hardware recovery, debugfs files are
+ * not deleted since ieee80211_ops.remove_interface() is
+ * not invoked. In such cases, try to delete the files.
+ * These will be re-created later.
+ */
+ ath11k_debugfs_remove_interface(arvif);
+
+ memset(arvif, 0, sizeof(*arvif));
+
+ arvif->ar = ar;
+ arvif->vif = vif;
+
+ INIT_LIST_HEAD(&arvif->list);
+ INIT_DELAYED_WORK(&arvif->connection_loss_work,
+ ath11k_mac_vif_sta_connection_loss_work);
+
+ for (i = 0; i < ARRAY_SIZE(arvif->bitrate_mask.control); i++) {
+ arvif->bitrate_mask.control[i].legacy = 0xffffffff;
+ arvif->bitrate_mask.control[i].gi = NL80211_TXRATE_FORCE_SGI;
+ memset(arvif->bitrate_mask.control[i].ht_mcs, 0xff,
+ sizeof(arvif->bitrate_mask.control[i].ht_mcs));
+ memset(arvif->bitrate_mask.control[i].vht_mcs, 0xff,
+ sizeof(arvif->bitrate_mask.control[i].vht_mcs));
+ memset(arvif->bitrate_mask.control[i].he_mcs, 0xff,
+ sizeof(arvif->bitrate_mask.control[i].he_mcs));
+ }
+
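+ /* free_vdev_map is a 64-bit bitmap of unused vdev ids; the lowest free
+ * bit becomes this interface's vdev id.
+ */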
+ bit = __ffs64(ab->free_vdev_map);
+
+ arvif->vdev_id = bit;
+ arvif->vdev_subtype = WMI_VDEV_SUBTYPE_NONE;
+
+ switch (vif->type) {
+ case NL80211_IFTYPE_UNSPECIFIED:
+ case NL80211_IFTYPE_STATION:
+ arvif->vdev_type = WMI_VDEV_TYPE_STA;
+ break;
+ case NL80211_IFTYPE_MESH_POINT:
+ arvif->vdev_subtype = WMI_VDEV_SUBTYPE_MESH_11S;
+ fallthrough;
+ case NL80211_IFTYPE_AP:
+ arvif->vdev_type = WMI_VDEV_TYPE_AP;
+ break;
+ case NL80211_IFTYPE_MONITOR:
+ arvif->vdev_type = WMI_VDEV_TYPE_MONITOR;
+ ar->monitor_vdev_id = bit;
+ break;
+ default:
+ WARN_ON(1);
+ break;
+ }
+
+ ath11k_dbg(ar->ab, ATH11K_DBG_MAC, "add interface id %d type %d subtype %d map %llx\n",
+ arvif->vdev_id, arvif->vdev_type, arvif->vdev_subtype,
+ ab->free_vdev_map);
+
+ vif->cab_queue = arvif->vdev_id % (ATH11K_HW_MAX_QUEUES - 1);
+ for (i = 0; i < ARRAY_SIZE(vif->hw_queue); i++)
+ vif->hw_queue[i] = i % (ATH11K_HW_MAX_QUEUES - 1);
+
+ ret = ath11k_mac_setup_vdev_create_params(arvif, &vdev_param);
+ if (ret) {
+ ath11k_warn(ab, "failed to create vdev parameters %d: %d\n",
+ arvif->vdev_id, ret);
+ goto err;
+ }
+
+ ret = ath11k_wmi_vdev_create(ar, vif->addr, &vdev_param);
+ if (ret) {
+ ath11k_warn(ab, "failed to create WMI vdev %d: %d\n",
+ arvif->vdev_id, ret);
+ goto err;
+ }
+
+ ar->num_created_vdevs++;
+ ath11k_dbg(ab, ATH11K_DBG_MAC, "vdev %pM created, vdev_id %d\n",
+ vif->addr, arvif->vdev_id);
+ ar->allocated_vdev_map |= 1LL << arvif->vdev_id;
+ ab->free_vdev_map &= ~(1LL << arvif->vdev_id);
+
+ spin_lock_bh(&ar->data_lock);
+ list_add(&arvif->list, &ar->arvifs);
+ spin_unlock_bh(&ar->data_lock);
+
+ ath11k_mac_op_update_vif_offload(hw, vif);
+
+ nss = get_num_chains(ar->cfg_tx_chainmask) ? : 1;
+ ret = ath11k_wmi_vdev_set_param_cmd(ar, arvif->vdev_id,
+ WMI_VDEV_PARAM_NSS, nss);
+ if (ret) {
+ ath11k_warn(ab, "failed to set vdev %d chainmask 0x%x, nss %d :%d\n",
+ arvif->vdev_id, ar->cfg_tx_chainmask, nss, ret);
+ goto err_vdev_del;
+ }
+
+ switch (arvif->vdev_type) {
+ case WMI_VDEV_TYPE_AP:
+ peer_param.vdev_id = arvif->vdev_id;
+ peer_param.peer_addr = vif->addr;
+ peer_param.peer_type = WMI_PEER_TYPE_DEFAULT;
+ ret = ath11k_peer_create(ar, arvif, NULL, &peer_param);
+ if (ret) {
+ ath11k_warn(ab, "failed to vdev %d create peer for AP: %d\n",
+ arvif->vdev_id, ret);
+ goto err_vdev_del;
+ }
+
+ ret = ath11k_mac_set_kickout(arvif);
+ if (ret) {
+ ath11k_warn(ar->ab, "failed to set vdev %i kickout parameters: %d\n",
+ arvif->vdev_id, ret);
+ goto err_peer_del;
+ }
+
+ ath11k_mac_11d_scan_stop_all(ar->ab);
+ break;
+ case WMI_VDEV_TYPE_STA:
+ param_id = WMI_STA_PS_PARAM_RX_WAKE_POLICY;
+ param_value = WMI_STA_PS_RX_WAKE_POLICY_WAKE;
+ ret = ath11k_wmi_set_sta_ps_param(ar, arvif->vdev_id,
+ param_id, param_value);
+ if (ret) {
+ ath11k_warn(ar->ab, "failed to set vdev %d RX wake policy: %d\n",
+ arvif->vdev_id, ret);
+ goto err_peer_del;
+ }
+
+ param_id = WMI_STA_PS_PARAM_TX_WAKE_THRESHOLD;
+ param_value = WMI_STA_PS_TX_WAKE_THRESHOLD_ALWAYS;
+ ret = ath11k_wmi_set_sta_ps_param(ar, arvif->vdev_id,
+ param_id, param_value);
+ if (ret) {
+ ath11k_warn(ar->ab, "failed to set vdev %d TX wake threshold: %d\n",
+ arvif->vdev_id, ret);
+ goto err_peer_del;
+ }
+
+ param_id = WMI_STA_PS_PARAM_PSPOLL_COUNT;
+ param_value = WMI_STA_PS_PSPOLL_COUNT_NO_MAX;
+ ret = ath11k_wmi_set_sta_ps_param(ar, arvif->vdev_id,
+ param_id, param_value);
+ if (ret) {
+ ath11k_warn(ar->ab, "failed to set vdev %d pspoll count: %d\n",
+ arvif->vdev_id, ret);
+ goto err_peer_del;
+ }
+
+ ret = ath11k_wmi_pdev_set_ps_mode(ar, arvif->vdev_id,
+ WMI_STA_PS_MODE_DISABLED);
+ if (ret) {
+ ath11k_warn(ar->ab, "failed to disable vdev %d ps mode: %d\n",
+ arvif->vdev_id, ret);
+ goto err_peer_del;
+ }
+
+ if (test_bit(WMI_TLV_SERVICE_11D_OFFLOAD, ab->wmi_ab.svc_map)) {
+ reinit_completion(&ar->completed_11d_scan);
+ ar->state_11d = ATH11K_11D_PREPARING;
+ }
+ break;
+ case WMI_VDEV_TYPE_MONITOR:
+ set_bit(ATH11K_FLAG_MONITOR_VDEV_CREATED, &ar->monitor_flags);
+ break;
+ default:
+ break;
+ }
+
+ arvif->txpower = vif->bss_conf.txpower;
+ ret = ath11k_mac_txpower_recalc(ar);
+ if (ret)
+ goto err_peer_del;
+
+ param_id = WMI_VDEV_PARAM_RTS_THRESHOLD;
+ param_value = ar->hw->wiphy->rts_threshold;
+ ret = ath11k_wmi_vdev_set_param_cmd(ar, arvif->vdev_id,
+ param_id, param_value);
+ if (ret) {
+ ath11k_warn(ar->ab, "failed to set rts threshold for vdev %d: %d\n",
+ arvif->vdev_id, ret);
+ }
+
+ ath11k_dp_vdev_tx_attach(ar, arvif);
+
+ ath11k_debugfs_add_interface(arvif);
+
+ if (vif->type != NL80211_IFTYPE_MONITOR &&
+ test_bit(ATH11K_FLAG_MONITOR_CONF_ENABLED, &ar->monitor_flags)) {
+ ret = ath11k_mac_monitor_vdev_create(ar);
+ if (ret)
+ ath11k_warn(ar->ab, "failed to create monitor vdev during add interface: %d",
+ ret);
+ }
+
+ mutex_unlock(&ar->conf_mutex);
+
+ return 0;
+
+err_peer_del:
+ if (arvif->vdev_type == WMI_VDEV_TYPE_AP) {
+ fbret = ath11k_peer_delete(ar, arvif->vdev_id, vif->addr);
+ if (fbret) {
+ ath11k_warn(ar->ab, "fallback fail to delete peer addr %pM vdev_id %d ret %d\n",
+ vif->addr, arvif->vdev_id, fbret);
+ goto err;
+ }
+ }
+
+err_vdev_del:
+ ath11k_mac_vdev_delete(ar, arvif);
+ spin_lock_bh(&ar->data_lock);
+ list_del(&arvif->list);
+ spin_unlock_bh(&ar->data_lock);
+
+err:
+ mutex_unlock(&ar->conf_mutex);
+
+ return ret;
+}
+
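+/* idr iterator callback: detach the vif from any tx buffers still in
+ * flight so later completions do not dereference a removed interface.
+ */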
+static int ath11k_mac_vif_unref(int buf_id, void *skb, void *ctx)
+{
+ struct ieee80211_vif *vif = (struct ieee80211_vif *)ctx;
+ struct ath11k_skb_cb *skb_cb = ATH11K_SKB_CB((struct sk_buff *)skb);
+
+ if (skb_cb->vif == vif)
+ skb_cb->vif = NULL;
+
+ return 0;
+}
+
+static void ath11k_mac_op_remove_interface(struct ieee80211_hw *hw,
+ struct ieee80211_vif *vif)
+{
+ struct ath11k *ar = hw->priv;
+ struct ath11k_vif *arvif = ath11k_vif_to_arvif(vif);
+ struct ath11k_base *ab = ar->ab;
+ int ret;
+ int i;
+
+ cancel_delayed_work_sync(&arvif->connection_loss_work);
+
+ mutex_lock(&ar->conf_mutex);
+
+ ath11k_dbg(ab, ATH11K_DBG_MAC, "remove interface (vdev %d)\n",
+ arvif->vdev_id);
+
+ ret = ath11k_spectral_vif_stop(arvif);
+ if (ret)
+ ath11k_warn(ab, "failed to stop spectral for vdev %i: %d\n",
+ arvif->vdev_id, ret);
+
+ if (arvif->vdev_type == WMI_VDEV_TYPE_STA)
+ ath11k_mac_11d_scan_stop(ar);
+
+ if (arvif->vdev_type == WMI_VDEV_TYPE_AP) {
+ ret = ath11k_peer_delete(ar, arvif->vdev_id, vif->addr);
+ if (ret)
+ ath11k_warn(ab, "failed to submit AP self-peer removal on vdev %d: %d\n",
+ arvif->vdev_id, ret);
+ }
+
+ ret = ath11k_mac_vdev_delete(ar, arvif);
+ if (ret) {
+ ath11k_warn(ab, "failed to delete vdev %d: %d\n",
+ arvif->vdev_id, ret);
+ goto err_vdev_del;
+ }
+
+ if (arvif->vdev_type == WMI_VDEV_TYPE_MONITOR) {
+ clear_bit(ATH11K_FLAG_MONITOR_VDEV_CREATED, &ar->monitor_flags);
+ ar->monitor_vdev_id = -1;
+ } else if (test_bit(ATH11K_FLAG_MONITOR_VDEV_CREATED, &ar->monitor_flags) &&
+ !test_bit(ATH11K_FLAG_MONITOR_STARTED, &ar->monitor_flags)) {
+ ret = ath11k_mac_monitor_vdev_delete(ar);
+ if (ret)
+ /* continue even if there's an error */
+ ath11k_warn(ar->ab, "failed to delete vdev monitor during remove interface: %d",
+ ret);
+ }
+
+err_vdev_del:
+ spin_lock_bh(&ar->data_lock);
+ list_del(&arvif->list);
+ spin_unlock_bh(&ar->data_lock);
+
+ ath11k_peer_cleanup(ar, arvif->vdev_id);
+
+ idr_for_each(&ar->txmgmt_idr,
+ ath11k_mac_vif_txmgmt_idr_remove, vif);
+
+ for (i = 0; i < ab->hw_params.max_tx_ring; i++) {
+ spin_lock_bh(&ab->dp.tx_ring[i].tx_idr_lock);
+ idr_for_each(&ab->dp.tx_ring[i].txbuf_idr,
+ ath11k_mac_vif_unref, vif);
+ spin_unlock_bh(&ab->dp.tx_ring[i].tx_idr_lock);
+ }
+
+ /* Recalc txpower for remaining vdev */
+ ath11k_mac_txpower_recalc(ar);
+
+ ath11k_debugfs_remove_interface(arvif);
+
+ /* TODO: recalc traffic pause state based on the available vdevs */
+
+ mutex_unlock(&ar->conf_mutex);
+}
+
+/* FIXME: Has to be verified. */
+#define SUPPORTED_FILTERS \
+ (FIF_ALLMULTI | \
+ FIF_CONTROL | \
+ FIF_PSPOLL | \
+ FIF_OTHER_BSS | \
+ FIF_BCN_PRBRESP_PROMISC | \
+ FIF_PROBE_REQ | \
+ FIF_FCSFAIL)
+
+static void ath11k_mac_op_configure_filter(struct ieee80211_hw *hw,
+ unsigned int changed_flags,
+ unsigned int *total_flags,
+ u64 multicast)
+{
+ struct ath11k *ar = hw->priv;
+
+ mutex_lock(&ar->conf_mutex);
+
+ *total_flags &= SUPPORTED_FILTERS;
+ ar->filter_flags = *total_flags;
+
+ mutex_unlock(&ar->conf_mutex);
+}
+
+static int ath11k_mac_op_get_antenna(struct ieee80211_hw *hw, u32 *tx_ant, u32 *rx_ant)
+{
+ struct ath11k *ar = hw->priv;
+
+ mutex_lock(&ar->conf_mutex);
+
+ *tx_ant = ar->cfg_tx_chainmask;
+ *rx_ant = ar->cfg_rx_chainmask;
+
+ mutex_unlock(&ar->conf_mutex);
+
+ return 0;
+}
+
+static int ath11k_mac_op_set_antenna(struct ieee80211_hw *hw, u32 tx_ant, u32 rx_ant)
+{
+ struct ath11k *ar = hw->priv;
+ int ret;
+
+ mutex_lock(&ar->conf_mutex);
+ ret = __ath11k_set_antenna(ar, tx_ant, rx_ant);
+ mutex_unlock(&ar->conf_mutex);
+
+ return ret;
+}
+
+static int ath11k_mac_op_ampdu_action(struct ieee80211_hw *hw,
+ struct ieee80211_vif *vif,
+ struct ieee80211_ampdu_params *params)
+{
+ struct ath11k *ar = hw->priv;
+ int ret = -EINVAL;
+
+ mutex_lock(&ar->conf_mutex);
+
+ switch (params->action) {
+ case IEEE80211_AMPDU_RX_START:
+ ret = ath11k_dp_rx_ampdu_start(ar, params);
+ break;
+ case IEEE80211_AMPDU_RX_STOP:
+ ret = ath11k_dp_rx_ampdu_stop(ar, params);
+ break;
+ case IEEE80211_AMPDU_TX_START:
+ case IEEE80211_AMPDU_TX_STOP_CONT:
+ case IEEE80211_AMPDU_TX_STOP_FLUSH:
+ case IEEE80211_AMPDU_TX_STOP_FLUSH_CONT:
+ case IEEE80211_AMPDU_TX_OPERATIONAL:
+ /* Tx A-MPDU aggregation offloaded to hw/fw so deny mac80211
+ * Tx aggregation requests.
+ */
+ ret = -EOPNOTSUPP;
+ break;
+ }
+
+ mutex_unlock(&ar->conf_mutex);
+
+ return ret;
+}
+
+static int ath11k_mac_op_add_chanctx(struct ieee80211_hw *hw,
+ struct ieee80211_chanctx_conf *ctx)
+{
+ struct ath11k *ar = hw->priv;
+ struct ath11k_base *ab = ar->ab;
+
+ ath11k_dbg(ab, ATH11K_DBG_MAC,
+ "chanctx add freq %u width %d ptr %p\n",
+ ctx->def.chan->center_freq, ctx->def.width, ctx);
+
+ mutex_lock(&ar->conf_mutex);
+
+ spin_lock_bh(&ar->data_lock);
+ /* TODO: In case of multiple channel context, populate rx_channel from
+ * Rx PPDU desc information.
+ */
+ ar->rx_channel = ctx->def.chan;
+ spin_unlock_bh(&ar->data_lock);
+
+ mutex_unlock(&ar->conf_mutex);
+
+ return 0;
+}
+
+static void ath11k_mac_op_remove_chanctx(struct ieee80211_hw *hw,
+ struct ieee80211_chanctx_conf *ctx)
+{
+ struct ath11k *ar = hw->priv;
+ struct ath11k_base *ab = ar->ab;
+
+ ath11k_dbg(ab, ATH11K_DBG_MAC,
+ "chanctx remove freq %u width %d ptr %p\n",
+ ctx->def.chan->center_freq, ctx->def.width, ctx);
+
+ mutex_lock(&ar->conf_mutex);
+
+ spin_lock_bh(&ar->data_lock);
+ /* TODO: In case of there is one more channel context left, populate
+ * rx_channel with the channel of that remaining channel context.
+ */
+ ar->rx_channel = NULL;
+ spin_unlock_bh(&ar->data_lock);
+
+ mutex_unlock(&ar->conf_mutex);
+}
+
+static int
+ath11k_mac_vdev_start_restart(struct ath11k_vif *arvif,
+ struct ieee80211_chanctx_conf *ctx,
+ bool restart)
+{
+ struct ath11k *ar = arvif->ar;
+ struct ath11k_base *ab = ar->ab;
+ struct wmi_vdev_start_req_arg arg = {};
+ const struct cfg80211_chan_def *chandef = &ctx->def;
+ int ret = 0;
+
+ lockdep_assert_held(&ar->conf_mutex);
+
+ reinit_completion(&ar->vdev_setup_done);
+
+ arg.vdev_id = arvif->vdev_id;
+ arg.dtim_period = arvif->dtim_period;
+ arg.bcn_intval = arvif->beacon_interval;
+
+ arg.channel.freq = chandef->chan->center_freq;
+ arg.channel.band_center_freq1 = chandef->center_freq1;
+ arg.channel.band_center_freq2 = chandef->center_freq2;
+ arg.channel.mode =
+ ath11k_phymodes[chandef->chan->band][chandef->width];
+
+ arg.channel.min_power = 0;
+ arg.channel.max_power = chandef->chan->max_power;
+ arg.channel.max_reg_power = chandef->chan->max_reg_power;
+ arg.channel.max_antenna_gain = chandef->chan->max_antenna_gain;
+
+ arg.pref_tx_streams = ar->num_tx_chains;
+ arg.pref_rx_streams = ar->num_rx_chains;
+
+ arg.mbssid_flags = 0;
+ arg.mbssid_tx_vdev_id = 0;
+ if (test_bit(WMI_TLV_SERVICE_MBSS_PARAM_IN_VDEV_START_SUPPORT,
+ ar->ab->wmi_ab.svc_map)) {
+ ret = ath11k_mac_setup_vdev_params_mbssid(arvif,
+ &arg.mbssid_flags,
+ &arg.mbssid_tx_vdev_id);
+ if (ret)
+ return ret;
+ }
+
+ if (arvif->vdev_type == WMI_VDEV_TYPE_AP) {
+ arg.ssid = arvif->u.ap.ssid;
+ arg.ssid_len = arvif->u.ap.ssid_len;
+ arg.hidden_ssid = arvif->u.ap.hidden_ssid;
+
+ /* For now allow DFS for AP mode */
+ arg.channel.chan_radar =
+ !!(chandef->chan->flags & IEEE80211_CHAN_RADAR);
+
+ arg.channel.freq2_radar = ctx->radar_enabled;
+
+ arg.channel.passive = arg.channel.chan_radar;
+
+ spin_lock_bh(&ab->base_lock);
+ arg.regdomain = ar->ab->dfs_region;
+ spin_unlock_bh(&ab->base_lock);
+ }
+
+ arg.channel.passive |= !!(chandef->chan->flags & IEEE80211_CHAN_NO_IR);
+
+ ath11k_dbg(ab, ATH11K_DBG_MAC,
+ "vdev %d start center_freq %d phymode %s\n",
+ arg.vdev_id, arg.channel.freq,
+ ath11k_wmi_phymode_str(arg.channel.mode));
+
+ ret = ath11k_wmi_vdev_start(ar, &arg, restart);
+ if (ret) {
+ ath11k_warn(ar->ab, "failed to %s WMI vdev %i\n",
+ restart ? "restart" : "start", arg.vdev_id);
+ return ret;
+ }
+
+ ret = ath11k_mac_vdev_setup_sync(ar);
+ if (ret) {
+ ath11k_warn(ab, "failed to synchronize setup for vdev %i %s: %d\n",
+ arg.vdev_id, restart ? "restart" : "start", ret);
+ return ret;
+ }
+
+ if (!restart)
+ ar->num_started_vdevs++;
+
+ ath11k_dbg(ab, ATH11K_DBG_MAC, "vdev %pM started, vdev_id %d\n",
+ arvif->vif->addr, arvif->vdev_id);
+
+ /* Enable the CAC flag in the driver when the channel's DFS CAC time
+ * (dfs_cac_ms, valid only for radar channels) is set and its DFS state
+ * is NL80211_DFS_USABLE, which indicates that CAC must complete before
+ * the channel can be used. The flag is used to drop rx packets during
+ * CAC.
+ */
+ /* TODO Set the flag for other interface types as required */
+ if (arvif->vdev_type == WMI_VDEV_TYPE_AP &&
+ chandef->chan->dfs_cac_ms &&
+ chandef->chan->dfs_state == NL80211_DFS_USABLE) {
+ set_bit(ATH11K_CAC_RUNNING, &ar->dev_flags);
+ ath11k_dbg(ab, ATH11K_DBG_MAC,
+ "CAC Started in chan_freq %d for vdev %d\n",
+ arg.channel.freq, arg.vdev_id);
+ }
+
+ ret = ath11k_mac_set_txbf_conf(arvif);
+ if (ret)
+ ath11k_warn(ab, "failed to set txbf conf for vdev %d: %d\n",
+ arvif->vdev_id, ret);
+
+ return 0;
+}
+
+static int ath11k_mac_vdev_stop(struct ath11k_vif *arvif)
+{
+ struct ath11k *ar = arvif->ar;
+ int ret;
+
+ lockdep_assert_held(&ar->conf_mutex);
+
+ reinit_completion(&ar->vdev_setup_done);
+
+ ret = ath11k_wmi_vdev_stop(ar, arvif->vdev_id);
+ if (ret) {
+ ath11k_warn(ar->ab, "failed to stop WMI vdev %i: %d\n",
+ arvif->vdev_id, ret);
+ goto err;
+ }
+
+ ret = ath11k_mac_vdev_setup_sync(ar);
+ if (ret) {
+ ath11k_warn(ar->ab, "failed to synchronize setup for vdev %i: %d\n",
+ arvif->vdev_id, ret);
+ goto err;
+ }
+
+ WARN_ON(ar->num_started_vdevs == 0);
+
+ ar->num_started_vdevs--;
+ ath11k_dbg(ar->ab, ATH11K_DBG_MAC, "vdev %pM stopped, vdev_id %d\n",
+ arvif->vif->addr, arvif->vdev_id);
+
+ if (test_bit(ATH11K_CAC_RUNNING, &ar->dev_flags)) {
+ clear_bit(ATH11K_CAC_RUNNING, &ar->dev_flags);
+ ath11k_dbg(ar->ab, ATH11K_DBG_MAC, "CAC Stopped for vdev %d\n",
+ arvif->vdev_id);
+ }
+
+ return 0;
+err:
+ return ret;
+}
+
+static int ath11k_mac_vdev_start(struct ath11k_vif *arvif,
+ struct ieee80211_chanctx_conf *ctx)
+{
+ return ath11k_mac_vdev_start_restart(arvif, ctx, false);
+}
+
+static int ath11k_mac_vdev_restart(struct ath11k_vif *arvif,
+ struct ieee80211_chanctx_conf *ctx)
+{
+ return ath11k_mac_vdev_start_restart(arvif, ctx, true);
+}
+
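+/* Context for the two-pass channel-switch iteration below: the first pass
+ * counts the vifs bound to @ctx, the second fills @vifs with old/new
+ * chanctx pairs for ath11k_mac_update_vif_chan().
+ */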
+struct ath11k_mac_change_chanctx_arg {
+ struct ieee80211_chanctx_conf *ctx;
+ struct ieee80211_vif_chanctx_switch *vifs;
+ int n_vifs;
+ int next_vif;
+};
+
+static void
+ath11k_mac_change_chanctx_cnt_iter(void *data, u8 *mac,
+ struct ieee80211_vif *vif)
+{
+ struct ath11k_mac_change_chanctx_arg *arg = data;
+
+ if (rcu_access_pointer(vif->bss_conf.chanctx_conf) != arg->ctx)
+ return;
+
+ arg->n_vifs++;
+}
+
+static void
+ath11k_mac_change_chanctx_fill_iter(void *data, u8 *mac,
+ struct ieee80211_vif *vif)
+{
+ struct ath11k_mac_change_chanctx_arg *arg = data;
+ struct ieee80211_chanctx_conf *ctx;
+
+ ctx = rcu_access_pointer(vif->bss_conf.chanctx_conf);
+ if (ctx != arg->ctx)
+ return;
+
+ if (WARN_ON(arg->next_vif == arg->n_vifs))
+ return;
+
+ arg->vifs[arg->next_vif].vif = vif;
+ arg->vifs[arg->next_vif].old_ctx = ctx;
+ arg->vifs[arg->next_vif].new_ctx = ctx;
+ arg->next_vif++;
+}
+
+static void
+ath11k_mac_update_vif_chan(struct ath11k *ar,
+ struct ieee80211_vif_chanctx_switch *vifs,
+ int n_vifs)
+{
+ struct ath11k_base *ab = ar->ab;
+ struct ath11k_vif *arvif, *tx_arvif = NULL;
+ struct ieee80211_vif *mbssid_tx_vif;
+ int ret;
+ int i;
+ bool monitor_vif = false;
+
+ lockdep_assert_held(&ar->conf_mutex);
+
+ /* Associated channel resources of all relevant vdevs
+ * should be available for the channel switch now.
+ */
+
+ /* TODO: Update ar->rx_channel */
+
+ for (i = 0; i < n_vifs; i++) {
+ arvif = ath11k_vif_to_arvif(vifs[i].vif);
+
+ if (WARN_ON(!arvif->is_started))
+ continue;
+
+ /* change_chanctx can be called even before vdev_up from
+ * ieee80211_start_ap->ieee80211_vif_use_channel->
+ * ieee80211_recalc_radar_chanctx.
+ *
+ * Firmware expects vdev_restart only if the vdev is up; if the
+ * vdev is down, it expects vdev_stop followed by vdev_start.
+ */
+ if (arvif->is_up) {
+ ret = ath11k_mac_vdev_restart(arvif, vifs[i].new_ctx);
+ if (ret) {
+ ath11k_warn(ab, "failed to restart vdev %d: %d\n",
+ arvif->vdev_id, ret);
+ continue;
+ }
+ } else {
+ ret = ath11k_mac_vdev_stop(arvif);
+ if (ret) {
+ ath11k_warn(ab, "failed to stop vdev %d: %d\n",
+ arvif->vdev_id, ret);
+ continue;
+ }
+
+ ret = ath11k_mac_vdev_start(arvif, vifs[i].new_ctx);
+ if (ret)
+ ath11k_warn(ab, "failed to start vdev %d: %d\n",
+ arvif->vdev_id, ret);
+
+ continue;
+ }
+
+ ret = ath11k_mac_setup_bcn_tmpl(arvif);
+ if (ret)
+ ath11k_warn(ab, "failed to update bcn tmpl during csa: %d\n",
+ ret);
+
+ mbssid_tx_vif = arvif->vif->mbssid_tx_vif;
+ if (mbssid_tx_vif)
+ tx_arvif = ath11k_vif_to_arvif(mbssid_tx_vif);
+
+ ret = ath11k_wmi_vdev_up(arvif->ar, arvif->vdev_id, arvif->aid,
+ arvif->bssid,
+ tx_arvif ? tx_arvif->bssid : NULL,
+ arvif->vif->bss_conf.bssid_index,
+ 1 << arvif->vif->bss_conf.bssid_indicator);
+ if (ret) {
+ ath11k_warn(ab, "failed to bring vdev up %d: %d\n",
+ arvif->vdev_id, ret);
+ continue;
+ }
+ }
+
+ /* Restart the internal monitor vdev on new channel */
+ if (!monitor_vif &&
+ test_bit(ATH11K_FLAG_MONITOR_VDEV_CREATED, &ar->monitor_flags)) {
+ ret = ath11k_mac_monitor_stop(ar);
+ if (ret) {
+ ath11k_warn(ar->ab, "failed to stop monitor during vif channel update: %d",
+ ret);
+ return;
+ }
+
+ ret = ath11k_mac_monitor_start(ar);
+ if (ret) {
+ ath11k_warn(ar->ab, "failed to start monitor during vif channel update: %d",
+ ret);
+ return;
+ }
+ }
+}
+
+static void
+ath11k_mac_update_active_vif_chan(struct ath11k *ar,
+ struct ieee80211_chanctx_conf *ctx)
+{
+ struct ath11k_mac_change_chanctx_arg arg = { .ctx = ctx };
+
+ lockdep_assert_held(&ar->conf_mutex);
+
+ ieee80211_iterate_active_interfaces_atomic(ar->hw,
+ IEEE80211_IFACE_ITER_NORMAL,
+ ath11k_mac_change_chanctx_cnt_iter,
+ &arg);
+ if (arg.n_vifs == 0)
+ return;
+
+ arg.vifs = kcalloc(arg.n_vifs, sizeof(arg.vifs[0]), GFP_KERNEL);
+ if (!arg.vifs)
+ return;
+
+ ieee80211_iterate_active_interfaces_atomic(ar->hw,
+ IEEE80211_IFACE_ITER_NORMAL,
+ ath11k_mac_change_chanctx_fill_iter,
+ &arg);
+
+ ath11k_mac_update_vif_chan(ar, arg.vifs, arg.n_vifs);
+
+ kfree(arg.vifs);
+}
+
+static void ath11k_mac_op_change_chanctx(struct ieee80211_hw *hw,
+ struct ieee80211_chanctx_conf *ctx,
+ u32 changed)
+{
+ struct ath11k *ar = hw->priv;
+ struct ath11k_base *ab = ar->ab;
+
+ mutex_lock(&ar->conf_mutex);
+
+ ath11k_dbg(ab, ATH11K_DBG_MAC,
+ "chanctx change freq %u width %d ptr %p changed %x\n",
+ ctx->def.chan->center_freq, ctx->def.width, ctx, changed);
+
+ /* This shouldn't really happen because channel switching should use
+ * switch_vif_chanctx().
+ */
+ if (WARN_ON(changed & IEEE80211_CHANCTX_CHANGE_CHANNEL))
+ goto unlock;
+
+ if (changed & IEEE80211_CHANCTX_CHANGE_WIDTH ||
+ changed & IEEE80211_CHANCTX_CHANGE_RADAR)
+ ath11k_mac_update_active_vif_chan(ar, ctx);
+
+ /* TODO: Recalc radar detection */
+
+unlock:
+ mutex_unlock(&ar->conf_mutex);
+}
+
+static int ath11k_start_vdev_delay(struct ieee80211_hw *hw,
+ struct ieee80211_vif *vif)
+{
+ struct ath11k *ar = hw->priv;
+ struct ath11k_base *ab = ar->ab;
+ struct ath11k_vif *arvif = ath11k_vif_to_arvif(vif);
+ int ret;
+
+ if (WARN_ON(arvif->is_started))
+ return -EBUSY;
+
+ ret = ath11k_mac_vdev_start(arvif, &arvif->chanctx);
+ if (ret) {
+ ath11k_warn(ab, "failed to start vdev %i addr %pM on freq %d: %d\n",
+ arvif->vdev_id, vif->addr,
+ arvif->chanctx.def.chan->center_freq, ret);
+ return ret;
+ }
+
+ /* Reconfigure hardware rate code since it is cleared by firmware. */
+ if (ar->hw_rate_code > 0) {
+ u32 vdev_param = WMI_VDEV_PARAM_MGMT_RATE;
+
+ ret = ath11k_wmi_vdev_set_param_cmd(ar, arvif->vdev_id, vdev_param,
+ ar->hw_rate_code);
+ if (ret) {
+ ath11k_warn(ar->ab, "failed to set mgmt tx rate %d\n", ret);
+ return ret;
+ }
+ }
+
+ if (arvif->vdev_type == WMI_VDEV_TYPE_MONITOR) {
+ ret = ath11k_wmi_vdev_up(ar, arvif->vdev_id, 0, ar->mac_addr,
+ NULL, 0, 0);
+ if (ret) {
+ ath11k_warn(ab, "failed put monitor up: %d\n", ret);
+ return ret;
+ }
+ }
+
+ arvif->is_started = true;
+
+ /* TODO: Setup ps and cts/rts protection */
+ return 0;
+}
+
+static int
+ath11k_mac_op_assign_vif_chanctx(struct ieee80211_hw *hw,
+ struct ieee80211_vif *vif,
+ struct ieee80211_bss_conf *link_conf,
+ struct ieee80211_chanctx_conf *ctx)
+{
+ struct ath11k *ar = hw->priv;
+ struct ath11k_base *ab = ar->ab;
+ struct ath11k_vif *arvif = ath11k_vif_to_arvif(vif);
+ int ret;
+ struct peer_create_params param;
+
+ mutex_lock(&ar->conf_mutex);
+
+ ath11k_dbg(ab, ATH11K_DBG_MAC,
+ "chanctx assign ptr %p vdev_id %i\n",
+ ctx, arvif->vdev_id);
+
+ /* for QCA6390 bss peer must be created before vdev_start */
+ if (ab->hw_params.vdev_start_delay &&
+ arvif->vdev_type != WMI_VDEV_TYPE_AP &&
+ arvif->vdev_type != WMI_VDEV_TYPE_MONITOR &&
+ !ath11k_peer_find_by_vdev_id(ab, arvif->vdev_id)) {
+ memcpy(&arvif->chanctx, ctx, sizeof(*ctx));
+ ret = 0;
+ goto out;
+ }
+
+ if (WARN_ON(arvif->is_started)) {
+ ret = -EBUSY;
+ goto out;
+ }
+
+ if (ab->hw_params.vdev_start_delay &&
+ arvif->vdev_type != WMI_VDEV_TYPE_AP &&
+ arvif->vdev_type != WMI_VDEV_TYPE_MONITOR) {
+ param.vdev_id = arvif->vdev_id;
+ param.peer_type = WMI_PEER_TYPE_DEFAULT;
+ param.peer_addr = ar->mac_addr;
+
+ ret = ath11k_peer_create(ar, arvif, NULL, &param);
+ if (ret) {
+ ath11k_warn(ab, "failed to create peer after vdev start delay: %d",
+ ret);
+ goto out;
+ }
+ }
+
+ if (arvif->vdev_type == WMI_VDEV_TYPE_MONITOR) {
+ ret = ath11k_mac_monitor_start(ar);
+ if (ret) {
+ ath11k_warn(ar->ab, "failed to start monitor during vif channel context assignment: %d",
+ ret);
+ goto out;
+ }
+
+ arvif->is_started = true;
+ goto out;
+ }
+
+ ret = ath11k_mac_vdev_start(arvif, ctx);
+ if (ret) {
+ ath11k_warn(ab, "failed to start vdev %i addr %pM on freq %d: %d\n",
+ arvif->vdev_id, vif->addr,
+ ctx->def.chan->center_freq, ret);
+ goto out;
+ }
+
+ arvif->is_started = true;
+
+ if (arvif->vdev_type != WMI_VDEV_TYPE_MONITOR &&
+ test_bit(ATH11K_FLAG_MONITOR_VDEV_CREATED, &ar->monitor_flags)) {
+ ret = ath11k_mac_monitor_start(ar);
+ if (ret) {
+ ath11k_warn(ar->ab, "failed to start monitor during vif channel context assignment: %d",
+ ret);
+ goto out;
+ }
+ }
+
+ /* TODO: Setup ps and cts/rts protection */
+
+ ret = 0;
+
+out:
+ mutex_unlock(&ar->conf_mutex);
+
+ return ret;
+}
+
+static void
+ath11k_mac_op_unassign_vif_chanctx(struct ieee80211_hw *hw,
+ struct ieee80211_vif *vif,
+ struct ieee80211_bss_conf *link_conf,
+ struct ieee80211_chanctx_conf *ctx)
+{
+ struct ath11k *ar = hw->priv;
+ struct ath11k_base *ab = ar->ab;
+ struct ath11k_vif *arvif = ath11k_vif_to_arvif(vif);
+ struct ath11k_peer *peer;
+ int ret;
+
+ mutex_lock(&ar->conf_mutex);
+
+ ath11k_dbg(ab, ATH11K_DBG_MAC,
+ "chanctx unassign ptr %p vdev_id %i\n",
+ ctx, arvif->vdev_id);
+
+ WARN_ON(!arvif->is_started);
+
+ if (ab->hw_params.vdev_start_delay &&
+ arvif->vdev_type == WMI_VDEV_TYPE_MONITOR) {
+ spin_lock_bh(&ab->base_lock);
+ peer = ath11k_peer_find_by_addr(ab, ar->mac_addr);
+ spin_unlock_bh(&ab->base_lock);
+ if (peer)
+ ath11k_peer_delete(ar, arvif->vdev_id, ar->mac_addr);
+ }
+
+ if (arvif->vdev_type == WMI_VDEV_TYPE_MONITOR) {
+ ret = ath11k_mac_monitor_stop(ar);
+ if (ret) {
+ ath11k_warn(ar->ab, "failed to stop monitor during vif channel context unassignment: %d",
+ ret);
+ mutex_unlock(&ar->conf_mutex);
+ return;
+ }
+
+ arvif->is_started = false;
+ mutex_unlock(&ar->conf_mutex);
+ return;
+ }
+
+ ret = ath11k_mac_vdev_stop(arvif);
+ if (ret)
+ ath11k_warn(ab, "failed to stop vdev %i: %d\n",
+ arvif->vdev_id, ret);
+
+ arvif->is_started = false;
+
+ if (ab->hw_params.vdev_start_delay &&
+ arvif->vdev_type == WMI_VDEV_TYPE_STA) {
+ ret = ath11k_peer_delete(ar, arvif->vdev_id, arvif->bssid);
+ if (ret)
+ ath11k_warn(ar->ab,
+ "failed to delete peer %pM for vdev %d: %d\n",
+ arvif->bssid, arvif->vdev_id, ret);
+ else
+ ath11k_dbg(ar->ab, ATH11K_DBG_MAC,
+ "removed peer %pM vdev %d after vdev stop\n",
+ arvif->bssid, arvif->vdev_id);
+ }
+
+ if (ab->hw_params.vdev_start_delay &&
+ arvif->vdev_type == WMI_VDEV_TYPE_MONITOR)
+ ath11k_wmi_vdev_down(ar, arvif->vdev_id);
+
+ if (arvif->vdev_type != WMI_VDEV_TYPE_MONITOR &&
+ ar->num_started_vdevs == 1 &&
+ test_bit(ATH11K_FLAG_MONITOR_VDEV_CREATED, &ar->monitor_flags)) {
+ ret = ath11k_mac_monitor_stop(ar);
+ if (ret)
+ /* continue even if there's an error */
+ ath11k_warn(ar->ab, "failed to stop monitor during vif channel context unassignment: %d",
+ ret);
+ }
+
+ if (arvif->vdev_type == WMI_VDEV_TYPE_STA)
+ ath11k_mac_11d_scan_start(ar, arvif->vdev_id);
+
+ mutex_unlock(&ar->conf_mutex);
+}
+
+static int
+ath11k_mac_op_switch_vif_chanctx(struct ieee80211_hw *hw,
+ struct ieee80211_vif_chanctx_switch *vifs,
+ int n_vifs,
+ enum ieee80211_chanctx_switch_mode mode)
+{
+ struct ath11k *ar = hw->priv;
+
+ mutex_lock(&ar->conf_mutex);
+
+ ath11k_dbg(ar->ab, ATH11K_DBG_MAC,
+ "chanctx switch n_vifs %d mode %d\n",
+ n_vifs, mode);
+ ath11k_mac_update_vif_chan(ar, vifs, n_vifs);
+
+ mutex_unlock(&ar->conf_mutex);
+
+ return 0;
+}
+
+static int
+ath11k_set_vdev_param_to_all_vifs(struct ath11k *ar, int param, u32 value)
+{
+ struct ath11k_vif *arvif;
+ int ret = 0;
+
+ mutex_lock(&ar->conf_mutex);
+ list_for_each_entry(arvif, &ar->arvifs, list) {
+ ath11k_dbg(ar->ab, ATH11K_DBG_MAC, "setting mac vdev %d param %d value %d\n",
+ param, arvif->vdev_id, value);
+
+ ret = ath11k_wmi_vdev_set_param_cmd(ar, arvif->vdev_id,
+ param, value);
+ if (ret) {
+ ath11k_warn(ar->ab, "failed to set param %d for vdev %d: %d\n",
+ param, arvif->vdev_id, ret);
+ break;
+ }
+ }
+ mutex_unlock(&ar->conf_mutex);
+ return ret;
+}
+
+/* mac80211 stores the RTS/fragmentation threshold per device; the ath11k
+ * driver applies it to firmware per interface (vdev).
+ */
+static int ath11k_mac_op_set_rts_threshold(struct ieee80211_hw *hw, u32 value)
+{
+ struct ath11k *ar = hw->priv;
+ int param_id = WMI_VDEV_PARAM_RTS_THRESHOLD;
+
+ return ath11k_set_vdev_param_to_all_vifs(ar, param_id, value);
+}
+
+static int ath11k_mac_op_set_frag_threshold(struct ieee80211_hw *hw, u32 value)
+{
+ /* Even though there's a WMI vdev param for the fragmentation threshold,
+ * no known firmware actually implements it. Moreover, it is not possible
+ * to leave frame fragmentation to mac80211 because the firmware clears
+ * the "more fragments" bit in the frame control field, making it
+ * impossible for remote devices to reassemble frames.
+ *
+ * Hence implement a dummy callback just to say fragmentation isn't
+ * supported. This effectively prevents mac80211 from doing frame
+ * fragmentation in software.
+ */
+ return -EOPNOTSUPP;
+}
+
+static int ath11k_mac_flush_tx_complete(struct ath11k *ar)
+{
+ long time_left;
+ int ret = 0;
+
+ time_left = wait_event_timeout(ar->dp.tx_empty_waitq,
+ (atomic_read(&ar->dp.num_tx_pending) == 0),
+ ATH11K_FLUSH_TIMEOUT);
+ if (time_left == 0) {
+ ath11k_warn(ar->ab, "failed to flush transmit queue, data pkts pending %d\n",
+ atomic_read(&ar->dp.num_tx_pending));
+ ret = -ETIMEDOUT;
+ }
+
+ time_left = wait_event_timeout(ar->txmgmt_empty_waitq,
+ (atomic_read(&ar->num_pending_mgmt_tx) == 0),
+ ATH11K_FLUSH_TIMEOUT);
+ if (time_left == 0) {
+ ath11k_warn(ar->ab, "failed to flush mgmt transmit queue, mgmt pkts pending %d\n",
+ atomic_read(&ar->num_pending_mgmt_tx));
+ ret = -ETIMEDOUT;
+ }
+
+ return ret;
+}
+
+int ath11k_mac_wait_tx_complete(struct ath11k *ar)
+{
+ ath11k_mac_drain_tx(ar);
+ return ath11k_mac_flush_tx_complete(ar);
+}
+
+static void ath11k_mac_op_flush(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
+ u32 queues, bool drop)
+{
+ struct ath11k *ar = hw->priv;
+
+ if (drop)
+ return;
+
+ ath11k_mac_flush_tx_complete(ar);
+}
+
+static bool
+ath11k_mac_has_single_legacy_rate(struct ath11k *ar,
+ enum nl80211_band band,
+ const struct cfg80211_bitrate_mask *mask)
+{
+ int num_rates = 0;
+
+ num_rates = hweight32(mask->control[band].legacy);
+
+ if (ath11k_mac_bitrate_mask_num_ht_rates(ar, band, mask))
+ return false;
+
+ if (ath11k_mac_bitrate_mask_num_vht_rates(ar, band, mask))
+ return false;
+
+ if (ath11k_mac_bitrate_mask_num_he_rates(ar, band, mask))
+ return false;
+
+ return num_rates == 1;
+}
+
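+/* Return the HE TX MCS map matching the widest bandwidth advertised in the
+ * PHY capabilities: 80+80 MHz first, then 160 MHz, else 80 MHz.
+ */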
+static __le16
+ath11k_mac_get_tx_mcs_map(const struct ieee80211_sta_he_cap *he_cap)
+{
+ if (he_cap->he_cap_elem.phy_cap_info[0] &
+ IEEE80211_HE_PHY_CAP0_CHANNEL_WIDTH_SET_80PLUS80_MHZ_IN_5G)
+ return he_cap->he_mcs_nss_supp.tx_mcs_80p80;
+
+ if (he_cap->he_cap_elem.phy_cap_info[0] &
+ IEEE80211_HE_PHY_CAP0_CHANNEL_WIDTH_SET_160MHZ_IN_5G)
+ return he_cap->he_mcs_nss_supp.tx_mcs_160;
+
+ return he_cap->he_mcs_nss_supp.tx_mcs_80;
+}
+
+static bool
+ath11k_mac_bitrate_mask_get_single_nss(struct ath11k *ar,
+ enum nl80211_band band,
+ const struct cfg80211_bitrate_mask *mask,
+ int *nss)
+{
+ struct ieee80211_supported_band *sband = &ar->mac.sbands[band];
+ u16 vht_mcs_map = le16_to_cpu(sband->vht_cap.vht_mcs.tx_mcs_map);
+ u16 he_mcs_map = 0;
+ u8 ht_nss_mask = 0;
+ u8 vht_nss_mask = 0;
+ u8 he_nss_mask = 0;
+ int i;
+
+ /* No need to consider legacy here. Basic rates are always present
+ * in bitrate mask
+ */
+
+ for (i = 0; i < ARRAY_SIZE(mask->control[band].ht_mcs); i++) {
+ if (mask->control[band].ht_mcs[i] == 0)
+ continue;
+ else if (mask->control[band].ht_mcs[i] ==
+ sband->ht_cap.mcs.rx_mask[i])
+ ht_nss_mask |= BIT(i);
+ else
+ return false;
+ }
+
+ for (i = 0; i < ARRAY_SIZE(mask->control[band].vht_mcs); i++) {
+ if (mask->control[band].vht_mcs[i] == 0)
+ continue;
+ else if (mask->control[band].vht_mcs[i] ==
+ ath11k_mac_get_max_vht_mcs_map(vht_mcs_map, i))
+ vht_nss_mask |= BIT(i);
+ else
+ return false;
+ }
+
+ he_mcs_map = le16_to_cpu(ath11k_mac_get_tx_mcs_map(&sband->iftype_data->he_cap));
+
+ for (i = 0; i < ARRAY_SIZE(mask->control[band].he_mcs); i++) {
+ if (mask->control[band].he_mcs[i] == 0)
+ continue;
+
+ if (mask->control[band].he_mcs[i] ==
+ ath11k_mac_get_max_he_mcs_map(he_mcs_map, i))
+ he_nss_mask |= BIT(i);
+ else
+ return false;
+ }
+
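+ /* All three masks must select the same NSS set, and that set must be a
+ * contiguous run starting at NSS 1 (i.e. of the form 2^n - 1); the
+ * highest set bit then gives the single NSS value.
+ */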
+ if (ht_nss_mask != vht_nss_mask || ht_nss_mask != he_nss_mask)
+ return false;
+
+ if (ht_nss_mask == 0)
+ return false;
+
+ if (BIT(fls(ht_nss_mask)) - 1 != ht_nss_mask)
+ return false;
+
+ *nss = fls(ht_nss_mask);
+
+ return true;
+}
+
+static int
+ath11k_mac_get_single_legacy_rate(struct ath11k *ar,
+ enum nl80211_band band,
+ const struct cfg80211_bitrate_mask *mask,
+ u32 *rate, u8 *nss)
+{
+ int rate_idx;
+ u16 bitrate;
+ u8 preamble;
+ u8 hw_rate;
+
+ if (hweight32(mask->control[band].legacy) != 1)
+ return -EINVAL;
+
+ rate_idx = ffs(mask->control[band].legacy) - 1;
+
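+ /* The 5 and 6 GHz bands have no CCK rates, so the legacy bitrate mask
+ * is offset to the first OFDM entry in ath11k_legacy_rates.
+ */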
+ if (band == NL80211_BAND_5GHZ || band == NL80211_BAND_6GHZ)
+ rate_idx += ATH11K_MAC_FIRST_OFDM_RATE_IDX;
+
+ hw_rate = ath11k_legacy_rates[rate_idx].hw_value;
+ bitrate = ath11k_legacy_rates[rate_idx].bitrate;
+
+ if (ath11k_mac_bitrate_is_cck(bitrate))
+ preamble = WMI_RATE_PREAMBLE_CCK;
+ else
+ preamble = WMI_RATE_PREAMBLE_OFDM;
+
+ *nss = 1;
+ *rate = ATH11K_HW_RATE_CODE(hw_rate, 0, preamble);
+
+ return 0;
+}
+
+static int
+ath11k_mac_set_fixed_rate_gi_ltf(struct ath11k_vif *arvif, u8 he_gi, u8 he_ltf)
+{
+ struct ath11k *ar = arvif->ar;
+ int ret;
+
+ /* Map the nl80211 HE GI values (0.8 us = 0, 1.6 us = 1, 3.2 us = 2) to
+ * the WMI encoding: 0.8 us = 0, 1.6 us = 2 and 3.2 us = 3. 0xFF (unset)
+ * is passed through unchanged.
+ */
+ if (he_gi && he_gi != 0xFF)
+ he_gi += 1;
+
+ ret = ath11k_wmi_vdev_set_param_cmd(ar, arvif->vdev_id,
+ WMI_VDEV_PARAM_SGI, he_gi);
+ if (ret) {
+ ath11k_warn(ar->ab, "failed to set he gi %d: %d\n",
+ he_gi, ret);
+ return ret;
+ }
+ /* WMI fixed HE LTF values start from 1, one above the nl80211 values;
+ * 0xFF (unset) is passed through unchanged.
+ */
+ if (he_ltf != 0xFF)
+ he_ltf += 1;
+
+ ret = ath11k_wmi_vdev_set_param_cmd(ar, arvif->vdev_id,
+ WMI_VDEV_PARAM_HE_LTF, he_ltf);
+ if (ret) {
+ ath11k_warn(ar->ab, "failed to set he ltf %d: %d\n",
+ he_ltf, ret);
+ return ret;
+ }
+
+ return 0;
+}
+
+static int
+ath11k_mac_set_auto_rate_gi_ltf(struct ath11k_vif *arvif, u16 he_gi, u8 he_ltf)
+{
+ struct ath11k *ar = arvif->ar;
+ int ret;
+ u32 he_ar_gi_ltf;
+
+ if (he_gi != 0xFF) {
+ switch (he_gi) {
+ case NL80211_RATE_INFO_HE_GI_0_8:
+ he_gi = WMI_AUTORATE_800NS_GI;
+ break;
+ case NL80211_RATE_INFO_HE_GI_1_6:
+ he_gi = WMI_AUTORATE_1600NS_GI;
+ break;
+ case NL80211_RATE_INFO_HE_GI_3_2:
+ he_gi = WMI_AUTORATE_3200NS_GI;
+ break;
+ default:
+ ath11k_warn(ar->ab, "invalid he gi: %d\n", he_gi);
+ return -EINVAL;
+ }
+ }
+
+ if (he_ltf != 0xFF) {
+ switch (he_ltf) {
+ case NL80211_RATE_INFO_HE_1XLTF:
+ he_ltf = WMI_HE_AUTORATE_LTF_1X;
+ break;
+ case NL80211_RATE_INFO_HE_2XLTF:
+ he_ltf = WMI_HE_AUTORATE_LTF_2X;
+ break;
+ case NL80211_RATE_INFO_HE_4XLTF:
+ he_ltf = WMI_HE_AUTORATE_LTF_4X;
+ break;
+ default:
+ ath11k_warn(ar->ab, "invalid he ltf: %d\n", he_ltf);
+ return -EINVAL;
+ }
+ }
+
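+ /* The autorate GI and LTF selections are OR'ed into a single bitmask
+ * for WMI_VDEV_PARAM_AUTORATE_MISC_CFG.
+ */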
+ he_ar_gi_ltf = he_gi | he_ltf;
+ ret = ath11k_wmi_vdev_set_param_cmd(ar, arvif->vdev_id,
+ WMI_VDEV_PARAM_AUTORATE_MISC_CFG,
+ he_ar_gi_ltf);
+ if (ret) {
+ ath11k_warn(ar->ab,
+ "failed to set he autorate gi %u ltf %u: %d\n",
+ he_gi, he_ltf, ret);
+ return ret;
+ }
+
+ return 0;
+}
+
+static int ath11k_mac_set_rate_params(struct ath11k_vif *arvif,
+ u32 rate, u8 nss, u8 sgi, u8 ldpc,
+ u8 he_gi, u8 he_ltf, bool he_fixed_rate)
+{
+ struct ath11k *ar = arvif->ar;
+ u32 vdev_param;
+ int ret;
+
+ lockdep_assert_held(&ar->conf_mutex);
+
+ ath11k_dbg(ar->ab, ATH11K_DBG_MAC,
+ "set rate params vdev %i rate 0x%02x nss 0x%02x sgi 0x%02x ldpc 0x%02x he_gi 0x%02x he_ltf 0x%02x he_fixed_rate %d\n",
+ arvif->vdev_id, rate, nss, sgi, ldpc, he_gi,
+ he_ltf, he_fixed_rate);
+
+ if (!arvif->vif->bss_conf.he_support) {
+ vdev_param = WMI_VDEV_PARAM_FIXED_RATE;
+ ret = ath11k_wmi_vdev_set_param_cmd(ar, arvif->vdev_id,
+ vdev_param, rate);
+ if (ret) {
+ ath11k_warn(ar->ab, "failed to set fixed rate param 0x%02x: %d\n",
+ rate, ret);
+ return ret;
+ }
+ }
+
+ vdev_param = WMI_VDEV_PARAM_NSS;
+ ret = ath11k_wmi_vdev_set_param_cmd(ar, arvif->vdev_id,
+ vdev_param, nss);
+ if (ret) {
+ ath11k_warn(ar->ab, "failed to set nss param %d: %d\n",
+ nss, ret);
+ return ret;
+ }
+
+ vdev_param = WMI_VDEV_PARAM_LDPC;
+ ret = ath11k_wmi_vdev_set_param_cmd(ar, arvif->vdev_id,
+ vdev_param, ldpc);
+ if (ret) {
+ ath11k_warn(ar->ab, "failed to set ldpc param %d: %d\n",
+ ldpc, ret);
+ return ret;
+ }
+
+ if (arvif->vif->bss_conf.he_support) {
+ if (he_fixed_rate) {
+ ret = ath11k_mac_set_fixed_rate_gi_ltf(arvif, he_gi,
+ he_ltf);
+ if (ret) {
+ ath11k_warn(ar->ab, "failed to set fixed rate gi ltf: %d\n",
+ ret);
+ return ret;
+ }
+ } else {
+ ret = ath11k_mac_set_auto_rate_gi_ltf(arvif, he_gi,
+ he_ltf);
+ if (ret) {
+ ath11k_warn(ar->ab, "failed to set auto rate gi ltf: %d\n",
+ ret);
+ return ret;
+ }
+ }
+ } else {
+ vdev_param = WMI_VDEV_PARAM_SGI;
+ ret = ath11k_wmi_vdev_set_param_cmd(ar, arvif->vdev_id,
+ vdev_param, sgi);
+ if (ret) {
+ ath11k_warn(ar->ab, "failed to set sgi param %d: %d\n",
+ sgi, ret);
+ return ret;
+ }
+ }
+
+ return 0;
+}
+
+static bool
+ath11k_mac_vht_mcs_range_present(struct ath11k *ar,
+ enum nl80211_band band,
+ const struct cfg80211_bitrate_mask *mask)
+{
+ int i;
+ u16 vht_mcs;
+
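+ /* Only full VHT MCS ranges per NSS can be programmed: 0 (disabled),
+ * MCS 0-7 (0xff), MCS 0-8 (0x1ff) or MCS 0-9 (0x3ff).
+ */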
+ for (i = 0; i < NL80211_VHT_NSS_MAX; i++) {
+ vht_mcs = mask->control[band].vht_mcs[i];
+
+ switch (vht_mcs) {
+ case 0:
+ case BIT(8) - 1:
+ case BIT(9) - 1:
+ case BIT(10) - 1:
+ break;
+ default:
+ return false;
+ }
+ }
+
+ return true;
+}
+
+static bool
+ath11k_mac_he_mcs_range_present(struct ath11k *ar,
+ enum nl80211_band band,
+ const struct cfg80211_bitrate_mask *mask)
+{
+ int i;
+ u16 he_mcs;
+
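+ /* Only full HE MCS ranges per NSS can be programmed: 0 (disabled),
+ * MCS 0-7 (0xff), MCS 0-9 (0x3ff) or MCS 0-11 (0xfff).
+ */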
+ for (i = 0; i < NL80211_HE_NSS_MAX; i++) {
+ he_mcs = mask->control[band].he_mcs[i];
+
+ switch (he_mcs) {
+ case 0:
+ case BIT(8) - 1:
+ case BIT(10) - 1:
+ case BIT(12) - 1:
+ break;
+ default:
+ return false;
+ }
+ }
+
+ return true;
+}
+
+static void ath11k_mac_set_bitrate_mask_iter(void *data,
+ struct ieee80211_sta *sta)
+{
+ struct ath11k_vif *arvif = data;
+ struct ath11k_sta *arsta = (struct ath11k_sta *)sta->drv_priv;
+ struct ath11k *ar = arvif->ar;
+
+ spin_lock_bh(&ar->data_lock);
+ arsta->changed |= IEEE80211_RC_SUPP_RATES_CHANGED;
+ spin_unlock_bh(&ar->data_lock);
+
+ ieee80211_queue_work(ar->hw, &arsta->update_wk);
+}
+
+static void ath11k_mac_disable_peer_fixed_rate(void *data,
+ struct ieee80211_sta *sta)
+{
+ struct ath11k_vif *arvif = data;
+ struct ath11k *ar = arvif->ar;
+ int ret;
+
+ ret = ath11k_wmi_set_peer_param(ar, sta->addr,
+ arvif->vdev_id,
+ WMI_PEER_PARAM_FIXED_RATE,
+ WMI_FIXED_RATE_NONE);
+ if (ret)
+ ath11k_warn(ar->ab,
+ "failed to disable peer fixed rate for STA %pM ret %d\n",
+ sta->addr, ret);
+}
+
+static bool
+ath11k_mac_validate_vht_he_fixed_rate_settings(struct ath11k *ar, enum nl80211_band band,
+ const struct cfg80211_bitrate_mask *mask)
+{
+ bool he_fixed_rate = false, vht_fixed_rate = false;
+ struct ath11k_peer *peer;
+ const u16 *vht_mcs_mask, *he_mcs_mask;
+ struct ieee80211_link_sta *deflink;
+ u8 vht_nss, he_nss;
+ bool ret = true;
+
+ vht_mcs_mask = mask->control[band].vht_mcs;
+ he_mcs_mask = mask->control[band].he_mcs;
+
+ if (ath11k_mac_bitrate_mask_num_vht_rates(ar, band, mask) == 1)
+ vht_fixed_rate = true;
+
+ if (ath11k_mac_bitrate_mask_num_he_rates(ar, band, mask) == 1)
+ he_fixed_rate = true;
+
+ if (!vht_fixed_rate && !he_fixed_rate)
+ return true;
+
+ vht_nss = ath11k_mac_max_vht_nss(vht_mcs_mask);
+ he_nss = ath11k_mac_max_he_nss(he_mcs_mask);
+
+ rcu_read_lock();
+ spin_lock_bh(&ar->ab->base_lock);
+ list_for_each_entry(peer, &ar->ab->peers, list) {
+ if (peer->sta) {
+ deflink = &peer->sta->deflink;
+
+ if (vht_fixed_rate && (!deflink->vht_cap.vht_supported ||
+ deflink->rx_nss < vht_nss)) {
+ ret = false;
+ goto out;
+ }
+
+ if (he_fixed_rate && (!deflink->he_cap.has_he ||
+ deflink->rx_nss < he_nss)) {
+ ret = false;
+ goto out;
+ }
+ }
+ }
+
+out:
+ spin_unlock_bh(&ar->ab->base_lock);
+ rcu_read_unlock();
+ return ret;
+}
+
+static int
+ath11k_mac_op_set_bitrate_mask(struct ieee80211_hw *hw,
+ struct ieee80211_vif *vif,
+ const struct cfg80211_bitrate_mask *mask)
+{
+ struct ath11k_vif *arvif = ath11k_vif_to_arvif(vif);
+ struct cfg80211_chan_def def;
+ struct ath11k_pdev_cap *cap;
+ struct ath11k *ar = arvif->ar;
+ enum nl80211_band band;
+ const u8 *ht_mcs_mask;
+ const u16 *vht_mcs_mask;
+ const u16 *he_mcs_mask;
+ u8 he_ltf = 0;
+ u8 he_gi = 0;
+ u32 rate;
+ u8 nss;
+ u8 sgi;
+ u8 ldpc;
+ int single_nss;
+ int ret;
+ int num_rates;
+ bool he_fixed_rate = false;
+
+ if (ath11k_mac_vif_chan(vif, &def))
+ return -EPERM;
+
+ band = def.chan->band;
+ cap = &ar->pdev->cap;
+ ht_mcs_mask = mask->control[band].ht_mcs;
+ vht_mcs_mask = mask->control[band].vht_mcs;
+ he_mcs_mask = mask->control[band].he_mcs;
+ ldpc = !!(cap->band[band].ht_cap_info & WMI_HT_CAP_TX_LDPC);
+
+ sgi = mask->control[band].gi;
+ if (sgi == NL80211_TXRATE_FORCE_LGI)
+ return -EINVAL;
+
+ he_gi = mask->control[band].he_gi;
+ he_ltf = mask->control[band].he_ltf;
+
+ /* mac80211 doesn't support sending a fixed HT/VHT MCS alone; rather it
+ * requires passing at least one of the used basic rates along with them.
+ * Fixed rate setting across different preambles (legacy, HT, VHT) is
+ * not supported by the FW. Hence use of FIXED_RATE vdev param is not
+ * suitable for setting single HT/VHT rates.
+ * But, there could be a single basic rate passed from userspace which
+ * can be done through the FIXED_RATE param.
+ */
+ if (ath11k_mac_has_single_legacy_rate(ar, band, mask)) {
+ ret = ath11k_mac_get_single_legacy_rate(ar, band, mask, &rate,
+ &nss);
+ if (ret) {
+ ath11k_warn(ar->ab, "failed to get single legacy rate for vdev %i: %d\n",
+ arvif->vdev_id, ret);
+ return ret;
+ }
+ ieee80211_iterate_stations_atomic(ar->hw,
+ ath11k_mac_disable_peer_fixed_rate,
+ arvif);
+ } else if (ath11k_mac_bitrate_mask_get_single_nss(ar, band, mask,
+ &single_nss)) {
+ rate = WMI_FIXED_RATE_NONE;
+ nss = single_nss;
+ mutex_lock(&ar->conf_mutex);
+ arvif->bitrate_mask = *mask;
+ ieee80211_iterate_stations_atomic(ar->hw,
+ ath11k_mac_set_bitrate_mask_iter,
+ arvif);
+ mutex_unlock(&ar->conf_mutex);
+ } else {
+ rate = WMI_FIXED_RATE_NONE;
+
+ if (!ath11k_mac_validate_vht_he_fixed_rate_settings(ar, band, mask))
+ ath11k_warn(ar->ab,
+ "could not update fixed rate settings to all peers due to mcs/nss incompatibility\n");
+ nss = min_t(u32, ar->num_tx_chains,
+ max(max(ath11k_mac_max_ht_nss(ht_mcs_mask),
+ ath11k_mac_max_vht_nss(vht_mcs_mask)),
+ ath11k_mac_max_he_nss(he_mcs_mask)));
+
+ /* If multiple rates across different preambles are given
+ * we can reconfigure this info with all peers using PEER_ASSOC
+ * command with the below exception cases.
+ * - Single VHT Rate : peer_assoc command accommodates only MCS
+ * range values i.e 0-7, 0-8, 0-9 for VHT. Though mac80211
+ * mandates passing basic rates along with HT/VHT rates, FW
+ * doesn't allow switching from VHT to Legacy. Hence instead of
+ * setting legacy and VHT rates using RATEMASK_CMD vdev cmd,
+ * we could set this VHT rate as peer fixed rate param, which
+ * will override FIXED rate and FW rate control algorithm.
+ * If single VHT rate is passed along with HT rates, we select
+ * the VHT rate as fixed rate for vht peers.
+ * - Multiple VHT Rates : When multiple VHT rates are given, this
+ * can be set using RATEMASK CMD which uses FW rate-ctl alg.
+ * TODO: Setting multiple VHT MCS and replacing peer_assoc with
+ * RATEMASK_CMDID can cover all use cases of setting rates
+ * across multiple preambles and rates within same type.
+ * But requires more validation of the command at this point.
+ */
+
+ num_rates = ath11k_mac_bitrate_mask_num_vht_rates(ar, band,
+ mask);
+
+ if (!ath11k_mac_vht_mcs_range_present(ar, band, mask) &&
+ num_rates > 1) {
+ /* TODO: Handle multiple VHT MCS values setting using
+ * RATEMASK CMD
+ */
+ ath11k_warn(ar->ab,
+ "setting %d mcs values in bitrate mask not supported\n",
+ num_rates);
+ return -EINVAL;
+ }
+
+ num_rates = ath11k_mac_bitrate_mask_num_he_rates(ar, band,
+ mask);
+ if (num_rates == 1)
+ he_fixed_rate = true;
+
+ if (!ath11k_mac_he_mcs_range_present(ar, band, mask) &&
+ num_rates > 1) {
+ ath11k_warn(ar->ab,
+ "Setting more than one HE MCS Value in bitrate mask not supported\n");
+ return -EINVAL;
+ }
+
+ mutex_lock(&ar->conf_mutex);
+ ieee80211_iterate_stations_atomic(ar->hw,
+ ath11k_mac_disable_peer_fixed_rate,
+ arvif);
+
+ arvif->bitrate_mask = *mask;
+ ieee80211_iterate_stations_atomic(ar->hw,
+ ath11k_mac_set_bitrate_mask_iter,
+ arvif);
+
+ mutex_unlock(&ar->conf_mutex);
+ }
+
+ mutex_lock(&ar->conf_mutex);
+
+ ret = ath11k_mac_set_rate_params(arvif, rate, nss, sgi, ldpc, he_gi,
+ he_ltf, he_fixed_rate);
+ if (ret) {
+ ath11k_warn(ar->ab, "failed to set rate params on vdev %i: %d\n",
+ arvif->vdev_id, ret);
+ }
+
+ mutex_unlock(&ar->conf_mutex);
+
+ return ret;
+}
+
+static void
+ath11k_mac_op_reconfig_complete(struct ieee80211_hw *hw,
+ enum ieee80211_reconfig_type reconfig_type)
+{
+ struct ath11k *ar = hw->priv;
+ struct ath11k_base *ab = ar->ab;
+ int recovery_count;
+ struct ath11k_vif *arvif;
+
+ if (reconfig_type != IEEE80211_RECONFIG_TYPE_RESTART)
+ return;
+
+ mutex_lock(&ar->conf_mutex);
+
+ if (ar->state == ATH11K_STATE_RESTARTED) {
+ ath11k_warn(ar->ab, "pdev %d successfully recovered\n",
+ ar->pdev->pdev_id);
+ ar->state = ATH11K_STATE_ON;
+ ieee80211_wake_queues(ar->hw);
+
+ if (ar->ab->hw_params.current_cc_support &&
+ ar->alpha2[0] != 0 && ar->alpha2[1] != 0) {
+ struct wmi_set_current_country_params set_current_param = {};
+
+ memcpy(&set_current_param.alpha2, ar->alpha2, 2);
+ ath11k_wmi_send_set_current_country_cmd(ar, &set_current_param);
+ }
+
+ if (ab->is_reset) {
+ recovery_count = atomic_inc_return(&ab->recovery_count);
+ ath11k_dbg(ab, ATH11K_DBG_BOOT,
+ "recovery count %d\n", recovery_count);
+ /* When there are multiple radios in an SOC,
+ * the recovery has to be done for each radio
+ */
+ if (recovery_count == ab->num_radios) {
+ atomic_dec(&ab->reset_count);
+ complete(&ab->reset_complete);
+ ab->is_reset = false;
+ atomic_set(&ab->fail_cont_count, 0);
+ ath11k_dbg(ab, ATH11K_DBG_BOOT, "reset success\n");
+ }
+ }
+ if (ar->ab->hw_params.support_fw_mac_sequence) {
+ list_for_each_entry(arvif, &ar->arvifs, list) {
+ if (arvif->is_up && arvif->vdev_type == WMI_VDEV_TYPE_STA)
+ ieee80211_hw_restart_disconnect(arvif->vif);
+ }
+ }
+ }
+
+ mutex_unlock(&ar->conf_mutex);
+}
+
+static void
+ath11k_mac_update_bss_chan_survey(struct ath11k *ar,
+ struct ieee80211_channel *channel)
+{
+ int ret;
+ enum wmi_bss_chan_info_req_type type = WMI_BSS_SURVEY_REQ_TYPE_READ;
+
+ lockdep_assert_held(&ar->conf_mutex);
+
+ if (!test_bit(WMI_TLV_SERVICE_BSS_CHANNEL_INFO_64, ar->ab->wmi_ab.svc_map) ||
+ ar->rx_channel != channel)
+ return;
+
+ if (ar->scan.state != ATH11K_SCAN_IDLE) {
+ ath11k_dbg(ar->ab, ATH11K_DBG_MAC,
+ "ignoring bss chan info req while scanning..\n");
+ return;
+ }
+
+ reinit_completion(&ar->bss_survey_done);
+
+ ret = ath11k_wmi_pdev_bss_chan_info_request(ar, type);
+ if (ret) {
+ ath11k_warn(ar->ab, "failed to send pdev bss chan info request\n");
+ return;
+ }
+
+ ret = wait_for_completion_timeout(&ar->bss_survey_done, 3 * HZ);
+ if (ret == 0)
+ ath11k_warn(ar->ab, "bss channel survey timed out\n");
+}
+
+static int ath11k_mac_op_get_survey(struct ieee80211_hw *hw, int idx,
+ struct survey_info *survey)
+{
+ struct ath11k *ar = hw->priv;
+ struct ieee80211_supported_band *sband;
+ struct survey_info *ar_survey;
+ int ret = 0;
+
+ if (idx >= ATH11K_NUM_CHANS)
+ return -ENOENT;
+
+ ar_survey = &ar->survey[idx];
+
+ mutex_lock(&ar->conf_mutex);
+
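+ /* idx is a flat index across the 2, 5 and 6 GHz channel lists; walk
+ * the bands in order until idx falls within one of them.
+ */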
+ sband = hw->wiphy->bands[NL80211_BAND_2GHZ];
+ if (sband && idx >= sband->n_channels) {
+ idx -= sband->n_channels;
+ sband = NULL;
+ }
+
+ if (!sband)
+ sband = hw->wiphy->bands[NL80211_BAND_5GHZ];
+ if (sband && idx >= sband->n_channels) {
+ idx -= sband->n_channels;
+ sband = NULL;
+ }
+
+ if (!sband)
+ sband = hw->wiphy->bands[NL80211_BAND_6GHZ];
+ if (!sband || idx >= sband->n_channels) {
+ ret = -ENOENT;
+ goto exit;
+ }
+
+ ath11k_mac_update_bss_chan_survey(ar, &sband->channels[idx]);
+
+ spin_lock_bh(&ar->data_lock);
+ memcpy(survey, ar_survey, sizeof(*survey));
+ spin_unlock_bh(&ar->data_lock);
+
+ survey->channel = &sband->channels[idx];
+
+ if (ar->rx_channel == survey->channel)
+ survey->filled |= SURVEY_INFO_IN_USE;
+
+exit:
+ mutex_unlock(&ar->conf_mutex);
+ return ret;
+}
+
+static void ath11k_mac_put_chain_rssi(struct station_info *sinfo,
+ struct ath11k_sta *arsta,
+ char *pre,
+ bool clear)
+{
+ struct ath11k *ar = arsta->arvif->ar;
+ int i;
+ s8 rssi;
+
+ for (i = 0; i < ARRAY_SIZE(sinfo->chain_signal); i++) {
+ sinfo->chains &= ~BIT(i);
+ rssi = arsta->chain_signal[i];
+ if (clear)
+ arsta->chain_signal[i] = ATH11K_INVALID_RSSI_FULL;
+
+ ath11k_dbg(ar->ab, ATH11K_DBG_MAC,
+ "sta statistics %s rssi[%d] %d\n", pre, i, rssi);
+
+ if (rssi != ATH11K_DEFAULT_NOISE_FLOOR &&
+ rssi != ATH11K_INVALID_RSSI_FULL &&
+ rssi != ATH11K_INVALID_RSSI_EMPTY &&
+ rssi != 0) {
+ sinfo->chain_signal[i] = rssi;
+ sinfo->chains |= BIT(i);
+ sinfo->filled |= BIT_ULL(NL80211_STA_INFO_CHAIN_SIGNAL);
+ }
+ }
+}
+
+static void ath11k_mac_op_sta_statistics(struct ieee80211_hw *hw,
+ struct ieee80211_vif *vif,
+ struct ieee80211_sta *sta,
+ struct station_info *sinfo)
+{
+ struct ath11k_sta *arsta = (struct ath11k_sta *)sta->drv_priv;
+ struct ath11k *ar = arsta->arvif->ar;
+ s8 signal;
+ bool db2dbm = test_bit(WMI_TLV_SERVICE_HW_DB2DBM_CONVERSION_SUPPORT,
+ ar->ab->wmi_ab.svc_map);
+
+ sinfo->rx_duration = arsta->rx_duration;
+ sinfo->filled |= BIT_ULL(NL80211_STA_INFO_RX_DURATION);
+
+ sinfo->tx_duration = arsta->tx_duration;
+ sinfo->filled |= BIT_ULL(NL80211_STA_INFO_TX_DURATION);
+
+ if (arsta->txrate.legacy || arsta->txrate.nss) {
+ if (arsta->txrate.legacy) {
+ sinfo->txrate.legacy = arsta->txrate.legacy;
+ } else {
+ sinfo->txrate.mcs = arsta->txrate.mcs;
+ sinfo->txrate.nss = arsta->txrate.nss;
+ sinfo->txrate.bw = arsta->txrate.bw;
+ sinfo->txrate.he_gi = arsta->txrate.he_gi;
+ sinfo->txrate.he_dcm = arsta->txrate.he_dcm;
+ sinfo->txrate.he_ru_alloc = arsta->txrate.he_ru_alloc;
+ }
+ sinfo->txrate.flags = arsta->txrate.flags;
+ sinfo->filled |= BIT_ULL(NL80211_STA_INFO_TX_BITRATE);
+ }
+
+ ath11k_mac_put_chain_rssi(sinfo, arsta, "ppdu", false);
+
+ if (!(sinfo->filled & BIT_ULL(NL80211_STA_INFO_CHAIN_SIGNAL)) &&
+ arsta->arvif->vdev_type == WMI_VDEV_TYPE_STA &&
+ ar->ab->hw_params.supports_rssi_stats &&
+ !ath11k_debugfs_get_fw_stats(ar, ar->pdev->pdev_id, 0,
+ WMI_REQUEST_RSSI_PER_CHAIN_STAT)) {
+ ath11k_mac_put_chain_rssi(sinfo, arsta, "fw stats", true);
+ }
+
+ signal = arsta->rssi_comb;
+ if (!signal &&
+ arsta->arvif->vdev_type == WMI_VDEV_TYPE_STA &&
+ ar->ab->hw_params.supports_rssi_stats &&
+ !(ath11k_debugfs_get_fw_stats(ar, ar->pdev->pdev_id, 0,
+ WMI_REQUEST_VDEV_STAT)))
+ signal = arsta->rssi_beacon;
+
+ ath11k_dbg(ar->ab, ATH11K_DBG_MAC,
+ "sta statistics db2dbm %u rssi comb %d rssi beacon %d\n",
+ db2dbm, arsta->rssi_comb, arsta->rssi_beacon);
+
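+ /* Without the HW_DB2DBM service the firmware reports RSSI relative to
+ * the noise floor, so convert to dBm by adding the default noise floor.
+ */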
+ if (signal) {
+ sinfo->signal = db2dbm ? signal : signal + ATH11K_DEFAULT_NOISE_FLOOR;
+ sinfo->filled |= BIT_ULL(NL80211_STA_INFO_SIGNAL);
+ }
+
+ sinfo->signal_avg = ewma_avg_rssi_read(&arsta->avg_rssi) +
+ ATH11K_DEFAULT_NOISE_FLOOR;
+ sinfo->filled |= BIT_ULL(NL80211_STA_INFO_SIGNAL_AVG);
+}
+
+#if IS_ENABLED(CONFIG_IPV6)
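+/* Derive the IPv6 solicited-node multicast address (ff02::1:ffXX:XXXX,
+ * taking the low 24 bits of each configured address) for every offloaded
+ * unicast/anycast address.
+ */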
+static void ath11k_generate_ns_mc_addr(struct ath11k *ar,
+ struct ath11k_arp_ns_offload *offload)
+{
+ int i;
+
+ for (i = 0; i < offload->ipv6_count; i++) {
+ offload->self_ipv6_addr[i][0] = 0xff;
+ offload->self_ipv6_addr[i][1] = 0x02;
+ offload->self_ipv6_addr[i][11] = 0x01;
+ offload->self_ipv6_addr[i][12] = 0xff;
+ offload->self_ipv6_addr[i][13] =
+ offload->ipv6_addr[i][13];
+ offload->self_ipv6_addr[i][14] =
+ offload->ipv6_addr[i][14];
+ offload->self_ipv6_addr[i][15] =
+ offload->ipv6_addr[i][15];
+ ath11k_dbg(ar->ab, ATH11K_DBG_MAC, "NS solicited addr %pI6\n",
+ offload->self_ipv6_addr[i]);
+ }
+}
+
+static void ath11k_mac_op_ipv6_changed(struct ieee80211_hw *hw,
+ struct ieee80211_vif *vif,
+ struct inet6_dev *idev)
+{
+ struct ath11k *ar = hw->priv;
+ struct ath11k_arp_ns_offload *offload;
+ struct ath11k_vif *arvif = ath11k_vif_to_arvif(vif);
+ struct inet6_ifaddr *ifa6;
+ struct ifacaddr6 *ifaca6;
+ struct list_head *p;
+ u32 count, scope;
+
+ ath11k_dbg(ar->ab, ATH11K_DBG_MAC, "op ipv6 changed\n");
+
+ offload = &arvif->arp_ns_offload;
+ count = 0;
+
+ read_lock_bh(&idev->lock);
+
+ memset(offload->ipv6_addr, 0, sizeof(offload->ipv6_addr));
+ memset(offload->self_ipv6_addr, 0, sizeof(offload->self_ipv6_addr));
+ memcpy(offload->mac_addr, vif->addr, ETH_ALEN);
+
+ /* get unicast address */
+ list_for_each(p, &idev->addr_list) {
+ if (count >= ATH11K_IPV6_MAX_COUNT)
+ goto generate;
+
+ ifa6 = list_entry(p, struct inet6_ifaddr, if_list);
+ if (ifa6->flags & IFA_F_DADFAILED)
+ continue;
+ scope = ipv6_addr_src_scope(&ifa6->addr);
+ if (scope == IPV6_ADDR_SCOPE_LINKLOCAL ||
+ scope == IPV6_ADDR_SCOPE_GLOBAL) {
+ memcpy(offload->ipv6_addr[count], &ifa6->addr.s6_addr,
+ sizeof(ifa6->addr.s6_addr));
+ offload->ipv6_type[count] = ATH11K_IPV6_UC_TYPE;
+ ath11k_dbg(ar->ab, ATH11K_DBG_MAC, "count %d ipv6 uc %pI6 scope %d\n",
+ count, offload->ipv6_addr[count],
+ scope);
+ count++;
+ } else {
+ ath11k_warn(ar->ab, "Unsupported ipv6 scope: %d\n", scope);
+ }
+ }
+
+ /* get anycast address */
+ for (ifaca6 = idev->ac_list; ifaca6; ifaca6 = ifaca6->aca_next) {
+ if (count >= ATH11K_IPV6_MAX_COUNT)
+ goto generate;
+
+ scope = ipv6_addr_src_scope(&ifaca6->aca_addr);
+ if (scope == IPV6_ADDR_SCOPE_LINKLOCAL ||
+ scope == IPV6_ADDR_SCOPE_GLOBAL) {
+ memcpy(offload->ipv6_addr[count], &ifaca6->aca_addr,
+ sizeof(ifaca6->aca_addr));
+ offload->ipv6_type[count] = ATH11K_IPV6_AC_TYPE;
+ ath11k_dbg(ar->ab, ATH11K_DBG_MAC, "count %d ipv6 ac %pI6 scope %d\n",
+ count, offload->ipv6_addr[count],
+ scope);
+ count++;
+ } else {
+ ath11k_warn(ar->ab, "Unsupported ipv scope: %d\n", scope);
+ }
+ }
+
+generate:
+ offload->ipv6_count = count;
+ read_unlock_bh(&idev->lock);
+
+ /* generate ns multicast address */
+ ath11k_generate_ns_mc_addr(ar, offload);
+}
+#endif
+
+static void ath11k_mac_op_set_rekey_data(struct ieee80211_hw *hw,
+ struct ieee80211_vif *vif,
+ struct cfg80211_gtk_rekey_data *data)
+{
+ struct ath11k *ar = hw->priv;
+ struct ath11k_vif *arvif = ath11k_vif_to_arvif(vif);
+ struct ath11k_rekey_data *rekey_data = &arvif->rekey_data;
+
+ ath11k_dbg(ar->ab, ATH11K_DBG_MAC, "set rekey data vdev %d\n",
+ arvif->vdev_id);
+
+ mutex_lock(&ar->conf_mutex);
+
+ memcpy(rekey_data->kck, data->kck, NL80211_KCK_LEN);
+ memcpy(rekey_data->kek, data->kek, NL80211_KEK_LEN);
+
+ /* The supplicant provides the replay counter in big endian, whereas
+ * the firmware expects it in little endian.
+ */
+ rekey_data->replay_ctr = get_unaligned_be64(data->replay_ctr);
+
+ arvif->rekey_data.enable_offload = true;
+
+ ath11k_dbg_dump(ar->ab, ATH11K_DBG_MAC, "kck", NULL,
+ rekey_data->kck, NL80211_KCK_LEN);
+ ath11k_dbg_dump(ar->ab, ATH11K_DBG_MAC, "kek", NULL,
+ rekey_data->kek, NL80211_KEK_LEN);
+ ath11k_dbg_dump(ar->ab, ATH11K_DBG_MAC, "replay ctr", NULL,
+ &rekey_data->replay_ctr, sizeof(rekey_data->replay_ctr));
+
+ mutex_unlock(&ar->conf_mutex);
+}
+
+static int ath11k_mac_op_set_bios_sar_specs(struct ieee80211_hw *hw,
+ const struct cfg80211_sar_specs *sar)
+{
+ struct ath11k *ar = hw->priv;
+ const struct cfg80211_sar_sub_specs *sspec;
+ int ret, index;
+ u8 *sar_tbl;
+ u32 i;
+
+ if (!sar || sar->type != NL80211_SAR_TYPE_POWER ||
+ sar->num_sub_specs == 0)
+ return -EINVAL;
+
+ mutex_lock(&ar->conf_mutex);
+
+ if (!test_bit(WMI_TLV_SERVICE_BIOS_SAR_SUPPORT, ar->ab->wmi_ab.svc_map) ||
+ !ar->ab->hw_params.bios_sar_capa) {
+ ret = -EOPNOTSUPP;
+ goto exit;
+ }
+
+ ret = ath11k_wmi_pdev_set_bios_geo_table_param(ar);
+ if (ret) {
+ ath11k_warn(ar->ab, "failed to set geo table: %d\n", ret);
+ goto exit;
+ }
+
+ sar_tbl = kzalloc(BIOS_SAR_TABLE_LEN, GFP_KERNEL);
+ if (!sar_tbl) {
+ ret = -ENOMEM;
+ goto exit;
+ }
+
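+ /* The BIOS SAR table holds one power value per frequency range and
+ * chain: the first half of the table is chain0, the second half
+ * chain1.
+ */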
+ sspec = sar->sub_specs;
+ for (i = 0; i < sar->num_sub_specs; i++) {
+ if (sspec->freq_range_index >= (BIOS_SAR_TABLE_LEN >> 1)) {
+ ath11k_warn(ar->ab, "Ignore bad frequency index %u, max allowed %u\n",
+ sspec->freq_range_index, BIOS_SAR_TABLE_LEN >> 1);
+ continue;
+ }
+
+ /* chain0 and chain1 share same power setting */
+ sar_tbl[sspec->freq_range_index] = sspec->power;
+ index = sspec->freq_range_index + (BIOS_SAR_TABLE_LEN >> 1);
+ sar_tbl[index] = sspec->power;
+ ath11k_dbg(ar->ab, ATH11K_DBG_MAC, "sar tbl[%d] = %d\n",
+ sspec->freq_range_index, sar_tbl[sspec->freq_range_index]);
+ sspec++;
+ }
+
+ ret = ath11k_wmi_pdev_set_bios_sar_table_param(ar, sar_tbl);
+ if (ret)
+ ath11k_warn(ar->ab, "failed to set sar power: %d", ret);
+
+ kfree(sar_tbl);
+exit:
+ mutex_unlock(&ar->conf_mutex);
+
+ return ret;
+}
+
+static int ath11k_mac_op_cancel_remain_on_channel(struct ieee80211_hw *hw,
+ struct ieee80211_vif *vif)
+{
+ struct ath11k *ar = hw->priv;
+
+ mutex_lock(&ar->conf_mutex);
+
+ spin_lock_bh(&ar->data_lock);
+ ar->scan.roc_notify = false;
+ spin_unlock_bh(&ar->data_lock);
+
+ ath11k_scan_abort(ar);
+
+ mutex_unlock(&ar->conf_mutex);
+
+ cancel_delayed_work_sync(&ar->scan.timeout);
+
+ return 0;
+}
+
+static int ath11k_mac_op_remain_on_channel(struct ieee80211_hw *hw,
+ struct ieee80211_vif *vif,
+ struct ieee80211_channel *chan,
+ int duration,
+ enum ieee80211_roc_type type)
+{
+ struct ath11k *ar = hw->priv;
+ struct ath11k_vif *arvif = ath11k_vif_to_arvif(vif);
+ struct scan_req_params arg;
+ int ret;
+ u32 scan_time_msec;
+
+ mutex_lock(&ar->conf_mutex);
+
+ spin_lock_bh(&ar->data_lock);
+ switch (ar->scan.state) {
+ case ATH11K_SCAN_IDLE:
+ reinit_completion(&ar->scan.started);
+ reinit_completion(&ar->scan.completed);
+ reinit_completion(&ar->scan.on_channel);
+ ar->scan.state = ATH11K_SCAN_STARTING;
+ ar->scan.is_roc = true;
+ ar->scan.vdev_id = arvif->vdev_id;
+ ar->scan.roc_freq = chan->center_freq;
+ ar->scan.roc_notify = true;
+ ret = 0;
+ break;
+ case ATH11K_SCAN_STARTING:
+ case ATH11K_SCAN_RUNNING:
+ case ATH11K_SCAN_ABORTING:
+ ret = -EBUSY;
+ break;
+ }
+ spin_unlock_bh(&ar->data_lock);
+
+ if (ret)
+ goto exit;
+
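+ /* Pad the dwell and scan times well beyond the requested duration;
+ * the actual time on channel is bounded by the scan.timeout delayed
+ * work queued below, not by the scan parameters themselves.
+ */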
+ scan_time_msec = ar->hw->wiphy->max_remain_on_channel_duration * 2;
+
+ memset(&arg, 0, sizeof(arg));
+ ath11k_wmi_start_scan_init(ar, &arg);
+ arg.num_chan = 1;
+ arg.chan_list = kcalloc(arg.num_chan, sizeof(*arg.chan_list),
+ GFP_KERNEL);
+ if (!arg.chan_list) {
+ ret = -ENOMEM;
+ goto exit;
+ }
+
+ arg.vdev_id = arvif->vdev_id;
+ arg.scan_id = ATH11K_SCAN_ID;
+ arg.chan_list[0] = chan->center_freq;
+ arg.dwell_time_active = scan_time_msec;
+ arg.dwell_time_passive = scan_time_msec;
+ arg.max_scan_time = scan_time_msec;
+ arg.scan_flags |= WMI_SCAN_FLAG_PASSIVE;
+ arg.scan_flags |= WMI_SCAN_FILTER_PROBE_REQ;
+ arg.burst_duration = duration;
+
+ ret = ath11k_start_scan(ar, &arg);
+ if (ret) {
+ ath11k_warn(ar->ab, "failed to start roc scan: %d\n", ret);
+
+ spin_lock_bh(&ar->data_lock);
+ ar->scan.state = ATH11K_SCAN_IDLE;
+ spin_unlock_bh(&ar->data_lock);
+ goto free_chan_list;
+ }
+
+ ret = wait_for_completion_timeout(&ar->scan.on_channel, 3 * HZ);
+ if (ret == 0) {
+ ath11k_warn(ar->ab, "failed to switch to channel for roc scan\n");
+ ret = ath11k_scan_stop(ar);
+ if (ret)
+ ath11k_warn(ar->ab, "failed to stop scan: %d\n", ret);
+ ret = -ETIMEDOUT;
+ goto free_chan_list;
+ }
+
+ ieee80211_queue_delayed_work(ar->hw, &ar->scan.timeout,
+ msecs_to_jiffies(duration));
+
+ ret = 0;
+
+free_chan_list:
+ kfree(arg.chan_list);
+exit:
+ mutex_unlock(&ar->conf_mutex);
+ return ret;
+}
+
+static int ath11k_fw_stats_request(struct ath11k *ar,
+ struct stats_request_params *req_param)
+{
+ struct ath11k_base *ab = ar->ab;
+ unsigned long time_left;
+ int ret;
+
+ lockdep_assert_held(&ar->conf_mutex);
+
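+ /* Drop any stale pdev stats, then wait up to one second for the
+ * firmware to deliver fresh ones via the stats event.
+ */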
+ spin_lock_bh(&ar->data_lock);
+ ar->fw_stats_done = false;
+ ath11k_fw_stats_pdevs_free(&ar->fw_stats.pdevs);
+ spin_unlock_bh(&ar->data_lock);
+
+ reinit_completion(&ar->fw_stats_complete);
+
+ ret = ath11k_wmi_send_stats_request_cmd(ar, req_param);
+ if (ret) {
+ ath11k_warn(ab, "could not request fw stats (%d)\n",
+ ret);
+ return ret;
+ }
+
+ time_left = wait_for_completion_timeout(&ar->fw_stats_complete,
+ 1 * HZ);
+
+ if (!time_left)
+ return -ETIMEDOUT;
+
+ return 0;
+}
+
+static int ath11k_mac_op_get_txpower(struct ieee80211_hw *hw,
+ struct ieee80211_vif *vif,
+ int *dbm)
+{
+ struct ath11k *ar = hw->priv;
+ struct ath11k_base *ab = ar->ab;
+ struct stats_request_params req_param = {0};
+ struct ath11k_fw_stats_pdev *pdev;
+ int ret;
+
+ /* The final Tx power is the minimum of the target power, CTL power,
+ * regulatory power and PSD EIRP power. The host only knows the
+ * regulatory power from the obtained regulatory rules, while the
+ * firmware knows all of these powers and programs the minimum.
+ * Hence, request the FW pdev stats, in which the FW reports the
+ * minimum of all vdevs' channel Tx power.
+ */
+ mutex_lock(&ar->conf_mutex);
+
+ if (ar->state != ATH11K_STATE_ON)
+ goto err_fallback;
+
+ /* The firmware doesn't provide Tx power during CAC, hence there is
+ * no need to fetch the stats.
+ */
+ if (test_bit(ATH11K_CAC_RUNNING, &ar->dev_flags)) {
+ mutex_unlock(&ar->conf_mutex);
+ return -EAGAIN;
+ }
+
+ req_param.pdev_id = ar->pdev->pdev_id;
+ req_param.stats_id = WMI_REQUEST_PDEV_STAT;
+
+ ret = ath11k_fw_stats_request(ar, &req_param);
+ if (ret) {
+ ath11k_warn(ab, "failed to request fw pdev stats: %d\n", ret);
+ goto err_fallback;
+ }
+
+ spin_lock_bh(&ar->data_lock);
+ pdev = list_first_entry_or_null(&ar->fw_stats.pdevs,
+ struct ath11k_fw_stats_pdev, list);
+ if (!pdev) {
+ spin_unlock_bh(&ar->data_lock);
+ goto err_fallback;
+ }
+
+ /* tx power is set as 2 units per dBm in FW. */
+ *dbm = pdev->chan_tx_power / 2;
+
+ spin_unlock_bh(&ar->data_lock);
+ mutex_unlock(&ar->conf_mutex);
+
+ ath11k_dbg(ar->ab, ATH11K_DBG_MAC, "txpower from firmware %d, reported %d dBm\n",
+ pdev->chan_tx_power, *dbm);
+ return 0;
+
+err_fallback:
+ mutex_unlock(&ar->conf_mutex);
+ /* We didn't get the txpower from the FW, so fall back to vif->bss_conf.txpower */
+ *dbm = vif->bss_conf.txpower;
+ ath11k_dbg(ar->ab, ATH11K_DBG_MAC, "txpower from firmware NaN, reported %d dBm\n",
+ *dbm);
+ return 0;
+}
+
+static const struct ieee80211_ops ath11k_ops = {
+ .tx = ath11k_mac_op_tx,
+ .wake_tx_queue = ieee80211_handle_wake_tx_queue,
+ .start = ath11k_mac_op_start,
+ .stop = ath11k_mac_op_stop,
+ .reconfig_complete = ath11k_mac_op_reconfig_complete,
+ .add_interface = ath11k_mac_op_add_interface,
+ .remove_interface = ath11k_mac_op_remove_interface,
+ .update_vif_offload = ath11k_mac_op_update_vif_offload,
+ .config = ath11k_mac_op_config,
+ .bss_info_changed = ath11k_mac_op_bss_info_changed,
+ .configure_filter = ath11k_mac_op_configure_filter,
+ .hw_scan = ath11k_mac_op_hw_scan,
+ .cancel_hw_scan = ath11k_mac_op_cancel_hw_scan,
+ .set_key = ath11k_mac_op_set_key,
+ .set_rekey_data = ath11k_mac_op_set_rekey_data,
+ .sta_state = ath11k_mac_op_sta_state,
+ .sta_set_4addr = ath11k_mac_op_sta_set_4addr,
+ .sta_set_txpwr = ath11k_mac_op_sta_set_txpwr,
+ .sta_rc_update = ath11k_mac_op_sta_rc_update,
+ .conf_tx = ath11k_mac_op_conf_tx,
+ .set_antenna = ath11k_mac_op_set_antenna,
+ .get_antenna = ath11k_mac_op_get_antenna,
+ .ampdu_action = ath11k_mac_op_ampdu_action,
+ .add_chanctx = ath11k_mac_op_add_chanctx,
+ .remove_chanctx = ath11k_mac_op_remove_chanctx,
+ .change_chanctx = ath11k_mac_op_change_chanctx,
+ .assign_vif_chanctx = ath11k_mac_op_assign_vif_chanctx,
+ .unassign_vif_chanctx = ath11k_mac_op_unassign_vif_chanctx,
+ .switch_vif_chanctx = ath11k_mac_op_switch_vif_chanctx,
+ .set_rts_threshold = ath11k_mac_op_set_rts_threshold,
+ .set_frag_threshold = ath11k_mac_op_set_frag_threshold,
+ .set_bitrate_mask = ath11k_mac_op_set_bitrate_mask,
+ .get_survey = ath11k_mac_op_get_survey,
+ .flush = ath11k_mac_op_flush,
+ .sta_statistics = ath11k_mac_op_sta_statistics,
+ CFG80211_TESTMODE_CMD(ath11k_tm_cmd)
+
+#ifdef CONFIG_PM
+ .suspend = ath11k_wow_op_suspend,
+ .resume = ath11k_wow_op_resume,
+ .set_wakeup = ath11k_wow_op_set_wakeup,
+#endif
+
+#ifdef CONFIG_ATH11K_DEBUGFS
+ .sta_add_debugfs = ath11k_debugfs_sta_op_add,
+#endif
+
+#if IS_ENABLED(CONFIG_IPV6)
+ .ipv6_addr_change = ath11k_mac_op_ipv6_changed,
+#endif
+ .get_txpower = ath11k_mac_op_get_txpower,
+
+ .set_sar_specs = ath11k_mac_op_set_bios_sar_specs,
+ .remain_on_channel = ath11k_mac_op_remain_on_channel,
+ .cancel_remain_on_channel = ath11k_mac_op_cancel_remain_on_channel,
+};
+
+static void ath11k_mac_update_ch_list(struct ath11k *ar,
+ struct ieee80211_supported_band *band,
+ u32 freq_low, u32 freq_high)
+{
+ int i;
+
+ if (!(freq_low && freq_high))
+ return;
+
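+ /* Disable every channel that falls outside the regulatory range
+ * reported by the firmware for this pdev.
+ */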
+ for (i = 0; i < band->n_channels; i++) {
+ if (band->channels[i].center_freq < freq_low ||
+ band->channels[i].center_freq > freq_high)
+ band->channels[i].flags |= IEEE80211_CHAN_DISABLED;
+ }
+}
+
+static u32 ath11k_get_phy_id(struct ath11k *ar, u32 band)
+{
+ struct ath11k_pdev *pdev = ar->pdev;
+ struct ath11k_pdev_cap *pdev_cap = &pdev->cap;
+
+ if (band == WMI_HOST_WLAN_2G_CAP)
+ return pdev_cap->band[NL80211_BAND_2GHZ].phy_id;
+
+ if (band == WMI_HOST_WLAN_5G_CAP)
+ return pdev_cap->band[NL80211_BAND_5GHZ].phy_id;
+
+ ath11k_warn(ar->ab, "unsupported phy cap:%d\n", band);
+
+ return 0;
+}
+
+static int ath11k_mac_setup_channels_rates(struct ath11k *ar,
+ u32 supported_bands)
+{
+ struct ieee80211_supported_band *band;
+ struct ath11k_hal_reg_capabilities_ext *reg_cap, *temp_reg_cap;
+ void *channels;
+ u32 phy_id;
+
+ BUILD_BUG_ON((ARRAY_SIZE(ath11k_2ghz_channels) +
+ ARRAY_SIZE(ath11k_5ghz_channels) +
+ ARRAY_SIZE(ath11k_6ghz_channels)) !=
+ ATH11K_NUM_CHANS);
+
+ reg_cap = &ar->ab->hal_reg_cap[ar->pdev_idx];
+ temp_reg_cap = reg_cap;
+
+ if (supported_bands & WMI_HOST_WLAN_2G_CAP) {
+ channels = kmemdup(ath11k_2ghz_channels,
+ sizeof(ath11k_2ghz_channels),
+ GFP_KERNEL);
+ if (!channels)
+ return -ENOMEM;
+
+ band = &ar->mac.sbands[NL80211_BAND_2GHZ];
+ band->band = NL80211_BAND_2GHZ;
+ band->n_channels = ARRAY_SIZE(ath11k_2ghz_channels);
+ band->channels = channels;
+ band->n_bitrates = ath11k_g_rates_size;
+ band->bitrates = ath11k_g_rates;
+ ar->hw->wiphy->bands[NL80211_BAND_2GHZ] = band;
+
+ if (ar->ab->hw_params.single_pdev_only) {
+ phy_id = ath11k_get_phy_id(ar, WMI_HOST_WLAN_2G_CAP);
+ temp_reg_cap = &ar->ab->hal_reg_cap[phy_id];
+ }
+ ath11k_mac_update_ch_list(ar, band,
+ temp_reg_cap->low_2ghz_chan,
+ temp_reg_cap->high_2ghz_chan);
+ }
+
+ if (supported_bands & WMI_HOST_WLAN_5G_CAP) {
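+ /* The firmware advertises 5 GHz and 6 GHz support under the same
+ * WMI_HOST_WLAN_5G_CAP flag; the two bands are told apart by the
+ * frequency range in the regulatory capabilities.
+ */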
+ if (reg_cap->high_5ghz_chan >= ATH11K_MIN_6G_FREQ) {
+ channels = kmemdup(ath11k_6ghz_channels,
+ sizeof(ath11k_6ghz_channels), GFP_KERNEL);
+ if (!channels) {
+ kfree(ar->mac.sbands[NL80211_BAND_2GHZ].channels);
+ return -ENOMEM;
+ }
+
+ ar->supports_6ghz = true;
+ band = &ar->mac.sbands[NL80211_BAND_6GHZ];
+ band->band = NL80211_BAND_6GHZ;
+ band->n_channels = ARRAY_SIZE(ath11k_6ghz_channels);
+ band->channels = channels;
+ band->n_bitrates = ath11k_a_rates_size;
+ band->bitrates = ath11k_a_rates;
+ ar->hw->wiphy->bands[NL80211_BAND_6GHZ] = band;
+
+ if (ar->ab->hw_params.single_pdev_only) {
+ phy_id = ath11k_get_phy_id(ar, WMI_HOST_WLAN_5G_CAP);
+ temp_reg_cap = &ar->ab->hal_reg_cap[phy_id];
+ }
+
+ ath11k_mac_update_ch_list(ar, band,
+ temp_reg_cap->low_5ghz_chan,
+ temp_reg_cap->high_5ghz_chan);
+ }
+
+ if (reg_cap->low_5ghz_chan < ATH11K_MIN_6G_FREQ) {
+ channels = kmemdup(ath11k_5ghz_channels,
+ sizeof(ath11k_5ghz_channels),
+ GFP_KERNEL);
+ if (!channels) {
+ kfree(ar->mac.sbands[NL80211_BAND_2GHZ].channels);
+ kfree(ar->mac.sbands[NL80211_BAND_6GHZ].channels);
+ return -ENOMEM;
+ }
+
+ band = &ar->mac.sbands[NL80211_BAND_5GHZ];
+ band->band = NL80211_BAND_5GHZ;
+ band->n_channels = ARRAY_SIZE(ath11k_5ghz_channels);
+ band->channels = channels;
+ band->n_bitrates = ath11k_a_rates_size;
+ band->bitrates = ath11k_a_rates;
+ ar->hw->wiphy->bands[NL80211_BAND_5GHZ] = band;
+
+ if (ar->ab->hw_params.single_pdev_only) {
+ phy_id = ath11k_get_phy_id(ar, WMI_HOST_WLAN_5G_CAP);
+ temp_reg_cap = &ar->ab->hal_reg_cap[phy_id];
+ }
+
+ ath11k_mac_update_ch_list(ar, band,
+ temp_reg_cap->low_5ghz_chan,
+ temp_reg_cap->high_5ghz_chan);
+ }
+ }
+
+ return 0;
+}
+
+static int ath11k_mac_setup_iface_combinations(struct ath11k *ar)
+{
+ struct ath11k_base *ab = ar->ab;
+ struct ieee80211_iface_combination *combinations;
+ struct ieee80211_iface_limit *limits;
+ int n_limits;
+
+ combinations = kzalloc(sizeof(*combinations), GFP_KERNEL);
+ if (!combinations)
+ return -ENOMEM;
+
+ n_limits = 2;
+
+ limits = kcalloc(n_limits, sizeof(*limits), GFP_KERNEL);
+ if (!limits) {
+ kfree(combinations);
+ return -ENOMEM;
+ }
+
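+ /* Advertise a single combination: one STA plus up to 16 AP (or
+ * mesh) interfaces, all restricted to a single channel, with radar
+ * detection on all supported widths.
+ */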
+ limits[0].max = 1;
+ limits[0].types |= BIT(NL80211_IFTYPE_STATION);
+
+ limits[1].max = 16;
+ limits[1].types |= BIT(NL80211_IFTYPE_AP);
+
+ if (IS_ENABLED(CONFIG_MAC80211_MESH) &&
+ ab->hw_params.interface_modes & BIT(NL80211_IFTYPE_MESH_POINT))
+ limits[1].types |= BIT(NL80211_IFTYPE_MESH_POINT);
+
+ combinations[0].limits = limits;
+ combinations[0].n_limits = n_limits;
+ combinations[0].max_interfaces = 16;
+ combinations[0].num_different_channels = 1;
+ combinations[0].beacon_int_infra_match = true;
+ combinations[0].beacon_int_min_gcd = 100;
+ combinations[0].radar_detect_widths = BIT(NL80211_CHAN_WIDTH_20_NOHT) |
+ BIT(NL80211_CHAN_WIDTH_20) |
+ BIT(NL80211_CHAN_WIDTH_40) |
+ BIT(NL80211_CHAN_WIDTH_80) |
+ BIT(NL80211_CHAN_WIDTH_80P80) |
+ BIT(NL80211_CHAN_WIDTH_160);
+
+ ar->hw->wiphy->iface_combinations = combinations;
+ ar->hw->wiphy->n_iface_combinations = 1;
+
+ return 0;
+}
+
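+/* Extended capability bitmaps advertised per interface type; each array
+ * index is a byte offset into the Extended Capabilities element.
+ */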
+static const u8 ath11k_if_types_ext_capa[] = {
+ [0] = WLAN_EXT_CAPA1_EXT_CHANNEL_SWITCHING,
+ [2] = WLAN_EXT_CAPA3_MULTI_BSSID_SUPPORT,
+ [7] = WLAN_EXT_CAPA8_OPMODE_NOTIF,
+};
+
+static const u8 ath11k_if_types_ext_capa_sta[] = {
+ [0] = WLAN_EXT_CAPA1_EXT_CHANNEL_SWITCHING,
+ [2] = WLAN_EXT_CAPA3_MULTI_BSSID_SUPPORT,
+ [7] = WLAN_EXT_CAPA8_OPMODE_NOTIF,
+ [9] = WLAN_EXT_CAPA10_TWT_REQUESTER_SUPPORT,
+};
+
+static const u8 ath11k_if_types_ext_capa_ap[] = {
+ [0] = WLAN_EXT_CAPA1_EXT_CHANNEL_SWITCHING,
+ [2] = WLAN_EXT_CAPA3_MULTI_BSSID_SUPPORT,
+ [7] = WLAN_EXT_CAPA8_OPMODE_NOTIF,
+ [9] = WLAN_EXT_CAPA10_TWT_RESPONDER_SUPPORT,
+ [10] = WLAN_EXT_CAPA11_EMA_SUPPORT,
+};
+
+static const struct wiphy_iftype_ext_capab ath11k_iftypes_ext_capa[] = {
+ {
+ .extended_capabilities = ath11k_if_types_ext_capa,
+ .extended_capabilities_mask = ath11k_if_types_ext_capa,
+ .extended_capabilities_len = sizeof(ath11k_if_types_ext_capa),
+ }, {
+ .iftype = NL80211_IFTYPE_STATION,
+ .extended_capabilities = ath11k_if_types_ext_capa_sta,
+ .extended_capabilities_mask = ath11k_if_types_ext_capa_sta,
+ .extended_capabilities_len =
+ sizeof(ath11k_if_types_ext_capa_sta),
+ }, {
+ .iftype = NL80211_IFTYPE_AP,
+ .extended_capabilities = ath11k_if_types_ext_capa_ap,
+ .extended_capabilities_mask = ath11k_if_types_ext_capa_ap,
+ .extended_capabilities_len =
+ sizeof(ath11k_if_types_ext_capa_ap),
+ },
+};
+
+static void __ath11k_mac_unregister(struct ath11k *ar)
+{
+ cancel_work_sync(&ar->regd_update_work);
+
+ ieee80211_unregister_hw(ar->hw);
+
+ idr_for_each(&ar->txmgmt_idr, ath11k_mac_tx_mgmt_pending_free, ar);
+ idr_destroy(&ar->txmgmt_idr);
+
+ kfree(ar->mac.sbands[NL80211_BAND_2GHZ].channels);
+ kfree(ar->mac.sbands[NL80211_BAND_5GHZ].channels);
+ kfree(ar->mac.sbands[NL80211_BAND_6GHZ].channels);
+
+ kfree(ar->hw->wiphy->iface_combinations[0].limits);
+ kfree(ar->hw->wiphy->iface_combinations);
+
+ SET_IEEE80211_DEV(ar->hw, NULL);
+}
+
+void ath11k_mac_unregister(struct ath11k_base *ab)
+{
+ struct ath11k *ar;
+ struct ath11k_pdev *pdev;
+ int i;
+
+ for (i = 0; i < ab->num_radios; i++) {
+ pdev = &ab->pdevs[i];
+ ar = pdev->ar;
+ if (!ar)
+ continue;
+
+ __ath11k_mac_unregister(ar);
+ }
+
+ ath11k_peer_rhash_tbl_destroy(ab);
+}
+
+static int __ath11k_mac_register(struct ath11k *ar)
+{
+ struct ath11k_base *ab = ar->ab;
+ struct ath11k_pdev_cap *cap = &ar->pdev->cap;
+ static const u32 cipher_suites[] = {
+ WLAN_CIPHER_SUITE_TKIP,
+ WLAN_CIPHER_SUITE_CCMP,
+ WLAN_CIPHER_SUITE_AES_CMAC,
+ WLAN_CIPHER_SUITE_BIP_CMAC_256,
+ WLAN_CIPHER_SUITE_BIP_GMAC_128,
+ WLAN_CIPHER_SUITE_BIP_GMAC_256,
+ WLAN_CIPHER_SUITE_GCMP,
+ WLAN_CIPHER_SUITE_GCMP_256,
+ WLAN_CIPHER_SUITE_CCMP_256,
+ };
+ int ret;
+ u32 ht_cap = 0;
+
+ ath11k_pdev_caps_update(ar);
+
+ SET_IEEE80211_PERM_ADDR(ar->hw, ar->mac_addr);
+
+ SET_IEEE80211_DEV(ar->hw, ab->dev);
+
+ ret = ath11k_mac_setup_channels_rates(ar,
+ cap->supported_bands);
+ if (ret)
+ goto err;
+
+ ath11k_mac_setup_ht_vht_cap(ar, cap, &ht_cap);
+ ath11k_mac_setup_he_cap(ar, cap);
+
+ ret = ath11k_mac_setup_iface_combinations(ar);
+ if (ret) {
+ ath11k_err(ar->ab, "failed to setup interface combinations: %d\n", ret);
+ goto err_free_channels;
+ }
+
+ ar->hw->wiphy->available_antennas_rx = cap->rx_chain_mask;
+ ar->hw->wiphy->available_antennas_tx = cap->tx_chain_mask;
+
+ ar->hw->wiphy->interface_modes = ab->hw_params.interface_modes;
+
+ if (ab->hw_params.single_pdev_only && ar->supports_6ghz)
+ ieee80211_hw_set(ar->hw, SINGLE_SCAN_ON_ALL_BANDS);
+
+ if (ab->hw_params.supports_multi_bssid) {
+ ieee80211_hw_set(ar->hw, SUPPORTS_MULTI_BSSID);
+ ieee80211_hw_set(ar->hw, SUPPORTS_ONLY_HE_MULTI_BSSID);
+ }
+
+ ieee80211_hw_set(ar->hw, SIGNAL_DBM);
+ ieee80211_hw_set(ar->hw, SUPPORTS_PS);
+ ieee80211_hw_set(ar->hw, SUPPORTS_DYNAMIC_PS);
+ ieee80211_hw_set(ar->hw, MFP_CAPABLE);
+ ieee80211_hw_set(ar->hw, REPORTS_TX_ACK_STATUS);
+ ieee80211_hw_set(ar->hw, HAS_RATE_CONTROL);
+ ieee80211_hw_set(ar->hw, AP_LINK_PS);
+ ieee80211_hw_set(ar->hw, SPECTRUM_MGMT);
+ ieee80211_hw_set(ar->hw, CONNECTION_MONITOR);
+ ieee80211_hw_set(ar->hw, SUPPORTS_PER_STA_GTK);
+ ieee80211_hw_set(ar->hw, WANT_MONITOR_VIF);
+ ieee80211_hw_set(ar->hw, CHANCTX_STA_CSA);
+ ieee80211_hw_set(ar->hw, QUEUE_CONTROL);
+ ieee80211_hw_set(ar->hw, SUPPORTS_TX_FRAG);
+ ieee80211_hw_set(ar->hw, REPORTS_LOW_ACK);
+
+ if (ath11k_frame_mode == ATH11K_HW_TXRX_ETHERNET) {
+ ieee80211_hw_set(ar->hw, SUPPORTS_TX_ENCAP_OFFLOAD);
+ ieee80211_hw_set(ar->hw, SUPPORTS_RX_DECAP_OFFLOAD);
+ }
+
+ if (cap->nss_ratio_enabled)
+ ieee80211_hw_set(ar->hw, SUPPORTS_VHT_EXT_NSS_BW);
+
+ if ((ht_cap & WMI_HT_CAP_ENABLED) || ar->supports_6ghz) {
+ ieee80211_hw_set(ar->hw, AMPDU_AGGREGATION);
+ ieee80211_hw_set(ar->hw, TX_AMPDU_SETUP_IN_HW);
+ ieee80211_hw_set(ar->hw, SUPPORTS_REORDERING_BUFFER);
+ ieee80211_hw_set(ar->hw, SUPPORTS_AMSDU_IN_AMPDU);
+ ieee80211_hw_set(ar->hw, USES_RSS);
+ }
+
+ ar->hw->wiphy->features |= NL80211_FEATURE_STATIC_SMPS;
+ ar->hw->wiphy->flags |= WIPHY_FLAG_IBSS_RSN;
+
+ /* TODO: Check if the HT capability advertised by the firmware differs
+ * between bands for a dual band capable radio. It will be tricky to
+ * handle when the HT capability differs for each band.
+ */
+ if (ht_cap & WMI_HT_CAP_DYNAMIC_SMPS ||
+ (ar->supports_6ghz && ab->hw_params.supports_dynamic_smps_6ghz))
+ ar->hw->wiphy->features |= NL80211_FEATURE_DYNAMIC_SMPS;
+
+ ar->hw->wiphy->max_scan_ssids = WLAN_SCAN_PARAMS_MAX_SSID;
+ ar->hw->wiphy->max_scan_ie_len = WLAN_SCAN_PARAMS_MAX_IE_LEN;
+
+ ar->hw->max_listen_interval = ATH11K_MAX_HW_LISTEN_INTERVAL;
+
+ ar->hw->wiphy->flags |= WIPHY_FLAG_HAS_REMAIN_ON_CHANNEL;
+ ar->hw->wiphy->flags |= WIPHY_FLAG_HAS_CHANNEL_SWITCH;
+ ar->hw->wiphy->max_remain_on_channel_duration = 5000;
+
+ ar->hw->wiphy->flags |= WIPHY_FLAG_AP_UAPSD;
+ ar->hw->wiphy->features |= NL80211_FEATURE_AP_MODE_CHAN_WIDTH_CHANGE |
+ NL80211_FEATURE_AP_SCAN;
+
+ ar->max_num_stations = TARGET_NUM_STATIONS(ab);
+ ar->max_num_peers = TARGET_NUM_PEERS_PDEV(ab);
+
+ ar->hw->wiphy->max_ap_assoc_sta = ar->max_num_stations;
+
+ if (test_bit(WMI_TLV_SERVICE_SPOOF_MAC_SUPPORT, ar->wmi->wmi_ab->svc_map)) {
+ ar->hw->wiphy->features |=
+ NL80211_FEATURE_SCAN_RANDOM_MAC_ADDR;
+ }
+
+ if (test_bit(WMI_TLV_SERVICE_NLO, ar->wmi->wmi_ab->svc_map)) {
+ ar->hw->wiphy->max_sched_scan_ssids = WMI_PNO_MAX_SUPP_NETWORKS;
+ ar->hw->wiphy->max_match_sets = WMI_PNO_MAX_SUPP_NETWORKS;
+ ar->hw->wiphy->max_sched_scan_ie_len = WMI_PNO_MAX_IE_LENGTH;
+ ar->hw->wiphy->max_sched_scan_plans = WMI_PNO_MAX_SCHED_SCAN_PLANS;
+ ar->hw->wiphy->max_sched_scan_plan_interval =
+ WMI_PNO_MAX_SCHED_SCAN_PLAN_INT;
+ ar->hw->wiphy->max_sched_scan_plan_iterations =
+ WMI_PNO_MAX_SCHED_SCAN_PLAN_ITRNS;
+ ar->hw->wiphy->features |= NL80211_FEATURE_ND_RANDOM_MAC_ADDR;
+ }
+
+ ret = ath11k_wow_init(ar);
+ if (ret) {
+ ath11k_warn(ar->ab, "failed to init wow: %d\n", ret);
+ goto err_free_if_combs;
+ }
+
+ if (test_bit(WMI_TLV_SERVICE_TX_DATA_MGMT_ACK_RSSI,
+ ar->ab->wmi_ab.svc_map))
+ wiphy_ext_feature_set(ar->hw->wiphy,
+ NL80211_EXT_FEATURE_ACK_SIGNAL_SUPPORT);
+
+ ar->hw->queues = ATH11K_HW_MAX_QUEUES;
+ ar->hw->wiphy->tx_queue_len = ATH11K_QUEUE_LEN;
+ ar->hw->offchannel_tx_hw_queue = ATH11K_HW_MAX_QUEUES - 1;
+ ar->hw->max_rx_aggregation_subframes = IEEE80211_MAX_AMPDU_BUF_HE;
+
+ ar->hw->vif_data_size = sizeof(struct ath11k_vif);
+ ar->hw->sta_data_size = sizeof(struct ath11k_sta);
+
+ wiphy_ext_feature_set(ar->hw->wiphy, NL80211_EXT_FEATURE_CQM_RSSI_LIST);
+ wiphy_ext_feature_set(ar->hw->wiphy, NL80211_EXT_FEATURE_STA_TX_PWR);
+ if (test_bit(WMI_TLV_SERVICE_BSS_COLOR_OFFLOAD,
+ ar->ab->wmi_ab.svc_map)) {
+ wiphy_ext_feature_set(ar->hw->wiphy,
+ NL80211_EXT_FEATURE_BSS_COLOR);
+ ieee80211_hw_set(ar->hw, DETECTS_COLOR_COLLISION);
+ }
+
+ ar->hw->wiphy->cipher_suites = cipher_suites;
+ ar->hw->wiphy->n_cipher_suites = ARRAY_SIZE(cipher_suites);
+
+ ar->hw->wiphy->iftype_ext_capab = ath11k_iftypes_ext_capa;
+ ar->hw->wiphy->num_iftype_ext_capab =
+ ARRAY_SIZE(ath11k_iftypes_ext_capa);
+
+ if (ar->supports_6ghz) {
+ wiphy_ext_feature_set(ar->hw->wiphy,
+ NL80211_EXT_FEATURE_FILS_DISCOVERY);
+ wiphy_ext_feature_set(ar->hw->wiphy,
+ NL80211_EXT_FEATURE_UNSOL_BCAST_PROBE_RESP);
+ }
+
+ wiphy_ext_feature_set(ar->hw->wiphy,
+ NL80211_EXT_FEATURE_SET_SCAN_DWELL);
+
+ if (test_bit(WMI_TLV_SERVICE_RTT, ar->ab->wmi_ab.svc_map))
+ wiphy_ext_feature_set(ar->hw->wiphy,
+ NL80211_EXT_FEATURE_ENABLE_FTM_RESPONDER);
+
+ ar->hw->wiphy->mbssid_max_interfaces = TARGET_NUM_VDEVS(ab);
+ ar->hw->wiphy->ema_max_profile_periodicity = TARGET_EMA_MAX_PROFILE_PERIOD;
+
+ ath11k_reg_init(ar);
+
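+ /* Unless raw Tx/Rx mode is in use, advertise hardware checksum
+ * offload and fast-xmit support and keep control over software
+ * crypto fallback.
+ */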
+ if (!test_bit(ATH11K_FLAG_RAW_MODE, &ab->dev_flags)) {
+ ar->hw->netdev_features = NETIF_F_HW_CSUM;
+ ieee80211_hw_set(ar->hw, SW_CRYPTO_CONTROL);
+ ieee80211_hw_set(ar->hw, SUPPORT_FAST_XMIT);
+ }
+
+ if (test_bit(WMI_TLV_SERVICE_BIOS_SAR_SUPPORT, ar->ab->wmi_ab.svc_map) &&
+ ab->hw_params.bios_sar_capa)
+ ar->hw->wiphy->sar_capa = ab->hw_params.bios_sar_capa;
+
+ ret = ieee80211_register_hw(ar->hw);
+ if (ret) {
+ ath11k_err(ar->ab, "ieee80211 registration failed: %d\n", ret);
+ goto err_free_if_combs;
+ }
+
+ if (!ab->hw_params.supports_monitor)
+ /* There's a race between calling ieee80211_register_hw()
+ * and here where the monitor mode is enabled for a little
+ * while. But that time is so short and in practice it makes
+ * no difference in real life.
+ */
+ ar->hw->wiphy->interface_modes &= ~BIT(NL80211_IFTYPE_MONITOR);
+
+ /* Apply the regd received during initialization */
+ ret = ath11k_regd_update(ar);
+ if (ret) {
+ ath11k_err(ar->ab, "ath11k regd update failed: %d\n", ret);
+ goto err_unregister_hw;
+ }
+
+ if (ab->hw_params.current_cc_support && ab->new_alpha2[0]) {
+ struct wmi_set_current_country_params set_current_param = {};
+
+ memcpy(&set_current_param.alpha2, ab->new_alpha2, 2);
+ memcpy(&ar->alpha2, ab->new_alpha2, 2);
+ ret = ath11k_wmi_send_set_current_country_cmd(ar, &set_current_param);
+ if (ret)
+ ath11k_warn(ar->ab,
+ "failed set cc code for mac register: %d\n", ret);
+ }
+
+ ret = ath11k_debugfs_register(ar);
+ if (ret) {
+ ath11k_err(ar->ab, "debugfs registration failed: %d\n", ret);
+ goto err_unregister_hw;
+ }
+
+ return 0;
+
+err_unregister_hw:
+ ieee80211_unregister_hw(ar->hw);
+
+err_free_if_combs:
+ kfree(ar->hw->wiphy->iface_combinations[0].limits);
+ kfree(ar->hw->wiphy->iface_combinations);
+
+err_free_channels:
+ kfree(ar->mac.sbands[NL80211_BAND_2GHZ].channels);
+ kfree(ar->mac.sbands[NL80211_BAND_5GHZ].channels);
+ kfree(ar->mac.sbands[NL80211_BAND_6GHZ].channels);
+
+err:
+ SET_IEEE80211_DEV(ar->hw, NULL);
+ return ret;
+}
+
+int ath11k_mac_register(struct ath11k_base *ab)
+{
+ struct ath11k *ar;
+ struct ath11k_pdev *pdev;
+ int i;
+ int ret;
+ u8 mac_addr[ETH_ALEN] = {0};
+
+ if (test_bit(ATH11K_FLAG_REGISTERED, &ab->dev_flags))
+ return 0;
+
+ /* Initialize channel counters frequency value in hertz */
+ ab->cc_freq_hz = IPQ8074_CC_FREQ_HERTZ;
+ ab->free_vdev_map = (1LL << (ab->num_radios * TARGET_NUM_VDEVS(ab))) - 1;
+
+ ret = ath11k_peer_rhash_tbl_init(ab);
+ if (ret)
+ return ret;
+
+ device_get_mac_address(ab->dev, mac_addr);
+
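+ /* When valid per-pdev MAC addresses are not available, derive one
+ * address per radio from the base address by offsetting its fifth
+ * byte with the radio index.
+ */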
+ for (i = 0; i < ab->num_radios; i++) {
+ pdev = &ab->pdevs[i];
+ ar = pdev->ar;
+ if (ab->pdevs_macaddr_valid) {
+ ether_addr_copy(ar->mac_addr, pdev->mac_addr);
+ } else {
+ if (is_zero_ether_addr(mac_addr))
+ ether_addr_copy(ar->mac_addr, ab->mac_addr);
+ else
+ ether_addr_copy(ar->mac_addr, mac_addr);
+ ar->mac_addr[4] += i;
+ }
+
+ idr_init(&ar->txmgmt_idr);
+ spin_lock_init(&ar->txmgmt_idr_lock);
+
+ ret = __ath11k_mac_register(ar);
+ if (ret)
+ goto err_cleanup;
+
+ init_waitqueue_head(&ar->txmgmt_empty_waitq);
+ }
+
+ return 0;
+
+err_cleanup:
+ for (i = i - 1; i >= 0; i--) {
+ pdev = &ab->pdevs[i];
+ ar = pdev->ar;
+ __ath11k_mac_unregister(ar);
+ }
+
+ ath11k_peer_rhash_tbl_destroy(ab);
+
+ return ret;
+}
+
+int ath11k_mac_allocate(struct ath11k_base *ab)
+{
+ struct ieee80211_hw *hw;
+ struct ath11k *ar;
+ struct ath11k_pdev *pdev;
+ int ret;
+ int i;
+
+ if (test_bit(ATH11K_FLAG_REGISTERED, &ab->dev_flags))
+ return 0;
+
+ for (i = 0; i < ab->num_radios; i++) {
+ pdev = &ab->pdevs[i];
+ hw = ieee80211_alloc_hw(sizeof(struct ath11k), &ath11k_ops);
+ if (!hw) {
+ ath11k_warn(ab, "failed to allocate mac80211 hw device\n");
+ ret = -ENOMEM;
+ goto err_free_mac;
+ }
+
+ ar = hw->priv;
+ ar->hw = hw;
+ ar->ab = ab;
+ ar->pdev = pdev;
+ ar->pdev_idx = i;
+ ar->lmac_id = ath11k_hw_get_mac_from_pdev_id(&ab->hw_params, i);
+
+ ar->wmi = &ab->wmi_ab.wmi[i];
+ /* FIXME: wmi[0] is already initialized during attach;
+ * should we do this again?
+ */
+ ath11k_wmi_pdev_attach(ab, i);
+
+ ar->cfg_tx_chainmask = pdev->cap.tx_chain_mask;
+ ar->cfg_rx_chainmask = pdev->cap.rx_chain_mask;
+ ar->num_tx_chains = get_num_chains(pdev->cap.tx_chain_mask);
+ ar->num_rx_chains = get_num_chains(pdev->cap.rx_chain_mask);
+
+ pdev->ar = ar;
+ spin_lock_init(&ar->data_lock);
+ INIT_LIST_HEAD(&ar->arvifs);
+ INIT_LIST_HEAD(&ar->ppdu_stats_info);
+ mutex_init(&ar->conf_mutex);
+ init_completion(&ar->vdev_setup_done);
+ init_completion(&ar->vdev_delete_done);
+ init_completion(&ar->peer_assoc_done);
+ init_completion(&ar->peer_delete_done);
+ init_completion(&ar->install_key_done);
+ init_completion(&ar->bss_survey_done);
+ init_completion(&ar->scan.started);
+ init_completion(&ar->scan.completed);
+ init_completion(&ar->scan.on_channel);
+ init_completion(&ar->thermal.wmi_sync);
+
+ INIT_DELAYED_WORK(&ar->scan.timeout, ath11k_scan_timeout_work);
+ INIT_WORK(&ar->regd_update_work, ath11k_regd_update_work);
+
+ INIT_WORK(&ar->wmi_mgmt_tx_work, ath11k_mgmt_over_wmi_tx_work);
+ skb_queue_head_init(&ar->wmi_mgmt_tx_queue);
+
+ clear_bit(ATH11K_FLAG_MONITOR_STARTED, &ar->monitor_flags);
+
+ ar->monitor_vdev_id = -1;
+ clear_bit(ATH11K_FLAG_MONITOR_VDEV_CREATED, &ar->monitor_flags);
+ ar->vdev_id_11d_scan = ATH11K_11D_INVALID_VDEV_ID;
+ init_completion(&ar->completed_11d_scan);
+
+ ath11k_fw_stats_init(ar);
+ }
+
+ return 0;
+
+err_free_mac:
+ ath11k_mac_destroy(ab);
+
+ return ret;
+}
+
+void ath11k_mac_destroy(struct ath11k_base *ab)
+{
+ struct ath11k *ar;
+ struct ath11k_pdev *pdev;
+ int i;
+
+ for (i = 0; i < ab->num_radios; i++) {
+ pdev = &ab->pdevs[i];
+ ar = pdev->ar;
+ if (!ar)
+ continue;
+
+ ath11k_fw_stats_free(&ar->fw_stats);
+ ieee80211_free_hw(ar->hw);
+ pdev->ar = NULL;
+ }
+}
+
+int ath11k_mac_vif_set_keepalive(struct ath11k_vif *arvif,
+ enum wmi_sta_keepalive_method method,
+ u32 interval)
+{
+ struct ath11k *ar = arvif->ar;
+ struct wmi_sta_keepalive_arg arg = {};
+ int ret;
+
+ lockdep_assert_held(&ar->conf_mutex);
+
+ if (arvif->vdev_type != WMI_VDEV_TYPE_STA)
+ return 0;
+
+ if (!test_bit(WMI_TLV_SERVICE_STA_KEEP_ALIVE, ar->ab->wmi_ab.svc_map))
+ return 0;
+
+ arg.vdev_id = arvif->vdev_id;
+ arg.enabled = 1;
+ arg.method = method;
+ arg.interval = interval;
+
+ ret = ath11k_wmi_sta_keepalive(ar, &arg);
+ if (ret) {
+ ath11k_warn(ar->ab, "failed to set keepalive on vdev %i: %d\n",
+ arvif->vdev_id, ret);
+ return ret;
+ }
+
+ return 0;
+}
diff --git a/drivers/net/wireless/ath/ath11k/mac.h b/drivers/net/wireless/ath/ath11k/mac.h
new file mode 100644
index 0000000000..0231783ad7
--- /dev/null
+++ b/drivers/net/wireless/ath/ath11k/mac.h
@@ -0,0 +1,178 @@
+/* SPDX-License-Identifier: BSD-3-Clause-Clear */
+/*
+ * Copyright (c) 2018-2019 The Linux Foundation. All rights reserved.
+ */
+
+#ifndef ATH11K_MAC_H
+#define ATH11K_MAC_H
+
+#include <net/mac80211.h>
+#include <net/cfg80211.h>
+#include "wmi.h"
+
+struct ath11k;
+struct ath11k_base;
+
+struct ath11k_generic_iter {
+ struct ath11k *ar;
+ int ret;
+};
+
+/* number of failed packets (20 packets with 16 sw retries each) */
+#define ATH11K_KICKOUT_THRESHOLD (20 * 16)
+
+/* Use insanely high numbers to make sure that the firmware implementation
+ * won't start, we have the same functionality already in hostapd. Unit
+ * is seconds.
+ */
+#define ATH11K_KEEPALIVE_MIN_IDLE 3747
+#define ATH11K_KEEPALIVE_MAX_IDLE 3895
+#define ATH11K_KEEPALIVE_MAX_UNRESPONSIVE 3900
+
+#define WMI_HOST_RC_DS_FLAG 0x01
+#define WMI_HOST_RC_CW40_FLAG 0x02
+#define WMI_HOST_RC_SGI_FLAG 0x04
+#define WMI_HOST_RC_HT_FLAG 0x08
+#define WMI_HOST_RC_RTSCTS_FLAG 0x10
+#define WMI_HOST_RC_TX_STBC_FLAG 0x20
+#define WMI_HOST_RC_RX_STBC_FLAG 0xC0
+#define WMI_HOST_RC_RX_STBC_FLAG_S 6
+#define WMI_HOST_RC_WEP_TKIP_FLAG 0x100
+#define WMI_HOST_RC_TS_FLAG 0x200
+#define WMI_HOST_RC_UAPSD_FLAG 0x400
+
+#define WMI_HT_CAP_ENABLED 0x0001
+#define WMI_HT_CAP_HT20_SGI 0x0002
+#define WMI_HT_CAP_DYNAMIC_SMPS 0x0004
+#define WMI_HT_CAP_TX_STBC 0x0008
+#define WMI_HT_CAP_TX_STBC_MASK_SHIFT 3
+#define WMI_HT_CAP_RX_STBC 0x0030
+#define WMI_HT_CAP_RX_STBC_MASK_SHIFT 4
+#define WMI_HT_CAP_LDPC 0x0040
+#define WMI_HT_CAP_L_SIG_TXOP_PROT 0x0080
+#define WMI_HT_CAP_MPDU_DENSITY 0x0700
+#define WMI_HT_CAP_MPDU_DENSITY_MASK_SHIFT 8
+#define WMI_HT_CAP_HT40_SGI 0x0800
+#define WMI_HT_CAP_RX_LDPC 0x1000
+#define WMI_HT_CAP_TX_LDPC 0x2000
+#define WMI_HT_CAP_IBF_BFER 0x4000
+
+/* These macros should be used when we wish to advertise STBC support for
+ * only 1SS or 2SS or 3SS.
+ */
+#define WMI_HT_CAP_RX_STBC_1SS 0x0010
+#define WMI_HT_CAP_RX_STBC_2SS 0x0020
+#define WMI_HT_CAP_RX_STBC_3SS 0x0030
+
+#define WMI_HT_CAP_DEFAULT_ALL (WMI_HT_CAP_ENABLED | \
+ WMI_HT_CAP_HT20_SGI | \
+ WMI_HT_CAP_HT40_SGI | \
+ WMI_HT_CAP_TX_STBC | \
+ WMI_HT_CAP_RX_STBC | \
+ WMI_HT_CAP_LDPC)
+
+#define WMI_VHT_CAP_MAX_MPDU_LEN_MASK 0x00000003
+#define WMI_VHT_CAP_RX_LDPC 0x00000010
+#define WMI_VHT_CAP_SGI_80MHZ 0x00000020
+#define WMI_VHT_CAP_SGI_160MHZ 0x00000040
+#define WMI_VHT_CAP_TX_STBC 0x00000080
+#define WMI_VHT_CAP_RX_STBC_MASK 0x00000300
+#define WMI_VHT_CAP_RX_STBC_MASK_SHIFT 8
+#define WMI_VHT_CAP_SU_BFER 0x00000800
+#define WMI_VHT_CAP_SU_BFEE 0x00001000
+#define WMI_VHT_CAP_MAX_CS_ANT_MASK 0x0000E000
+#define WMI_VHT_CAP_MAX_CS_ANT_MASK_SHIFT 13
+#define WMI_VHT_CAP_MAX_SND_DIM_MASK 0x00070000
+#define WMI_VHT_CAP_MAX_SND_DIM_MASK_SHIFT 16
+#define WMI_VHT_CAP_MU_BFER 0x00080000
+#define WMI_VHT_CAP_MU_BFEE 0x00100000
+#define WMI_VHT_CAP_MAX_AMPDU_LEN_EXP 0x03800000
+#define WMI_VHT_CAP_MAX_AMPDU_LEN_EXP_SHIT 23
+#define WMI_VHT_CAP_RX_FIXED_ANT 0x10000000
+#define WMI_VHT_CAP_TX_FIXED_ANT 0x20000000
+
+#define WMI_VHT_CAP_MAX_MPDU_LEN_11454 0x00000002
+
+/* These macros should be used when we wish to advertise STBC support for
+ * only 1SS or 2SS or 3SS.
+ */
+#define WMI_VHT_CAP_RX_STBC_1SS 0x00000100
+#define WMI_VHT_CAP_RX_STBC_2SS 0x00000200
+#define WMI_VHT_CAP_RX_STBC_3SS 0x00000300
+
+#define WMI_VHT_CAP_DEFAULT_ALL (WMI_VHT_CAP_MAX_MPDU_LEN_11454 | \
+ WMI_VHT_CAP_SGI_80MHZ | \
+ WMI_VHT_CAP_TX_STBC | \
+ WMI_VHT_CAP_RX_STBC_MASK | \
+ WMI_VHT_CAP_RX_LDPC | \
+ WMI_VHT_CAP_MAX_AMPDU_LEN_EXP | \
+ WMI_VHT_CAP_RX_FIXED_ANT | \
+ WMI_VHT_CAP_TX_FIXED_ANT)
+
+/* FIXME: should these be in ieee80211.h? */
+#define IEEE80211_VHT_MCS_SUPPORT_0_11_MASK GENMASK(23, 16)
+#define IEEE80211_DISABLE_VHT_MCS_SUPPORT_0_11 BIT(24)
+
+#define WMI_MAX_SPATIAL_STREAM 3
+
+#define ATH11K_CHAN_WIDTH_NUM 8
+#define ATH11K_BW_NSS_MAP_ENABLE BIT(31)
+#define ATH11K_PEER_RX_NSS_160MHZ GENMASK(2, 0)
+#define ATH11K_PEER_RX_NSS_80_80MHZ GENMASK(5, 3)
+
+#define ATH11K_OBSS_PD_MAX_THRESHOLD -82
+#define ATH11K_OBSS_PD_NON_SRG_MAX_THRESHOLD -62
+#define ATH11K_OBSS_PD_THRESHOLD_IN_DBM BIT(29)
+#define ATH11K_OBSS_PD_SRG_EN BIT(30)
+#define ATH11K_OBSS_PD_NON_SRG_EN BIT(31)
+
+extern const struct htt_rx_ring_tlv_filter ath11k_mac_mon_status_filter_default;
+
+#define ATH11K_SCAN_11D_INTERVAL 600000
+#define ATH11K_11D_INVALID_VDEV_ID 0xFFFF
+
+void ath11k_mac_11d_scan_start(struct ath11k *ar, u32 vdev_id);
+void ath11k_mac_11d_scan_stop(struct ath11k *ar);
+void ath11k_mac_11d_scan_stop_all(struct ath11k_base *ab);
+
+void ath11k_mac_destroy(struct ath11k_base *ab);
+void ath11k_mac_unregister(struct ath11k_base *ab);
+int ath11k_mac_register(struct ath11k_base *ab);
+int ath11k_mac_allocate(struct ath11k_base *ab);
+int ath11k_mac_hw_ratecode_to_legacy_rate(u8 hw_rc, u8 preamble, u8 *rateidx,
+ u16 *rate);
+u8 ath11k_mac_bitrate_to_idx(const struct ieee80211_supported_band *sband,
+ u32 bitrate);
+u8 ath11k_mac_hw_rate_to_idx(const struct ieee80211_supported_band *sband,
+ u8 hw_rate, bool cck);
+
+void __ath11k_mac_scan_finish(struct ath11k *ar);
+void ath11k_mac_scan_finish(struct ath11k *ar);
+
+struct ath11k_vif *ath11k_mac_get_arvif(struct ath11k *ar, u32 vdev_id);
+struct ath11k_vif *ath11k_mac_get_arvif_by_vdev_id(struct ath11k_base *ab,
+ u32 vdev_id);
+u8 ath11k_mac_get_target_pdev_id(struct ath11k *ar);
+u8 ath11k_mac_get_target_pdev_id_from_vif(struct ath11k_vif *arvif);
+struct ath11k_vif *ath11k_mac_get_vif_up(struct ath11k_base *ab);
+
+struct ath11k *ath11k_mac_get_ar_by_vdev_id(struct ath11k_base *ab, u32 vdev_id);
+struct ath11k *ath11k_mac_get_ar_by_pdev_id(struct ath11k_base *ab, u32 pdev_id);
+
+void ath11k_mac_drain_tx(struct ath11k *ar);
+void ath11k_mac_peer_cleanup_all(struct ath11k *ar);
+int ath11k_mac_tx_mgmt_pending_free(int buf_id, void *skb, void *ctx);
+u8 ath11k_mac_bw_to_mac80211_bw(u8 bw);
+enum nl80211_he_gi ath11k_mac_he_gi_to_nl80211_he_gi(u8 sgi);
+enum nl80211_he_ru_alloc ath11k_mac_phy_he_ru_to_nl80211_he_ru_alloc(u16 ru_phy);
+enum nl80211_he_ru_alloc ath11k_mac_he_ru_tones_to_nl80211_he_ru_alloc(u16 ru_tones);
+enum ath11k_supported_bw ath11k_mac_mac80211_bw_to_ath11k_bw(enum rate_info_bw bw);
+enum hal_encrypt_type ath11k_dp_tx_get_encrypt_type(u32 cipher);
+void ath11k_mac_handle_beacon(struct ath11k *ar, struct sk_buff *skb);
+void ath11k_mac_handle_beacon_miss(struct ath11k *ar, u32 vdev_id);
+void ath11k_mac_bcn_tx_event(struct ath11k_vif *arvif);
+int ath11k_mac_wait_tx_complete(struct ath11k *ar);
+int ath11k_mac_vif_set_keepalive(struct ath11k_vif *arvif,
+ enum wmi_sta_keepalive_method method,
+ u32 interval);
+#endif
diff --git a/drivers/net/wireless/ath/ath11k/mhi.c b/drivers/net/wireless/ath/ath11k/mhi.c
new file mode 100644
index 0000000000..3ac689f1de
--- /dev/null
+++ b/drivers/net/wireless/ath/ath11k/mhi.c
@@ -0,0 +1,527 @@
+// SPDX-License-Identifier: BSD-3-Clause-Clear
+/*
+ * Copyright (c) 2020 The Linux Foundation. All rights reserved.
+ * Copyright (c) 2021-2022, Qualcomm Innovation Center, Inc. All rights reserved.
+ */
+
+#include <linux/msi.h>
+#include <linux/pci.h>
+#include <linux/of.h>
+#include <linux/of_address.h>
+#include <linux/ioport.h>
+
+#include "core.h"
+#include "debug.h"
+#include "mhi.h"
+#include "pci.h"
+#include "pcic.h"
+
+#define MHI_TIMEOUT_DEFAULT_MS 20000
+#define RDDM_DUMP_SIZE 0x420000
+
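+/* Per-target MHI channel and event layouts: the LOOPBACK channels (0/1)
+ * are used for MHI loopback testing, while the IPCR channels (20/21)
+ * carry the QMI control traffic between host and firmware.
+ */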
+static struct mhi_channel_config ath11k_mhi_channels_qca6390[] = {
+ {
+ .num = 0,
+ .name = "LOOPBACK",
+ .num_elements = 32,
+ .event_ring = 0,
+ .dir = DMA_TO_DEVICE,
+ .ee_mask = 0x4,
+ .pollcfg = 0,
+ .doorbell = MHI_DB_BRST_DISABLE,
+ .lpm_notify = false,
+ .offload_channel = false,
+ .doorbell_mode_switch = false,
+ .auto_queue = false,
+ },
+ {
+ .num = 1,
+ .name = "LOOPBACK",
+ .num_elements = 32,
+ .event_ring = 0,
+ .dir = DMA_FROM_DEVICE,
+ .ee_mask = 0x4,
+ .pollcfg = 0,
+ .doorbell = MHI_DB_BRST_DISABLE,
+ .lpm_notify = false,
+ .offload_channel = false,
+ .doorbell_mode_switch = false,
+ .auto_queue = false,
+ },
+ {
+ .num = 20,
+ .name = "IPCR",
+ .num_elements = 64,
+ .event_ring = 1,
+ .dir = DMA_TO_DEVICE,
+ .ee_mask = 0x4,
+ .pollcfg = 0,
+ .doorbell = MHI_DB_BRST_DISABLE,
+ .lpm_notify = false,
+ .offload_channel = false,
+ .doorbell_mode_switch = false,
+ .auto_queue = false,
+ },
+ {
+ .num = 21,
+ .name = "IPCR",
+ .num_elements = 64,
+ .event_ring = 1,
+ .dir = DMA_FROM_DEVICE,
+ .ee_mask = 0x4,
+ .pollcfg = 0,
+ .doorbell = MHI_DB_BRST_DISABLE,
+ .lpm_notify = false,
+ .offload_channel = false,
+ .doorbell_mode_switch = false,
+ .auto_queue = true,
+ },
+};
+
+static struct mhi_event_config ath11k_mhi_events_qca6390[] = {
+ {
+ .num_elements = 32,
+ .irq_moderation_ms = 0,
+ .irq = 1,
+ .mode = MHI_DB_BRST_DISABLE,
+ .data_type = MHI_ER_CTRL,
+ .hardware_event = false,
+ .client_managed = false,
+ .offload_channel = false,
+ },
+ {
+ .num_elements = 256,
+ .irq_moderation_ms = 1,
+ .irq = 2,
+ .mode = MHI_DB_BRST_DISABLE,
+ .priority = 1,
+ .hardware_event = false,
+ .client_managed = false,
+ .offload_channel = false,
+ },
+};
+
+static struct mhi_controller_config ath11k_mhi_config_qca6390 = {
+ .max_channels = 128,
+ .timeout_ms = 2000,
+ .use_bounce_buf = false,
+ .buf_len = 0,
+ .num_channels = ARRAY_SIZE(ath11k_mhi_channels_qca6390),
+ .ch_cfg = ath11k_mhi_channels_qca6390,
+ .num_events = ARRAY_SIZE(ath11k_mhi_events_qca6390),
+ .event_cfg = ath11k_mhi_events_qca6390,
+};
+
+static struct mhi_channel_config ath11k_mhi_channels_qcn9074[] = {
+ {
+ .num = 0,
+ .name = "LOOPBACK",
+ .num_elements = 32,
+ .event_ring = 1,
+ .dir = DMA_TO_DEVICE,
+ .ee_mask = 0x14,
+ .pollcfg = 0,
+ .doorbell = MHI_DB_BRST_DISABLE,
+ .lpm_notify = false,
+ .offload_channel = false,
+ .doorbell_mode_switch = false,
+ .auto_queue = false,
+ },
+ {
+ .num = 1,
+ .name = "LOOPBACK",
+ .num_elements = 32,
+ .event_ring = 1,
+ .dir = DMA_FROM_DEVICE,
+ .ee_mask = 0x14,
+ .pollcfg = 0,
+ .doorbell = MHI_DB_BRST_DISABLE,
+ .lpm_notify = false,
+ .offload_channel = false,
+ .doorbell_mode_switch = false,
+ .auto_queue = false,
+ },
+ {
+ .num = 20,
+ .name = "IPCR",
+ .num_elements = 32,
+ .event_ring = 1,
+ .dir = DMA_TO_DEVICE,
+ .ee_mask = 0x14,
+ .pollcfg = 0,
+ .doorbell = MHI_DB_BRST_DISABLE,
+ .lpm_notify = false,
+ .offload_channel = false,
+ .doorbell_mode_switch = false,
+ .auto_queue = false,
+ },
+ {
+ .num = 21,
+ .name = "IPCR",
+ .num_elements = 32,
+ .event_ring = 1,
+ .dir = DMA_FROM_DEVICE,
+ .ee_mask = 0x14,
+ .pollcfg = 0,
+ .doorbell = MHI_DB_BRST_DISABLE,
+ .lpm_notify = false,
+ .offload_channel = false,
+ .doorbell_mode_switch = false,
+ .auto_queue = true,
+ },
+};
+
+static struct mhi_event_config ath11k_mhi_events_qcn9074[] = {
+ {
+ .num_elements = 32,
+ .irq_moderation_ms = 0,
+ .irq = 1,
+ .data_type = MHI_ER_CTRL,
+ .mode = MHI_DB_BRST_DISABLE,
+ .hardware_event = false,
+ .client_managed = false,
+ .offload_channel = false,
+ },
+ {
+ .num_elements = 256,
+ .irq_moderation_ms = 1,
+ .irq = 2,
+ .mode = MHI_DB_BRST_DISABLE,
+ .priority = 1,
+ .hardware_event = false,
+ .client_managed = false,
+ .offload_channel = false,
+ },
+};
+
+static struct mhi_controller_config ath11k_mhi_config_qcn9074 = {
+ .max_channels = 30,
+ .timeout_ms = 10000,
+ .use_bounce_buf = false,
+ .buf_len = 0,
+ .num_channels = ARRAY_SIZE(ath11k_mhi_channels_qcn9074),
+ .ch_cfg = ath11k_mhi_channels_qcn9074,
+ .num_events = ARRAY_SIZE(ath11k_mhi_events_qcn9074),
+ .event_cfg = ath11k_mhi_events_qcn9074,
+};
+
+void ath11k_mhi_set_mhictrl_reset(struct ath11k_base *ab)
+{
+ u32 val;
+
+ val = ath11k_pcic_read32(ab, MHISTATUS);
+
+ ath11k_dbg(ab, ATH11K_DBG_PCI, "mhistatus 0x%x\n", val);
+
+ /* It is observed on QCA6390 that, after SOC_GLOBAL_RESET, MHISTATUS
+ * has the SYSERR bit set, so MHICTRL_RESET needs to be set to clear
+ * SYSERR.
+ */
+ ath11k_pcic_write32(ab, MHICTRL, MHICTRL_RESET_MASK);
+
+ mdelay(10);
+}
+
+static void ath11k_mhi_reset_txvecdb(struct ath11k_base *ab)
+{
+ ath11k_pcic_write32(ab, PCIE_TXVECDB, 0);
+}
+
+static void ath11k_mhi_reset_txvecstatus(struct ath11k_base *ab)
+{
+ ath11k_pcic_write32(ab, PCIE_TXVECSTATUS, 0);
+}
+
+static void ath11k_mhi_reset_rxvecdb(struct ath11k_base *ab)
+{
+ ath11k_pcic_write32(ab, PCIE_RXVECDB, 0);
+}
+
+static void ath11k_mhi_reset_rxvecstatus(struct ath11k_base *ab)
+{
+ ath11k_pcic_write32(ab, PCIE_RXVECSTATUS, 0);
+}
+
+void ath11k_mhi_clear_vector(struct ath11k_base *ab)
+{
+ ath11k_mhi_reset_txvecdb(ab);
+ ath11k_mhi_reset_txvecstatus(ab);
+ ath11k_mhi_reset_rxvecdb(ab);
+ ath11k_mhi_reset_rxvecstatus(ab);
+}
+
+static int ath11k_mhi_get_msi(struct ath11k_pci *ab_pci)
+{
+ struct ath11k_base *ab = ab_pci->ab;
+ u32 user_base_data, base_vector;
+ int ret, num_vectors, i;
+ int *irq;
+ unsigned int msi_data;
+
+ ret = ath11k_pcic_get_user_msi_assignment(ab, "MHI", &num_vectors,
+ &user_base_data, &base_vector);
+ if (ret)
+ return ret;
+
+ ath11k_dbg(ab, ATH11K_DBG_PCI, "num_vectors %d base_vector %d\n",
+ num_vectors, base_vector);
+
+ irq = kcalloc(num_vectors, sizeof(int), GFP_KERNEL);
+ if (!irq)
+ return -ENOMEM;
+
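+ /* With a single shared MSI every MHI vector maps to the same base
+ * interrupt, whereas with multiple MSI vectors each one gets its
+ * own.
+ */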
+ for (i = 0; i < num_vectors; i++) {
+ msi_data = base_vector;
+
+ if (test_bit(ATH11K_FLAG_MULTI_MSI_VECTORS, &ab->dev_flags))
+ msi_data += i;
+
+ irq[i] = ath11k_pci_get_msi_irq(ab, msi_data);
+ }
+
+ ab_pci->mhi_ctrl->irq = irq;
+ ab_pci->mhi_ctrl->nr_irqs = num_vectors;
+
+ return 0;
+}
+
+static int ath11k_mhi_op_runtime_get(struct mhi_controller *mhi_cntrl)
+{
+ return 0;
+}
+
+static void ath11k_mhi_op_runtime_put(struct mhi_controller *mhi_cntrl)
+{
+}
+
+static char *ath11k_mhi_op_callback_to_str(enum mhi_callback reason)
+{
+ switch (reason) {
+ case MHI_CB_IDLE:
+ return "MHI_CB_IDLE";
+ case MHI_CB_PENDING_DATA:
+ return "MHI_CB_PENDING_DATA";
+ case MHI_CB_LPM_ENTER:
+ return "MHI_CB_LPM_ENTER";
+ case MHI_CB_LPM_EXIT:
+ return "MHI_CB_LPM_EXIT";
+ case MHI_CB_EE_RDDM:
+ return "MHI_CB_EE_RDDM";
+ case MHI_CB_EE_MISSION_MODE:
+ return "MHI_CB_EE_MISSION_MODE";
+ case MHI_CB_SYS_ERROR:
+ return "MHI_CB_SYS_ERROR";
+ case MHI_CB_FATAL_ERROR:
+ return "MHI_CB_FATAL_ERROR";
+ case MHI_CB_BW_REQ:
+ return "MHI_CB_BW_REQ";
+ default:
+ return "UNKNOWN";
+ }
+}
+
+static void ath11k_mhi_op_status_cb(struct mhi_controller *mhi_cntrl,
+ enum mhi_callback cb)
+{
+ struct ath11k_base *ab = dev_get_drvdata(mhi_cntrl->cntrl_dev);
+
+ ath11k_dbg(ab, ATH11K_DBG_BOOT, "notify status reason %s\n",
+ ath11k_mhi_op_callback_to_str(cb));
+
+ switch (cb) {
+ case MHI_CB_SYS_ERROR:
+ ath11k_warn(ab, "firmware crashed: MHI_CB_SYS_ERROR\n");
+ break;
+ case MHI_CB_EE_RDDM:
+ if (!(test_bit(ATH11K_FLAG_UNREGISTERING, &ab->dev_flags)))
+ queue_work(ab->workqueue_aux, &ab->reset_work);
+ break;
+ default:
+ break;
+ }
+}
+
+static int ath11k_mhi_op_read_reg(struct mhi_controller *mhi_cntrl,
+ void __iomem *addr,
+ u32 *out)
+{
+ *out = readl(addr);
+
+ return 0;
+}
+
+static void ath11k_mhi_op_write_reg(struct mhi_controller *mhi_cntrl,
+ void __iomem *addr,
+ u32 val)
+{
+ writel(val, addr);
+}
+
+static int ath11k_mhi_read_addr_from_dt(struct mhi_controller *mhi_ctrl)
+{
+ struct device_node *np;
+ struct resource res;
+ int ret;
+
+ np = of_find_node_by_type(NULL, "memory");
+ if (!np)
+ return -ENOENT;
+
+ ret = of_address_to_resource(np, 0, &res);
+ of_node_put(np);
+ if (ret)
+ return ret;
+
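+ /* Restrict the MHI DMA range to the reserved memory region found in
+ * the devicetree, skipping its first 16 MiB.
+ */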
+ mhi_ctrl->iova_start = res.start + 0x1000000;
+ mhi_ctrl->iova_stop = res.end;
+
+ return 0;
+}
+
+int ath11k_mhi_register(struct ath11k_pci *ab_pci)
+{
+ struct ath11k_base *ab = ab_pci->ab;
+ struct mhi_controller *mhi_ctrl;
+ struct mhi_controller_config *ath11k_mhi_config;
+ int ret;
+
+ mhi_ctrl = mhi_alloc_controller();
+ if (!mhi_ctrl)
+ return -ENOMEM;
+
+ ath11k_core_create_firmware_path(ab, ATH11K_AMSS_FILE,
+ ab_pci->amss_path,
+ sizeof(ab_pci->amss_path));
+
+ ab_pci->mhi_ctrl = mhi_ctrl;
+ mhi_ctrl->cntrl_dev = ab->dev;
+ mhi_ctrl->fw_image = ab_pci->amss_path;
+ mhi_ctrl->regs = ab->mem;
+ mhi_ctrl->reg_len = ab->mem_len;
+
+ ret = ath11k_mhi_get_msi(ab_pci);
+ if (ret) {
+ ath11k_err(ab, "failed to get msi for mhi\n");
+ goto free_controller;
+ }
+
+ if (!test_bit(ATH11K_FLAG_MULTI_MSI_VECTORS, &ab->dev_flags))
+ mhi_ctrl->irq_flags = IRQF_SHARED | IRQF_NOBALANCING;
+
+ if (test_bit(ATH11K_FLAG_FIXED_MEM_RGN, &ab->dev_flags)) {
+ ret = ath11k_mhi_read_addr_from_dt(mhi_ctrl);
+ if (ret < 0)
+ goto free_controller;
+ } else {
+ mhi_ctrl->iova_start = 0;
+ mhi_ctrl->iova_stop = 0xFFFFFFFF;
+ }
+
+ mhi_ctrl->rddm_size = RDDM_DUMP_SIZE;
+ mhi_ctrl->sbl_size = SZ_512K;
+ mhi_ctrl->seg_len = SZ_512K;
+ mhi_ctrl->fbc_download = true;
+ mhi_ctrl->runtime_get = ath11k_mhi_op_runtime_get;
+ mhi_ctrl->runtime_put = ath11k_mhi_op_runtime_put;
+ mhi_ctrl->status_cb = ath11k_mhi_op_status_cb;
+ mhi_ctrl->read_reg = ath11k_mhi_op_read_reg;
+ mhi_ctrl->write_reg = ath11k_mhi_op_write_reg;
+
+ switch (ab->hw_rev) {
+ case ATH11K_HW_QCN9074_HW10:
+ ath11k_mhi_config = &ath11k_mhi_config_qcn9074;
+ break;
+ case ATH11K_HW_QCA6390_HW20:
+ case ATH11K_HW_WCN6855_HW20:
+ case ATH11K_HW_WCN6855_HW21:
+ ath11k_mhi_config = &ath11k_mhi_config_qca6390;
+ break;
+ default:
+ ath11k_err(ab, "failed assign mhi_config for unknown hw rev %d\n",
+ ab->hw_rev);
+ ret = -EINVAL;
+ goto free_controller;
+ }
+
+ ret = mhi_register_controller(mhi_ctrl, ath11k_mhi_config);
+ if (ret) {
+ ath11k_err(ab, "failed to register to mhi bus, err = %d\n", ret);
+ goto free_controller;
+ }
+
+ return 0;
+
+free_controller:
+ mhi_free_controller(mhi_ctrl);
+ ab_pci->mhi_ctrl = NULL;
+ return ret;
+}
+
+void ath11k_mhi_unregister(struct ath11k_pci *ab_pci)
+{
+ struct mhi_controller *mhi_ctrl = ab_pci->mhi_ctrl;
+
+ mhi_unregister_controller(mhi_ctrl);
+ kfree(mhi_ctrl->irq);
+ mhi_free_controller(mhi_ctrl);
+}
+
+int ath11k_mhi_start(struct ath11k_pci *ab_pci)
+{
+ struct ath11k_base *ab = ab_pci->ab;
+ int ret;
+
+ ab_pci->mhi_ctrl->timeout_ms = MHI_TIMEOUT_DEFAULT_MS;
+
+ ret = mhi_prepare_for_power_up(ab_pci->mhi_ctrl);
+ if (ret) {
+ ath11k_warn(ab, "failed to prepare mhi: %d", ret);
+ return ret;
+ }
+
+ ret = mhi_sync_power_up(ab_pci->mhi_ctrl);
+ if (ret) {
+ ath11k_warn(ab, "failed to power up mhi: %d", ret);
+ return ret;
+ }
+
+ return 0;
+}
+
+void ath11k_mhi_stop(struct ath11k_pci *ab_pci)
+{
+ mhi_power_down(ab_pci->mhi_ctrl, true);
+ mhi_unprepare_after_power_down(ab_pci->mhi_ctrl);
+}
+
+int ath11k_mhi_suspend(struct ath11k_pci *ab_pci)
+{
+ struct ath11k_base *ab = ab_pci->ab;
+ int ret;
+
+ ret = mhi_pm_suspend(ab_pci->mhi_ctrl);
+ if (ret) {
+ ath11k_warn(ab, "failed to suspend mhi: %d", ret);
+ return ret;
+ }
+
+ return 0;
+}
+
+int ath11k_mhi_resume(struct ath11k_pci *ab_pci)
+{
+ struct ath11k_base *ab = ab_pci->ab;
+ int ret;
+
+ /* Force the MHI resume, as some devices like QCA6390 and WCN6855
+ * may not be in the M3 state yet are still functional. So just
+ * ignore the MHI state while resuming.
+ */
+ ret = mhi_pm_resume_force(ab_pci->mhi_ctrl);
+ if (ret) {
+ ath11k_warn(ab, "failed to resume mhi: %d", ret);
+ return ret;
+ }
+
+ return 0;
+}
diff --git a/drivers/net/wireless/ath/ath11k/mhi.h b/drivers/net/wireless/ath/ath11k/mhi.h
new file mode 100644
index 0000000000..8d9f852da6
--- /dev/null
+++ b/drivers/net/wireless/ath/ath11k/mhi.h
@@ -0,0 +1,29 @@
+/* SPDX-License-Identifier: BSD-3-Clause-Clear */
+/*
+ * Copyright (c) 2020 The Linux Foundation. All rights reserved.
+ */
+#ifndef _ATH11K_MHI_H
+#define _ATH11K_MHI_H
+
+#include "pci.h"
+
+#define PCIE_TXVECDB 0x360
+#define PCIE_TXVECSTATUS 0x368
+#define PCIE_RXVECDB 0x394
+#define PCIE_RXVECSTATUS 0x39C
+
+#define MHISTATUS 0x48
+#define MHICTRL 0x38
+#define MHICTRL_RESET_MASK 0x2
+
+int ath11k_mhi_start(struct ath11k_pci *ar_pci);
+void ath11k_mhi_stop(struct ath11k_pci *ar_pci);
+int ath11k_mhi_register(struct ath11k_pci *ar_pci);
+void ath11k_mhi_unregister(struct ath11k_pci *ar_pci);
+void ath11k_mhi_set_mhictrl_reset(struct ath11k_base *ab);
+void ath11k_mhi_clear_vector(struct ath11k_base *ab);
+
+int ath11k_mhi_suspend(struct ath11k_pci *ar_pci);
+int ath11k_mhi_resume(struct ath11k_pci *ar_pci);
+
+#endif
diff --git a/drivers/net/wireless/ath/ath11k/pci.c b/drivers/net/wireless/ath/ath11k/pci.c
new file mode 100644
index 0000000000..09e65c5e55
--- /dev/null
+++ b/drivers/net/wireless/ath/ath11k/pci.c
@@ -0,0 +1,1048 @@
+// SPDX-License-Identifier: BSD-3-Clause-Clear
+/*
+ * Copyright (c) 2019-2020 The Linux Foundation. All rights reserved.
+ * Copyright (c) 2021-2023 Qualcomm Innovation Center, Inc. All rights reserved.
+ */
+
+#include <linux/module.h>
+#include <linux/msi.h>
+#include <linux/pci.h>
+#include <linux/of.h>
+
+#include "pci.h"
+#include "core.h"
+#include "hif.h"
+#include "mhi.h"
+#include "debug.h"
+#include "pcic.h"
+#include "qmi.h"
+
+#define ATH11K_PCI_BAR_NUM 0
+#define ATH11K_PCI_DMA_MASK 32
+
+#define TCSR_SOC_HW_VERSION 0x0224
+#define TCSR_SOC_HW_VERSION_MAJOR_MASK GENMASK(11, 8)
+#define TCSR_SOC_HW_VERSION_MINOR_MASK GENMASK(7, 0)
+
+#define QCA6390_DEVICE_ID 0x1101
+#define QCN9074_DEVICE_ID 0x1104
+#define WCN6855_DEVICE_ID 0x1103
+
+static const struct pci_device_id ath11k_pci_id_table[] = {
+ { PCI_VDEVICE(QCOM, QCA6390_DEVICE_ID) },
+ { PCI_VDEVICE(QCOM, WCN6855_DEVICE_ID) },
+ { PCI_VDEVICE(QCOM, QCN9074_DEVICE_ID) },
+ {0}
+};
+
+MODULE_DEVICE_TABLE(pci, ath11k_pci_id_table);
+
+static int ath11k_pci_bus_wake_up(struct ath11k_base *ab)
+{
+ struct ath11k_pci *ab_pci = ath11k_pci_priv(ab);
+
+ return mhi_device_get_sync(ab_pci->mhi_ctrl->mhi_dev);
+}
+
+static void ath11k_pci_bus_release(struct ath11k_base *ab)
+{
+ struct ath11k_pci *ab_pci = ath11k_pci_priv(ab);
+
+ mhi_device_put(ab_pci->mhi_ctrl->mhi_dev);
+}
+
+static u32 ath11k_pci_get_window_start(struct ath11k_base *ab, u32 offset)
+{
+ if (!ab->hw_params.static_window_map)
+ return ATH11K_PCI_WINDOW_START;
+
+ if ((offset ^ HAL_SEQ_WCSS_UMAC_OFFSET) < ATH11K_PCI_WINDOW_RANGE_MASK)
+ /* if offset lies within DP register range, use 3rd window */
+ return 3 * ATH11K_PCI_WINDOW_START;
+ else if ((offset ^ HAL_SEQ_WCSS_UMAC_CE0_SRC_REG(ab)) <
+ ATH11K_PCI_WINDOW_RANGE_MASK)
+ /* if offset lies within CE register range, use 2nd window */
+ return 2 * ATH11K_PCI_WINDOW_START;
+ else
+ return ATH11K_PCI_WINDOW_START;
+}
+
+static inline void ath11k_pci_select_window(struct ath11k_pci *ab_pci, u32 offset)
+{
+ struct ath11k_base *ab = ab_pci->ab;
+
+ u32 window = FIELD_GET(ATH11K_PCI_WINDOW_VALUE_MASK, offset);
+
+ lockdep_assert_held(&ab_pci->window_lock);
+
+ if (window != ab_pci->register_window) {
+ iowrite32(ATH11K_PCI_WINDOW_ENABLE_BIT | window,
+ ab->mem + ATH11K_PCI_WINDOW_REG_ADDRESS);
+ ioread32(ab->mem + ATH11K_PCI_WINDOW_REG_ADDRESS);
+ ab_pci->register_window = window;
+ }
+}
+
+static void
+ath11k_pci_window_write32(struct ath11k_base *ab, u32 offset, u32 value)
+{
+ struct ath11k_pci *ab_pci = ath11k_pci_priv(ab);
+ u32 window_start;
+
+ window_start = ath11k_pci_get_window_start(ab, offset);
+
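+ /* Only the default (dynamic) window must be re-selected under
+ * window_lock; the static CE/DP windows are permanently mapped.
+ */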
+ if (window_start == ATH11K_PCI_WINDOW_START) {
+ spin_lock_bh(&ab_pci->window_lock);
+ ath11k_pci_select_window(ab_pci, offset);
+ iowrite32(value, ab->mem + window_start +
+ (offset & ATH11K_PCI_WINDOW_RANGE_MASK));
+ spin_unlock_bh(&ab_pci->window_lock);
+ } else {
+ iowrite32(value, ab->mem + window_start +
+ (offset & ATH11K_PCI_WINDOW_RANGE_MASK));
+ }
+}
+
+static u32 ath11k_pci_window_read32(struct ath11k_base *ab, u32 offset)
+{
+ struct ath11k_pci *ab_pci = ath11k_pci_priv(ab);
+ u32 window_start, val;
+
+ window_start = ath11k_pci_get_window_start(ab, offset);
+
+ if (window_start == ATH11K_PCI_WINDOW_START) {
+ spin_lock_bh(&ab_pci->window_lock);
+ ath11k_pci_select_window(ab_pci, offset);
+ val = ioread32(ab->mem + window_start +
+ (offset & ATH11K_PCI_WINDOW_RANGE_MASK));
+ spin_unlock_bh(&ab_pci->window_lock);
+ } else {
+ val = ioread32(ab->mem + window_start +
+ (offset & ATH11K_PCI_WINDOW_RANGE_MASK));
+ }
+
+ return val;
+}
+
+int ath11k_pci_get_msi_irq(struct ath11k_base *ab, unsigned int vector)
+{
+ struct pci_dev *pci_dev = to_pci_dev(ab->dev);
+
+ return pci_irq_vector(pci_dev, vector);
+}
+
+static const struct ath11k_pci_ops ath11k_pci_ops_qca6390 = {
+ .wakeup = ath11k_pci_bus_wake_up,
+ .release = ath11k_pci_bus_release,
+ .get_msi_irq = ath11k_pci_get_msi_irq,
+ .window_write32 = ath11k_pci_window_write32,
+ .window_read32 = ath11k_pci_window_read32,
+};
+
+static const struct ath11k_pci_ops ath11k_pci_ops_qcn9074 = {
+ .wakeup = NULL,
+ .release = NULL,
+ .get_msi_irq = ath11k_pci_get_msi_irq,
+ .window_write32 = ath11k_pci_window_write32,
+ .window_read32 = ath11k_pci_window_read32,
+};
+
+static const struct ath11k_msi_config msi_config_one_msi = {
+ .total_vectors = 1,
+ .total_users = 4,
+ .users = (struct ath11k_msi_user[]) {
+ { .name = "MHI", .num_vectors = 3, .base_vector = 0 },
+ { .name = "CE", .num_vectors = 1, .base_vector = 0 },
+ { .name = "WAKE", .num_vectors = 1, .base_vector = 0 },
+ { .name = "DP", .num_vectors = 1, .base_vector = 0 },
+ },
+};
+
+static inline void ath11k_pci_select_static_window(struct ath11k_pci *ab_pci)
+{
+ u32 umac_window;
+ u32 ce_window;
+ u32 window;
+
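+ /* Encode both static windows in a single register write: the UMAC
+ * window value goes in bits 17:12 and the CE window value in bits
+ * 11:6, while the low bits keep the default window.
+ */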
+ umac_window = FIELD_GET(ATH11K_PCI_WINDOW_VALUE_MASK, HAL_SEQ_WCSS_UMAC_OFFSET);
+ ce_window = FIELD_GET(ATH11K_PCI_WINDOW_VALUE_MASK, HAL_CE_WFSS_CE_REG_BASE);
+ window = (umac_window << 12) | (ce_window << 6);
+
+ iowrite32(ATH11K_PCI_WINDOW_ENABLE_BIT | window,
+ ab_pci->ab->mem + ATH11K_PCI_WINDOW_REG_ADDRESS);
+}
+
+static void ath11k_pci_soc_global_reset(struct ath11k_base *ab)
+{
+ u32 val, delay;
+
+ val = ath11k_pcic_read32(ab, PCIE_SOC_GLOBAL_RESET);
+
+ val |= PCIE_SOC_GLOBAL_RESET_V;
+
+ ath11k_pcic_write32(ab, PCIE_SOC_GLOBAL_RESET, val);
+
+ /* TODO: exact time to sleep is uncertain */
+ delay = 10;
+ mdelay(delay);
+
+ /* Need to toggle V bit back otherwise stuck in reset status */
+ val &= ~PCIE_SOC_GLOBAL_RESET_V;
+
+ ath11k_pcic_write32(ab, PCIE_SOC_GLOBAL_RESET, val);
+
+ mdelay(delay);
+
+ val = ath11k_pcic_read32(ab, PCIE_SOC_GLOBAL_RESET);
+ if (val == 0xffffffff)
+ ath11k_warn(ab, "link down error during global reset\n");
+}
+
+static void ath11k_pci_clear_dbg_registers(struct ath11k_base *ab)
+{
+ u32 val;
+
+ /* read cookie */
+ val = ath11k_pcic_read32(ab, PCIE_Q6_COOKIE_ADDR);
+ ath11k_dbg(ab, ATH11K_DBG_PCI, "pcie_q6_cookie_addr 0x%x\n", val);
+
+ val = ath11k_pcic_read32(ab, WLAON_WARM_SW_ENTRY);
+ ath11k_dbg(ab, ATH11K_DBG_PCI, "wlaon_warm_sw_entry 0x%x\n", val);
+
+ /* TODO: exact time to sleep is uncertain */
+ mdelay(10);
+
+	/* Write 0 to WLAON_WARM_SW_ENTRY to prevent Q6 from
+	 * continuing the warm boot path and entering a dead loop.
+	 */
+ ath11k_pcic_write32(ab, WLAON_WARM_SW_ENTRY, 0);
+ mdelay(10);
+
+ val = ath11k_pcic_read32(ab, WLAON_WARM_SW_ENTRY);
+ ath11k_dbg(ab, ATH11K_DBG_PCI, "wlaon_warm_sw_entry 0x%x\n", val);
+
+	/* This is a read-to-clear register; reading it clears it and
+	 * prevents Q6 from entering a wrong code path.
+	 */
+ val = ath11k_pcic_read32(ab, WLAON_SOC_RESET_CAUSE_REG);
+ ath11k_dbg(ab, ATH11K_DBG_PCI, "soc reset cause %d\n", val);
+}
+
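+/* Read-modify-write a PCIe PHY register and poll until the masked
+ * value sticks, retrying up to 10 times with a 2 ms delay in between.
+ */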
+static int ath11k_pci_set_link_reg(struct ath11k_base *ab,
+ u32 offset, u32 value, u32 mask)
+{
+ u32 v;
+ int i;
+
+ v = ath11k_pcic_read32(ab, offset);
+ if ((v & mask) == value)
+ return 0;
+
+ for (i = 0; i < 10; i++) {
+ ath11k_pcic_write32(ab, offset, (v & ~mask) | value);
+
+ v = ath11k_pcic_read32(ab, offset);
+ if ((v & mask) == value)
+ return 0;
+
+ mdelay(2);
+ }
+
+ ath11k_warn(ab, "failed to set pcie link register 0x%08x: 0x%08x != 0x%08x\n",
+ offset, v & mask, value);
+
+ return -ETIMEDOUT;
+}
+
+static int ath11k_pci_fix_l1ss(struct ath11k_base *ab)
+{
+ int ret;
+
+ ret = ath11k_pci_set_link_reg(ab,
+ PCIE_QSERDES_COM_SYSCLK_EN_SEL_REG(ab),
+ PCIE_QSERDES_COM_SYSCLK_EN_SEL_VAL,
+ PCIE_QSERDES_COM_SYSCLK_EN_SEL_MSK);
+ if (ret) {
+ ath11k_warn(ab, "failed to set sysclk: %d\n", ret);
+ return ret;
+ }
+
+ ret = ath11k_pci_set_link_reg(ab,
+ PCIE_PCS_OSC_DTCT_CONFIG1_REG(ab),
+ PCIE_PCS_OSC_DTCT_CONFIG1_VAL,
+ PCIE_PCS_OSC_DTCT_CONFIG_MSK);
+ if (ret) {
+ ath11k_warn(ab, "failed to set dtct config1 error: %d\n", ret);
+ return ret;
+ }
+
+ ret = ath11k_pci_set_link_reg(ab,
+ PCIE_PCS_OSC_DTCT_CONFIG2_REG(ab),
+ PCIE_PCS_OSC_DTCT_CONFIG2_VAL,
+ PCIE_PCS_OSC_DTCT_CONFIG_MSK);
+ if (ret) {
+ ath11k_warn(ab, "failed to set dtct config2: %d\n", ret);
+ return ret;
+ }
+
+ ret = ath11k_pci_set_link_reg(ab,
+ PCIE_PCS_OSC_DTCT_CONFIG4_REG(ab),
+ PCIE_PCS_OSC_DTCT_CONFIG4_VAL,
+ PCIE_PCS_OSC_DTCT_CONFIG_MSK);
+ if (ret) {
+ ath11k_warn(ab, "failed to set dtct config4: %d\n", ret);
+ return ret;
+ }
+
+ return 0;
+}
+
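+/* Write the LTSSM parameter until it takes (the link can be flaky
+ * right after a hot reset), then set the GCC PCIe hot-reset bit.
+ */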
+static void ath11k_pci_enable_ltssm(struct ath11k_base *ab)
+{
+ u32 val;
+ int i;
+
+ val = ath11k_pcic_read32(ab, PCIE_PCIE_PARF_LTSSM);
+
+	/* The PCIe link can be very unstable after the hot reset */
+ for (i = 0; val != PARM_LTSSM_VALUE && i < 5; i++) {
+ if (val == 0xffffffff)
+ mdelay(5);
+
+ ath11k_pcic_write32(ab, PCIE_PCIE_PARF_LTSSM, PARM_LTSSM_VALUE);
+ val = ath11k_pcic_read32(ab, PCIE_PCIE_PARF_LTSSM);
+ }
+
+ ath11k_dbg(ab, ATH11K_DBG_PCI, "ltssm 0x%x\n", val);
+
+ val = ath11k_pcic_read32(ab, GCC_GCC_PCIE_HOT_RST);
+ val |= GCC_GCC_PCIE_HOT_RST_VAL;
+ ath11k_pcic_write32(ab, GCC_GCC_PCIE_HOT_RST, val);
+ val = ath11k_pcic_read32(ab, GCC_GCC_PCIE_HOT_RST);
+
+ ath11k_dbg(ab, ATH11K_DBG_PCI, "pcie_hot_rst 0x%x\n", val);
+
+ mdelay(5);
+}
+
+static void ath11k_pci_clear_all_intrs(struct ath11k_base *ab)
+{
+	/* This is a workaround for PCIe hot reset. On receiving a hot
+	 * reset the target latches an interrupt, so when the SBL is
+	 * downloaded again it unmasks interrupts, receives the stale
+	 * one and crashes immediately.
+	 */
+ ath11k_pcic_write32(ab, PCIE_PCIE_INT_ALL_CLEAR, PCIE_INT_CLEAR_ALL);
+}
+
+static void ath11k_pci_set_wlaon_pwr_ctrl(struct ath11k_base *ab)
+{
+ u32 val;
+
+ val = ath11k_pcic_read32(ab, WLAON_QFPROM_PWR_CTRL_REG);
+ val &= ~QFPROM_PWR_CTRL_VDD4BLOW_MASK;
+ ath11k_pcic_write32(ab, WLAON_QFPROM_PWR_CTRL_REG, val);
+}
+
+static void ath11k_pci_force_wake(struct ath11k_base *ab)
+{
+ ath11k_pcic_write32(ab, PCIE_SOC_WAKE_PCIE_LOCAL_REG, 1);
+ mdelay(5);
+}
+
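+/* Software reset sequence: on power up, first recover the PCIe link
+ * (LTSSM, stale interrupts, power control, optional L1ss fixup), then
+ * clear the MHI vector and debug state before pulling global reset.
+ */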
+static void ath11k_pci_sw_reset(struct ath11k_base *ab, bool power_on)
+{
+ mdelay(100);
+
+ if (power_on) {
+ ath11k_pci_enable_ltssm(ab);
+ ath11k_pci_clear_all_intrs(ab);
+ ath11k_pci_set_wlaon_pwr_ctrl(ab);
+ if (ab->hw_params.fix_l1ss)
+ ath11k_pci_fix_l1ss(ab);
+ }
+
+ ath11k_mhi_clear_vector(ab);
+ ath11k_pci_clear_dbg_registers(ab);
+ ath11k_pci_soc_global_reset(ab);
+ ath11k_mhi_set_mhictrl_reset(ab);
+}
+
+static void ath11k_pci_init_qmi_ce_config(struct ath11k_base *ab)
+{
+ struct ath11k_qmi_ce_cfg *cfg = &ab->qmi.ce_cfg;
+
+ cfg->tgt_ce = ab->hw_params.target_ce_config;
+ cfg->tgt_ce_len = ab->hw_params.target_ce_count;
+
+ cfg->svc_to_ce_map = ab->hw_params.svc_to_ce_map;
+ cfg->svc_to_ce_map_len = ab->hw_params.svc_to_ce_map_len;
+ ab->qmi.service_ins_id = ab->hw_params.qmi_service_ins_id;
+
+ ath11k_ce_get_shadow_config(ab, &cfg->shadow_reg_v2,
+ &cfg->shadow_reg_v2_len);
+}
+
+static void ath11k_pci_msi_config(struct ath11k_pci *ab_pci, bool enable)
+{
+ struct pci_dev *dev = ab_pci->pdev;
+ u16 control;
+
+ pci_read_config_word(dev, dev->msi_cap + PCI_MSI_FLAGS, &control);
+
+ if (enable)
+ control |= PCI_MSI_FLAGS_ENABLE;
+ else
+ control &= ~PCI_MSI_FLAGS_ENABLE;
+
+ pci_write_config_word(dev, dev->msi_cap + PCI_MSI_FLAGS, control);
+}
+
+static void ath11k_pci_msi_enable(struct ath11k_pci *ab_pci)
+{
+ ath11k_pci_msi_config(ab_pci, true);
+}
+
+static void ath11k_pci_msi_disable(struct ath11k_pci *ab_pci)
+{
+ ath11k_pci_msi_config(ab_pci, false);
+}
+
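+/* Try to allocate the full per-chip MSI vector set first; if that
+ * fails, fall back to a single shared vector and switch to the
+ * one-MSI configuration.
+ */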
+static int ath11k_pci_alloc_msi(struct ath11k_pci *ab_pci)
+{
+ struct ath11k_base *ab = ab_pci->ab;
+ const struct ath11k_msi_config *msi_config = ab->pci.msi.config;
+ struct pci_dev *pci_dev = ab_pci->pdev;
+ struct msi_desc *msi_desc;
+ int num_vectors;
+ int ret;
+
+ num_vectors = pci_alloc_irq_vectors(pci_dev,
+ msi_config->total_vectors,
+ msi_config->total_vectors,
+ PCI_IRQ_MSI);
+ if (num_vectors == msi_config->total_vectors) {
+ set_bit(ATH11K_FLAG_MULTI_MSI_VECTORS, &ab->dev_flags);
+ } else {
+ num_vectors = pci_alloc_irq_vectors(ab_pci->pdev,
+ 1,
+ 1,
+ PCI_IRQ_MSI);
+ if (num_vectors < 0) {
+ ret = -EINVAL;
+ goto reset_msi_config;
+ }
+ clear_bit(ATH11K_FLAG_MULTI_MSI_VECTORS, &ab->dev_flags);
+ ab->pci.msi.config = &msi_config_one_msi;
+ ath11k_dbg(ab, ATH11K_DBG_PCI, "request one msi vector\n");
+ }
+ ath11k_info(ab, "MSI vectors: %d\n", num_vectors);
+
+ ath11k_pci_msi_disable(ab_pci);
+
+ msi_desc = irq_get_msi_desc(ab_pci->pdev->irq);
+ if (!msi_desc) {
+ ath11k_err(ab, "msi_desc is NULL!\n");
+ ret = -EINVAL;
+ goto free_msi_vector;
+ }
+
+ ab->pci.msi.ep_base_data = msi_desc->msg.data;
+
+ pci_read_config_dword(pci_dev, pci_dev->msi_cap + PCI_MSI_ADDRESS_LO,
+ &ab->pci.msi.addr_lo);
+
+ if (msi_desc->pci.msi_attrib.is_64) {
+ pci_read_config_dword(pci_dev, pci_dev->msi_cap + PCI_MSI_ADDRESS_HI,
+ &ab->pci.msi.addr_hi);
+ } else {
+ ab->pci.msi.addr_hi = 0;
+ }
+
+ ath11k_dbg(ab, ATH11K_DBG_PCI, "msi base data is %d\n", ab->pci.msi.ep_base_data);
+
+ return 0;
+
+free_msi_vector:
+ pci_free_irq_vectors(ab_pci->pdev);
+
+reset_msi_config:
+ return ret;
+}
+
+static void ath11k_pci_free_msi(struct ath11k_pci *ab_pci)
+{
+ pci_free_irq_vectors(ab_pci->pdev);
+}
+
+static int ath11k_pci_config_msi_data(struct ath11k_pci *ab_pci)
+{
+ struct msi_desc *msi_desc;
+
+ msi_desc = irq_get_msi_desc(ab_pci->pdev->irq);
+ if (!msi_desc) {
+ ath11k_err(ab_pci->ab, "msi_desc is NULL!\n");
+ pci_free_irq_vectors(ab_pci->pdev);
+ return -EINVAL;
+ }
+
+ ab_pci->ab->pci.msi.ep_base_data = msi_desc->msg.data;
+
+ ath11k_dbg(ab_pci->ab, ATH11K_DBG_PCI, "after request_irq msi_ep_base_data %d\n",
+ ab_pci->ab->pci.msi.ep_base_data);
+
+ return 0;
+}
+
+static int ath11k_pci_claim(struct ath11k_pci *ab_pci, struct pci_dev *pdev)
+{
+ struct ath11k_base *ab = ab_pci->ab;
+ u16 device_id;
+ int ret = 0;
+
+ pci_read_config_word(pdev, PCI_DEVICE_ID, &device_id);
+ if (device_id != ab_pci->dev_id) {
+ ath11k_err(ab, "pci device id mismatch: 0x%x 0x%x\n",
+ device_id, ab_pci->dev_id);
+ ret = -EIO;
+ goto out;
+ }
+
+ ret = pci_assign_resource(pdev, ATH11K_PCI_BAR_NUM);
+ if (ret) {
+ ath11k_err(ab, "failed to assign pci resource: %d\n", ret);
+ goto out;
+ }
+
+ ret = pci_enable_device(pdev);
+ if (ret) {
+ ath11k_err(ab, "failed to enable pci device: %d\n", ret);
+ goto out;
+ }
+
+ ret = pci_request_region(pdev, ATH11K_PCI_BAR_NUM, "ath11k_pci");
+ if (ret) {
+ ath11k_err(ab, "failed to request pci region: %d\n", ret);
+ goto disable_device;
+ }
+
+ ret = dma_set_mask_and_coherent(&pdev->dev,
+ DMA_BIT_MASK(ATH11K_PCI_DMA_MASK));
+ if (ret) {
+ ath11k_err(ab, "failed to set pci dma mask to %d: %d\n",
+ ATH11K_PCI_DMA_MASK, ret);
+ goto release_region;
+ }
+
+ pci_set_master(pdev);
+
+ ab->mem_len = pci_resource_len(pdev, ATH11K_PCI_BAR_NUM);
+ ab->mem = pci_iomap(pdev, ATH11K_PCI_BAR_NUM, 0);
+ if (!ab->mem) {
+ ath11k_err(ab, "failed to map pci bar %d\n", ATH11K_PCI_BAR_NUM);
+ ret = -EIO;
+ goto release_region;
+ }
+
+ ab->mem_ce = ab->mem;
+
+ ath11k_dbg(ab, ATH11K_DBG_BOOT, "pci_mem 0x%p\n", ab->mem);
+ return 0;
+
+release_region:
+ pci_release_region(pdev, ATH11K_PCI_BAR_NUM);
+disable_device:
+ pci_disable_device(pdev);
+out:
+ return ret;
+}
+
+static void ath11k_pci_free_region(struct ath11k_pci *ab_pci)
+{
+ struct ath11k_base *ab = ab_pci->ab;
+ struct pci_dev *pci_dev = ab_pci->pdev;
+
+ pci_iounmap(pci_dev, ab->mem);
+ ab->mem = NULL;
+ pci_release_region(pci_dev, ATH11K_PCI_BAR_NUM);
+ if (pci_is_enabled(pci_dev))
+ pci_disable_device(pci_dev);
+}
+
+static void ath11k_pci_aspm_disable(struct ath11k_pci *ab_pci)
+{
+ struct ath11k_base *ab = ab_pci->ab;
+
+ pcie_capability_read_word(ab_pci->pdev, PCI_EXP_LNKCTL,
+ &ab_pci->link_ctl);
+
+ ath11k_dbg(ab, ATH11K_DBG_PCI, "link_ctl 0x%04x L0s %d L1 %d\n",
+ ab_pci->link_ctl,
+ u16_get_bits(ab_pci->link_ctl, PCI_EXP_LNKCTL_ASPM_L0S),
+ u16_get_bits(ab_pci->link_ctl, PCI_EXP_LNKCTL_ASPM_L1));
+
+ /* disable L0s and L1 */
+ pcie_capability_clear_word(ab_pci->pdev, PCI_EXP_LNKCTL,
+ PCI_EXP_LNKCTL_ASPMC);
+
+ set_bit(ATH11K_PCI_ASPM_RESTORE, &ab_pci->flags);
+}
+
+static void ath11k_pci_aspm_restore(struct ath11k_pci *ab_pci)
+{
+ if (test_and_clear_bit(ATH11K_PCI_ASPM_RESTORE, &ab_pci->flags))
+ pcie_capability_clear_and_set_word(ab_pci->pdev, PCI_EXP_LNKCTL,
+ PCI_EXP_LNKCTL_ASPMC,
+ ab_pci->link_ctl &
+ PCI_EXP_LNKCTL_ASPMC);
+}
+
+static int ath11k_pci_power_up(struct ath11k_base *ab)
+{
+ struct ath11k_pci *ab_pci = ath11k_pci_priv(ab);
+ int ret;
+
+ ab_pci->register_window = 0;
+ clear_bit(ATH11K_FLAG_DEVICE_INIT_DONE, &ab->dev_flags);
+ ath11k_pci_sw_reset(ab_pci->ab, true);
+
+ /* Disable ASPM during firmware download due to problems switching
+ * to AMSS state.
+ */
+ ath11k_pci_aspm_disable(ab_pci);
+
+ ath11k_pci_msi_enable(ab_pci);
+
+ ret = ath11k_mhi_start(ab_pci);
+ if (ret) {
+ ath11k_err(ab, "failed to start mhi: %d\n", ret);
+ return ret;
+ }
+
+ if (ab->hw_params.static_window_map)
+ ath11k_pci_select_static_window(ab_pci);
+
+ return 0;
+}
+
+static void ath11k_pci_power_down(struct ath11k_base *ab)
+{
+ struct ath11k_pci *ab_pci = ath11k_pci_priv(ab);
+
+ /* restore aspm in case firmware bootup fails */
+ ath11k_pci_aspm_restore(ab_pci);
+
+ ath11k_pci_force_wake(ab_pci->ab);
+
+ ath11k_pci_msi_disable(ab_pci);
+
+ ath11k_mhi_stop(ab_pci);
+ clear_bit(ATH11K_FLAG_DEVICE_INIT_DONE, &ab->dev_flags);
+ ath11k_pci_sw_reset(ab_pci->ab, false);
+}
+
+static int ath11k_pci_hif_suspend(struct ath11k_base *ab)
+{
+ struct ath11k_pci *ar_pci = ath11k_pci_priv(ab);
+
+ return ath11k_mhi_suspend(ar_pci);
+}
+
+static int ath11k_pci_hif_resume(struct ath11k_base *ab)
+{
+ struct ath11k_pci *ar_pci = ath11k_pci_priv(ab);
+
+ return ath11k_mhi_resume(ar_pci);
+}
+
+static void ath11k_pci_hif_ce_irq_enable(struct ath11k_base *ab)
+{
+ ath11k_pcic_ce_irqs_enable(ab);
+}
+
+static void ath11k_pci_hif_ce_irq_disable(struct ath11k_base *ab)
+{
+ ath11k_pcic_ce_irq_disable_sync(ab);
+}
+
+static int ath11k_pci_start(struct ath11k_base *ab)
+{
+ struct ath11k_pci *ab_pci = ath11k_pci_priv(ab);
+
+ /* TODO: for now don't restore ASPM in case of single MSI
+ * vector as MHI register reading in M2 causes system hang.
+ */
+ if (test_bit(ATH11K_FLAG_MULTI_MSI_VECTORS, &ab->dev_flags))
+ ath11k_pci_aspm_restore(ab_pci);
+ else
+ ath11k_info(ab, "leaving PCI ASPM disabled to avoid MHI M2 problems\n");
+
+ ath11k_pcic_start(ab);
+
+ return 0;
+}
+
+static const struct ath11k_hif_ops ath11k_pci_hif_ops = {
+ .start = ath11k_pci_start,
+ .stop = ath11k_pcic_stop,
+ .read32 = ath11k_pcic_read32,
+ .write32 = ath11k_pcic_write32,
+ .read = ath11k_pcic_read,
+ .power_down = ath11k_pci_power_down,
+ .power_up = ath11k_pci_power_up,
+ .suspend = ath11k_pci_hif_suspend,
+ .resume = ath11k_pci_hif_resume,
+ .irq_enable = ath11k_pcic_ext_irq_enable,
+ .irq_disable = ath11k_pcic_ext_irq_disable,
+ .get_msi_address = ath11k_pcic_get_msi_address,
+ .get_user_msi_vector = ath11k_pcic_get_user_msi_assignment,
+ .map_service_to_pipe = ath11k_pcic_map_service_to_pipe,
+ .ce_irq_enable = ath11k_pci_hif_ce_irq_enable,
+ .ce_irq_disable = ath11k_pci_hif_ce_irq_disable,
+ .get_ce_msi_idx = ath11k_pcic_get_ce_msi_idx,
+};
+
+static void ath11k_pci_read_hw_version(struct ath11k_base *ab, u32 *major, u32 *minor)
+{
+ u32 soc_hw_version;
+
+ soc_hw_version = ath11k_pcic_read32(ab, TCSR_SOC_HW_VERSION);
+ *major = FIELD_GET(TCSR_SOC_HW_VERSION_MAJOR_MASK,
+ soc_hw_version);
+ *minor = FIELD_GET(TCSR_SOC_HW_VERSION_MINOR_MASK,
+ soc_hw_version);
+
+ ath11k_dbg(ab, ATH11K_DBG_PCI, "tcsr_soc_hw_version major %d minor %d\n",
+ *major, *minor);
+}
+
+static int ath11k_pci_set_irq_affinity_hint(struct ath11k_pci *ab_pci,
+ const struct cpumask *m)
+{
+ if (test_bit(ATH11K_FLAG_MULTI_MSI_VECTORS, &ab_pci->ab->dev_flags))
+ return 0;
+
+ return irq_set_affinity_hint(ab_pci->pdev->irq, m);
+}
+
+static int ath11k_pci_probe(struct pci_dev *pdev,
+ const struct pci_device_id *pci_dev)
+{
+ struct ath11k_base *ab;
+ struct ath11k_pci *ab_pci;
+ u32 soc_hw_version_major, soc_hw_version_minor, addr;
+ const struct ath11k_pci_ops *pci_ops;
+ int ret;
+
+ ab = ath11k_core_alloc(&pdev->dev, sizeof(*ab_pci), ATH11K_BUS_PCI);
+
+ if (!ab) {
+ dev_err(&pdev->dev, "failed to allocate ath11k base\n");
+ return -ENOMEM;
+ }
+
+ ab->dev = &pdev->dev;
+ pci_set_drvdata(pdev, ab);
+ ab_pci = ath11k_pci_priv(ab);
+ ab_pci->dev_id = pci_dev->device;
+ ab_pci->ab = ab;
+ ab_pci->pdev = pdev;
+ ab->hif.ops = &ath11k_pci_hif_ops;
+ ab->fw_mode = ATH11K_FIRMWARE_MODE_NORMAL;
+ spin_lock_init(&ab_pci->window_lock);
+
+	/* Set fixed_mem_region to true for platforms that support reserved
+	 * memory from DT. If memory is reserved in DT for the FW, the
+	 * ath11k driver need not allocate it.
+	 */
+ ret = of_property_read_u32(ab->dev->of_node, "memory-region", &addr);
+ if (!ret)
+ set_bit(ATH11K_FLAG_FIXED_MEM_RGN, &ab->dev_flags);
+
+ ret = ath11k_pci_claim(ab_pci, pdev);
+ if (ret) {
+ ath11k_err(ab, "failed to claim device: %d\n", ret);
+ goto err_free_core;
+ }
+
+ ath11k_dbg(ab, ATH11K_DBG_BOOT, "pci probe %04x:%04x %04x:%04x\n",
+ pdev->vendor, pdev->device,
+ pdev->subsystem_vendor, pdev->subsystem_device);
+
+ ab->id.vendor = pdev->vendor;
+ ab->id.device = pdev->device;
+ ab->id.subsystem_vendor = pdev->subsystem_vendor;
+ ab->id.subsystem_device = pdev->subsystem_device;
+
+ switch (pci_dev->device) {
+ case QCA6390_DEVICE_ID:
+ ath11k_pci_read_hw_version(ab, &soc_hw_version_major,
+ &soc_hw_version_minor);
+ switch (soc_hw_version_major) {
+ case 2:
+ ab->hw_rev = ATH11K_HW_QCA6390_HW20;
+ break;
+ default:
+ dev_err(&pdev->dev, "Unsupported QCA6390 SOC hardware version: %d %d\n",
+ soc_hw_version_major, soc_hw_version_minor);
+ ret = -EOPNOTSUPP;
+ goto err_pci_free_region;
+ }
+
+ pci_ops = &ath11k_pci_ops_qca6390;
+ break;
+ case QCN9074_DEVICE_ID:
+ pci_ops = &ath11k_pci_ops_qcn9074;
+ ab->hw_rev = ATH11K_HW_QCN9074_HW10;
+ break;
+ case WCN6855_DEVICE_ID:
+ ab->id.bdf_search = ATH11K_BDF_SEARCH_BUS_AND_BOARD;
+ ath11k_pci_read_hw_version(ab, &soc_hw_version_major,
+ &soc_hw_version_minor);
+ switch (soc_hw_version_major) {
+ case 2:
+ switch (soc_hw_version_minor) {
+ case 0x00:
+ case 0x01:
+ ab->hw_rev = ATH11K_HW_WCN6855_HW20;
+ break;
+ case 0x10:
+ case 0x11:
+ ab->hw_rev = ATH11K_HW_WCN6855_HW21;
+ break;
+ default:
+ goto unsupported_wcn6855_soc;
+ }
+ break;
+ default:
+unsupported_wcn6855_soc:
+ dev_err(&pdev->dev, "Unsupported WCN6855 SOC hardware version: %d %d\n",
+ soc_hw_version_major, soc_hw_version_minor);
+ ret = -EOPNOTSUPP;
+ goto err_pci_free_region;
+ }
+
+ pci_ops = &ath11k_pci_ops_qca6390;
+ break;
+ default:
+ dev_err(&pdev->dev, "Unknown PCI device found: 0x%x\n",
+ pci_dev->device);
+ ret = -EOPNOTSUPP;
+ goto err_pci_free_region;
+ }
+
+ ret = ath11k_pcic_register_pci_ops(ab, pci_ops);
+ if (ret) {
+ ath11k_err(ab, "failed to register PCI ops: %d\n", ret);
+ goto err_pci_free_region;
+ }
+
+ ret = ath11k_pcic_init_msi_config(ab);
+ if (ret) {
+ ath11k_err(ab, "failed to init msi config: %d\n", ret);
+ goto err_pci_free_region;
+ }
+
+ ret = ath11k_pci_alloc_msi(ab_pci);
+ if (ret) {
+ ath11k_err(ab, "failed to enable msi: %d\n", ret);
+ goto err_pci_free_region;
+ }
+
+ ret = ath11k_core_pre_init(ab);
+ if (ret)
+ goto err_pci_disable_msi;
+
+ ret = ath11k_pci_set_irq_affinity_hint(ab_pci, cpumask_of(0));
+ if (ret) {
+ ath11k_err(ab, "failed to set irq affinity %d\n", ret);
+ goto err_pci_disable_msi;
+ }
+
+ ret = ath11k_mhi_register(ab_pci);
+ if (ret) {
+ ath11k_err(ab, "failed to register mhi: %d\n", ret);
+ goto err_irq_affinity_cleanup;
+ }
+
+ ret = ath11k_hal_srng_init(ab);
+ if (ret)
+ goto err_mhi_unregister;
+
+ ret = ath11k_ce_alloc_pipes(ab);
+ if (ret) {
+ ath11k_err(ab, "failed to allocate ce pipes: %d\n", ret);
+ goto err_hal_srng_deinit;
+ }
+
+ ath11k_pci_init_qmi_ce_config(ab);
+
+ ret = ath11k_pcic_config_irq(ab);
+ if (ret) {
+ ath11k_err(ab, "failed to config irq: %d\n", ret);
+ goto err_ce_free;
+ }
+
+	/* The kernel may allocate a dummy vector before request_irq and
+	 * then allocate a real vector when request_irq is called.
+	 * So get msi_data here again to avoid a spurious interrupt,
+	 * as msi_data will be configured to the srngs.
+	 */
+ ret = ath11k_pci_config_msi_data(ab_pci);
+ if (ret) {
+ ath11k_err(ab, "failed to config msi_data: %d\n", ret);
+ goto err_free_irq;
+ }
+
+ ret = ath11k_core_init(ab);
+ if (ret) {
+ ath11k_err(ab, "failed to init core: %d\n", ret);
+ goto err_free_irq;
+ }
+ ath11k_qmi_fwreset_from_cold_boot(ab);
+ return 0;
+
+err_free_irq:
+ ath11k_pcic_free_irq(ab);
+
+err_ce_free:
+ ath11k_ce_free_pipes(ab);
+
+err_hal_srng_deinit:
+ ath11k_hal_srng_deinit(ab);
+
+err_mhi_unregister:
+ ath11k_mhi_unregister(ab_pci);
+
+err_irq_affinity_cleanup:
+ ath11k_pci_set_irq_affinity_hint(ab_pci, NULL);
+
+err_pci_disable_msi:
+ ath11k_pci_free_msi(ab_pci);
+
+err_pci_free_region:
+ ath11k_pci_free_region(ab_pci);
+
+err_free_core:
+ ath11k_core_free(ab);
+
+ return ret;
+}
+
+static void ath11k_pci_remove(struct pci_dev *pdev)
+{
+ struct ath11k_base *ab = pci_get_drvdata(pdev);
+ struct ath11k_pci *ab_pci = ath11k_pci_priv(ab);
+
+ ath11k_pci_set_irq_affinity_hint(ab_pci, NULL);
+
+ if (test_bit(ATH11K_FLAG_QMI_FAIL, &ab->dev_flags)) {
+ ath11k_pci_power_down(ab);
+ ath11k_debugfs_soc_destroy(ab);
+ ath11k_qmi_deinit_service(ab);
+ goto qmi_fail;
+ }
+
+ set_bit(ATH11K_FLAG_UNREGISTERING, &ab->dev_flags);
+
+ ath11k_core_deinit(ab);
+
+qmi_fail:
+ ath11k_mhi_unregister(ab_pci);
+
+ ath11k_pcic_free_irq(ab);
+ ath11k_pci_free_msi(ab_pci);
+ ath11k_pci_free_region(ab_pci);
+
+ ath11k_hal_srng_deinit(ab);
+ ath11k_ce_free_pipes(ab);
+ ath11k_core_free(ab);
+}
+
+static void ath11k_pci_shutdown(struct pci_dev *pdev)
+{
+ struct ath11k_base *ab = pci_get_drvdata(pdev);
+ struct ath11k_pci *ab_pci = ath11k_pci_priv(ab);
+
+ ath11k_pci_set_irq_affinity_hint(ab_pci, NULL);
+ ath11k_pci_power_down(ab);
+}
+
+static __maybe_unused int ath11k_pci_pm_suspend(struct device *dev)
+{
+ struct ath11k_base *ab = dev_get_drvdata(dev);
+ int ret;
+
+ if (test_bit(ATH11K_FLAG_QMI_FAIL, &ab->dev_flags)) {
+ ath11k_dbg(ab, ATH11K_DBG_BOOT, "boot skipping pci suspend as qmi is not initialised\n");
+ return 0;
+ }
+
+ ret = ath11k_core_suspend(ab);
+ if (ret)
+ ath11k_warn(ab, "failed to suspend core: %d\n", ret);
+
+ return 0;
+}
+
+static __maybe_unused int ath11k_pci_pm_resume(struct device *dev)
+{
+ struct ath11k_base *ab = dev_get_drvdata(dev);
+ int ret;
+
+ if (test_bit(ATH11K_FLAG_QMI_FAIL, &ab->dev_flags)) {
+ ath11k_dbg(ab, ATH11K_DBG_BOOT, "boot skipping pci resume as qmi is not initialised\n");
+ return 0;
+ }
+
+ ret = ath11k_core_resume(ab);
+ if (ret)
+ ath11k_warn(ab, "failed to resume core: %d\n", ret);
+
+ return ret;
+}
+
+static SIMPLE_DEV_PM_OPS(ath11k_pci_pm_ops,
+ ath11k_pci_pm_suspend,
+ ath11k_pci_pm_resume);
+
+static struct pci_driver ath11k_pci_driver = {
+ .name = "ath11k_pci",
+ .id_table = ath11k_pci_id_table,
+ .probe = ath11k_pci_probe,
+ .remove = ath11k_pci_remove,
+ .shutdown = ath11k_pci_shutdown,
+#ifdef CONFIG_PM
+ .driver.pm = &ath11k_pci_pm_ops,
+#endif
+};
+
+static int ath11k_pci_init(void)
+{
+ int ret;
+
+ ret = pci_register_driver(&ath11k_pci_driver);
+ if (ret)
+ pr_err("failed to register ath11k pci driver: %d\n",
+ ret);
+
+ return ret;
+}
+module_init(ath11k_pci_init);
+
+static void ath11k_pci_exit(void)
+{
+ pci_unregister_driver(&ath11k_pci_driver);
+}
+
+module_exit(ath11k_pci_exit);
+
+MODULE_DESCRIPTION("Driver support for Qualcomm Technologies PCIe 802.11ax WLAN devices");
+MODULE_LICENSE("Dual BSD/GPL");
+
+/* firmware files */
+MODULE_FIRMWARE(ATH11K_FW_DIR "/QCA6390/hw2.0/*");
+MODULE_FIRMWARE(ATH11K_FW_DIR "/QCN9074/hw1.0/*");
+MODULE_FIRMWARE(ATH11K_FW_DIR "/WCN6855/hw2.0/*");
+MODULE_FIRMWARE(ATH11K_FW_DIR "/WCN6855/hw2.1/*");
diff --git a/drivers/net/wireless/ath/ath11k/pci.h b/drivers/net/wireless/ath/ath11k/pci.h
new file mode 100644
index 0000000000..e9a01f344e
--- /dev/null
+++ b/drivers/net/wireless/ath/ath11k/pci.h
@@ -0,0 +1,83 @@
+/* SPDX-License-Identifier: BSD-3-Clause-Clear */
+/*
+ * Copyright (c) 2019-2020 The Linux Foundation. All rights reserved.
+ * Copyright (c) 2021-2022, Qualcomm Innovation Center, Inc. All rights reserved.
+ */
+#ifndef _ATH11K_PCI_H
+#define _ATH11K_PCI_H
+
+#include <linux/mhi.h>
+
+#include "core.h"
+
+#define PCIE_SOC_GLOBAL_RESET 0x3008
+#define PCIE_SOC_GLOBAL_RESET_V 1
+
+#define WLAON_WARM_SW_ENTRY 0x1f80504
+#define WLAON_SOC_RESET_CAUSE_REG 0x01f8060c
+
+#define PCIE_Q6_COOKIE_ADDR 0x01f80500
+#define PCIE_Q6_COOKIE_DATA 0xc0000000
+
+/* register to wake the UMAC from power collapse */
+#define PCIE_SCRATCH_0_SOC_PCIE_REG 0x4040
+
+/* register used for handshake mechanism to validate UMAC is awake */
+#define PCIE_SOC_WAKE_PCIE_LOCAL_REG 0x3004
+
+#define PCIE_PCIE_PARF_LTSSM 0x1e081b0
+#define PARM_LTSSM_VALUE 0x111
+
+#define GCC_GCC_PCIE_HOT_RST 0x1e402bc
+#define GCC_GCC_PCIE_HOT_RST_VAL 0x10
+
+#define PCIE_PCIE_INT_ALL_CLEAR 0x1e08228
+#define PCIE_SMLH_REQ_RST_LINK_DOWN 0x2
+#define PCIE_INT_CLEAR_ALL 0xffffffff
+
+#define PCIE_QSERDES_COM_SYSCLK_EN_SEL_REG(x) \
+ (ab->hw_params.regs->pcie_qserdes_sysclk_en_sel)
+#define PCIE_QSERDES_COM_SYSCLK_EN_SEL_VAL 0x10
+#define PCIE_QSERDES_COM_SYSCLK_EN_SEL_MSK 0xffffffff
+#define PCIE_PCS_OSC_DTCT_CONFIG1_REG(x) \
+ (ab->hw_params.regs->pcie_pcs_osc_dtct_config_base)
+#define PCIE_PCS_OSC_DTCT_CONFIG1_VAL 0x02
+#define PCIE_PCS_OSC_DTCT_CONFIG2_REG(x) \
+ (ab->hw_params.regs->pcie_pcs_osc_dtct_config_base + 0x4)
+#define PCIE_PCS_OSC_DTCT_CONFIG2_VAL 0x52
+#define PCIE_PCS_OSC_DTCT_CONFIG4_REG(x) \
+ (ab->hw_params.regs->pcie_pcs_osc_dtct_config_base + 0xc)
+#define PCIE_PCS_OSC_DTCT_CONFIG4_VAL 0xff
+#define PCIE_PCS_OSC_DTCT_CONFIG_MSK 0x000000ff
+
+#define WLAON_QFPROM_PWR_CTRL_REG 0x01f8031c
+#define QFPROM_PWR_CTRL_VDD4BLOW_MASK 0x4
+
+enum ath11k_pci_flags {
+ ATH11K_PCI_ASPM_RESTORE,
+};
+
+struct ath11k_pci {
+ struct pci_dev *pdev;
+ struct ath11k_base *ab;
+ u16 dev_id;
+ char amss_path[100];
+ struct mhi_controller *mhi_ctrl;
+ const struct ath11k_msi_config *msi_config;
+ u32 register_window;
+
+ /* protects register_window above */
+ spinlock_t window_lock;
+
+ /* enum ath11k_pci_flags */
+ unsigned long flags;
+ u16 link_ctl;
+};
+
+static inline struct ath11k_pci *ath11k_pci_priv(struct ath11k_base *ab)
+{
+ return (struct ath11k_pci *)ab->drv_priv;
+}
+
+int ath11k_pci_get_msi_irq(struct ath11k_base *ab, unsigned int vector);
+#endif
diff --git a/drivers/net/wireless/ath/ath11k/pcic.c b/drivers/net/wireless/ath/ath11k/pcic.c
new file mode 100644
index 0000000000..c63083633b
--- /dev/null
+++ b/drivers/net/wireless/ath/ath11k/pcic.c
@@ -0,0 +1,819 @@
+// SPDX-License-Identifier: BSD-3-Clause-Clear
+/*
+ * Copyright (c) 2019-2021 The Linux Foundation. All rights reserved.
+ * Copyright (c) 2021-2022, Qualcomm Innovation Center, Inc. All rights reserved.
+ */
+
+#include "core.h"
+#include "pcic.h"
+#include "debug.h"
+
+static const char *irq_name[ATH11K_IRQ_NUM_MAX] = {
+ "bhi",
+ "mhi-er0",
+ "mhi-er1",
+ "ce0",
+ "ce1",
+ "ce2",
+ "ce3",
+ "ce4",
+ "ce5",
+ "ce6",
+ "ce7",
+ "ce8",
+ "ce9",
+ "ce10",
+ "ce11",
+ "host2wbm-desc-feed",
+ "host2reo-re-injection",
+ "host2reo-command",
+ "host2rxdma-monitor-ring3",
+ "host2rxdma-monitor-ring2",
+ "host2rxdma-monitor-ring1",
+ "reo2ost-exception",
+ "wbm2host-rx-release",
+ "reo2host-status",
+ "reo2host-destination-ring4",
+ "reo2host-destination-ring3",
+ "reo2host-destination-ring2",
+ "reo2host-destination-ring1",
+ "rxdma2host-monitor-destination-mac3",
+ "rxdma2host-monitor-destination-mac2",
+ "rxdma2host-monitor-destination-mac1",
+ "ppdu-end-interrupts-mac3",
+ "ppdu-end-interrupts-mac2",
+ "ppdu-end-interrupts-mac1",
+ "rxdma2host-monitor-status-ring-mac3",
+ "rxdma2host-monitor-status-ring-mac2",
+ "rxdma2host-monitor-status-ring-mac1",
+ "host2rxdma-host-buf-ring-mac3",
+ "host2rxdma-host-buf-ring-mac2",
+ "host2rxdma-host-buf-ring-mac1",
+ "rxdma2host-destination-ring-mac3",
+ "rxdma2host-destination-ring-mac2",
+ "rxdma2host-destination-ring-mac1",
+ "host2tcl-input-ring4",
+ "host2tcl-input-ring3",
+ "host2tcl-input-ring2",
+ "host2tcl-input-ring1",
+ "wbm2host-tx-completions-ring3",
+ "wbm2host-tx-completions-ring2",
+ "wbm2host-tx-completions-ring1",
+ "tcl2host-status-ring",
+};
+
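+/* Per-chip MSI vector layout: how the total vector count is split
+ * among users such as MHI, CE, WAKE and DP for each hw revision.
+ */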
+static const struct ath11k_msi_config ath11k_msi_config[] = {
+ {
+ .total_vectors = 32,
+ .total_users = 4,
+ .users = (struct ath11k_msi_user[]) {
+ { .name = "MHI", .num_vectors = 3, .base_vector = 0 },
+ { .name = "CE", .num_vectors = 10, .base_vector = 3 },
+ { .name = "WAKE", .num_vectors = 1, .base_vector = 13 },
+ { .name = "DP", .num_vectors = 18, .base_vector = 14 },
+ },
+ .hw_rev = ATH11K_HW_QCA6390_HW20,
+ },
+ {
+ .total_vectors = 16,
+ .total_users = 3,
+ .users = (struct ath11k_msi_user[]) {
+ { .name = "MHI", .num_vectors = 3, .base_vector = 0 },
+ { .name = "CE", .num_vectors = 5, .base_vector = 3 },
+ { .name = "DP", .num_vectors = 8, .base_vector = 8 },
+ },
+ .hw_rev = ATH11K_HW_QCN9074_HW10,
+ },
+ {
+ .total_vectors = 32,
+ .total_users = 4,
+ .users = (struct ath11k_msi_user[]) {
+ { .name = "MHI", .num_vectors = 3, .base_vector = 0 },
+ { .name = "CE", .num_vectors = 10, .base_vector = 3 },
+ { .name = "WAKE", .num_vectors = 1, .base_vector = 13 },
+ { .name = "DP", .num_vectors = 18, .base_vector = 14 },
+ },
+ .hw_rev = ATH11K_HW_WCN6855_HW20,
+ },
+ {
+ .total_vectors = 32,
+ .total_users = 4,
+ .users = (struct ath11k_msi_user[]) {
+ { .name = "MHI", .num_vectors = 3, .base_vector = 0 },
+ { .name = "CE", .num_vectors = 10, .base_vector = 3 },
+ { .name = "WAKE", .num_vectors = 1, .base_vector = 13 },
+ { .name = "DP", .num_vectors = 18, .base_vector = 14 },
+ },
+ .hw_rev = ATH11K_HW_WCN6855_HW21,
+ },
+ {
+ .total_vectors = 28,
+ .total_users = 2,
+ .users = (struct ath11k_msi_user[]) {
+ { .name = "CE", .num_vectors = 10, .base_vector = 0 },
+ { .name = "DP", .num_vectors = 18, .base_vector = 10 },
+ },
+ .hw_rev = ATH11K_HW_WCN6750_HW10,
+ },
+};
+
+int ath11k_pcic_init_msi_config(struct ath11k_base *ab)
+{
+ const struct ath11k_msi_config *msi_config;
+ int i;
+
+ for (i = 0; i < ARRAY_SIZE(ath11k_msi_config); i++) {
+ msi_config = &ath11k_msi_config[i];
+
+ if (msi_config->hw_rev == ab->hw_rev)
+ break;
+ }
+
+ if (i == ARRAY_SIZE(ath11k_msi_config)) {
+ ath11k_err(ab, "failed to fetch msi config, unsupported hw version: 0x%x\n",
+ ab->hw_rev);
+ return -EINVAL;
+ }
+
+ ab->pci.msi.config = msi_config;
+ return 0;
+}
+EXPORT_SYMBOL(ath11k_pcic_init_msi_config);
+
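+/* Offsets below the window start map directly into BAR0; anything at
+ * or above it must go through the bus-specific windowed accessors.
+ */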
+static void __ath11k_pcic_write32(struct ath11k_base *ab, u32 offset, u32 value)
+{
+ if (offset < ATH11K_PCI_WINDOW_START)
+ iowrite32(value, ab->mem + offset);
+ else
+ ab->pci.ops->window_write32(ab, offset, value);
+}
+
+void ath11k_pcic_write32(struct ath11k_base *ab, u32 offset, u32 value)
+{
+ int ret = 0;
+ bool wakeup_required;
+
+	/* For an offset beyond BAR + 4K - 32, we may need
+	 * to wake up the device for the access.
+	 */
+ wakeup_required = test_bit(ATH11K_FLAG_DEVICE_INIT_DONE, &ab->dev_flags) &&
+ offset >= ATH11K_PCI_ACCESS_ALWAYS_OFF;
+ if (wakeup_required && ab->pci.ops->wakeup)
+ ret = ab->pci.ops->wakeup(ab);
+
+ __ath11k_pcic_write32(ab, offset, value);
+
+ if (wakeup_required && !ret && ab->pci.ops->release)
+ ab->pci.ops->release(ab);
+}
+EXPORT_SYMBOL(ath11k_pcic_write32);
+
+static u32 __ath11k_pcic_read32(struct ath11k_base *ab, u32 offset)
+{
+ u32 val;
+
+ if (offset < ATH11K_PCI_WINDOW_START)
+ val = ioread32(ab->mem + offset);
+ else
+ val = ab->pci.ops->window_read32(ab, offset);
+
+ return val;
+}
+
+u32 ath11k_pcic_read32(struct ath11k_base *ab, u32 offset)
+{
+ int ret = 0;
+ u32 val;
+ bool wakeup_required;
+
+	/* For an offset beyond BAR + 4K - 32, we may need
+	 * to wake up the device for the access.
+	 */
+ wakeup_required = test_bit(ATH11K_FLAG_DEVICE_INIT_DONE, &ab->dev_flags) &&
+ offset >= ATH11K_PCI_ACCESS_ALWAYS_OFF;
+ if (wakeup_required && ab->pci.ops->wakeup)
+ ret = ab->pci.ops->wakeup(ab);
+
+ val = __ath11k_pcic_read32(ab, offset);
+
+ if (wakeup_required && !ret && ab->pci.ops->release)
+ ab->pci.ops->release(ab);
+
+ return val;
+}
+EXPORT_SYMBOL(ath11k_pcic_read32);
+
+int ath11k_pcic_read(struct ath11k_base *ab, void *buf, u32 start, u32 end)
+{
+ int ret = 0;
+ bool wakeup_required;
+ u32 *data = buf;
+ u32 i;
+
+	/* For an offset beyond BAR + 4K - 32, we may need
+	 * to wake up the device for the access.
+	 */
+ wakeup_required = test_bit(ATH11K_FLAG_DEVICE_INIT_DONE, &ab->dev_flags) &&
+ end >= ATH11K_PCI_ACCESS_ALWAYS_OFF;
+ if (wakeup_required && ab->pci.ops->wakeup) {
+ ret = ab->pci.ops->wakeup(ab);
+ if (ret) {
+ ath11k_warn(ab,
+ "wakeup failed, data may be invalid: %d",
+ ret);
+			/* Even though wakeup() failed, continue processing
+			 * rather than returning: some parts of the data may
+			 * still be valid and useful, e.g. they could give us
+			 * clues about a firmware crash. Being misled by
+			 * invalid data is avoided because we are aware of
+			 * the wakeup failure.
+			 */
+ }
+ }
+
+ for (i = start; i < end + 1; i += 4)
+ *data++ = __ath11k_pcic_read32(ab, i);
+
+ if (wakeup_required && ab->pci.ops->release)
+ ab->pci.ops->release(ab);
+
+ return 0;
+}
+EXPORT_SYMBOL(ath11k_pcic_read);
+
+void ath11k_pcic_get_msi_address(struct ath11k_base *ab, u32 *msi_addr_lo,
+ u32 *msi_addr_hi)
+{
+ *msi_addr_lo = ab->pci.msi.addr_lo;
+ *msi_addr_hi = ab->pci.msi.addr_hi;
+}
+EXPORT_SYMBOL(ath11k_pcic_get_msi_address);
+
+int ath11k_pcic_get_user_msi_assignment(struct ath11k_base *ab, char *user_name,
+ int *num_vectors, u32 *user_base_data,
+ u32 *base_vector)
+{
+ const struct ath11k_msi_config *msi_config = ab->pci.msi.config;
+ int idx;
+
+ for (idx = 0; idx < msi_config->total_users; idx++) {
+ if (strcmp(user_name, msi_config->users[idx].name) == 0) {
+ *num_vectors = msi_config->users[idx].num_vectors;
+ *base_vector = msi_config->users[idx].base_vector;
+ *user_base_data = *base_vector + ab->pci.msi.ep_base_data;
+
+ ath11k_dbg(ab, ATH11K_DBG_PCI,
+ "msi assignment %s num_vectors %d user_base_data %u base_vector %u\n",
+ user_name, *num_vectors, *user_base_data,
+ *base_vector);
+
+ return 0;
+ }
+ }
+
+ ath11k_err(ab, "Failed to find MSI assignment for %s!\n", user_name);
+
+ return -EINVAL;
+}
+EXPORT_SYMBOL(ath11k_pcic_get_user_msi_assignment);
+
+void ath11k_pcic_get_ce_msi_idx(struct ath11k_base *ab, u32 ce_id, u32 *msi_idx)
+{
+ u32 i, msi_data_idx;
+
+ for (i = 0, msi_data_idx = 0; i < ab->hw_params.ce_count; i++) {
+ if (ath11k_ce_get_attr_flags(ab, i) & CE_ATTR_DIS_INTR)
+ continue;
+
+ if (ce_id == i)
+ break;
+
+ msi_data_idx++;
+ }
+ *msi_idx = msi_data_idx;
+}
+EXPORT_SYMBOL(ath11k_pcic_get_ce_msi_idx);
+
+static void ath11k_pcic_free_ext_irq(struct ath11k_base *ab)
+{
+ int i, j;
+
+ for (i = 0; i < ATH11K_EXT_IRQ_GRP_NUM_MAX; i++) {
+ struct ath11k_ext_irq_grp *irq_grp = &ab->ext_irq_grp[i];
+
+ for (j = 0; j < irq_grp->num_irq; j++)
+ free_irq(ab->irq_num[irq_grp->irqs[j]], irq_grp);
+
+ netif_napi_del(&irq_grp->napi);
+ }
+}
+
+void ath11k_pcic_free_irq(struct ath11k_base *ab)
+{
+ int i, irq_idx;
+
+ for (i = 0; i < ab->hw_params.ce_count; i++) {
+ if (ath11k_ce_get_attr_flags(ab, i) & CE_ATTR_DIS_INTR)
+ continue;
+ irq_idx = ATH11K_PCI_IRQ_CE0_OFFSET + i;
+ free_irq(ab->irq_num[irq_idx], &ab->ce.ce_pipe[i]);
+ }
+
+ ath11k_pcic_free_ext_irq(ab);
+}
+EXPORT_SYMBOL(ath11k_pcic_free_irq);
+
+static void ath11k_pcic_ce_irq_enable(struct ath11k_base *ab, u16 ce_id)
+{
+ u32 irq_idx;
+
+ /* In case of one MSI vector, we handle irq enable/disable in a
+ * uniform way since we only have one irq
+ */
+ if (!test_bit(ATH11K_FLAG_MULTI_MSI_VECTORS, &ab->dev_flags))
+ return;
+
+ irq_idx = ATH11K_PCI_IRQ_CE0_OFFSET + ce_id;
+ enable_irq(ab->irq_num[irq_idx]);
+}
+
+static void ath11k_pcic_ce_irq_disable(struct ath11k_base *ab, u16 ce_id)
+{
+ u32 irq_idx;
+
+ /* In case of one MSI vector, we handle irq enable/disable in a
+ * uniform way since we only have one irq
+ */
+ if (!test_bit(ATH11K_FLAG_MULTI_MSI_VECTORS, &ab->dev_flags))
+ return;
+
+ irq_idx = ATH11K_PCI_IRQ_CE0_OFFSET + ce_id;
+ disable_irq_nosync(ab->irq_num[irq_idx]);
+}
+
+static void ath11k_pcic_ce_irqs_disable(struct ath11k_base *ab)
+{
+ int i;
+
+ clear_bit(ATH11K_FLAG_CE_IRQ_ENABLED, &ab->dev_flags);
+
+ for (i = 0; i < ab->hw_params.ce_count; i++) {
+ if (ath11k_ce_get_attr_flags(ab, i) & CE_ATTR_DIS_INTR)
+ continue;
+ ath11k_pcic_ce_irq_disable(ab, i);
+ }
+}
+
+static void ath11k_pcic_sync_ce_irqs(struct ath11k_base *ab)
+{
+ int i;
+ int irq_idx;
+
+ for (i = 0; i < ab->hw_params.ce_count; i++) {
+ if (ath11k_ce_get_attr_flags(ab, i) & CE_ATTR_DIS_INTR)
+ continue;
+
+ irq_idx = ATH11K_PCI_IRQ_CE0_OFFSET + i;
+ synchronize_irq(ab->irq_num[irq_idx]);
+ }
+}
+
+static void ath11k_pcic_ce_tasklet(struct tasklet_struct *t)
+{
+ struct ath11k_ce_pipe *ce_pipe = from_tasklet(ce_pipe, t, intr_tq);
+ int irq_idx = ATH11K_PCI_IRQ_CE0_OFFSET + ce_pipe->pipe_num;
+
+ ath11k_ce_per_engine_service(ce_pipe->ab, ce_pipe->pipe_num);
+
+ enable_irq(ce_pipe->ab->irq_num[irq_idx]);
+}
+
+static irqreturn_t ath11k_pcic_ce_interrupt_handler(int irq, void *arg)
+{
+ struct ath11k_ce_pipe *ce_pipe = arg;
+ struct ath11k_base *ab = ce_pipe->ab;
+ int irq_idx = ATH11K_PCI_IRQ_CE0_OFFSET + ce_pipe->pipe_num;
+
+ if (!test_bit(ATH11K_FLAG_CE_IRQ_ENABLED, &ab->dev_flags))
+ return IRQ_HANDLED;
+
+ /* last interrupt received for this CE */
+ ce_pipe->timestamp = jiffies;
+
+ disable_irq_nosync(ab->irq_num[irq_idx]);
+
+ tasklet_schedule(&ce_pipe->intr_tq);
+
+ return IRQ_HANDLED;
+}
+
+static void ath11k_pcic_ext_grp_disable(struct ath11k_ext_irq_grp *irq_grp)
+{
+ struct ath11k_base *ab = irq_grp->ab;
+ int i;
+
+ /* In case of one MSI vector, we handle irq enable/disable
+ * in a uniform way since we only have one irq
+ */
+ if (!test_bit(ATH11K_FLAG_MULTI_MSI_VECTORS, &ab->dev_flags))
+ return;
+
+ for (i = 0; i < irq_grp->num_irq; i++)
+ disable_irq_nosync(irq_grp->ab->irq_num[irq_grp->irqs[i]]);
+}
+
+static void __ath11k_pcic_ext_irq_disable(struct ath11k_base *sc)
+{
+ int i;
+
+ clear_bit(ATH11K_FLAG_EXT_IRQ_ENABLED, &sc->dev_flags);
+
+ for (i = 0; i < ATH11K_EXT_IRQ_GRP_NUM_MAX; i++) {
+ struct ath11k_ext_irq_grp *irq_grp = &sc->ext_irq_grp[i];
+
+ ath11k_pcic_ext_grp_disable(irq_grp);
+
+ if (irq_grp->napi_enabled) {
+ napi_synchronize(&irq_grp->napi);
+ napi_disable(&irq_grp->napi);
+ irq_grp->napi_enabled = false;
+ }
+ }
+}
+
+static void ath11k_pcic_ext_grp_enable(struct ath11k_ext_irq_grp *irq_grp)
+{
+ struct ath11k_base *ab = irq_grp->ab;
+ int i;
+
+ /* In case of one MSI vector, we handle irq enable/disable in a
+ * uniform way since we only have one irq
+ */
+ if (!test_bit(ATH11K_FLAG_MULTI_MSI_VECTORS, &ab->dev_flags))
+ return;
+
+ for (i = 0; i < irq_grp->num_irq; i++)
+ enable_irq(irq_grp->ab->irq_num[irq_grp->irqs[i]]);
+}
+
+void ath11k_pcic_ext_irq_enable(struct ath11k_base *ab)
+{
+ int i;
+
+ set_bit(ATH11K_FLAG_EXT_IRQ_ENABLED, &ab->dev_flags);
+
+ for (i = 0; i < ATH11K_EXT_IRQ_GRP_NUM_MAX; i++) {
+ struct ath11k_ext_irq_grp *irq_grp = &ab->ext_irq_grp[i];
+
+ if (!irq_grp->napi_enabled) {
+ napi_enable(&irq_grp->napi);
+ irq_grp->napi_enabled = true;
+ }
+ ath11k_pcic_ext_grp_enable(irq_grp);
+ }
+}
+EXPORT_SYMBOL(ath11k_pcic_ext_irq_enable);
+
+static void ath11k_pcic_sync_ext_irqs(struct ath11k_base *ab)
+{
+ int i, j, irq_idx;
+
+ for (i = 0; i < ATH11K_EXT_IRQ_GRP_NUM_MAX; i++) {
+ struct ath11k_ext_irq_grp *irq_grp = &ab->ext_irq_grp[i];
+
+ for (j = 0; j < irq_grp->num_irq; j++) {
+ irq_idx = irq_grp->irqs[j];
+ synchronize_irq(ab->irq_num[irq_idx]);
+ }
+ }
+}
+
+void ath11k_pcic_ext_irq_disable(struct ath11k_base *ab)
+{
+ __ath11k_pcic_ext_irq_disable(ab);
+ ath11k_pcic_sync_ext_irqs(ab);
+}
+EXPORT_SYMBOL(ath11k_pcic_ext_irq_disable);
+
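+/* NAPI poll for a DP interrupt group: service the SRNGs and, once the
+ * work done falls below the budget, complete NAPI and re-enable the
+ * group's interrupts (the handler disabled them before scheduling).
+ */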
+static int ath11k_pcic_ext_grp_napi_poll(struct napi_struct *napi, int budget)
+{
+ struct ath11k_ext_irq_grp *irq_grp = container_of(napi,
+ struct ath11k_ext_irq_grp,
+ napi);
+ struct ath11k_base *ab = irq_grp->ab;
+ int work_done;
+ int i;
+
+ work_done = ath11k_dp_service_srng(ab, irq_grp, budget);
+ if (work_done < budget) {
+ napi_complete_done(napi, work_done);
+ for (i = 0; i < irq_grp->num_irq; i++)
+ enable_irq(irq_grp->ab->irq_num[irq_grp->irqs[i]]);
+ }
+
+ if (work_done > budget)
+ work_done = budget;
+
+ return work_done;
+}
+
+static irqreturn_t ath11k_pcic_ext_interrupt_handler(int irq, void *arg)
+{
+ struct ath11k_ext_irq_grp *irq_grp = arg;
+ struct ath11k_base *ab = irq_grp->ab;
+ int i;
+
+ if (!test_bit(ATH11K_FLAG_EXT_IRQ_ENABLED, &ab->dev_flags))
+ return IRQ_HANDLED;
+
+ ath11k_dbg(irq_grp->ab, ATH11K_DBG_PCI, "ext irq %d\n", irq);
+
+ /* last interrupt received for this group */
+ irq_grp->timestamp = jiffies;
+
+ for (i = 0; i < irq_grp->num_irq; i++)
+ disable_irq_nosync(irq_grp->ab->irq_num[irq_grp->irqs[i]]);
+
+ napi_schedule(&irq_grp->napi);
+
+ return IRQ_HANDLED;
+}
+
+static int
+ath11k_pcic_get_msi_irq(struct ath11k_base *ab, unsigned int vector)
+{
+ return ab->pci.ops->get_msi_irq(ab, vector);
+}
+
+static int ath11k_pcic_ext_irq_config(struct ath11k_base *ab)
+{
+ int i, j, ret, num_vectors = 0;
+ u32 user_base_data = 0, base_vector = 0;
+ unsigned long irq_flags;
+
+ ret = ath11k_pcic_get_user_msi_assignment(ab, "DP", &num_vectors,
+ &user_base_data,
+ &base_vector);
+ if (ret < 0)
+ return ret;
+
+ irq_flags = IRQF_SHARED;
+ if (!test_bit(ATH11K_FLAG_MULTI_MSI_VECTORS, &ab->dev_flags))
+ irq_flags |= IRQF_NOBALANCING;
+
+ for (i = 0; i < ATH11K_EXT_IRQ_GRP_NUM_MAX; i++) {
+ struct ath11k_ext_irq_grp *irq_grp = &ab->ext_irq_grp[i];
+ u32 num_irq = 0;
+
+ irq_grp->ab = ab;
+ irq_grp->grp_id = i;
+ init_dummy_netdev(&irq_grp->napi_ndev);
+ netif_napi_add(&irq_grp->napi_ndev, &irq_grp->napi,
+ ath11k_pcic_ext_grp_napi_poll);
+
+ if (ab->hw_params.ring_mask->tx[i] ||
+ ab->hw_params.ring_mask->rx[i] ||
+ ab->hw_params.ring_mask->rx_err[i] ||
+ ab->hw_params.ring_mask->rx_wbm_rel[i] ||
+ ab->hw_params.ring_mask->reo_status[i] ||
+ ab->hw_params.ring_mask->rxdma2host[i] ||
+ ab->hw_params.ring_mask->host2rxdma[i] ||
+ ab->hw_params.ring_mask->rx_mon_status[i]) {
+ num_irq = 1;
+ }
+
+ irq_grp->num_irq = num_irq;
+ irq_grp->irqs[0] = ATH11K_PCI_IRQ_DP_OFFSET + i;
+
+ for (j = 0; j < irq_grp->num_irq; j++) {
+ int irq_idx = irq_grp->irqs[j];
+ int vector = (i % num_vectors) + base_vector;
+ int irq = ath11k_pcic_get_msi_irq(ab, vector);
+
+ if (irq < 0)
+ return irq;
+
+ ab->irq_num[irq_idx] = irq;
+
+ ath11k_dbg(ab, ATH11K_DBG_PCI,
+ "irq %d group %d\n", irq, i);
+
+ irq_set_status_flags(irq, IRQ_DISABLE_UNLAZY);
+ ret = request_irq(irq, ath11k_pcic_ext_interrupt_handler,
+ irq_flags, "DP_EXT_IRQ", irq_grp);
+ if (ret) {
+ ath11k_err(ab, "failed request irq %d: %d\n",
+ vector, ret);
+ return ret;
+ }
+ }
+ ath11k_pcic_ext_grp_disable(irq_grp);
+ }
+
+ return 0;
+}
+
+int ath11k_pcic_config_irq(struct ath11k_base *ab)
+{
+ struct ath11k_ce_pipe *ce_pipe;
+ u32 msi_data_start;
+ u32 msi_data_count, msi_data_idx;
+ u32 msi_irq_start;
+ unsigned int msi_data;
+ int irq, i, ret, irq_idx;
+ unsigned long irq_flags;
+
+ ret = ath11k_pcic_get_user_msi_assignment(ab, "CE", &msi_data_count,
+ &msi_data_start, &msi_irq_start);
+ if (ret)
+ return ret;
+
+ irq_flags = IRQF_SHARED;
+ if (!test_bit(ATH11K_FLAG_MULTI_MSI_VECTORS, &ab->dev_flags))
+ irq_flags |= IRQF_NOBALANCING;
+
+ /* Configure CE irqs */
+ for (i = 0, msi_data_idx = 0; i < ab->hw_params.ce_count; i++) {
+ if (ath11k_ce_get_attr_flags(ab, i) & CE_ATTR_DIS_INTR)
+ continue;
+
+ msi_data = (msi_data_idx % msi_data_count) + msi_irq_start;
+ irq = ath11k_pcic_get_msi_irq(ab, msi_data);
+ if (irq < 0)
+ return irq;
+
+ ce_pipe = &ab->ce.ce_pipe[i];
+
+ irq_idx = ATH11K_PCI_IRQ_CE0_OFFSET + i;
+
+ tasklet_setup(&ce_pipe->intr_tq, ath11k_pcic_ce_tasklet);
+
+ ret = request_irq(irq, ath11k_pcic_ce_interrupt_handler,
+ irq_flags, irq_name[irq_idx], ce_pipe);
+ if (ret) {
+ ath11k_err(ab, "failed to request irq %d: %d\n",
+ irq_idx, ret);
+ return ret;
+ }
+
+ ab->irq_num[irq_idx] = irq;
+ msi_data_idx++;
+
+ ath11k_pcic_ce_irq_disable(ab, i);
+ }
+
+ ret = ath11k_pcic_ext_irq_config(ab);
+ if (ret)
+ return ret;
+
+ return 0;
+}
+EXPORT_SYMBOL(ath11k_pcic_config_irq);
+
+void ath11k_pcic_ce_irqs_enable(struct ath11k_base *ab)
+{
+ int i;
+
+ set_bit(ATH11K_FLAG_CE_IRQ_ENABLED, &ab->dev_flags);
+
+ for (i = 0; i < ab->hw_params.ce_count; i++) {
+ if (ath11k_ce_get_attr_flags(ab, i) & CE_ATTR_DIS_INTR)
+ continue;
+ ath11k_pcic_ce_irq_enable(ab, i);
+ }
+}
+EXPORT_SYMBOL(ath11k_pcic_ce_irqs_enable);
+
+static void ath11k_pcic_kill_tasklets(struct ath11k_base *ab)
+{
+ int i;
+
+ for (i = 0; i < ab->hw_params.ce_count; i++) {
+ struct ath11k_ce_pipe *ce_pipe = &ab->ce.ce_pipe[i];
+
+ if (ath11k_ce_get_attr_flags(ab, i) & CE_ATTR_DIS_INTR)
+ continue;
+
+ tasklet_kill(&ce_pipe->intr_tq);
+ }
+}
+
+void ath11k_pcic_ce_irq_disable_sync(struct ath11k_base *ab)
+{
+ ath11k_pcic_ce_irqs_disable(ab);
+ ath11k_pcic_sync_ce_irqs(ab);
+ ath11k_pcic_kill_tasklets(ab);
+}
+EXPORT_SYMBOL(ath11k_pcic_ce_irq_disable_sync);
+
+void ath11k_pcic_stop(struct ath11k_base *ab)
+{
+ ath11k_pcic_ce_irq_disable_sync(ab);
+ ath11k_ce_cleanup_pipes(ab);
+}
+EXPORT_SYMBOL(ath11k_pcic_stop);
+
+int ath11k_pcic_start(struct ath11k_base *ab)
+{
+ set_bit(ATH11K_FLAG_DEVICE_INIT_DONE, &ab->dev_flags);
+
+ ath11k_pcic_ce_irqs_enable(ab);
+ ath11k_ce_rx_post_buf(ab);
+
+ return 0;
+}
+EXPORT_SYMBOL(ath11k_pcic_start);
+
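+/* Resolve an HTC service to its copy engine pipes via the hw_params
+ * service-to-CE map: PIPEDIR_IN fills the downlink pipe, PIPEDIR_OUT
+ * the uplink pipe and PIPEDIR_INOUT both.
+ */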
+int ath11k_pcic_map_service_to_pipe(struct ath11k_base *ab, u16 service_id,
+ u8 *ul_pipe, u8 *dl_pipe)
+{
+ const struct service_to_pipe *entry;
+ bool ul_set = false, dl_set = false;
+ int i;
+
+ for (i = 0; i < ab->hw_params.svc_to_ce_map_len; i++) {
+ entry = &ab->hw_params.svc_to_ce_map[i];
+
+ if (__le32_to_cpu(entry->service_id) != service_id)
+ continue;
+
+ switch (__le32_to_cpu(entry->pipedir)) {
+ case PIPEDIR_NONE:
+ break;
+ case PIPEDIR_IN:
+ WARN_ON(dl_set);
+ *dl_pipe = __le32_to_cpu(entry->pipenum);
+ dl_set = true;
+ break;
+ case PIPEDIR_OUT:
+ WARN_ON(ul_set);
+ *ul_pipe = __le32_to_cpu(entry->pipenum);
+ ul_set = true;
+ break;
+ case PIPEDIR_INOUT:
+ WARN_ON(dl_set);
+ WARN_ON(ul_set);
+ *dl_pipe = __le32_to_cpu(entry->pipenum);
+ *ul_pipe = __le32_to_cpu(entry->pipenum);
+ dl_set = true;
+ ul_set = true;
+ break;
+ }
+ }
+
+ if (WARN_ON(!ul_set || !dl_set))
+ return -ENOENT;
+
+ return 0;
+}
+EXPORT_SYMBOL(ath11k_pcic_map_service_to_pipe);
+
+int ath11k_pcic_register_pci_ops(struct ath11k_base *ab,
+ const struct ath11k_pci_ops *pci_ops)
+{
+ if (!pci_ops)
+ return 0;
+
+ /* Return error if mandatory pci_ops callbacks are missing */
+ if (!pci_ops->get_msi_irq || !pci_ops->window_write32 ||
+ !pci_ops->window_read32)
+ return -EINVAL;
+
+ ab->pci.ops = pci_ops;
+ return 0;
+}
+EXPORT_SYMBOL(ath11k_pcic_register_pci_ops);
+
+void ath11k_pci_enable_ce_irqs_except_wake_irq(struct ath11k_base *ab)
+{
+ int i;
+
+ for (i = 0; i < ab->hw_params.ce_count; i++) {
+ if (ath11k_ce_get_attr_flags(ab, i) & CE_ATTR_DIS_INTR ||
+ i == ATH11K_PCI_CE_WAKE_IRQ)
+ continue;
+ ath11k_pcic_ce_irq_enable(ab, i);
+ }
+}
+EXPORT_SYMBOL(ath11k_pci_enable_ce_irqs_except_wake_irq);
+
+void ath11k_pci_disable_ce_irqs_except_wake_irq(struct ath11k_base *ab)
+{
+ int i;
+ int irq_idx;
+ struct ath11k_ce_pipe *ce_pipe;
+
+ for (i = 0; i < ab->hw_params.ce_count; i++) {
+ ce_pipe = &ab->ce.ce_pipe[i];
+ irq_idx = ATH11K_PCI_IRQ_CE0_OFFSET + i;
+
+ if (ath11k_ce_get_attr_flags(ab, i) & CE_ATTR_DIS_INTR ||
+ i == ATH11K_PCI_CE_WAKE_IRQ)
+ continue;
+
+ disable_irq_nosync(ab->irq_num[irq_idx]);
+ synchronize_irq(ab->irq_num[irq_idx]);
+ tasklet_kill(&ce_pipe->intr_tq);
+ }
+}
+EXPORT_SYMBOL(ath11k_pci_disable_ce_irqs_except_wake_irq);
diff --git a/drivers/net/wireless/ath/ath11k/pcic.h b/drivers/net/wireless/ath/ath11k/pcic.h
new file mode 100644
index 0000000000..ac012e88bf
--- /dev/null
+++ b/drivers/net/wireless/ath/ath11k/pcic.h
@@ -0,0 +1,54 @@
+/* SPDX-License-Identifier: BSD-3-Clause-Clear */
+/*
+ * Copyright (c) 2019-2021 The Linux Foundation. All rights reserved.
+ * Copyright (c) 2021-2022, Qualcomm Innovation Center, Inc. All rights reserved.
+ */
+
+#ifndef _ATH11K_PCI_CMN_H
+#define _ATH11K_PCI_CMN_H
+
+#include "core.h"
+
+#define ATH11K_PCI_IRQ_CE0_OFFSET 3
+#define ATH11K_PCI_IRQ_DP_OFFSET 14
+
+#define ATH11K_PCI_CE_WAKE_IRQ 2
+
+#define ATH11K_PCI_WINDOW_ENABLE_BIT 0x40000000
+#define ATH11K_PCI_WINDOW_REG_ADDRESS 0x310c
+#define ATH11K_PCI_WINDOW_VALUE_MASK GENMASK(24, 19)
+#define ATH11K_PCI_WINDOW_START 0x80000
+#define ATH11K_PCI_WINDOW_RANGE_MASK GENMASK(18, 0)
+
+/* BAR0 + 4K is always accessible, so no forced
+ * wakeup is needed there.
+ * 4K - 32 = 0xFE0
+ */
+#define ATH11K_PCI_ACCESS_ALWAYS_OFF 0xFE0
+
+int ath11k_pcic_get_user_msi_assignment(struct ath11k_base *ab, char *user_name,
+ int *num_vectors, u32 *user_base_data,
+ u32 *base_vector);
+void ath11k_pcic_write32(struct ath11k_base *ab, u32 offset, u32 value);
+u32 ath11k_pcic_read32(struct ath11k_base *ab, u32 offset);
+void ath11k_pcic_get_msi_address(struct ath11k_base *ab, u32 *msi_addr_lo,
+ u32 *msi_addr_hi);
+void ath11k_pcic_get_ce_msi_idx(struct ath11k_base *ab, u32 ce_id, u32 *msi_idx);
+void ath11k_pcic_free_irq(struct ath11k_base *ab);
+int ath11k_pcic_config_irq(struct ath11k_base *ab);
+void ath11k_pcic_ext_irq_enable(struct ath11k_base *ab);
+void ath11k_pcic_ext_irq_disable(struct ath11k_base *ab);
+void ath11k_pcic_stop(struct ath11k_base *ab);
+int ath11k_pcic_start(struct ath11k_base *ab);
+int ath11k_pcic_map_service_to_pipe(struct ath11k_base *ab, u16 service_id,
+ u8 *ul_pipe, u8 *dl_pipe);
+void ath11k_pcic_ce_irqs_enable(struct ath11k_base *ab);
+void ath11k_pcic_ce_irq_disable_sync(struct ath11k_base *ab);
+int ath11k_pcic_init_msi_config(struct ath11k_base *ab);
+int ath11k_pcic_register_pci_ops(struct ath11k_base *ab,
+ const struct ath11k_pci_ops *pci_ops);
+int ath11k_pcic_read(struct ath11k_base *ab, void *buf, u32 start, u32 end);
+void ath11k_pci_enable_ce_irqs_except_wake_irq(struct ath11k_base *ab);
+void ath11k_pci_disable_ce_irqs_except_wake_irq(struct ath11k_base *ab);
+
+#endif
diff --git a/drivers/net/wireless/ath/ath11k/peer.c b/drivers/net/wireless/ath/ath11k/peer.c
new file mode 100644
index 0000000000..114aa3a9a3
--- /dev/null
+++ b/drivers/net/wireless/ath/ath11k/peer.c
@@ -0,0 +1,670 @@
+// SPDX-License-Identifier: BSD-3-Clause-Clear
+/*
+ * Copyright (c) 2018-2019 The Linux Foundation. All rights reserved.
+ * Copyright (c) 2021-2022 Qualcomm Innovation Center, Inc. All rights reserved.
+ */
+
+#include "core.h"
+#include "peer.h"
+#include "debug.h"
+
+static struct ath11k_peer *ath11k_peer_find_list_by_id(struct ath11k_base *ab,
+ int peer_id)
+{
+ struct ath11k_peer *peer;
+
+ lockdep_assert_held(&ab->base_lock);
+
+ list_for_each_entry(peer, &ab->peers, list) {
+ if (peer->peer_id != peer_id)
+ continue;
+
+ return peer;
+ }
+
+ return NULL;
+}
+
+struct ath11k_peer *ath11k_peer_find(struct ath11k_base *ab, int vdev_id,
+ const u8 *addr)
+{
+ struct ath11k_peer *peer;
+
+ lockdep_assert_held(&ab->base_lock);
+
+ list_for_each_entry(peer, &ab->peers, list) {
+ if (peer->vdev_id != vdev_id)
+ continue;
+ if (!ether_addr_equal(peer->addr, addr))
+ continue;
+
+ return peer;
+ }
+
+ return NULL;
+}
+
+struct ath11k_peer *ath11k_peer_find_by_addr(struct ath11k_base *ab,
+ const u8 *addr)
+{
+ struct ath11k_peer *peer;
+
+ lockdep_assert_held(&ab->base_lock);
+
+ if (!ab->rhead_peer_addr)
+ return NULL;
+
+ peer = rhashtable_lookup_fast(ab->rhead_peer_addr, addr,
+ ab->rhash_peer_addr_param);
+
+ return peer;
+}
+
+struct ath11k_peer *ath11k_peer_find_by_id(struct ath11k_base *ab,
+ int peer_id)
+{
+ struct ath11k_peer *peer;
+
+ lockdep_assert_held(&ab->base_lock);
+
+ if (!ab->rhead_peer_id)
+ return NULL;
+
+ peer = rhashtable_lookup_fast(ab->rhead_peer_id, &peer_id,
+ ab->rhash_peer_id_param);
+
+ return peer;
+}
+
+struct ath11k_peer *ath11k_peer_find_by_vdev_id(struct ath11k_base *ab,
+ int vdev_id)
+{
+ struct ath11k_peer *peer;
+
+ spin_lock_bh(&ab->base_lock);
+
+ list_for_each_entry(peer, &ab->peers, list) {
+ if (vdev_id == peer->vdev_id) {
+ spin_unlock_bh(&ab->base_lock);
+ return peer;
+ }
+ }
+ spin_unlock_bh(&ab->base_lock);
+ return NULL;
+}
+
+void ath11k_peer_unmap_event(struct ath11k_base *ab, u16 peer_id)
+{
+ struct ath11k_peer *peer;
+
+ spin_lock_bh(&ab->base_lock);
+
+ peer = ath11k_peer_find_list_by_id(ab, peer_id);
+ if (!peer) {
+ ath11k_warn(ab, "peer-unmap-event: unknown peer id %d\n",
+ peer_id);
+ goto exit;
+ }
+
+ ath11k_dbg(ab, ATH11K_DBG_DP_HTT, "peer unmap vdev %d peer %pM id %d\n",
+ peer->vdev_id, peer->addr, peer_id);
+
+ list_del(&peer->list);
+ kfree(peer);
+ wake_up(&ab->peer_mapping_wq);
+
+exit:
+ spin_unlock_bh(&ab->base_lock);
+}
+
+void ath11k_peer_map_event(struct ath11k_base *ab, u8 vdev_id, u16 peer_id,
+ u8 *mac_addr, u16 ast_hash, u16 hw_peer_id)
+{
+ struct ath11k_peer *peer;
+
+ spin_lock_bh(&ab->base_lock);
+ peer = ath11k_peer_find(ab, vdev_id, mac_addr);
+ if (!peer) {
+ peer = kzalloc(sizeof(*peer), GFP_ATOMIC);
+ if (!peer)
+ goto exit;
+
+ peer->vdev_id = vdev_id;
+ peer->peer_id = peer_id;
+ peer->ast_hash = ast_hash;
+ peer->hw_peer_id = hw_peer_id;
+ ether_addr_copy(peer->addr, mac_addr);
+ list_add(&peer->list, &ab->peers);
+ wake_up(&ab->peer_mapping_wq);
+ }
+
+ ath11k_dbg(ab, ATH11K_DBG_DP_HTT, "peer map vdev %d peer %pM id %d\n",
+ vdev_id, mac_addr, peer_id);
+
+exit:
+ spin_unlock_bh(&ab->base_lock);
+}
+
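+/* Block (for up to 3 seconds) until the firmware's peer map/unmap
+ * event moves the peer into or out of the peer list, matching the
+ * expected state.
+ */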
+static int ath11k_wait_for_peer_common(struct ath11k_base *ab, int vdev_id,
+ const u8 *addr, bool expect_mapped)
+{
+ int ret;
+
+ ret = wait_event_timeout(ab->peer_mapping_wq, ({
+ bool mapped;
+
+ spin_lock_bh(&ab->base_lock);
+ mapped = !!ath11k_peer_find(ab, vdev_id, addr);
+ spin_unlock_bh(&ab->base_lock);
+
+ (mapped == expect_mapped ||
+ test_bit(ATH11K_FLAG_CRASH_FLUSH, &ab->dev_flags));
+ }), 3 * HZ);
+
+ if (ret <= 0)
+ return -ETIMEDOUT;
+
+ return 0;
+}
+
+static inline int ath11k_peer_rhash_insert(struct ath11k_base *ab,
+ struct rhashtable *rtbl,
+ struct rhash_head *rhead,
+ struct rhashtable_params *params,
+ void *key)
+{
+ struct ath11k_peer *tmp;
+
+ lockdep_assert_held(&ab->tbl_mtx_lock);
+
+ tmp = rhashtable_lookup_get_insert_fast(rtbl, rhead, *params);
+
+ if (!tmp)
+ return 0;
+ else if (IS_ERR(tmp))
+ return PTR_ERR(tmp);
+ else
+ return -EEXIST;
+}
+
+static inline int ath11k_peer_rhash_remove(struct ath11k_base *ab,
+ struct rhashtable *rtbl,
+ struct rhash_head *rhead,
+ struct rhashtable_params *params)
+{
+ int ret;
+
+ lockdep_assert_held(&ab->tbl_mtx_lock);
+
+ ret = rhashtable_remove_fast(rtbl, rhead, *params);
+ if (ret && ret != -ENOENT)
+ return ret;
+
+ return 0;
+}
+
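+/* Peers are indexed by two rhashtables, one keyed by peer id and one
+ * by MAC address; if the address insert fails, roll back the id insert
+ * so the two tables stay consistent.
+ */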
+static int ath11k_peer_rhash_add(struct ath11k_base *ab, struct ath11k_peer *peer)
+{
+ int ret;
+
+ lockdep_assert_held(&ab->base_lock);
+ lockdep_assert_held(&ab->tbl_mtx_lock);
+
+ if (!ab->rhead_peer_id || !ab->rhead_peer_addr)
+ return -EPERM;
+
+ ret = ath11k_peer_rhash_insert(ab, ab->rhead_peer_id, &peer->rhash_id,
+ &ab->rhash_peer_id_param, &peer->peer_id);
+ if (ret) {
+ ath11k_warn(ab, "failed to add peer %pM with id %d in rhash_id ret %d\n",
+ peer->addr, peer->peer_id, ret);
+ return ret;
+ }
+
+ ret = ath11k_peer_rhash_insert(ab, ab->rhead_peer_addr, &peer->rhash_addr,
+ &ab->rhash_peer_addr_param, &peer->addr);
+ if (ret) {
+ ath11k_warn(ab, "failed to add peer %pM with id %d in rhash_addr ret %d\n",
+ peer->addr, peer->peer_id, ret);
+ goto err_clean;
+ }
+
+ return 0;
+
+err_clean:
+ ath11k_peer_rhash_remove(ab, ab->rhead_peer_id, &peer->rhash_id,
+ &ab->rhash_peer_id_param);
+ return ret;
+}
+
+void ath11k_peer_cleanup(struct ath11k *ar, u32 vdev_id)
+{
+ struct ath11k_peer *peer, *tmp;
+ struct ath11k_base *ab = ar->ab;
+
+ lockdep_assert_held(&ar->conf_mutex);
+
+ mutex_lock(&ab->tbl_mtx_lock);
+ spin_lock_bh(&ab->base_lock);
+ list_for_each_entry_safe(peer, tmp, &ab->peers, list) {
+ if (peer->vdev_id != vdev_id)
+ continue;
+
+ ath11k_warn(ab, "removing stale peer %pM from vdev_id %d\n",
+ peer->addr, vdev_id);
+
+ ath11k_peer_rhash_delete(ab, peer);
+ list_del(&peer->list);
+ kfree(peer);
+ ar->num_peers--;
+ }
+
+ spin_unlock_bh(&ab->base_lock);
+ mutex_unlock(&ab->tbl_mtx_lock);
+}
+
+static int ath11k_wait_for_peer_deleted(struct ath11k *ar, int vdev_id, const u8 *addr)
+{
+ return ath11k_wait_for_peer_common(ar->ab, vdev_id, addr, false);
+}
+
+int ath11k_wait_for_peer_delete_done(struct ath11k *ar, u32 vdev_id,
+ const u8 *addr)
+{
+ int ret;
+ unsigned long time_left;
+
+ ret = ath11k_wait_for_peer_deleted(ar, vdev_id, addr);
+ if (ret) {
+ ath11k_warn(ar->ab, "failed wait for peer deleted");
+ return ret;
+ }
+
+ time_left = wait_for_completion_timeout(&ar->peer_delete_done,
+ 3 * HZ);
+ if (time_left == 0) {
+ ath11k_warn(ar->ab, "Timeout in receiving peer delete response\n");
+ return -ETIMEDOUT;
+ }
+
+ return 0;
+}
+
+static int __ath11k_peer_delete(struct ath11k *ar, u32 vdev_id, const u8 *addr)
+{
+ int ret;
+ struct ath11k_peer *peer;
+ struct ath11k_base *ab = ar->ab;
+
+ lockdep_assert_held(&ar->conf_mutex);
+
+ mutex_lock(&ab->tbl_mtx_lock);
+ spin_lock_bh(&ab->base_lock);
+
+ peer = ath11k_peer_find_by_addr(ab, addr);
+	/* Check if the found peer is the one we want to remove.
+	 * While the sta is transitioning to another band we may
+	 * have two peers with the same address assigned to
+	 * different vdev_ids. Make sure we are deleting the
+	 * correct peer.
+	 */
+ if (peer && peer->vdev_id == vdev_id)
+ ath11k_peer_rhash_delete(ab, peer);
+
+	/* Fall back to a peer list search if the correct peer can't be
+	 * found. Skip deleting the peer from the rhash since it has
+	 * already been removed from it during peer create.
+	 */
+ if (!peer)
+ peer = ath11k_peer_find(ab, vdev_id, addr);
+
+ if (!peer) {
+ spin_unlock_bh(&ab->base_lock);
+ mutex_unlock(&ab->tbl_mtx_lock);
+
+ ath11k_warn(ab,
+ "failed to find peer vdev_id %d addr %pM in delete\n",
+ vdev_id, addr);
+ return -EINVAL;
+ }
+
+ spin_unlock_bh(&ab->base_lock);
+ mutex_unlock(&ab->tbl_mtx_lock);
+
+ reinit_completion(&ar->peer_delete_done);
+
+ ret = ath11k_wmi_send_peer_delete_cmd(ar, addr, vdev_id);
+ if (ret) {
+ ath11k_warn(ab,
+ "failed to delete peer vdev_id %d addr %pM ret %d\n",
+ vdev_id, addr, ret);
+ return ret;
+ }
+
+ ret = ath11k_wait_for_peer_delete_done(ar, vdev_id, addr);
+ if (ret)
+ return ret;
+
+ return 0;
+}
+
+int ath11k_peer_delete(struct ath11k *ar, u32 vdev_id, u8 *addr)
+{
+ int ret;
+
+ lockdep_assert_held(&ar->conf_mutex);
+
+ ret = __ath11k_peer_delete(ar, vdev_id, addr);
+ if (ret)
+ return ret;
+
+ ar->num_peers--;
+
+ return 0;
+}
+
+static int ath11k_wait_for_peer_created(struct ath11k *ar, int vdev_id, const u8 *addr)
+{
+ return ath11k_wait_for_peer_common(ar->ab, vdev_id, addr, true);
+}
+
+int ath11k_peer_create(struct ath11k *ar, struct ath11k_vif *arvif,
+ struct ieee80211_sta *sta, struct peer_create_params *param)
+{
+ struct ath11k_peer *peer;
+ struct ath11k_sta *arsta;
+ int ret, fbret;
+
+ lockdep_assert_held(&ar->conf_mutex);
+
+ if (ar->num_peers > (ar->max_num_peers - 1)) {
+ ath11k_warn(ar->ab,
+ "failed to create peer due to insufficient peer entry resource in firmware\n");
+ return -ENOBUFS;
+ }
+
+ mutex_lock(&ar->ab->tbl_mtx_lock);
+ spin_lock_bh(&ar->ab->base_lock);
+ peer = ath11k_peer_find_by_addr(ar->ab, param->peer_addr);
+ if (peer) {
+ if (peer->vdev_id == param->vdev_id) {
+ spin_unlock_bh(&ar->ab->base_lock);
+ mutex_unlock(&ar->ab->tbl_mtx_lock);
+ return -EINVAL;
+ }
+
+ /* Assume the sta is transitioning to another band and
+ * remove the peer from the rhash here.
+ */
+ ath11k_peer_rhash_delete(ar->ab, peer);
+ }
+ spin_unlock_bh(&ar->ab->base_lock);
+ mutex_unlock(&ar->ab->tbl_mtx_lock);
+
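+ /* Ask firmware to create the peer, wait for the corresponding peer
+ * map event, and only then register the peer in the local lookup
+ * tables.
+ */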
+ ret = ath11k_wmi_send_peer_create_cmd(ar, param);
+ if (ret) {
+ ath11k_warn(ar->ab,
+ "failed to send peer create vdev_id %d ret %d\n",
+ param->vdev_id, ret);
+ return ret;
+ }
+
+ ret = ath11k_wait_for_peer_created(ar, param->vdev_id,
+ param->peer_addr);
+ if (ret)
+ return ret;
+
+ mutex_lock(&ar->ab->tbl_mtx_lock);
+ spin_lock_bh(&ar->ab->base_lock);
+
+ peer = ath11k_peer_find(ar->ab, param->vdev_id, param->peer_addr);
+ if (!peer) {
+ spin_unlock_bh(&ar->ab->base_lock);
+ mutex_unlock(&ar->ab->tbl_mtx_lock);
+ ath11k_warn(ar->ab, "failed to find peer %pM on vdev %i after creation\n",
+ param->peer_addr, param->vdev_id);
+
+ ret = -ENOENT;
+ goto cleanup;
+ }
+
+ ret = ath11k_peer_rhash_add(ar->ab, peer);
+ if (ret) {
+ spin_unlock_bh(&ar->ab->base_lock);
+ mutex_unlock(&ar->ab->tbl_mtx_lock);
+ goto cleanup;
+ }
+
+ peer->pdev_idx = ar->pdev_idx;
+ peer->sta = sta;
+
+ if (arvif->vif->type == NL80211_IFTYPE_STATION) {
+ arvif->ast_hash = peer->ast_hash;
+ arvif->ast_idx = peer->hw_peer_id;
+ }
+
+ peer->sec_type = HAL_ENCRYPT_TYPE_OPEN;
+ peer->sec_type_grp = HAL_ENCRYPT_TYPE_OPEN;
+
+ if (sta) {
+ arsta = (struct ath11k_sta *)sta->drv_priv;
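+
+ /* Pre-compute the TCL metadata used on the TX path; it
+ * carries this peer's id for classification.
+ */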
+ arsta->tcl_metadata |= FIELD_PREP(HTT_TCL_META_DATA_TYPE, 0) |
+ FIELD_PREP(HTT_TCL_META_DATA_PEER_ID,
+ peer->peer_id);
+
+ /* set HTT extension valid bit to 0 by default */
+ arsta->tcl_metadata &= ~HTT_TCL_META_DATA_VALID_HTT;
+ }
+
+ ar->num_peers++;
+
+ spin_unlock_bh(&ar->ab->base_lock);
+ mutex_unlock(&ar->ab->tbl_mtx_lock);
+
+ return 0;
+
+cleanup:
+ fbret = __ath11k_peer_delete(ar, param->vdev_id, param->peer_addr);
+ if (fbret)
+ ath11k_warn(ar->ab, "failed peer %pM delete vdev_id %d fallback ret %d\n",
+ param->peer_addr, param->vdev_id, fbret);
+
+ return ret;
+}
+
+int ath11k_peer_rhash_delete(struct ath11k_base *ab, struct ath11k_peer *peer)
+{
+ int ret;
+
+ lockdep_assert_held(&ab->base_lock);
+ lockdep_assert_held(&ab->tbl_mtx_lock);
+
+ if (!ab->rhead_peer_id || !ab->rhead_peer_addr)
+ return -EPERM;
+
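+ /* Remove the peer from both tables, in the reverse order of
+ * insertion.
+ */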
+ ret = ath11k_peer_rhash_remove(ab, ab->rhead_peer_addr, &peer->rhash_addr,
+ &ab->rhash_peer_addr_param);
+ if (ret) {
+ ath11k_warn(ab, "failed to remove peer %pM id %d in rhash_addr ret %d\n",
+ peer->addr, peer->peer_id, ret);
+ return ret;
+ }
+
+ ret = ath11k_peer_rhash_remove(ab, ab->rhead_peer_id, &peer->rhash_id,
+ &ab->rhash_peer_id_param);
+ if (ret) {
+ ath11k_warn(ab, "failed to remove peer %pM id %d in rhash_id ret %d\n",
+ peer->addr, peer->peer_id, ret);
+ return ret;
+ }
+
+ return 0;
+}
+
+static int ath11k_peer_rhash_id_tbl_init(struct ath11k_base *ab)
+{
+ struct rhashtable_params *param;
+ struct rhashtable *rhash_id_tbl;
+ int ret;
+ size_t size;
+
+ lockdep_assert_held(&ab->tbl_mtx_lock);
+
+ if (ab->rhead_peer_id)
+ return 0;
+
+ size = sizeof(*ab->rhead_peer_id);
+ rhash_id_tbl = kzalloc(size, GFP_KERNEL);
+ if (!rhash_id_tbl) {
+ ath11k_warn(ab, "failed to init rhash id table due to no mem (size %zu)\n",
+ size);
+ return -ENOMEM;
+ }
+
+ param = &ab->rhash_peer_id_param;
+
+ param->key_offset = offsetof(struct ath11k_peer, peer_id);
+ param->head_offset = offsetof(struct ath11k_peer, rhash_id);
+ param->key_len = sizeof_field(struct ath11k_peer, peer_id);
+ param->automatic_shrinking = true;
+ param->nelem_hint = ab->num_radios * TARGET_NUM_PEERS_PDEV(ab);
+
+ ret = rhashtable_init(rhash_id_tbl, param);
+ if (ret) {
+ ath11k_warn(ab, "failed to init peer id rhash table %d\n", ret);
+ goto err_free;
+ }
+
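+ /* Publish the new table under base_lock; if one has already been
+ * set, destroy the local copy instead.
+ */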
+ spin_lock_bh(&ab->base_lock);
+
+ if (!ab->rhead_peer_id) {
+ ab->rhead_peer_id = rhash_id_tbl;
+ } else {
+ spin_unlock_bh(&ab->base_lock);
+ goto cleanup_tbl;
+ }
+
+ spin_unlock_bh(&ab->base_lock);
+
+ return 0;
+
+cleanup_tbl:
+ rhashtable_destroy(rhash_id_tbl);
+err_free:
+ kfree(rhash_id_tbl);
+
+ return ret;
+}
+
+static int ath11k_peer_rhash_addr_tbl_init(struct ath11k_base *ab)
+{
+ struct rhashtable_params *param;
+ struct rhashtable *rhash_addr_tbl;
+ int ret;
+ size_t size;
+
+ lockdep_assert_held(&ab->tbl_mtx_lock);
+
+ if (ab->rhead_peer_addr)
+ return 0;
+
+ size = sizeof(*ab->rhead_peer_addr);
+ rhash_addr_tbl = kzalloc(size, GFP_KERNEL);
+ if (!rhash_addr_tbl) {
+ ath11k_warn(ab, "failed to init rhash addr table due to no mem (size %zu)\n",
+ size);
+ return -ENOMEM;
+ }
+
+ param = &ab->rhash_peer_addr_param;
+
+ param->key_offset = offsetof(struct ath11k_peer, addr);
+ param->head_offset = offsetof(struct ath11k_peer, rhash_addr);
+ param->key_len = sizeof_field(struct ath11k_peer, addr);
+ param->automatic_shrinking = true;
+ param->nelem_hint = ab->num_radios * TARGET_NUM_PEERS_PDEV(ab);
+
+ ret = rhashtable_init(rhash_addr_tbl, param);
+ if (ret) {
+ ath11k_warn(ab, "failed to init peer addr rhash table %d\n", ret);
+ goto err_free;
+ }
+
+ spin_lock_bh(&ab->base_lock);
+
+ if (!ab->rhead_peer_addr) {
+ ab->rhead_peer_addr = rhash_addr_tbl;
+ } else {
+ spin_unlock_bh(&ab->base_lock);
+ goto cleanup_tbl;
+ }
+
+ spin_unlock_bh(&ab->base_lock);
+
+ return 0;
+
+cleanup_tbl:
+ rhashtable_destroy(rhash_addr_tbl);
+err_free:
+ kfree(rhash_addr_tbl);
+
+ return ret;
+}
+
+static inline void ath11k_peer_rhash_id_tbl_destroy(struct ath11k_base *ab)
+{
+ lockdep_assert_held(&ab->tbl_mtx_lock);
+
+ if (!ab->rhead_peer_id)
+ return;
+
+ rhashtable_destroy(ab->rhead_peer_id);
+ kfree(ab->rhead_peer_id);
+ ab->rhead_peer_id = NULL;
+}
+
+static inline void ath11k_peer_rhash_addr_tbl_destroy(struct ath11k_base *ab)
+{
+ lockdep_assert_held(&ab->tbl_mtx_lock);
+
+ if (!ab->rhead_peer_addr)
+ return;
+
+ rhashtable_destroy(ab->rhead_peer_addr);
+ kfree(ab->rhead_peer_addr);
+ ab->rhead_peer_addr = NULL;
+}
+
+int ath11k_peer_rhash_tbl_init(struct ath11k_base *ab)
+{
+ int ret;
+
+ mutex_lock(&ab->tbl_mtx_lock);
+
+ ret = ath11k_peer_rhash_id_tbl_init(ab);
+ if (ret)
+ goto out;
+
+ ret = ath11k_peer_rhash_addr_tbl_init(ab);
+ if (ret)
+ goto cleanup_tbl;
+
+ mutex_unlock(&ab->tbl_mtx_lock);
+
+ return 0;
+
+cleanup_tbl:
+ ath11k_peer_rhash_id_tbl_destroy(ab);
+out:
+ mutex_unlock(&ab->tbl_mtx_lock);
+ return ret;
+}
+
+void ath11k_peer_rhash_tbl_destroy(struct ath11k_base *ab)
+{
+ mutex_lock(&ab->tbl_mtx_lock);
+
+ ath11k_peer_rhash_addr_tbl_destroy(ab);
+ ath11k_peer_rhash_id_tbl_destroy(ab);
+
+ mutex_unlock(&ab->tbl_mtx_lock);
+}
diff --git a/drivers/net/wireless/ath/ath11k/peer.h b/drivers/net/wireless/ath/ath11k/peer.h
new file mode 100644
index 0000000000..9bd385d0a3
--- /dev/null
+++ b/drivers/net/wireless/ath/ath11k/peer.h
@@ -0,0 +1,60 @@
+/* SPDX-License-Identifier: BSD-3-Clause-Clear */
+/*
+ * Copyright (c) 2018-2019 The Linux Foundation. All rights reserved.
+ * Copyright (c) 2021-2022 Qualcomm Innovation Center, Inc. All rights reserved.
+ */
+
+#ifndef ATH11K_PEER_H
+#define ATH11K_PEER_H
+
+struct ath11k_peer {
+ struct list_head list;
+ struct ieee80211_sta *sta;
+ int vdev_id;
+ u8 addr[ETH_ALEN];
+ int peer_id;
+ u16 ast_hash;
+ u8 pdev_idx;
+ u16 hw_peer_id;
+
+ /* protected by ab->data_lock */
+ struct ieee80211_key_conf *keys[WMI_MAX_KEY_INDEX + 1];
+ struct dp_rx_tid rx_tid[IEEE80211_NUM_TIDS + 1];
+
+ /* peer id based rhashtable list pointer */
+ struct rhash_head rhash_id;
+ /* peer addr based rhashtable list pointer */
+ struct rhash_head rhash_addr;
+
+ /* Info used in MMIC verification of
+ * RX fragments
+ */
+ struct crypto_shash *tfm_mmic;
+ u8 mcast_keyidx;
+ u8 ucast_keyidx;
+ u16 sec_type;
+ u16 sec_type_grp;
+ bool is_authorized;
+ bool dp_setup_done;
+};
+
+void ath11k_peer_unmap_event(struct ath11k_base *ab, u16 peer_id);
+void ath11k_peer_map_event(struct ath11k_base *ab, u8 vdev_id, u16 peer_id,
+ u8 *mac_addr, u16 ast_hash, u16 hw_peer_id);
+struct ath11k_peer *ath11k_peer_find(struct ath11k_base *ab, int vdev_id,
+ const u8 *addr);
+struct ath11k_peer *ath11k_peer_find_by_addr(struct ath11k_base *ab,
+ const u8 *addr);
+struct ath11k_peer *ath11k_peer_find_by_id(struct ath11k_base *ab, int peer_id);
+void ath11k_peer_cleanup(struct ath11k *ar, u32 vdev_id);
+int ath11k_peer_delete(struct ath11k *ar, u32 vdev_id, u8 *addr);
+int ath11k_peer_create(struct ath11k *ar, struct ath11k_vif *arvif,
+ struct ieee80211_sta *sta, struct peer_create_params *param);
+int ath11k_wait_for_peer_delete_done(struct ath11k *ar, u32 vdev_id,
+ const u8 *addr);
+struct ath11k_peer *ath11k_peer_find_by_vdev_id(struct ath11k_base *ab,
+ int vdev_id);
+int ath11k_peer_rhash_tbl_init(struct ath11k_base *ab);
+void ath11k_peer_rhash_tbl_destroy(struct ath11k_base *ab);
+int ath11k_peer_rhash_delete(struct ath11k_base *ab, struct ath11k_peer *peer);
+#endif /* ATH11K_PEER_H */
diff --git a/drivers/net/wireless/ath/ath11k/qmi.c b/drivers/net/wireless/ath/ath11k/qmi.c
new file mode 100644
index 0000000000..41fad03a30
--- /dev/null
+++ b/drivers/net/wireless/ath/ath11k/qmi.c
@@ -0,0 +1,3335 @@
+// SPDX-License-Identifier: BSD-3-Clause-Clear
+/*
+ * Copyright (c) 2018-2019 The Linux Foundation. All rights reserved.
+ * Copyright (c) 2022-2023 Qualcomm Innovation Center, Inc. All rights reserved.
+ */
+
+#include <linux/elf.h>
+
+#include "qmi.h"
+#include "core.h"
+#include "debug.h"
+#include "hif.h"
+#include <linux/of.h>
+#include <linux/of_address.h>
+#include <linux/ioport.h>
+#include <linux/firmware.h>
+#include <linux/of_irq.h>
+
+#define SLEEP_CLOCK_SELECT_INTERNAL_BIT 0x02
+#define HOST_CSTATE_BIT 0x04
+#define PLATFORM_CAP_PCIE_GLOBAL_RESET 0x08
+#define PLATFORM_CAP_PCIE_PME_D3COLD 0x10
+
+#define FW_BUILD_ID_MASK "QC_IMAGE_VERSION_STRING="
+
+bool ath11k_cold_boot_cal = true;
+EXPORT_SYMBOL(ath11k_cold_boot_cal);
+module_param_named(cold_boot_cal, ath11k_cold_boot_cal, bool, 0644);
+MODULE_PARM_DESC(cold_boot_cal,
+ "Decrease the channel switch time but increase the driver load time (Default: true)");
+
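+/* QMI messages are encoded as TLVs. Each qmi_elem_info entry below maps one
+ * TLV (tlv_type) onto a field of the host-side message structure (offset);
+ * optional TLVs are preceded by a QMI_OPT_FLAG entry for their *_valid flag,
+ * and every table is terminated by a QMI_EOTI entry.
+ */
+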
+static const struct qmi_elem_info qmi_wlanfw_host_cap_req_msg_v01_ei[] = {
+ {
+ .data_type = QMI_OPT_FLAG,
+ .elem_len = 1,
+ .elem_size = sizeof(u8),
+ .array_type = NO_ARRAY,
+ .tlv_type = 0x10,
+ .offset = offsetof(struct qmi_wlanfw_host_cap_req_msg_v01,
+ num_clients_valid),
+ },
+ {
+ .data_type = QMI_UNSIGNED_4_BYTE,
+ .elem_len = 1,
+ .elem_size = sizeof(u32),
+ .array_type = NO_ARRAY,
+ .tlv_type = 0x10,
+ .offset = offsetof(struct qmi_wlanfw_host_cap_req_msg_v01,
+ num_clients),
+ },
+ {
+ .data_type = QMI_OPT_FLAG,
+ .elem_len = 1,
+ .elem_size = sizeof(u8),
+ .array_type = NO_ARRAY,
+ .tlv_type = 0x11,
+ .offset = offsetof(struct qmi_wlanfw_host_cap_req_msg_v01,
+ wake_msi_valid),
+ },
+ {
+ .data_type = QMI_UNSIGNED_4_BYTE,
+ .elem_len = 1,
+ .elem_size = sizeof(u32),
+ .array_type = NO_ARRAY,
+ .tlv_type = 0x11,
+ .offset = offsetof(struct qmi_wlanfw_host_cap_req_msg_v01,
+ wake_msi),
+ },
+ {
+ .data_type = QMI_OPT_FLAG,
+ .elem_len = 1,
+ .elem_size = sizeof(u8),
+ .array_type = NO_ARRAY,
+ .tlv_type = 0x12,
+ .offset = offsetof(struct qmi_wlanfw_host_cap_req_msg_v01,
+ gpios_valid),
+ },
+ {
+ .data_type = QMI_DATA_LEN,
+ .elem_len = 1,
+ .elem_size = sizeof(u8),
+ .array_type = NO_ARRAY,
+ .tlv_type = 0x12,
+ .offset = offsetof(struct qmi_wlanfw_host_cap_req_msg_v01,
+ gpios_len),
+ },
+ {
+ .data_type = QMI_UNSIGNED_4_BYTE,
+ .elem_len = QMI_WLFW_MAX_NUM_GPIO_V01,
+ .elem_size = sizeof(u32),
+ .array_type = VAR_LEN_ARRAY,
+ .tlv_type = 0x12,
+ .offset = offsetof(struct qmi_wlanfw_host_cap_req_msg_v01,
+ gpios),
+ },
+ {
+ .data_type = QMI_OPT_FLAG,
+ .elem_len = 1,
+ .elem_size = sizeof(u8),
+ .array_type = NO_ARRAY,
+ .tlv_type = 0x13,
+ .offset = offsetof(struct qmi_wlanfw_host_cap_req_msg_v01,
+ nm_modem_valid),
+ },
+ {
+ .data_type = QMI_UNSIGNED_1_BYTE,
+ .elem_len = 1,
+ .elem_size = sizeof(u8),
+ .array_type = NO_ARRAY,
+ .tlv_type = 0x13,
+ .offset = offsetof(struct qmi_wlanfw_host_cap_req_msg_v01,
+ nm_modem),
+ },
+ {
+ .data_type = QMI_OPT_FLAG,
+ .elem_len = 1,
+ .elem_size = sizeof(u8),
+ .array_type = NO_ARRAY,
+ .tlv_type = 0x14,
+ .offset = offsetof(struct qmi_wlanfw_host_cap_req_msg_v01,
+ bdf_support_valid),
+ },
+ {
+ .data_type = QMI_UNSIGNED_1_BYTE,
+ .elem_len = 1,
+ .elem_size = sizeof(u8),
+ .array_type = NO_ARRAY,
+ .tlv_type = 0x14,
+ .offset = offsetof(struct qmi_wlanfw_host_cap_req_msg_v01,
+ bdf_support),
+ },
+ {
+ .data_type = QMI_OPT_FLAG,
+ .elem_len = 1,
+ .elem_size = sizeof(u8),
+ .array_type = NO_ARRAY,
+ .tlv_type = 0x15,
+ .offset = offsetof(struct qmi_wlanfw_host_cap_req_msg_v01,
+ bdf_cache_support_valid),
+ },
+ {
+ .data_type = QMI_UNSIGNED_1_BYTE,
+ .elem_len = 1,
+ .elem_size = sizeof(u8),
+ .array_type = NO_ARRAY,
+ .tlv_type = 0x15,
+ .offset = offsetof(struct qmi_wlanfw_host_cap_req_msg_v01,
+ bdf_cache_support),
+ },
+ {
+ .data_type = QMI_OPT_FLAG,
+ .elem_len = 1,
+ .elem_size = sizeof(u8),
+ .array_type = NO_ARRAY,
+ .tlv_type = 0x16,
+ .offset = offsetof(struct qmi_wlanfw_host_cap_req_msg_v01,
+ m3_support_valid),
+ },
+ {
+ .data_type = QMI_UNSIGNED_1_BYTE,
+ .elem_len = 1,
+ .elem_size = sizeof(u8),
+ .array_type = NO_ARRAY,
+ .tlv_type = 0x16,
+ .offset = offsetof(struct qmi_wlanfw_host_cap_req_msg_v01,
+ m3_support),
+ },
+ {
+ .data_type = QMI_OPT_FLAG,
+ .elem_len = 1,
+ .elem_size = sizeof(u8),
+ .array_type = NO_ARRAY,
+ .tlv_type = 0x17,
+ .offset = offsetof(struct qmi_wlanfw_host_cap_req_msg_v01,
+ m3_cache_support_valid),
+ },
+ {
+ .data_type = QMI_UNSIGNED_1_BYTE,
+ .elem_len = 1,
+ .elem_size = sizeof(u8),
+ .array_type = NO_ARRAY,
+ .tlv_type = 0x17,
+ .offset = offsetof(struct qmi_wlanfw_host_cap_req_msg_v01,
+ m3_cache_support),
+ },
+ {
+ .data_type = QMI_OPT_FLAG,
+ .elem_len = 1,
+ .elem_size = sizeof(u8),
+ .array_type = NO_ARRAY,
+ .tlv_type = 0x18,
+ .offset = offsetof(struct qmi_wlanfw_host_cap_req_msg_v01,
+ cal_filesys_support_valid),
+ },
+ {
+ .data_type = QMI_UNSIGNED_1_BYTE,
+ .elem_len = 1,
+ .elem_size = sizeof(u8),
+ .array_type = NO_ARRAY,
+ .tlv_type = 0x18,
+ .offset = offsetof(struct qmi_wlanfw_host_cap_req_msg_v01,
+ cal_filesys_support),
+ },
+ {
+ .data_type = QMI_OPT_FLAG,
+ .elem_len = 1,
+ .elem_size = sizeof(u8),
+ .array_type = NO_ARRAY,
+ .tlv_type = 0x19,
+ .offset = offsetof(struct qmi_wlanfw_host_cap_req_msg_v01,
+ cal_cache_support_valid),
+ },
+ {
+ .data_type = QMI_UNSIGNED_1_BYTE,
+ .elem_len = 1,
+ .elem_size = sizeof(u8),
+ .array_type = NO_ARRAY,
+ .tlv_type = 0x19,
+ .offset = offsetof(struct qmi_wlanfw_host_cap_req_msg_v01,
+ cal_cache_support),
+ },
+ {
+ .data_type = QMI_OPT_FLAG,
+ .elem_len = 1,
+ .elem_size = sizeof(u8),
+ .array_type = NO_ARRAY,
+ .tlv_type = 0x1A,
+ .offset = offsetof(struct qmi_wlanfw_host_cap_req_msg_v01,
+ cal_done_valid),
+ },
+ {
+ .data_type = QMI_UNSIGNED_1_BYTE,
+ .elem_len = 1,
+ .elem_size = sizeof(u8),
+ .array_type = NO_ARRAY,
+ .tlv_type = 0x1A,
+ .offset = offsetof(struct qmi_wlanfw_host_cap_req_msg_v01,
+ cal_done),
+ },
+ {
+ .data_type = QMI_OPT_FLAG,
+ .elem_len = 1,
+ .elem_size = sizeof(u8),
+ .array_type = NO_ARRAY,
+ .tlv_type = 0x1B,
+ .offset = offsetof(struct qmi_wlanfw_host_cap_req_msg_v01,
+ mem_bucket_valid),
+ },
+ {
+ .data_type = QMI_UNSIGNED_4_BYTE,
+ .elem_len = 1,
+ .elem_size = sizeof(u32),
+ .array_type = NO_ARRAY,
+ .tlv_type = 0x1B,
+ .offset = offsetof(struct qmi_wlanfw_host_cap_req_msg_v01,
+ mem_bucket),
+ },
+ {
+ .data_type = QMI_OPT_FLAG,
+ .elem_len = 1,
+ .elem_size = sizeof(u8),
+ .array_type = NO_ARRAY,
+ .tlv_type = 0x1C,
+ .offset = offsetof(struct qmi_wlanfw_host_cap_req_msg_v01,
+ mem_cfg_mode_valid),
+ },
+ {
+ .data_type = QMI_UNSIGNED_1_BYTE,
+ .elem_len = 1,
+ .elem_size = sizeof(u8),
+ .array_type = NO_ARRAY,
+ .tlv_type = 0x1C,
+ .offset = offsetof(struct qmi_wlanfw_host_cap_req_msg_v01,
+ mem_cfg_mode),
+ },
+ {
+ .data_type = QMI_EOTI,
+ .array_type = NO_ARRAY,
+ .tlv_type = QMI_COMMON_TLV_TYPE,
+ },
+};
+
+static const struct qmi_elem_info qmi_wlanfw_host_cap_resp_msg_v01_ei[] = {
+ {
+ .data_type = QMI_STRUCT,
+ .elem_len = 1,
+ .elem_size = sizeof(struct qmi_response_type_v01),
+ .array_type = NO_ARRAY,
+ .tlv_type = 0x02,
+ .offset = offsetof(struct qmi_wlanfw_host_cap_resp_msg_v01, resp),
+ .ei_array = qmi_response_type_v01_ei,
+ },
+ {
+ .data_type = QMI_EOTI,
+ .array_type = NO_ARRAY,
+ .tlv_type = QMI_COMMON_TLV_TYPE,
+ },
+};
+
+static const struct qmi_elem_info qmi_wlanfw_ind_register_req_msg_v01_ei[] = {
+ {
+ .data_type = QMI_OPT_FLAG,
+ .elem_len = 1,
+ .elem_size = sizeof(u8),
+ .array_type = NO_ARRAY,
+ .tlv_type = 0x10,
+ .offset = offsetof(struct qmi_wlanfw_ind_register_req_msg_v01,
+ fw_ready_enable_valid),
+ },
+ {
+ .data_type = QMI_UNSIGNED_1_BYTE,
+ .elem_len = 1,
+ .elem_size = sizeof(u8),
+ .array_type = NO_ARRAY,
+ .tlv_type = 0x10,
+ .offset = offsetof(struct qmi_wlanfw_ind_register_req_msg_v01,
+ fw_ready_enable),
+ },
+ {
+ .data_type = QMI_OPT_FLAG,
+ .elem_len = 1,
+ .elem_size = sizeof(u8),
+ .array_type = NO_ARRAY,
+ .tlv_type = 0x11,
+ .offset = offsetof(struct qmi_wlanfw_ind_register_req_msg_v01,
+ initiate_cal_download_enable_valid),
+ },
+ {
+ .data_type = QMI_UNSIGNED_1_BYTE,
+ .elem_len = 1,
+ .elem_size = sizeof(u8),
+ .array_type = NO_ARRAY,
+ .tlv_type = 0x11,
+ .offset = offsetof(struct qmi_wlanfw_ind_register_req_msg_v01,
+ initiate_cal_download_enable),
+ },
+ {
+ .data_type = QMI_OPT_FLAG,
+ .elem_len = 1,
+ .elem_size = sizeof(u8),
+ .array_type = NO_ARRAY,
+ .tlv_type = 0x12,
+ .offset = offsetof(struct qmi_wlanfw_ind_register_req_msg_v01,
+ initiate_cal_update_enable_valid),
+ },
+ {
+ .data_type = QMI_UNSIGNED_1_BYTE,
+ .elem_len = 1,
+ .elem_size = sizeof(u8),
+ .array_type = NO_ARRAY,
+ .tlv_type = 0x12,
+ .offset = offsetof(struct qmi_wlanfw_ind_register_req_msg_v01,
+ initiate_cal_update_enable),
+ },
+ {
+ .data_type = QMI_OPT_FLAG,
+ .elem_len = 1,
+ .elem_size = sizeof(u8),
+ .array_type = NO_ARRAY,
+ .tlv_type = 0x13,
+ .offset = offsetof(struct qmi_wlanfw_ind_register_req_msg_v01,
+ msa_ready_enable_valid),
+ },
+ {
+ .data_type = QMI_UNSIGNED_1_BYTE,
+ .elem_len = 1,
+ .elem_size = sizeof(u8),
+ .array_type = NO_ARRAY,
+ .tlv_type = 0x13,
+ .offset = offsetof(struct qmi_wlanfw_ind_register_req_msg_v01,
+ msa_ready_enable),
+ },
+ {
+ .data_type = QMI_OPT_FLAG,
+ .elem_len = 1,
+ .elem_size = sizeof(u8),
+ .array_type = NO_ARRAY,
+ .tlv_type = 0x14,
+ .offset = offsetof(struct qmi_wlanfw_ind_register_req_msg_v01,
+ pin_connect_result_enable_valid),
+ },
+ {
+ .data_type = QMI_UNSIGNED_1_BYTE,
+ .elem_len = 1,
+ .elem_size = sizeof(u8),
+ .array_type = NO_ARRAY,
+ .tlv_type = 0x14,
+ .offset = offsetof(struct qmi_wlanfw_ind_register_req_msg_v01,
+ pin_connect_result_enable),
+ },
+ {
+ .data_type = QMI_OPT_FLAG,
+ .elem_len = 1,
+ .elem_size = sizeof(u8),
+ .array_type = NO_ARRAY,
+ .tlv_type = 0x15,
+ .offset = offsetof(struct qmi_wlanfw_ind_register_req_msg_v01,
+ client_id_valid),
+ },
+ {
+ .data_type = QMI_UNSIGNED_4_BYTE,
+ .elem_len = 1,
+ .elem_size = sizeof(u32),
+ .array_type = NO_ARRAY,
+ .tlv_type = 0x15,
+ .offset = offsetof(struct qmi_wlanfw_ind_register_req_msg_v01,
+ client_id),
+ },
+ {
+ .data_type = QMI_OPT_FLAG,
+ .elem_len = 1,
+ .elem_size = sizeof(u8),
+ .array_type = NO_ARRAY,
+ .tlv_type = 0x16,
+ .offset = offsetof(struct qmi_wlanfw_ind_register_req_msg_v01,
+ request_mem_enable_valid),
+ },
+ {
+ .data_type = QMI_UNSIGNED_1_BYTE,
+ .elem_len = 1,
+ .elem_size = sizeof(u8),
+ .array_type = NO_ARRAY,
+ .tlv_type = 0x16,
+ .offset = offsetof(struct qmi_wlanfw_ind_register_req_msg_v01,
+ request_mem_enable),
+ },
+ {
+ .data_type = QMI_OPT_FLAG,
+ .elem_len = 1,
+ .elem_size = sizeof(u8),
+ .array_type = NO_ARRAY,
+ .tlv_type = 0x17,
+ .offset = offsetof(struct qmi_wlanfw_ind_register_req_msg_v01,
+ fw_mem_ready_enable_valid),
+ },
+ {
+ .data_type = QMI_UNSIGNED_1_BYTE,
+ .elem_len = 1,
+ .elem_size = sizeof(u8),
+ .array_type = NO_ARRAY,
+ .tlv_type = 0x17,
+ .offset = offsetof(struct qmi_wlanfw_ind_register_req_msg_v01,
+ fw_mem_ready_enable),
+ },
+ {
+ .data_type = QMI_OPT_FLAG,
+ .elem_len = 1,
+ .elem_size = sizeof(u8),
+ .array_type = NO_ARRAY,
+ .tlv_type = 0x18,
+ .offset = offsetof(struct qmi_wlanfw_ind_register_req_msg_v01,
+ fw_init_done_enable_valid),
+ },
+ {
+ .data_type = QMI_UNSIGNED_1_BYTE,
+ .elem_len = 1,
+ .elem_size = sizeof(u8),
+ .array_type = NO_ARRAY,
+ .tlv_type = 0x18,
+ .offset = offsetof(struct qmi_wlanfw_ind_register_req_msg_v01,
+ fw_init_done_enable),
+ },
+ {
+ .data_type = QMI_OPT_FLAG,
+ .elem_len = 1,
+ .elem_size = sizeof(u8),
+ .array_type = NO_ARRAY,
+ .tlv_type = 0x19,
+ .offset = offsetof(struct qmi_wlanfw_ind_register_req_msg_v01,
+ rejuvenate_enable_valid),
+ },
+ {
+ .data_type = QMI_UNSIGNED_1_BYTE,
+ .elem_len = 1,
+ .elem_size = sizeof(u8),
+ .array_type = NO_ARRAY,
+ .tlv_type = 0x19,
+ .offset = offsetof(struct qmi_wlanfw_ind_register_req_msg_v01,
+ rejuvenate_enable),
+ },
+ {
+ .data_type = QMI_OPT_FLAG,
+ .elem_len = 1,
+ .elem_size = sizeof(u8),
+ .array_type = NO_ARRAY,
+ .tlv_type = 0x1A,
+ .offset = offsetof(struct qmi_wlanfw_ind_register_req_msg_v01,
+ xo_cal_enable_valid),
+ },
+ {
+ .data_type = QMI_UNSIGNED_1_BYTE,
+ .elem_len = 1,
+ .elem_size = sizeof(u8),
+ .array_type = NO_ARRAY,
+ .tlv_type = 0x1A,
+ .offset = offsetof(struct qmi_wlanfw_ind_register_req_msg_v01,
+ xo_cal_enable),
+ },
+ {
+ .data_type = QMI_OPT_FLAG,
+ .elem_len = 1,
+ .elem_size = sizeof(u8),
+ .array_type = NO_ARRAY,
+ .tlv_type = 0x1B,
+ .offset = offsetof(struct qmi_wlanfw_ind_register_req_msg_v01,
+ cal_done_enable_valid),
+ },
+ {
+ .data_type = QMI_UNSIGNED_1_BYTE,
+ .elem_len = 1,
+ .elem_size = sizeof(u8),
+ .array_type = NO_ARRAY,
+ .tlv_type = 0x1B,
+ .offset = offsetof(struct qmi_wlanfw_ind_register_req_msg_v01,
+ cal_done_enable),
+ },
+ {
+ .data_type = QMI_EOTI,
+ .array_type = NO_ARRAY,
+ .tlv_type = QMI_COMMON_TLV_TYPE,
+ },
+};
+
+static const struct qmi_elem_info qmi_wlanfw_ind_register_resp_msg_v01_ei[] = {
+ {
+ .data_type = QMI_STRUCT,
+ .elem_len = 1,
+ .elem_size = sizeof(struct qmi_response_type_v01),
+ .array_type = NO_ARRAY,
+ .tlv_type = 0x02,
+ .offset = offsetof(struct qmi_wlanfw_ind_register_resp_msg_v01,
+ resp),
+ .ei_array = qmi_response_type_v01_ei,
+ },
+ {
+ .data_type = QMI_OPT_FLAG,
+ .elem_len = 1,
+ .elem_size = sizeof(u8),
+ .array_type = NO_ARRAY,
+ .tlv_type = 0x10,
+ .offset = offsetof(struct qmi_wlanfw_ind_register_resp_msg_v01,
+ fw_status_valid),
+ },
+ {
+ .data_type = QMI_UNSIGNED_8_BYTE,
+ .elem_len = 1,
+ .elem_size = sizeof(u64),
+ .array_type = NO_ARRAY,
+ .tlv_type = 0x10,
+ .offset = offsetof(struct qmi_wlanfw_ind_register_resp_msg_v01,
+ fw_status),
+ },
+ {
+ .data_type = QMI_EOTI,
+ .array_type = NO_ARRAY,
+ .tlv_type = QMI_COMMON_TLV_TYPE,
+ },
+};
+
+static const struct qmi_elem_info qmi_wlanfw_mem_cfg_s_v01_ei[] = {
+ {
+ .data_type = QMI_UNSIGNED_8_BYTE,
+ .elem_len = 1,
+ .elem_size = sizeof(u64),
+ .array_type = NO_ARRAY,
+ .tlv_type = 0,
+ .offset = offsetof(struct qmi_wlanfw_mem_cfg_s_v01, offset),
+ },
+ {
+ .data_type = QMI_UNSIGNED_4_BYTE,
+ .elem_len = 1,
+ .elem_size = sizeof(u32),
+ .array_type = NO_ARRAY,
+ .tlv_type = 0,
+ .offset = offsetof(struct qmi_wlanfw_mem_cfg_s_v01, size),
+ },
+ {
+ .data_type = QMI_UNSIGNED_1_BYTE,
+ .elem_len = 1,
+ .elem_size = sizeof(u8),
+ .array_type = NO_ARRAY,
+ .tlv_type = 0,
+ .offset = offsetof(struct qmi_wlanfw_mem_cfg_s_v01, secure_flag),
+ },
+ {
+ .data_type = QMI_EOTI,
+ .array_type = NO_ARRAY,
+ .tlv_type = QMI_COMMON_TLV_TYPE,
+ },
+};
+
+static const struct qmi_elem_info qmi_wlanfw_mem_seg_s_v01_ei[] = {
+ {
+ .data_type = QMI_UNSIGNED_4_BYTE,
+ .elem_len = 1,
+ .elem_size = sizeof(u32),
+ .array_type = NO_ARRAY,
+ .tlv_type = 0,
+ .offset = offsetof(struct qmi_wlanfw_mem_seg_s_v01,
+ size),
+ },
+ {
+ .data_type = QMI_SIGNED_4_BYTE_ENUM,
+ .elem_len = 1,
+ .elem_size = sizeof(enum qmi_wlanfw_mem_type_enum_v01),
+ .array_type = NO_ARRAY,
+ .tlv_type = 0,
+ .offset = offsetof(struct qmi_wlanfw_mem_seg_s_v01, type),
+ },
+ {
+ .data_type = QMI_DATA_LEN,
+ .elem_len = 1,
+ .elem_size = sizeof(u8),
+ .array_type = NO_ARRAY,
+ .tlv_type = 0,
+ .offset = offsetof(struct qmi_wlanfw_mem_seg_s_v01, mem_cfg_len),
+ },
+ {
+ .data_type = QMI_STRUCT,
+ .elem_len = QMI_WLANFW_MAX_NUM_MEM_CFG_V01,
+ .elem_size = sizeof(struct qmi_wlanfw_mem_cfg_s_v01),
+ .array_type = VAR_LEN_ARRAY,
+ .tlv_type = 0,
+ .offset = offsetof(struct qmi_wlanfw_mem_seg_s_v01, mem_cfg),
+ .ei_array = qmi_wlanfw_mem_cfg_s_v01_ei,
+ },
+ {
+ .data_type = QMI_EOTI,
+ .array_type = NO_ARRAY,
+ .tlv_type = QMI_COMMON_TLV_TYPE,
+ },
+};
+
+static const struct qmi_elem_info qmi_wlanfw_request_mem_ind_msg_v01_ei[] = {
+ {
+ .data_type = QMI_DATA_LEN,
+ .elem_len = 1,
+ .elem_size = sizeof(u8),
+ .array_type = NO_ARRAY,
+ .tlv_type = 0x01,
+ .offset = offsetof(struct qmi_wlanfw_request_mem_ind_msg_v01,
+ mem_seg_len),
+ },
+ {
+ .data_type = QMI_STRUCT,
+ .elem_len = ATH11K_QMI_WLANFW_MAX_NUM_MEM_SEG_V01,
+ .elem_size = sizeof(struct qmi_wlanfw_mem_seg_s_v01),
+ .array_type = VAR_LEN_ARRAY,
+ .tlv_type = 0x01,
+ .offset = offsetof(struct qmi_wlanfw_request_mem_ind_msg_v01,
+ mem_seg),
+ .ei_array = qmi_wlanfw_mem_seg_s_v01_ei,
+ },
+ {
+ .data_type = QMI_EOTI,
+ .array_type = NO_ARRAY,
+ .tlv_type = QMI_COMMON_TLV_TYPE,
+ },
+};
+
+static const struct qmi_elem_info qmi_wlanfw_mem_seg_resp_s_v01_ei[] = {
+ {
+ .data_type = QMI_UNSIGNED_8_BYTE,
+ .elem_len = 1,
+ .elem_size = sizeof(u64),
+ .array_type = NO_ARRAY,
+ .tlv_type = 0,
+ .offset = offsetof(struct qmi_wlanfw_mem_seg_resp_s_v01, addr),
+ },
+ {
+ .data_type = QMI_UNSIGNED_4_BYTE,
+ .elem_len = 1,
+ .elem_size = sizeof(u32),
+ .array_type = NO_ARRAY,
+ .tlv_type = 0,
+ .offset = offsetof(struct qmi_wlanfw_mem_seg_resp_s_v01, size),
+ },
+ {
+ .data_type = QMI_SIGNED_4_BYTE_ENUM,
+ .elem_len = 1,
+ .elem_size = sizeof(enum qmi_wlanfw_mem_type_enum_v01),
+ .array_type = NO_ARRAY,
+ .tlv_type = 0,
+ .offset = offsetof(struct qmi_wlanfw_mem_seg_resp_s_v01, type),
+ },
+ {
+ .data_type = QMI_UNSIGNED_1_BYTE,
+ .elem_len = 1,
+ .elem_size = sizeof(u8),
+ .array_type = NO_ARRAY,
+ .tlv_type = 0,
+ .offset = offsetof(struct qmi_wlanfw_mem_seg_resp_s_v01, restore),
+ },
+ {
+ .data_type = QMI_EOTI,
+ .array_type = NO_ARRAY,
+ .tlv_type = QMI_COMMON_TLV_TYPE,
+ },
+};
+
+static const struct qmi_elem_info qmi_wlanfw_respond_mem_req_msg_v01_ei[] = {
+ {
+ .data_type = QMI_DATA_LEN,
+ .elem_len = 1,
+ .elem_size = sizeof(u8),
+ .array_type = NO_ARRAY,
+ .tlv_type = 0x01,
+ .offset = offsetof(struct qmi_wlanfw_respond_mem_req_msg_v01,
+ mem_seg_len),
+ },
+ {
+ .data_type = QMI_STRUCT,
+ .elem_len = ATH11K_QMI_WLANFW_MAX_NUM_MEM_SEG_V01,
+ .elem_size = sizeof(struct qmi_wlanfw_mem_seg_resp_s_v01),
+ .array_type = VAR_LEN_ARRAY,
+ .tlv_type = 0x01,
+ .offset = offsetof(struct qmi_wlanfw_respond_mem_req_msg_v01,
+ mem_seg),
+ .ei_array = qmi_wlanfw_mem_seg_resp_s_v01_ei,
+ },
+ {
+ .data_type = QMI_EOTI,
+ .array_type = NO_ARRAY,
+ .tlv_type = QMI_COMMON_TLV_TYPE,
+ },
+};
+
+static const struct qmi_elem_info qmi_wlanfw_respond_mem_resp_msg_v01_ei[] = {
+ {
+ .data_type = QMI_STRUCT,
+ .elem_len = 1,
+ .elem_size = sizeof(struct qmi_response_type_v01),
+ .array_type = NO_ARRAY,
+ .tlv_type = 0x02,
+ .offset = offsetof(struct qmi_wlanfw_respond_mem_resp_msg_v01,
+ resp),
+ .ei_array = qmi_response_type_v01_ei,
+ },
+ {
+ .data_type = QMI_EOTI,
+ .array_type = NO_ARRAY,
+ .tlv_type = QMI_COMMON_TLV_TYPE,
+ },
+};
+
+static const struct qmi_elem_info qmi_wlanfw_cap_req_msg_v01_ei[] = {
+ {
+ .data_type = QMI_EOTI,
+ .array_type = NO_ARRAY,
+ .tlv_type = QMI_COMMON_TLV_TYPE,
+ },
+};
+
+static const struct qmi_elem_info qmi_wlanfw_device_info_req_msg_v01_ei[] = {
+ {
+ .data_type = QMI_EOTI,
+ .array_type = NO_ARRAY,
+ .tlv_type = QMI_COMMON_TLV_TYPE,
+ },
+};
+
+static const struct qmi_elem_info qmi_wlfw_device_info_resp_msg_v01_ei[] = {
+ {
+ .data_type = QMI_STRUCT,
+ .elem_len = 1,
+ .elem_size = sizeof(struct qmi_response_type_v01),
+ .array_type = NO_ARRAY,
+ .tlv_type = 0x02,
+ .offset = offsetof(struct qmi_wlanfw_device_info_resp_msg_v01,
+ resp),
+ .ei_array = qmi_response_type_v01_ei,
+ },
+ {
+ .data_type = QMI_OPT_FLAG,
+ .elem_len = 1,
+ .elem_size = sizeof(u8),
+ .array_type = NO_ARRAY,
+ .tlv_type = 0x10,
+ .offset = offsetof(struct qmi_wlanfw_device_info_resp_msg_v01,
+ bar_addr_valid),
+ },
+ {
+ .data_type = QMI_UNSIGNED_8_BYTE,
+ .elem_len = 1,
+ .elem_size = sizeof(u64),
+ .array_type = NO_ARRAY,
+ .tlv_type = 0x10,
+ .offset = offsetof(struct qmi_wlanfw_device_info_resp_msg_v01,
+ bar_addr),
+ },
+ {
+ .data_type = QMI_OPT_FLAG,
+ .elem_len = 1,
+ .elem_size = sizeof(u8),
+ .array_type = NO_ARRAY,
+ .tlv_type = 0x11,
+ .offset = offsetof(struct qmi_wlanfw_device_info_resp_msg_v01,
+ bar_size_valid),
+ },
+ {
+ .data_type = QMI_UNSIGNED_4_BYTE,
+ .elem_len = 1,
+ .elem_size = sizeof(u32),
+ .array_type = NO_ARRAY,
+ .tlv_type = 0x11,
+ .offset = offsetof(struct qmi_wlanfw_device_info_resp_msg_v01,
+ bar_size),
+ },
+ {
+ .data_type = QMI_EOTI,
+ .array_type = NO_ARRAY,
+ .tlv_type = QMI_COMMON_TLV_TYPE,
+ },
+};
+
+static const struct qmi_elem_info qmi_wlanfw_rf_chip_info_s_v01_ei[] = {
+ {
+ .data_type = QMI_UNSIGNED_4_BYTE,
+ .elem_len = 1,
+ .elem_size = sizeof(u32),
+ .array_type = NO_ARRAY,
+ .tlv_type = 0,
+ .offset = offsetof(struct qmi_wlanfw_rf_chip_info_s_v01,
+ chip_id),
+ },
+ {
+ .data_type = QMI_UNSIGNED_4_BYTE,
+ .elem_len = 1,
+ .elem_size = sizeof(u32),
+ .array_type = NO_ARRAY,
+ .tlv_type = 0,
+ .offset = offsetof(struct qmi_wlanfw_rf_chip_info_s_v01,
+ chip_family),
+ },
+ {
+ .data_type = QMI_EOTI,
+ .array_type = NO_ARRAY,
+ .tlv_type = QMI_COMMON_TLV_TYPE,
+ },
+};
+
+static const struct qmi_elem_info qmi_wlanfw_rf_board_info_s_v01_ei[] = {
+ {
+ .data_type = QMI_UNSIGNED_4_BYTE,
+ .elem_len = 1,
+ .elem_size = sizeof(u32),
+ .array_type = NO_ARRAY,
+ .tlv_type = 0,
+ .offset = offsetof(struct qmi_wlanfw_rf_board_info_s_v01,
+ board_id),
+ },
+ {
+ .data_type = QMI_EOTI,
+ .array_type = NO_ARRAY,
+ .tlv_type = QMI_COMMON_TLV_TYPE,
+ },
+};
+
+static const struct qmi_elem_info qmi_wlanfw_soc_info_s_v01_ei[] = {
+ {
+ .data_type = QMI_UNSIGNED_4_BYTE,
+ .elem_len = 1,
+ .elem_size = sizeof(u32),
+ .array_type = NO_ARRAY,
+ .tlv_type = 0,
+ .offset = offsetof(struct qmi_wlanfw_soc_info_s_v01, soc_id),
+ },
+ {
+ .data_type = QMI_EOTI,
+ .array_type = NO_ARRAY,
+ .tlv_type = QMI_COMMON_TLV_TYPE,
+ },
+};
+
+static const struct qmi_elem_info qmi_wlanfw_fw_version_info_s_v01_ei[] = {
+ {
+ .data_type = QMI_UNSIGNED_4_BYTE,
+ .elem_len = 1,
+ .elem_size = sizeof(u32),
+ .array_type = NO_ARRAY,
+ .tlv_type = 0,
+ .offset = offsetof(struct qmi_wlanfw_fw_version_info_s_v01,
+ fw_version),
+ },
+ {
+ .data_type = QMI_STRING,
+ .elem_len = ATH11K_QMI_WLANFW_MAX_TIMESTAMP_LEN_V01 + 1,
+ .elem_size = sizeof(char),
+ .array_type = NO_ARRAY,
+ .tlv_type = 0,
+ .offset = offsetof(struct qmi_wlanfw_fw_version_info_s_v01,
+ fw_build_timestamp),
+ },
+ {
+ .data_type = QMI_EOTI,
+ .array_type = NO_ARRAY,
+ .tlv_type = QMI_COMMON_TLV_TYPE,
+ },
+};
+
+static const struct qmi_elem_info qmi_wlanfw_cap_resp_msg_v01_ei[] = {
+ {
+ .data_type = QMI_STRUCT,
+ .elem_len = 1,
+ .elem_size = sizeof(struct qmi_response_type_v01),
+ .array_type = NO_ARRAY,
+ .tlv_type = 0x02,
+ .offset = offsetof(struct qmi_wlanfw_cap_resp_msg_v01, resp),
+ .ei_array = qmi_response_type_v01_ei,
+ },
+ {
+ .data_type = QMI_OPT_FLAG,
+ .elem_len = 1,
+ .elem_size = sizeof(u8),
+ .array_type = NO_ARRAY,
+ .tlv_type = 0x10,
+ .offset = offsetof(struct qmi_wlanfw_cap_resp_msg_v01,
+ chip_info_valid),
+ },
+ {
+ .data_type = QMI_STRUCT,
+ .elem_len = 1,
+ .elem_size = sizeof(struct qmi_wlanfw_rf_chip_info_s_v01),
+ .array_type = NO_ARRAY,
+ .tlv_type = 0x10,
+ .offset = offsetof(struct qmi_wlanfw_cap_resp_msg_v01,
+ chip_info),
+ .ei_array = qmi_wlanfw_rf_chip_info_s_v01_ei,
+ },
+ {
+ .data_type = QMI_OPT_FLAG,
+ .elem_len = 1,
+ .elem_size = sizeof(u8),
+ .array_type = NO_ARRAY,
+ .tlv_type = 0x11,
+ .offset = offsetof(struct qmi_wlanfw_cap_resp_msg_v01,
+ board_info_valid),
+ },
+ {
+ .data_type = QMI_STRUCT,
+ .elem_len = 1,
+ .elem_size = sizeof(struct qmi_wlanfw_rf_board_info_s_v01),
+ .array_type = NO_ARRAY,
+ .tlv_type = 0x11,
+ .offset = offsetof(struct qmi_wlanfw_cap_resp_msg_v01,
+ board_info),
+ .ei_array = qmi_wlanfw_rf_board_info_s_v01_ei,
+ },
+ {
+ .data_type = QMI_OPT_FLAG,
+ .elem_len = 1,
+ .elem_size = sizeof(u8),
+ .array_type = NO_ARRAY,
+ .tlv_type = 0x12,
+ .offset = offsetof(struct qmi_wlanfw_cap_resp_msg_v01,
+ soc_info_valid),
+ },
+ {
+ .data_type = QMI_STRUCT,
+ .elem_len = 1,
+ .elem_size = sizeof(struct qmi_wlanfw_soc_info_s_v01),
+ .array_type = NO_ARRAY,
+ .tlv_type = 0x12,
+ .offset = offsetof(struct qmi_wlanfw_cap_resp_msg_v01,
+ soc_info),
+ .ei_array = qmi_wlanfw_soc_info_s_v01_ei,
+ },
+ {
+ .data_type = QMI_OPT_FLAG,
+ .elem_len = 1,
+ .elem_size = sizeof(u8),
+ .array_type = NO_ARRAY,
+ .tlv_type = 0x13,
+ .offset = offsetof(struct qmi_wlanfw_cap_resp_msg_v01,
+ fw_version_info_valid),
+ },
+ {
+ .data_type = QMI_STRUCT,
+ .elem_len = 1,
+ .elem_size = sizeof(struct qmi_wlanfw_fw_version_info_s_v01),
+ .array_type = NO_ARRAY,
+ .tlv_type = 0x13,
+ .offset = offsetof(struct qmi_wlanfw_cap_resp_msg_v01,
+ fw_version_info),
+ .ei_array = qmi_wlanfw_fw_version_info_s_v01_ei,
+ },
+ {
+ .data_type = QMI_OPT_FLAG,
+ .elem_len = 1,
+ .elem_size = sizeof(u8),
+ .array_type = NO_ARRAY,
+ .tlv_type = 0x14,
+ .offset = offsetof(struct qmi_wlanfw_cap_resp_msg_v01,
+ fw_build_id_valid),
+ },
+ {
+ .data_type = QMI_STRING,
+ .elem_len = ATH11K_QMI_WLANFW_MAX_BUILD_ID_LEN_V01 + 1,
+ .elem_size = sizeof(char),
+ .array_type = NO_ARRAY,
+ .tlv_type = 0x14,
+ .offset = offsetof(struct qmi_wlanfw_cap_resp_msg_v01,
+ fw_build_id),
+ },
+ {
+ .data_type = QMI_OPT_FLAG,
+ .elem_len = 1,
+ .elem_size = sizeof(u8),
+ .array_type = NO_ARRAY,
+ .tlv_type = 0x15,
+ .offset = offsetof(struct qmi_wlanfw_cap_resp_msg_v01,
+ num_macs_valid),
+ },
+ {
+ .data_type = QMI_UNSIGNED_1_BYTE,
+ .elem_len = 1,
+ .elem_size = sizeof(u8),
+ .array_type = NO_ARRAY,
+ .tlv_type = 0x15,
+ .offset = offsetof(struct qmi_wlanfw_cap_resp_msg_v01,
+ num_macs),
+ },
+ {
+ .data_type = QMI_OPT_FLAG,
+ .elem_len = 1,
+ .elem_size = sizeof(u8),
+ .array_type = NO_ARRAY,
+ .tlv_type = 0x16,
+ .offset = offsetof(struct qmi_wlanfw_cap_resp_msg_v01,
+ voltage_mv_valid),
+ },
+ {
+ .data_type = QMI_UNSIGNED_4_BYTE,
+ .elem_len = 1,
+ .elem_size = sizeof(u32),
+ .array_type = NO_ARRAY,
+ .tlv_type = 0x16,
+ .offset = offsetof(struct qmi_wlanfw_cap_resp_msg_v01,
+ voltage_mv),
+ },
+ {
+ .data_type = QMI_OPT_FLAG,
+ .elem_len = 1,
+ .elem_size = sizeof(u8),
+ .array_type = NO_ARRAY,
+ .tlv_type = 0x17,
+ .offset = offsetof(struct qmi_wlanfw_cap_resp_msg_v01,
+ time_freq_hz_valid),
+ },
+ {
+ .data_type = QMI_UNSIGNED_4_BYTE,
+ .elem_len = 1,
+ .elem_size = sizeof(u32),
+ .array_type = NO_ARRAY,
+ .tlv_type = 0x17,
+ .offset = offsetof(struct qmi_wlanfw_cap_resp_msg_v01,
+ time_freq_hz),
+ },
+ {
+ .data_type = QMI_OPT_FLAG,
+ .elem_len = 1,
+ .elem_size = sizeof(u8),
+ .array_type = NO_ARRAY,
+ .tlv_type = 0x18,
+ .offset = offsetof(struct qmi_wlanfw_cap_resp_msg_v01,
+ otp_version_valid),
+ },
+ {
+ .data_type = QMI_UNSIGNED_4_BYTE,
+ .elem_len = 1,
+ .elem_size = sizeof(u32),
+ .array_type = NO_ARRAY,
+ .tlv_type = 0x18,
+ .offset = offsetof(struct qmi_wlanfw_cap_resp_msg_v01,
+ otp_version),
+ },
+ {
+ .data_type = QMI_OPT_FLAG,
+ .elem_len = 1,
+ .elem_size = sizeof(u8),
+ .array_type = NO_ARRAY,
+ .tlv_type = 0x19,
+ .offset = offsetof(struct qmi_wlanfw_cap_resp_msg_v01,
+ eeprom_read_timeout_valid),
+ },
+ {
+ .data_type = QMI_UNSIGNED_4_BYTE,
+ .elem_len = 1,
+ .elem_size = sizeof(u32),
+ .array_type = NO_ARRAY,
+ .tlv_type = 0x19,
+ .offset = offsetof(struct qmi_wlanfw_cap_resp_msg_v01,
+ eeprom_read_timeout),
+ },
+ {
+ .data_type = QMI_EOTI,
+ .array_type = NO_ARRAY,
+ .tlv_type = QMI_COMMON_TLV_TYPE,
+ },
+};
+
+static const struct qmi_elem_info qmi_wlanfw_bdf_download_req_msg_v01_ei[] = {
+ {
+ .data_type = QMI_UNSIGNED_1_BYTE,
+ .elem_len = 1,
+ .elem_size = sizeof(u8),
+ .array_type = NO_ARRAY,
+ .tlv_type = 0x01,
+ .offset = offsetof(struct qmi_wlanfw_bdf_download_req_msg_v01,
+ valid),
+ },
+ {
+ .data_type = QMI_OPT_FLAG,
+ .elem_len = 1,
+ .elem_size = sizeof(u8),
+ .array_type = NO_ARRAY,
+ .tlv_type = 0x10,
+ .offset = offsetof(struct qmi_wlanfw_bdf_download_req_msg_v01,
+ file_id_valid),
+ },
+ {
+ .data_type = QMI_SIGNED_4_BYTE_ENUM,
+ .elem_len = 1,
+ .elem_size = sizeof(enum qmi_wlanfw_cal_temp_id_enum_v01),
+ .array_type = NO_ARRAY,
+ .tlv_type = 0x10,
+ .offset = offsetof(struct qmi_wlanfw_bdf_download_req_msg_v01,
+ file_id),
+ },
+ {
+ .data_type = QMI_OPT_FLAG,
+ .elem_len = 1,
+ .elem_size = sizeof(u8),
+ .array_type = NO_ARRAY,
+ .tlv_type = 0x11,
+ .offset = offsetof(struct qmi_wlanfw_bdf_download_req_msg_v01,
+ total_size_valid),
+ },
+ {
+ .data_type = QMI_UNSIGNED_4_BYTE,
+ .elem_len = 1,
+ .elem_size = sizeof(u32),
+ .array_type = NO_ARRAY,
+ .tlv_type = 0x11,
+ .offset = offsetof(struct qmi_wlanfw_bdf_download_req_msg_v01,
+ total_size),
+ },
+ {
+ .data_type = QMI_OPT_FLAG,
+ .elem_len = 1,
+ .elem_size = sizeof(u8),
+ .array_type = NO_ARRAY,
+ .tlv_type = 0x12,
+ .offset = offsetof(struct qmi_wlanfw_bdf_download_req_msg_v01,
+ seg_id_valid),
+ },
+ {
+ .data_type = QMI_UNSIGNED_4_BYTE,
+ .elem_len = 1,
+ .elem_size = sizeof(u32),
+ .array_type = NO_ARRAY,
+ .tlv_type = 0x12,
+ .offset = offsetof(struct qmi_wlanfw_bdf_download_req_msg_v01,
+ seg_id),
+ },
+ {
+ .data_type = QMI_OPT_FLAG,
+ .elem_len = 1,
+ .elem_size = sizeof(u8),
+ .array_type = NO_ARRAY,
+ .tlv_type = 0x13,
+ .offset = offsetof(struct qmi_wlanfw_bdf_download_req_msg_v01,
+ data_valid),
+ },
+ {
+ .data_type = QMI_DATA_LEN,
+ .elem_len = 1,
+ .elem_size = sizeof(u16),
+ .array_type = NO_ARRAY,
+ .tlv_type = 0x13,
+ .offset = offsetof(struct qmi_wlanfw_bdf_download_req_msg_v01,
+ data_len),
+ },
+ {
+ .data_type = QMI_UNSIGNED_1_BYTE,
+ .elem_len = QMI_WLANFW_MAX_DATA_SIZE_V01,
+ .elem_size = sizeof(u8),
+ .array_type = VAR_LEN_ARRAY,
+ .tlv_type = 0x13,
+ .offset = offsetof(struct qmi_wlanfw_bdf_download_req_msg_v01,
+ data),
+ },
+ {
+ .data_type = QMI_OPT_FLAG,
+ .elem_len = 1,
+ .elem_size = sizeof(u8),
+ .array_type = NO_ARRAY,
+ .tlv_type = 0x14,
+ .offset = offsetof(struct qmi_wlanfw_bdf_download_req_msg_v01,
+ end_valid),
+ },
+ {
+ .data_type = QMI_UNSIGNED_1_BYTE,
+ .elem_len = 1,
+ .elem_size = sizeof(u8),
+ .array_type = NO_ARRAY,
+ .tlv_type = 0x14,
+ .offset = offsetof(struct qmi_wlanfw_bdf_download_req_msg_v01,
+ end),
+ },
+ {
+ .data_type = QMI_OPT_FLAG,
+ .elem_len = 1,
+ .elem_size = sizeof(u8),
+ .array_type = NO_ARRAY,
+ .tlv_type = 0x15,
+ .offset = offsetof(struct qmi_wlanfw_bdf_download_req_msg_v01,
+ bdf_type_valid),
+ },
+ {
+ .data_type = QMI_UNSIGNED_1_BYTE,
+ .elem_len = 1,
+ .elem_size = sizeof(u8),
+ .array_type = NO_ARRAY,
+ .tlv_type = 0x15,
+ .offset = offsetof(struct qmi_wlanfw_bdf_download_req_msg_v01,
+ bdf_type),
+ },
+ {
+ .data_type = QMI_EOTI,
+ .array_type = NO_ARRAY,
+ .tlv_type = QMI_COMMON_TLV_TYPE,
+ },
+};
+
+static const struct qmi_elem_info qmi_wlanfw_bdf_download_resp_msg_v01_ei[] = {
+ {
+ .data_type = QMI_STRUCT,
+ .elem_len = 1,
+ .elem_size = sizeof(struct qmi_response_type_v01),
+ .array_type = NO_ARRAY,
+ .tlv_type = 0x02,
+ .offset = offsetof(struct qmi_wlanfw_bdf_download_resp_msg_v01,
+ resp),
+ .ei_array = qmi_response_type_v01_ei,
+ },
+ {
+ .data_type = QMI_EOTI,
+ .array_type = NO_ARRAY,
+ .tlv_type = QMI_COMMON_TLV_TYPE,
+ },
+};
+
+static const struct qmi_elem_info qmi_wlanfw_m3_info_req_msg_v01_ei[] = {
+ {
+ .data_type = QMI_UNSIGNED_8_BYTE,
+ .elem_len = 1,
+ .elem_size = sizeof(u64),
+ .array_type = NO_ARRAY,
+ .tlv_type = 0x01,
+ .offset = offsetof(struct qmi_wlanfw_m3_info_req_msg_v01, addr),
+ },
+ {
+ .data_type = QMI_UNSIGNED_4_BYTE,
+ .elem_len = 1,
+ .elem_size = sizeof(u32),
+ .array_type = NO_ARRAY,
+ .tlv_type = 0x02,
+ .offset = offsetof(struct qmi_wlanfw_m3_info_req_msg_v01, size),
+ },
+ {
+ .data_type = QMI_EOTI,
+ .array_type = NO_ARRAY,
+ .tlv_type = QMI_COMMON_TLV_TYPE,
+ },
+};
+
+static const struct qmi_elem_info qmi_wlanfw_m3_info_resp_msg_v01_ei[] = {
+ {
+ .data_type = QMI_STRUCT,
+ .elem_len = 1,
+ .elem_size = sizeof(struct qmi_response_type_v01),
+ .array_type = NO_ARRAY,
+ .tlv_type = 0x02,
+ .offset = offsetof(struct qmi_wlanfw_m3_info_resp_msg_v01, resp),
+ .ei_array = qmi_response_type_v01_ei,
+ },
+ {
+ .data_type = QMI_EOTI,
+ .array_type = NO_ARRAY,
+ .tlv_type = QMI_COMMON_TLV_TYPE,
+ },
+};
+
+static const struct qmi_elem_info qmi_wlanfw_ce_tgt_pipe_cfg_s_v01_ei[] = {
+ {
+ .data_type = QMI_UNSIGNED_4_BYTE,
+ .elem_len = 1,
+ .elem_size = sizeof(u32),
+ .array_type = NO_ARRAY,
+ .tlv_type = 0,
+ .offset = offsetof(struct qmi_wlanfw_ce_tgt_pipe_cfg_s_v01,
+ pipe_num),
+ },
+ {
+ .data_type = QMI_SIGNED_4_BYTE_ENUM,
+ .elem_len = 1,
+ .elem_size = sizeof(enum qmi_wlanfw_pipedir_enum_v01),
+ .array_type = NO_ARRAY,
+ .tlv_type = 0,
+ .offset = offsetof(struct qmi_wlanfw_ce_tgt_pipe_cfg_s_v01,
+ pipe_dir),
+ },
+ {
+ .data_type = QMI_UNSIGNED_4_BYTE,
+ .elem_len = 1,
+ .elem_size = sizeof(u32),
+ .array_type = NO_ARRAY,
+ .tlv_type = 0,
+ .offset = offsetof(struct qmi_wlanfw_ce_tgt_pipe_cfg_s_v01,
+ nentries),
+ },
+ {
+ .data_type = QMI_UNSIGNED_4_BYTE,
+ .elem_len = 1,
+ .elem_size = sizeof(u32),
+ .array_type = NO_ARRAY,
+ .tlv_type = 0,
+ .offset = offsetof(struct qmi_wlanfw_ce_tgt_pipe_cfg_s_v01,
+ nbytes_max),
+ },
+ {
+ .data_type = QMI_UNSIGNED_4_BYTE,
+ .elem_len = 1,
+ .elem_size = sizeof(u32),
+ .array_type = NO_ARRAY,
+ .tlv_type = 0,
+ .offset = offsetof(struct qmi_wlanfw_ce_tgt_pipe_cfg_s_v01,
+ flags),
+ },
+ {
+ .data_type = QMI_EOTI,
+ .array_type = NO_ARRAY,
+ .tlv_type = QMI_COMMON_TLV_TYPE,
+ },
+};
+
+static const struct qmi_elem_info qmi_wlanfw_ce_svc_pipe_cfg_s_v01_ei[] = {
+ {
+ .data_type = QMI_UNSIGNED_4_BYTE,
+ .elem_len = 1,
+ .elem_size = sizeof(u32),
+ .array_type = NO_ARRAY,
+ .tlv_type = 0,
+ .offset = offsetof(struct qmi_wlanfw_ce_svc_pipe_cfg_s_v01,
+ service_id),
+ },
+ {
+ .data_type = QMI_SIGNED_4_BYTE_ENUM,
+ .elem_len = 1,
+ .elem_size = sizeof(enum qmi_wlanfw_pipedir_enum_v01),
+ .array_type = NO_ARRAY,
+ .tlv_type = 0,
+ .offset = offsetof(struct qmi_wlanfw_ce_svc_pipe_cfg_s_v01,
+ pipe_dir),
+ },
+ {
+ .data_type = QMI_UNSIGNED_4_BYTE,
+ .elem_len = 1,
+ .elem_size = sizeof(u32),
+ .array_type = NO_ARRAY,
+ .tlv_type = 0,
+ .offset = offsetof(struct qmi_wlanfw_ce_svc_pipe_cfg_s_v01,
+ pipe_num),
+ },
+ {
+ .data_type = QMI_EOTI,
+ .array_type = NO_ARRAY,
+ .tlv_type = QMI_COMMON_TLV_TYPE,
+ },
+};
+
+static const struct qmi_elem_info qmi_wlanfw_shadow_reg_cfg_s_v01_ei[] = {
+ {
+ .data_type = QMI_UNSIGNED_2_BYTE,
+ .elem_len = 1,
+ .elem_size = sizeof(u16),
+ .array_type = NO_ARRAY,
+ .tlv_type = 0,
+ .offset = offsetof(struct qmi_wlanfw_shadow_reg_cfg_s_v01, id),
+ },
+ {
+ .data_type = QMI_UNSIGNED_2_BYTE,
+ .elem_len = 1,
+ .elem_size = sizeof(u16),
+ .array_type = NO_ARRAY,
+ .tlv_type = 0,
+ .offset = offsetof(struct qmi_wlanfw_shadow_reg_cfg_s_v01,
+ offset),
+ },
+ {
+ .data_type = QMI_EOTI,
+ .array_type = NO_ARRAY,
+ .tlv_type = QMI_COMMON_TLV_TYPE,
+ },
+};
+
+static const struct qmi_elem_info qmi_wlanfw_shadow_reg_v2_cfg_s_v01_ei[] = {
+ {
+ .data_type = QMI_UNSIGNED_4_BYTE,
+ .elem_len = 1,
+ .elem_size = sizeof(u32),
+ .array_type = NO_ARRAY,
+ .tlv_type = 0,
+ .offset = offsetof(struct qmi_wlanfw_shadow_reg_v2_cfg_s_v01,
+ addr),
+ },
+ {
+ .data_type = QMI_EOTI,
+ .array_type = NO_ARRAY,
+ .tlv_type = QMI_COMMON_TLV_TYPE,
+ },
+};
+
+static const struct qmi_elem_info qmi_wlanfw_wlan_mode_req_msg_v01_ei[] = {
+ {
+ .data_type = QMI_UNSIGNED_4_BYTE,
+ .elem_len = 1,
+ .elem_size = sizeof(u32),
+ .array_type = NO_ARRAY,
+ .tlv_type = 0x01,
+ .offset = offsetof(struct qmi_wlanfw_wlan_mode_req_msg_v01,
+ mode),
+ },
+ {
+ .data_type = QMI_OPT_FLAG,
+ .elem_len = 1,
+ .elem_size = sizeof(u8),
+ .array_type = NO_ARRAY,
+ .tlv_type = 0x10,
+ .offset = offsetof(struct qmi_wlanfw_wlan_mode_req_msg_v01,
+ hw_debug_valid),
+ },
+ {
+ .data_type = QMI_UNSIGNED_1_BYTE,
+ .elem_len = 1,
+ .elem_size = sizeof(u8),
+ .array_type = NO_ARRAY,
+ .tlv_type = 0x10,
+ .offset = offsetof(struct qmi_wlanfw_wlan_mode_req_msg_v01,
+ hw_debug),
+ },
+ {
+ .data_type = QMI_EOTI,
+ .array_type = NO_ARRAY,
+ .tlv_type = QMI_COMMON_TLV_TYPE,
+ },
+};
+
+static const struct qmi_elem_info qmi_wlanfw_wlan_mode_resp_msg_v01_ei[] = {
+ {
+ .data_type = QMI_STRUCT,
+ .elem_len = 1,
+ .elem_size = sizeof(struct qmi_response_type_v01),
+ .array_type = NO_ARRAY,
+ .tlv_type = 0x02,
+ .offset = offsetof(struct qmi_wlanfw_wlan_mode_resp_msg_v01,
+ resp),
+ .ei_array = qmi_response_type_v01_ei,
+ },
+ {
+ .data_type = QMI_EOTI,
+ .array_type = NO_ARRAY,
+ .tlv_type = QMI_COMMON_TLV_TYPE,
+ },
+};
+
+static const struct qmi_elem_info qmi_wlanfw_wlan_cfg_req_msg_v01_ei[] = {
+ {
+ .data_type = QMI_OPT_FLAG,
+ .elem_len = 1,
+ .elem_size = sizeof(u8),
+ .array_type = NO_ARRAY,
+ .tlv_type = 0x10,
+ .offset = offsetof(struct qmi_wlanfw_wlan_cfg_req_msg_v01,
+ host_version_valid),
+ },
+ {
+ .data_type = QMI_STRING,
+ .elem_len = QMI_WLANFW_MAX_STR_LEN_V01 + 1,
+ .elem_size = sizeof(char),
+ .array_type = NO_ARRAY,
+ .tlv_type = 0x10,
+ .offset = offsetof(struct qmi_wlanfw_wlan_cfg_req_msg_v01,
+ host_version),
+ },
+ {
+ .data_type = QMI_OPT_FLAG,
+ .elem_len = 1,
+ .elem_size = sizeof(u8),
+ .array_type = NO_ARRAY,
+ .tlv_type = 0x11,
+ .offset = offsetof(struct qmi_wlanfw_wlan_cfg_req_msg_v01,
+ tgt_cfg_valid),
+ },
+ {
+ .data_type = QMI_DATA_LEN,
+ .elem_len = 1,
+ .elem_size = sizeof(u8),
+ .array_type = NO_ARRAY,
+ .tlv_type = 0x11,
+ .offset = offsetof(struct qmi_wlanfw_wlan_cfg_req_msg_v01,
+ tgt_cfg_len),
+ },
+ {
+ .data_type = QMI_STRUCT,
+ .elem_len = QMI_WLANFW_MAX_NUM_CE_V01,
+ .elem_size = sizeof(struct qmi_wlanfw_ce_tgt_pipe_cfg_s_v01),
+ .array_type = VAR_LEN_ARRAY,
+ .tlv_type = 0x11,
+ .offset = offsetof(struct qmi_wlanfw_wlan_cfg_req_msg_v01,
+ tgt_cfg),
+ .ei_array = qmi_wlanfw_ce_tgt_pipe_cfg_s_v01_ei,
+ },
+ {
+ .data_type = QMI_OPT_FLAG,
+ .elem_len = 1,
+ .elem_size = sizeof(u8),
+ .array_type = NO_ARRAY,
+ .tlv_type = 0x12,
+ .offset = offsetof(struct qmi_wlanfw_wlan_cfg_req_msg_v01,
+ svc_cfg_valid),
+ },
+ {
+ .data_type = QMI_DATA_LEN,
+ .elem_len = 1,
+ .elem_size = sizeof(u8),
+ .array_type = NO_ARRAY,
+ .tlv_type = 0x12,
+ .offset = offsetof(struct qmi_wlanfw_wlan_cfg_req_msg_v01,
+ svc_cfg_len),
+ },
+ {
+ .data_type = QMI_STRUCT,
+ .elem_len = QMI_WLANFW_MAX_NUM_SVC_V01,
+ .elem_size = sizeof(struct qmi_wlanfw_ce_svc_pipe_cfg_s_v01),
+ .array_type = VAR_LEN_ARRAY,
+ .tlv_type = 0x12,
+ .offset = offsetof(struct qmi_wlanfw_wlan_cfg_req_msg_v01,
+ svc_cfg),
+ .ei_array = qmi_wlanfw_ce_svc_pipe_cfg_s_v01_ei,
+ },
+ {
+ .data_type = QMI_OPT_FLAG,
+ .elem_len = 1,
+ .elem_size = sizeof(u8),
+ .array_type = NO_ARRAY,
+ .tlv_type = 0x13,
+ .offset = offsetof(struct qmi_wlanfw_wlan_cfg_req_msg_v01,
+ shadow_reg_valid),
+ },
+ {
+ .data_type = QMI_DATA_LEN,
+ .elem_len = 1,
+ .elem_size = sizeof(u8),
+ .array_type = NO_ARRAY,
+ .tlv_type = 0x13,
+ .offset = offsetof(struct qmi_wlanfw_wlan_cfg_req_msg_v01,
+ shadow_reg_len),
+ },
+ {
+ .data_type = QMI_STRUCT,
+ .elem_len = QMI_WLANFW_MAX_NUM_SHADOW_REG_V01,
+ .elem_size = sizeof(struct qmi_wlanfw_shadow_reg_cfg_s_v01),
+ .array_type = VAR_LEN_ARRAY,
+ .tlv_type = 0x13,
+ .offset = offsetof(struct qmi_wlanfw_wlan_cfg_req_msg_v01,
+ shadow_reg),
+ .ei_array = qmi_wlanfw_shadow_reg_cfg_s_v01_ei,
+ },
+ {
+ .data_type = QMI_OPT_FLAG,
+ .elem_len = 1,
+ .elem_size = sizeof(u8),
+ .array_type = NO_ARRAY,
+ .tlv_type = 0x14,
+ .offset = offsetof(struct qmi_wlanfw_wlan_cfg_req_msg_v01,
+ shadow_reg_v2_valid),
+ },
+ {
+ .data_type = QMI_DATA_LEN,
+ .elem_len = 1,
+ .elem_size = sizeof(u8),
+ .array_type = NO_ARRAY,
+ .tlv_type = 0x14,
+ .offset = offsetof(struct qmi_wlanfw_wlan_cfg_req_msg_v01,
+ shadow_reg_v2_len),
+ },
+ {
+ .data_type = QMI_STRUCT,
+ .elem_len = QMI_WLANFW_MAX_NUM_SHADOW_REG_V2_V01,
+ .elem_size = sizeof(struct qmi_wlanfw_shadow_reg_v2_cfg_s_v01),
+ .array_type = VAR_LEN_ARRAY,
+ .tlv_type = 0x14,
+ .offset = offsetof(struct qmi_wlanfw_wlan_cfg_req_msg_v01,
+ shadow_reg_v2),
+ .ei_array = qmi_wlanfw_shadow_reg_v2_cfg_s_v01_ei,
+ },
+ {
+ .data_type = QMI_EOTI,
+ .array_type = NO_ARRAY,
+ .tlv_type = QMI_COMMON_TLV_TYPE,
+ },
+};
+
+static const struct qmi_elem_info qmi_wlanfw_wlan_cfg_resp_msg_v01_ei[] = {
+ {
+ .data_type = QMI_STRUCT,
+ .elem_len = 1,
+ .elem_size = sizeof(struct qmi_response_type_v01),
+ .array_type = NO_ARRAY,
+ .tlv_type = 0x02,
+ .offset = offsetof(struct qmi_wlanfw_wlan_cfg_resp_msg_v01, resp),
+ .ei_array = qmi_response_type_v01_ei,
+ },
+ {
+ .data_type = QMI_EOTI,
+ .array_type = NO_ARRAY,
+ .tlv_type = QMI_COMMON_TLV_TYPE,
+ },
+};
+
+static const struct qmi_elem_info qmi_wlanfw_mem_ready_ind_msg_v01_ei[] = {
+ {
+ .data_type = QMI_EOTI,
+ .array_type = NO_ARRAY,
+ },
+};
+
+static const struct qmi_elem_info qmi_wlanfw_fw_ready_ind_msg_v01_ei[] = {
+ {
+ .data_type = QMI_EOTI,
+ .array_type = NO_ARRAY,
+ },
+};
+
+static const struct qmi_elem_info qmi_wlanfw_cold_boot_cal_done_ind_msg_v01_ei[] = {
+ {
+ .data_type = QMI_EOTI,
+ .array_type = NO_ARRAY,
+ },
+};
+
+static const struct qmi_elem_info qmi_wlanfw_wlan_ini_req_msg_v01_ei[] = {
+ {
+ .data_type = QMI_OPT_FLAG,
+ .elem_len = 1,
+ .elem_size = sizeof(u8),
+ .array_type = NO_ARRAY,
+ .tlv_type = 0x10,
+ .offset = offsetof(struct qmi_wlanfw_wlan_ini_req_msg_v01,
+ enablefwlog_valid),
+ },
+ {
+ .data_type = QMI_UNSIGNED_1_BYTE,
+ .elem_len = 1,
+ .elem_size = sizeof(u8),
+ .array_type = NO_ARRAY,
+ .tlv_type = 0x10,
+ .offset = offsetof(struct qmi_wlanfw_wlan_ini_req_msg_v01,
+ enablefwlog),
+ },
+ {
+ .data_type = QMI_EOTI,
+ .array_type = NO_ARRAY,
+ .tlv_type = QMI_COMMON_TLV_TYPE,
+ },
+};
+
+static const struct qmi_elem_info qmi_wlanfw_wlan_ini_resp_msg_v01_ei[] = {
+ {
+ .data_type = QMI_STRUCT,
+ .elem_len = 1,
+ .elem_size = sizeof(struct qmi_response_type_v01),
+ .array_type = NO_ARRAY,
+ .tlv_type = 0x02,
+ .offset = offsetof(struct qmi_wlanfw_wlan_ini_resp_msg_v01,
+ resp),
+ .ei_array = qmi_response_type_v01_ei,
+ },
+ {
+ .data_type = QMI_EOTI,
+ .array_type = NO_ARRAY,
+ .tlv_type = QMI_COMMON_TLV_TYPE,
+ },
+};
+
+static const struct qmi_elem_info qmi_wlfw_fw_init_done_ind_msg_v01_ei[] = {
+ {
+ .data_type = QMI_EOTI,
+ .array_type = NO_ARRAY,
+ },
+};
+
+static int ath11k_qmi_host_cap_send(struct ath11k_base *ab)
+{
+ struct qmi_wlanfw_host_cap_req_msg_v01 req;
+ struct qmi_wlanfw_host_cap_resp_msg_v01 resp;
+ struct qmi_txn txn;
+ int ret = 0;
+
+ memset(&req, 0, sizeof(req));
+ memset(&resp, 0, sizeof(resp));
+
+ req.num_clients_valid = 1;
+ req.num_clients = 1;
+ req.mem_cfg_mode = ab->qmi.target_mem_mode;
+ req.mem_cfg_mode_valid = 1;
+ req.bdf_support_valid = 1;
+ req.bdf_support = 1;
+
+ if (ab->hw_params.m3_fw_support) {
+ req.m3_support_valid = 1;
+ req.m3_support = 1;
+ req.m3_cache_support_valid = 1;
+ req.m3_cache_support = 1;
+ } else {
+ req.m3_support_valid = 0;
+ req.m3_support = 0;
+ req.m3_cache_support_valid = 0;
+ req.m3_cache_support = 0;
+ }
+
+ req.cal_done_valid = 1;
+ req.cal_done = ab->qmi.cal_done;
+
+ if (ab->hw_params.internal_sleep_clock) {
+ req.nm_modem_valid = 1;
+
+ /* Notify firmware that this is a non-Qualcomm platform. */
+ req.nm_modem |= HOST_CSTATE_BIT;
+
+ /* Notify firmware about the sleep clock selection;
+ * nm_modem bit[1] is used for this purpose. The host
+ * driver on non-Qualcomm platforms should select the
+ * internal sleep clock.
+ */
+ req.nm_modem |= SLEEP_CLOCK_SELECT_INTERNAL_BIT;
+ }
+
+ if (ab->hw_params.global_reset)
+ req.nm_modem |= PLATFORM_CAP_PCIE_GLOBAL_RESET;
+
+ req.nm_modem |= PLATFORM_CAP_PCIE_PME_D3COLD;
+
+ ath11k_dbg(ab, ATH11K_DBG_QMI, "host cap request\n");
+
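+ /* Standard QMI transaction: initialize the txn with the response
+ * decoder, send the encoded request, then wait for the reply from
+ * firmware.
+ */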
+ ret = qmi_txn_init(&ab->qmi.handle, &txn,
+ qmi_wlanfw_host_cap_resp_msg_v01_ei, &resp);
+ if (ret < 0)
+ goto out;
+
+ ret = qmi_send_request(&ab->qmi.handle, NULL, &txn,
+ QMI_WLANFW_HOST_CAP_REQ_V01,
+ QMI_WLANFW_HOST_CAP_REQ_MSG_V01_MAX_LEN,
+ qmi_wlanfw_host_cap_req_msg_v01_ei, &req);
+ if (ret < 0) {
+ qmi_txn_cancel(&txn);
+ ath11k_warn(ab, "failed to send host capability request: %d\n", ret);
+ goto out;
+ }
+
+ ret = qmi_txn_wait(&txn, msecs_to_jiffies(ATH11K_QMI_WLANFW_TIMEOUT_MS));
+ if (ret < 0)
+ goto out;
+
+ if (resp.resp.result != QMI_RESULT_SUCCESS_V01) {
+ ath11k_warn(ab, "host capability request failed: %d %d\n",
+ resp.resp.result, resp.resp.error);
+ ret = -EINVAL;
+ goto out;
+ }
+
+out:
+ return ret;
+}
+
+static int ath11k_qmi_fw_ind_register_send(struct ath11k_base *ab)
+{
+ struct qmi_wlanfw_ind_register_req_msg_v01 *req;
+ struct qmi_wlanfw_ind_register_resp_msg_v01 *resp;
+ struct qmi_handle *handle = &ab->qmi.handle;
+ struct qmi_txn txn;
+ int ret;
+
+ req = kzalloc(sizeof(*req), GFP_KERNEL);
+ if (!req)
+ return -ENOMEM;
+
+ resp = kzalloc(sizeof(*resp), GFP_KERNEL);
+ if (!resp) {
+ ret = -ENOMEM;
+ goto resp_out;
+ }
+
+ req->client_id_valid = 1;
+ req->client_id = QMI_WLANFW_CLIENT_ID;
+ req->fw_ready_enable_valid = 1;
+ req->fw_ready_enable = 1;
+ req->cal_done_enable_valid = 1;
+ req->cal_done_enable = 1;
+ req->fw_init_done_enable_valid = 1;
+ req->fw_init_done_enable = 1;
+
+ req->pin_connect_result_enable_valid = 0;
+ req->pin_connect_result_enable = 0;
+
+ /* WCN6750 doesn't request DDR memory via QMI;
+ * instead it uses a fixed 12 MB reserved memory
+ * region in DDR.
+ */
+ if (!ab->hw_params.fixed_fw_mem) {
+ req->request_mem_enable_valid = 1;
+ req->request_mem_enable = 1;
+ req->fw_mem_ready_enable_valid = 1;
+ req->fw_mem_ready_enable = 1;
+ }
+
+ ret = qmi_txn_init(handle, &txn,
+ qmi_wlanfw_ind_register_resp_msg_v01_ei, resp);
+ if (ret < 0)
+ goto out;
+
+ ath11k_dbg(ab, ATH11K_DBG_QMI, "indication register request\n");
+
+ ret = qmi_send_request(&ab->qmi.handle, NULL, &txn,
+ QMI_WLANFW_IND_REGISTER_REQ_V01,
+ QMI_WLANFW_IND_REGISTER_REQ_MSG_V01_MAX_LEN,
+ qmi_wlanfw_ind_register_req_msg_v01_ei, req);
+ if (ret < 0) {
+ qmi_txn_cancel(&txn);
+ ath11k_warn(ab, "failed to send indication register request: %d\n",
+ ret);
+ goto out;
+ }
+
+ ret = qmi_txn_wait(&txn, msecs_to_jiffies(ATH11K_QMI_WLANFW_TIMEOUT_MS));
+ if (ret < 0) {
+ ath11k_warn(ab, "failed to register fw indication: %d\n", ret);
+ goto out;
+ }
+
+ if (resp->resp.result != QMI_RESULT_SUCCESS_V01) {
+ ath11k_warn(ab, "firmware indication register request failed: %d %d\n",
+ resp->resp.result, resp->resp.error);
+ ret = -EINVAL;
+ goto out;
+ }
+
+out:
+ kfree(resp);
+resp_out:
+ kfree(req);
+ return ret;
+}
+
+static int ath11k_qmi_respond_fw_mem_request(struct ath11k_base *ab)
+{
+ struct qmi_wlanfw_respond_mem_req_msg_v01 *req;
+ struct qmi_wlanfw_respond_mem_resp_msg_v01 resp;
+ struct qmi_txn txn;
+ int ret = 0, i;
+ bool delayed;
+
+ req = kzalloc(sizeof(*req), GFP_KERNEL);
+ if (!req)
+ return -ENOMEM;
+
+ memset(&resp, 0, sizeof(resp));
+
+ /* For QCA6390 the firmware by default requests a block of ~4M
+ * contiguous DMA memory, which is hard to allocate from the OS. The
+ * host therefore returns failure to the firmware, and the firmware
+ * will then request multiple smaller chunks instead.
+ */
+ if (!(ab->hw_params.fixed_mem_region ||
+ test_bit(ATH11K_FLAG_FIXED_MEM_RGN, &ab->dev_flags)) &&
+ ab->qmi.target_mem_delayed) {
+ delayed = true;
+ ath11k_dbg(ab, ATH11K_DBG_QMI, "delays mem_request %d\n",
+ ab->qmi.mem_seg_count);
+ memset(req, 0, sizeof(*req));
+ } else {
+ delayed = false;
+ req->mem_seg_len = ab->qmi.mem_seg_count;
+
+ for (i = 0; i < req->mem_seg_len ; i++) {
+ req->mem_seg[i].addr = ab->qmi.target_mem[i].paddr;
+ req->mem_seg[i].size = ab->qmi.target_mem[i].size;
+ req->mem_seg[i].type = ab->qmi.target_mem[i].type;
+ ath11k_dbg(ab, ATH11K_DBG_QMI,
+ "req mem_seg[%d] %pad %u %u\n", i,
+ &ab->qmi.target_mem[i].paddr,
+ ab->qmi.target_mem[i].size,
+ ab->qmi.target_mem[i].type);
+ }
+ }
+
+ ret = qmi_txn_init(&ab->qmi.handle, &txn,
+ qmi_wlanfw_respond_mem_resp_msg_v01_ei, &resp);
+ if (ret < 0)
+ goto out;
+
+ ath11k_dbg(ab, ATH11K_DBG_QMI, "respond memory request delayed %i\n",
+ delayed);
+
+ ret = qmi_send_request(&ab->qmi.handle, NULL, &txn,
+ QMI_WLANFW_RESPOND_MEM_REQ_V01,
+ QMI_WLANFW_RESPOND_MEM_REQ_MSG_V01_MAX_LEN,
+ qmi_wlanfw_respond_mem_req_msg_v01_ei, req);
+ if (ret < 0) {
+ qmi_txn_cancel(&txn);
+ ath11k_warn(ab, "failed to respond qmi memory request: %d\n",
+ ret);
+ goto out;
+ }
+
+ ret = qmi_txn_wait(&txn, msecs_to_jiffies(ATH11K_QMI_WLANFW_TIMEOUT_MS));
+ if (ret < 0) {
+ ath11k_warn(ab, "failed to wait qmi memory request: %d\n", ret);
+ goto out;
+ }
+
+ if (resp.resp.result != QMI_RESULT_SUCCESS_V01) {
+ /* the error response is expected when
+ * target_mem_delayed is true.
+ */
+ if (delayed && resp.resp.error == 0)
+ goto out;
+
+ ath11k_warn(ab, "qmi respond memory request failed: %d %d\n",
+ resp.resp.result, resp.resp.error);
+ ret = -EINVAL;
+ goto out;
+ }
+out:
+ kfree(req);
+ return ret;
+}
+
+static void ath11k_qmi_free_target_mem_chunk(struct ath11k_base *ab)
+{
+ int i;
+
+ for (i = 0; i < ab->qmi.mem_seg_count; i++) {
+ if ((ab->hw_params.fixed_mem_region ||
+ test_bit(ATH11K_FLAG_FIXED_MEM_RGN, &ab->dev_flags)) &&
+ ab->qmi.target_mem[i].iaddr)
+ iounmap(ab->qmi.target_mem[i].iaddr);
+
+ if (!ab->qmi.target_mem[i].vaddr)
+ continue;
+
+ dma_free_coherent(ab->dev,
+ ab->qmi.target_mem[i].prev_size,
+ ab->qmi.target_mem[i].vaddr,
+ ab->qmi.target_mem[i].paddr);
+ ab->qmi.target_mem[i].vaddr = NULL;
+ }
+}
+
+static int ath11k_qmi_alloc_target_mem_chunk(struct ath11k_base *ab)
+{
+ int i;
+ struct target_mem_chunk *chunk;
+
+ ab->qmi.target_mem_delayed = false;
+
+ for (i = 0; i < ab->qmi.mem_seg_count; i++) {
+ chunk = &ab->qmi.target_mem[i];
+
+ /* Firmware reloads in coldboot/firmware recovery.
+ * In that case there is no need to allocate memory
+ * for FW again.
+ */
+ if (chunk->vaddr) {
+ if (chunk->prev_type == chunk->type &&
+ chunk->prev_size == chunk->size)
+ continue;
+
+ /* cannot reuse the existing chunk */
+ dma_free_coherent(ab->dev, chunk->prev_size,
+ chunk->vaddr, chunk->paddr);
+ chunk->vaddr = NULL;
+ }
+
+ chunk->vaddr = dma_alloc_coherent(ab->dev,
+ chunk->size,
+ &chunk->paddr,
+ GFP_KERNEL | __GFP_NOWARN);
+ if (!chunk->vaddr) {
+ if (ab->qmi.mem_seg_count <= ATH11K_QMI_FW_MEM_REQ_SEGMENT_CNT) {
+ ath11k_dbg(ab, ATH11K_DBG_QMI,
+ "dma allocation failed (%d B type %u), will try later with small size\n",
+ chunk->size,
+ chunk->type);
+ ath11k_qmi_free_target_mem_chunk(ab);
+ ab->qmi.target_mem_delayed = true;
+ return 0;
+ }
+
+ ath11k_err(ab, "failed to allocate dma memory for qmi (%d B type %u)\n",
+ chunk->size,
+ chunk->type);
+ return -EINVAL;
+ }
+ chunk->prev_type = chunk->type;
+ chunk->prev_size = chunk->size;
+ }
+
+ return 0;
+}
+
+static int ath11k_qmi_assign_target_mem_chunk(struct ath11k_base *ab)
+{
+ struct device *dev = ab->dev;
+ struct device_node *hremote_node = NULL;
+ struct resource res;
+ u32 host_ddr_sz;
+ int i, idx, ret;
+
+ for (i = 0, idx = 0; i < ab->qmi.mem_seg_count; i++) {
+ switch (ab->qmi.target_mem[i].type) {
+ case HOST_DDR_REGION_TYPE:
+ hremote_node = of_parse_phandle(dev->of_node, "memory-region", 0);
+ if (!hremote_node) {
+ ath11k_dbg(ab, ATH11K_DBG_QMI,
+ "fail to get hremote_node\n");
+ return -ENODEV;
+ }
+
+ ret = of_address_to_resource(hremote_node, 0, &res);
+ of_node_put(hremote_node);
+ if (ret) {
+ ath11k_dbg(ab, ATH11K_DBG_QMI,
+ "fail to get reg from hremote\n");
+ return ret;
+ }
+
+ if (res.end - res.start + 1 < ab->qmi.target_mem[i].size) {
+ ath11k_dbg(ab, ATH11K_DBG_QMI,
+ "fail to assign memory of sz\n");
+ return -EINVAL;
+ }
+
+ ab->qmi.target_mem[idx].paddr = res.start;
+ ab->qmi.target_mem[idx].iaddr =
+ ioremap(ab->qmi.target_mem[idx].paddr,
+ ab->qmi.target_mem[i].size);
+ if (!ab->qmi.target_mem[idx].iaddr)
+ return -EIO;
+
+ ab->qmi.target_mem[idx].size = ab->qmi.target_mem[i].size;
+ host_ddr_sz = ab->qmi.target_mem[i].size;
+ ab->qmi.target_mem[idx].type = ab->qmi.target_mem[i].type;
+ idx++;
+ break;
+ case BDF_MEM_REGION_TYPE:
+ ab->qmi.target_mem[idx].paddr = ab->hw_params.bdf_addr;
+ ab->qmi.target_mem[idx].vaddr = NULL;
+ ab->qmi.target_mem[idx].size = ab->qmi.target_mem[i].size;
+ ab->qmi.target_mem[idx].type = ab->qmi.target_mem[i].type;
+ idx++;
+ break;
+ case CALDB_MEM_REGION_TYPE:
+ if (ab->qmi.target_mem[i].size > ATH11K_QMI_CALDB_SIZE) {
+ ath11k_warn(ab, "qmi mem size is low to load caldata\n");
+ return -EINVAL;
+ }
+
+ if (ath11k_core_coldboot_cal_support(ab)) {
+ if (hremote_node) {
+ ab->qmi.target_mem[idx].paddr =
+ res.start + host_ddr_sz;
+ ab->qmi.target_mem[idx].iaddr =
+ ioremap(ab->qmi.target_mem[idx].paddr,
+ ab->qmi.target_mem[i].size);
+ if (!ab->qmi.target_mem[idx].iaddr)
+ return -EIO;
+ } else {
+ ab->qmi.target_mem[idx].paddr =
+ ATH11K_QMI_CALDB_ADDRESS;
+ }
+ } else {
+ ab->qmi.target_mem[idx].paddr = 0;
+ ab->qmi.target_mem[idx].vaddr = NULL;
+ }
+ ab->qmi.target_mem[idx].size = ab->qmi.target_mem[i].size;
+ ab->qmi.target_mem[idx].type = ab->qmi.target_mem[i].type;
+ idx++;
+ break;
+ default:
+ ath11k_warn(ab, "qmi ignore invalid mem req type %d\n",
+ ab->qmi.target_mem[i].type);
+ break;
+ }
+ }
+ ab->qmi.mem_seg_count = idx;
+
+ return 0;
+}
+
+static int ath11k_qmi_request_device_info(struct ath11k_base *ab)
+{
+ struct qmi_wlanfw_device_info_req_msg_v01 req = {};
+ struct qmi_wlanfw_device_info_resp_msg_v01 resp = {};
+ struct qmi_txn txn;
+ void __iomem *bar_addr_va;
+ int ret;
+
+ /* device info message req is only sent for hybrid bus devices */
+ if (!ab->hw_params.hybrid_bus_type)
+ return 0;
+
+ ret = qmi_txn_init(&ab->qmi.handle, &txn,
+ qmi_wlfw_device_info_resp_msg_v01_ei, &resp);
+ if (ret < 0)
+ goto out;
+
+ ret = qmi_send_request(&ab->qmi.handle, NULL, &txn,
+ QMI_WLANFW_DEVICE_INFO_REQ_V01,
+ QMI_WLANFW_DEVICE_INFO_REQ_MSG_V01_MAX_LEN,
+ qmi_wlanfw_device_info_req_msg_v01_ei, &req);
+ if (ret < 0) {
+ qmi_txn_cancel(&txn);
+ ath11k_warn(ab, "failed to send qmi target device info request: %d\n",
+ ret);
+ goto out;
+ }
+
+ ret = qmi_txn_wait(&txn, msecs_to_jiffies(ATH11K_QMI_WLANFW_TIMEOUT_MS));
+ if (ret < 0) {
+ ath11k_warn(ab, "failed to wait qmi target device info request: %d\n",
+ ret);
+ goto out;
+ }
+
+ if (resp.resp.result != QMI_RESULT_SUCCESS_V01) {
+ ath11k_warn(ab, "qmi device info request failed: %d %d\n",
+ resp.resp.result, resp.resp.error);
+ ret = -EINVAL;
+ goto out;
+ }
+
+ if (!resp.bar_addr_valid || !resp.bar_size_valid) {
+ ath11k_warn(ab, "qmi device info response invalid: %d %d\n",
+ resp.resp.result, resp.resp.error);
+ ret = -EINVAL;
+ goto out;
+ }
+
+ if (!resp.bar_addr ||
+ resp.bar_size != ATH11K_QMI_DEVICE_BAR_SIZE) {
+ ath11k_warn(ab, "qmi device info invalid address and size: %llu %u\n",
+ resp.bar_addr, resp.bar_size);
+ ret = -EINVAL;
+ goto out;
+ }
+
+ bar_addr_va = devm_ioremap(ab->dev, resp.bar_addr, resp.bar_size);
+
+ if (!bar_addr_va) {
+ ath11k_warn(ab, "qmi device info ioremap failed\n");
+ ab->mem_len = 0;
+ ret = -EIO;
+ goto out;
+ }
+
+ ab->mem = bar_addr_va;
+ ab->mem_len = resp.bar_size;
+
+ return 0;
+out:
+ return ret;
+}
+
+static int ath11k_qmi_request_target_cap(struct ath11k_base *ab)
+{
+ struct qmi_wlanfw_cap_req_msg_v01 req;
+ struct qmi_wlanfw_cap_resp_msg_v01 resp;
+ struct qmi_txn txn;
+ int ret = 0;
+ int r;
+ char *fw_build_id;
+ int fw_build_id_mask_len;
+
+ memset(&req, 0, sizeof(req));
+ memset(&resp, 0, sizeof(resp));
+
+ ret = qmi_txn_init(&ab->qmi.handle, &txn, qmi_wlanfw_cap_resp_msg_v01_ei,
+ &resp);
+ if (ret < 0)
+ goto out;
+
+ ath11k_dbg(ab, ATH11K_DBG_QMI, "target cap request\n");
+
+ ret = qmi_send_request(&ab->qmi.handle, NULL, &txn,
+ QMI_WLANFW_CAP_REQ_V01,
+ QMI_WLANFW_CAP_REQ_MSG_V01_MAX_LEN,
+ qmi_wlanfw_cap_req_msg_v01_ei, &req);
+ if (ret < 0) {
+ qmi_txn_cancel(&txn);
+ ath11k_warn(ab, "failed to send qmi cap request: %d\n",
+ ret);
+ goto out;
+ }
+
+ ret = qmi_txn_wait(&txn, msecs_to_jiffies(ATH11K_QMI_WLANFW_TIMEOUT_MS));
+ if (ret < 0) {
+ ath11k_warn(ab, "failed to wait qmi cap request: %d\n", ret);
+ goto out;
+ }
+
+ if (resp.resp.result != QMI_RESULT_SUCCESS_V01) {
+ ath11k_warn(ab, "qmi cap request failed: %d %d\n",
+ resp.resp.result, resp.resp.error);
+ ret = -EINVAL;
+ goto out;
+ }
+
+ if (resp.chip_info_valid) {
+ ab->qmi.target.chip_id = resp.chip_info.chip_id;
+ ab->qmi.target.chip_family = resp.chip_info.chip_family;
+ }
+
+ if (resp.board_info_valid)
+ ab->qmi.target.board_id = resp.board_info.board_id;
+ else
+ ab->qmi.target.board_id = 0xFF;
+
+ if (resp.soc_info_valid)
+ ab->qmi.target.soc_id = resp.soc_info.soc_id;
+
+ if (resp.fw_version_info_valid) {
+ ab->qmi.target.fw_version = resp.fw_version_info.fw_version;
+ strscpy(ab->qmi.target.fw_build_timestamp,
+ resp.fw_version_info.fw_build_timestamp,
+ sizeof(ab->qmi.target.fw_build_timestamp));
+ }
+
+ if (resp.fw_build_id_valid)
+ strscpy(ab->qmi.target.fw_build_id, resp.fw_build_id,
+ sizeof(ab->qmi.target.fw_build_id));
+
+ if (resp.eeprom_read_timeout_valid) {
+ ab->qmi.target.eeprom_caldata =
+ resp.eeprom_read_timeout;
+ ath11k_dbg(ab, ATH11K_DBG_QMI, "cal data supported from eeprom\n");
+ }
+
+ fw_build_id = ab->qmi.target.fw_build_id;
+ fw_build_id_mask_len = strlen(FW_BUILD_ID_MASK);
+ if (!strncmp(fw_build_id, FW_BUILD_ID_MASK, fw_build_id_mask_len))
+ fw_build_id = fw_build_id + fw_build_id_mask_len;
+
+ ath11k_info(ab, "chip_id 0x%x chip_family 0x%x board_id 0x%x soc_id 0x%x\n",
+ ab->qmi.target.chip_id, ab->qmi.target.chip_family,
+ ab->qmi.target.board_id, ab->qmi.target.soc_id);
+
+ ath11k_info(ab, "fw_version 0x%x fw_build_timestamp %s fw_build_id %s",
+ ab->qmi.target.fw_version,
+ ab->qmi.target.fw_build_timestamp,
+ fw_build_id);
+
+ r = ath11k_core_check_smbios(ab);
+ if (r)
+ ath11k_dbg(ab, ATH11K_DBG_QMI, "SMBIOS bdf variant name not set.\n");
+
+ r = ath11k_core_check_dt(ab);
+ if (r)
+ ath11k_dbg(ab, ATH11K_DBG_QMI, "DT bdf variant name not set.\n");
+
+out:
+ return ret;
+}
+
+static int ath11k_qmi_load_file_target_mem(struct ath11k_base *ab,
+ const u8 *data, u32 len, u8 type)
+{
+ struct qmi_wlanfw_bdf_download_req_msg_v01 *req;
+ struct qmi_wlanfw_bdf_download_resp_msg_v01 resp;
+ struct qmi_txn txn;
+ const u8 *temp = data;
+ void __iomem *bdf_addr = NULL;
+ int ret;
+ u32 remaining = len;
+
+ req = kzalloc(sizeof(*req), GFP_KERNEL);
+ if (!req)
+ return -ENOMEM;
+
+ memset(&resp, 0, sizeof(resp));
+
+ if (ab->hw_params.fixed_bdf_addr) {
+ bdf_addr = ioremap(ab->hw_params.bdf_addr, ab->hw_params.fw.board_size);
+ if (!bdf_addr) {
+ ath11k_warn(ab, "qmi ioremap error for bdf_addr\n");
+ ret = -EIO;
+ goto err_free_req;
+ }
+ }
+
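+ /* The file is pushed in segments of at most
+ * QMI_WLANFW_MAX_DATA_SIZE_V01 (6144) bytes; for illustration, a
+ * 10000 byte board file would go out as one 6144 byte segment
+ * followed by a 3856 byte segment with end set to 1.
+ */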
+ while (remaining) {
+ req->valid = 1;
+ req->file_id_valid = 1;
+ req->file_id = ab->qmi.target.board_id;
+ req->total_size_valid = 1;
+ req->total_size = remaining;
+ req->seg_id_valid = 1;
+ req->data_valid = 1;
+ req->bdf_type = type;
+ req->bdf_type_valid = 1;
+ req->end_valid = 1;
+ req->end = 0;
+
+ if (remaining > QMI_WLANFW_MAX_DATA_SIZE_V01) {
+ req->data_len = QMI_WLANFW_MAX_DATA_SIZE_V01;
+ } else {
+ req->data_len = remaining;
+ req->end = 1;
+ }
+
+ if (ab->hw_params.fixed_bdf_addr ||
+ type == ATH11K_QMI_FILE_TYPE_EEPROM) {
+ req->data_valid = 0;
+ req->end = 1;
+ req->data_len = ATH11K_QMI_MAX_BDF_FILE_NAME_SIZE;
+ } else {
+ memcpy(req->data, temp, req->data_len);
+ }
+
+ if (ab->hw_params.fixed_bdf_addr) {
+ if (type == ATH11K_QMI_FILE_TYPE_CALDATA)
+ bdf_addr += ab->hw_params.fw.cal_offset;
+
+ memcpy_toio(bdf_addr, temp, len);
+ }
+
+ ret = qmi_txn_init(&ab->qmi.handle, &txn,
+ qmi_wlanfw_bdf_download_resp_msg_v01_ei,
+ &resp);
+ if (ret < 0)
+ goto err_iounmap;
+
+ ath11k_dbg(ab, ATH11K_DBG_QMI, "bdf download req fixed addr type %d\n",
+ type);
+
+ ret = qmi_send_request(&ab->qmi.handle, NULL, &txn,
+ QMI_WLANFW_BDF_DOWNLOAD_REQ_V01,
+ QMI_WLANFW_BDF_DOWNLOAD_REQ_MSG_V01_MAX_LEN,
+ qmi_wlanfw_bdf_download_req_msg_v01_ei, req);
+ if (ret < 0) {
+ qmi_txn_cancel(&txn);
+ goto err_iounmap;
+ }
+
+ ret = qmi_txn_wait(&txn, msecs_to_jiffies(ATH11K_QMI_WLANFW_TIMEOUT_MS));
+ if (ret < 0) {
+ ath11k_warn(ab, "failed to wait board file download request: %d\n",
+ ret);
+ goto err_iounmap;
+ }
+
+ if (resp.resp.result != QMI_RESULT_SUCCESS_V01) {
+ ath11k_warn(ab, "board file download request failed: %d %d\n",
+ resp.resp.result, resp.resp.error);
+ ret = -EINVAL;
+ goto err_iounmap;
+ }
+
+ if (ab->hw_params.fixed_bdf_addr ||
+ type == ATH11K_QMI_FILE_TYPE_EEPROM) {
+ remaining = 0;
+ } else {
+ remaining -= req->data_len;
+ temp += req->data_len;
+ req->seg_id++;
+ ath11k_dbg(ab, ATH11K_DBG_QMI, "bdf download request remaining %i\n",
+ remaining);
+ }
+ }
+
+err_iounmap:
+ if (ab->hw_params.fixed_bdf_addr)
+ iounmap(bdf_addr);
+
+err_free_req:
+ kfree(req);
+
+ return ret;
+}
+
+static int ath11k_qmi_load_bdf_qmi(struct ath11k_base *ab,
+ bool regdb)
+{
+ struct device *dev = ab->dev;
+ char filename[ATH11K_QMI_MAX_BDF_FILE_NAME_SIZE];
+ const struct firmware *fw_entry;
+ struct ath11k_board_data bd;
+ u32 fw_size, file_type;
+ int ret = 0, bdf_type;
+ const u8 *tmp;
+
+ memset(&bd, 0, sizeof(bd));
+
+ if (regdb) {
+ ret = ath11k_core_fetch_regdb(ab, &bd);
+ } else {
+ ret = ath11k_core_fetch_bdf(ab, &bd);
+ if (ret)
+ ath11k_warn(ab, "qmi failed to fetch board file: %d\n", ret);
+ }
+
+ if (ret)
+ goto out;
+
+ if (regdb)
+ bdf_type = ATH11K_QMI_BDF_TYPE_REGDB;
+ else if (bd.len >= SELFMAG && memcmp(bd.data, ELFMAG, SELFMAG) == 0)
+ bdf_type = ATH11K_QMI_BDF_TYPE_ELF;
+ else
+ bdf_type = ATH11K_QMI_BDF_TYPE_BIN;
+
+ ath11k_dbg(ab, ATH11K_DBG_QMI, "bdf_type %d\n", bdf_type);
+
+ fw_size = min_t(u32, ab->hw_params.fw.board_size, bd.len);
+
+ ret = ath11k_qmi_load_file_target_mem(ab, bd.data, fw_size, bdf_type);
+ if (ret < 0) {
+ ath11k_warn(ab, "qmi failed to load bdf file\n");
+ goto out;
+ }
+
+ /* QCA6390/WCN6855 does not support cal data, skip it */
+ if (bdf_type == ATH11K_QMI_BDF_TYPE_ELF || bdf_type == ATH11K_QMI_BDF_TYPE_REGDB)
+ goto out;
+
+ if (ab->qmi.target.eeprom_caldata) {
+ file_type = ATH11K_QMI_FILE_TYPE_EEPROM;
+ tmp = filename;
+ fw_size = ATH11K_QMI_MAX_BDF_FILE_NAME_SIZE;
+ } else {
+ file_type = ATH11K_QMI_FILE_TYPE_CALDATA;
+
+ /* cal-<bus>-<id>.bin */
+ snprintf(filename, sizeof(filename), "cal-%s-%s.bin",
+ ath11k_bus_str(ab->hif.bus), dev_name(dev));
+ fw_entry = ath11k_core_firmware_request(ab, filename);
+ if (!IS_ERR(fw_entry))
+ goto success;
+
+ fw_entry = ath11k_core_firmware_request(ab, ATH11K_DEFAULT_CAL_FILE);
+ if (IS_ERR(fw_entry)) {
+ /* Caldata may not be present during first-time calibration
+ * in the factory, hence allow booting without caldata in
+ * FTM mode.
+ */
+ if (ath11k_ftm_mode) {
+ ath11k_info(ab,
+ "Booting without cal data file in factory test mode\n");
+ return 0;
+ }
+ ret = PTR_ERR(fw_entry);
+ ath11k_warn(ab,
+ "qmi failed to load CAL data file:%s\n",
+ filename);
+ goto out;
+ }
+success:
+ fw_size = min_t(u32, ab->hw_params.fw.board_size, fw_entry->size);
+ tmp = fw_entry->data;
+ }
+
+ ret = ath11k_qmi_load_file_target_mem(ab, tmp, fw_size, file_type);
+ if (ret < 0) {
+ ath11k_warn(ab, "qmi failed to load caldata\n");
+ goto out_qmi_cal;
+ }
+
+ ath11k_dbg(ab, ATH11K_DBG_QMI, "caldata type: %u\n", file_type);
+
+out_qmi_cal:
+ if (!ab->qmi.target.eeprom_caldata)
+ release_firmware(fw_entry);
+out:
+ ath11k_core_free_bdf(ab, &bd);
+ ath11k_dbg(ab, ATH11K_DBG_QMI, "BDF download sequence completed\n");
+
+ return ret;
+}
+
+static int ath11k_qmi_m3_load(struct ath11k_base *ab)
+{
+ struct m3_mem_region *m3_mem = &ab->qmi.m3_mem;
+ const struct firmware *fw;
+ char path[100];
+ int ret;
+
+ fw = ath11k_core_firmware_request(ab, ATH11K_M3_FILE);
+ if (IS_ERR(fw)) {
+ ret = PTR_ERR(fw);
+ ath11k_core_create_firmware_path(ab, ATH11K_M3_FILE,
+ path, sizeof(path));
+ ath11k_err(ab, "failed to load %s: %d\n", path, ret);
+ return ret;
+ }
+
+ /* Reuse the previous allocation only if it is large enough
+ * for the new M3 image.
+ */
+ if (m3_mem->vaddr) {
+ if (m3_mem->size >= fw->size)
+ goto skip_m3_alloc;
+
+ dma_free_coherent(ab->dev, m3_mem->size,
+ m3_mem->vaddr, m3_mem->paddr);
+ m3_mem->vaddr = NULL;
+ }
+
+ m3_mem->vaddr = dma_alloc_coherent(ab->dev,
+ fw->size, &m3_mem->paddr,
+ GFP_KERNEL);
+ if (!m3_mem->vaddr) {
+ ath11k_err(ab, "failed to allocate memory for M3 with size %zu\n",
+ fw->size);
+ release_firmware(fw);
+ return -ENOMEM;
+ }
+
+skip_m3_alloc:
+ memcpy(m3_mem->vaddr, fw->data, fw->size);
+ m3_mem->size = fw->size;
+ release_firmware(fw);
+
+ return 0;
+}
+
+static void ath11k_qmi_m3_free(struct ath11k_base *ab)
+{
+ struct m3_mem_region *m3_mem = &ab->qmi.m3_mem;
+
+ if (!ab->hw_params.m3_fw_support || !m3_mem->vaddr)
+ return;
+
+ dma_free_coherent(ab->dev, m3_mem->size,
+ m3_mem->vaddr, m3_mem->paddr);
+ m3_mem->vaddr = NULL;
+ m3_mem->size = 0;
+}
+
+static int ath11k_qmi_wlanfw_m3_info_send(struct ath11k_base *ab)
+{
+ struct m3_mem_region *m3_mem = &ab->qmi.m3_mem;
+ struct qmi_wlanfw_m3_info_req_msg_v01 req;
+ struct qmi_wlanfw_m3_info_resp_msg_v01 resp;
+ struct qmi_txn txn;
+ int ret = 0;
+
+ memset(&req, 0, sizeof(req));
+ memset(&resp, 0, sizeof(resp));
+
+ if (ab->hw_params.m3_fw_support) {
+ ret = ath11k_qmi_m3_load(ab);
+ if (ret) {
+ ath11k_err(ab, "failed to load m3 firmware: %d", ret);
+ return ret;
+ }
+
+ req.addr = m3_mem->paddr;
+ req.size = m3_mem->size;
+ } else {
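+ /* Targets without a separate M3 firmware pass a zero
+ * address and size, indicating no M3 image is provided.
+ */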
+ req.addr = 0;
+ req.size = 0;
+ }
+
+ ret = qmi_txn_init(&ab->qmi.handle, &txn,
+ qmi_wlanfw_m3_info_resp_msg_v01_ei, &resp);
+ if (ret < 0)
+ goto out;
+
+ ath11k_dbg(ab, ATH11K_DBG_QMI, "m3 info req\n");
+
+ ret = qmi_send_request(&ab->qmi.handle, NULL, &txn,
+ QMI_WLANFW_M3_INFO_REQ_V01,
+ QMI_WLANFW_M3_INFO_REQ_MSG_V01_MAX_MSG_LEN,
+ qmi_wlanfw_m3_info_req_msg_v01_ei, &req);
+ if (ret < 0) {
+ qmi_txn_cancel(&txn);
+ ath11k_warn(ab, "failed to send m3 information request: %d\n",
+ ret);
+ goto out;
+ }
+
+ ret = qmi_txn_wait(&txn, msecs_to_jiffies(ATH11K_QMI_WLANFW_TIMEOUT_MS));
+ if (ret < 0) {
+ ath11k_warn(ab, "failed to wait m3 information request: %d\n", ret);
+ goto out;
+ }
+
+ if (resp.resp.result != QMI_RESULT_SUCCESS_V01) {
+ ath11k_warn(ab, "m3 info request failed: %d %d\n",
+ resp.resp.result, resp.resp.error);
+ ret = -EINVAL;
+ goto out;
+ }
+out:
+ return ret;
+}
+
+static int ath11k_qmi_wlanfw_mode_send(struct ath11k_base *ab,
+ u32 mode)
+{
+ struct qmi_wlanfw_wlan_mode_req_msg_v01 req;
+ struct qmi_wlanfw_wlan_mode_resp_msg_v01 resp;
+ struct qmi_txn txn;
+ int ret = 0;
+
+ memset(&req, 0, sizeof(req));
+ memset(&resp, 0, sizeof(resp));
+
+ req.mode = mode;
+ req.hw_debug_valid = 1;
+ req.hw_debug = 0;
+
+ ret = qmi_txn_init(&ab->qmi.handle, &txn,
+ qmi_wlanfw_wlan_mode_resp_msg_v01_ei, &resp);
+ if (ret < 0)
+ goto out;
+
+ ath11k_dbg(ab, ATH11K_DBG_QMI, "wlan mode req mode %d\n", mode);
+
+ ret = qmi_send_request(&ab->qmi.handle, NULL, &txn,
+ QMI_WLANFW_WLAN_MODE_REQ_V01,
+ QMI_WLANFW_WLAN_MODE_REQ_MSG_V01_MAX_LEN,
+ qmi_wlanfw_wlan_mode_req_msg_v01_ei, &req);
+ if (ret < 0) {
+ qmi_txn_cancel(&txn);
+ ath11k_warn(ab, "failed to send wlan mode request (mode %d): %d\n",
+ mode, ret);
+ goto out;
+ }
+
+ ret = qmi_txn_wait(&txn, msecs_to_jiffies(ATH11K_QMI_WLANFW_TIMEOUT_MS));
+ if (ret < 0) {
+ if (mode == ATH11K_FIRMWARE_MODE_OFF && ret == -ENETRESET) {
+ ath11k_warn(ab, "WLFW service is dis-connected\n");
+ return 0;
+ }
+ ath11k_warn(ab, "failed to wait wlan mode request (mode %d): %d\n",
+ mode, ret);
+ goto out;
+ }
+
+ if (resp.resp.result != QMI_RESULT_SUCCESS_V01) {
+ ath11k_warn(ab, "wlan mode request failed (mode: %d): %d %d\n",
+ mode, resp.resp.result, resp.resp.error);
+ ret = -EINVAL;
+ goto out;
+ }
+
+out:
+ return ret;
+}
+
+static int ath11k_qmi_wlanfw_wlan_cfg_send(struct ath11k_base *ab)
+{
+ struct qmi_wlanfw_wlan_cfg_req_msg_v01 *req;
+ struct qmi_wlanfw_wlan_cfg_resp_msg_v01 resp;
+ struct ce_pipe_config *ce_cfg;
+ struct service_to_pipe *svc_cfg;
+ struct qmi_txn txn;
+ int ret = 0, pipe_num;
+
+ ce_cfg = (struct ce_pipe_config *)ab->qmi.ce_cfg.tgt_ce;
+ svc_cfg = (struct service_to_pipe *)ab->qmi.ce_cfg.svc_to_ce_map;
+
+ req = kzalloc(sizeof(*req), GFP_KERNEL);
+ if (!req)
+ return -ENOMEM;
+
+ memset(&resp, 0, sizeof(resp));
+
+ req->host_version_valid = 1;
+ strscpy(req->host_version, ATH11K_HOST_VERSION_STRING,
+ sizeof(req->host_version));
+
+ req->tgt_cfg_valid = 1;
+ /* This is number of CE configs */
+ req->tgt_cfg_len = ab->qmi.ce_cfg.tgt_ce_len;
+ for (pipe_num = 0; pipe_num < req->tgt_cfg_len; pipe_num++) {
+ req->tgt_cfg[pipe_num].pipe_num = ce_cfg[pipe_num].pipenum;
+ req->tgt_cfg[pipe_num].pipe_dir = ce_cfg[pipe_num].pipedir;
+ req->tgt_cfg[pipe_num].nentries = ce_cfg[pipe_num].nentries;
+ req->tgt_cfg[pipe_num].nbytes_max = ce_cfg[pipe_num].nbytes_max;
+ req->tgt_cfg[pipe_num].flags = ce_cfg[pipe_num].flags;
+ }
+
+ req->svc_cfg_valid = 1;
+ /* This is number of Service/CE configs */
+ req->svc_cfg_len = ab->qmi.ce_cfg.svc_to_ce_map_len;
+ for (pipe_num = 0; pipe_num < req->svc_cfg_len; pipe_num++) {
+ req->svc_cfg[pipe_num].service_id = svc_cfg[pipe_num].service_id;
+ req->svc_cfg[pipe_num].pipe_dir = svc_cfg[pipe_num].pipedir;
+ req->svc_cfg[pipe_num].pipe_num = svc_cfg[pipe_num].pipenum;
+ }
+ req->shadow_reg_valid = 0;
+
+ /* set shadow v2 configuration */
+ if (ab->hw_params.supports_shadow_regs) {
+ req->shadow_reg_v2_valid = 1;
+ req->shadow_reg_v2_len = min_t(u32,
+ ab->qmi.ce_cfg.shadow_reg_v2_len,
+ QMI_WLANFW_MAX_NUM_SHADOW_REG_V2_V01);
+ memcpy(&req->shadow_reg_v2, ab->qmi.ce_cfg.shadow_reg_v2,
+ sizeof(u32) * req->shadow_reg_v2_len);
+ } else {
+ req->shadow_reg_v2_valid = 0;
+ }
+
+ ret = qmi_txn_init(&ab->qmi.handle, &txn,
+ qmi_wlanfw_wlan_cfg_resp_msg_v01_ei, &resp);
+ if (ret < 0)
+ goto out;
+
+ ath11k_dbg(ab, ATH11K_DBG_QMI, "wlan cfg req\n");
+
+ ret = qmi_send_request(&ab->qmi.handle, NULL, &txn,
+ QMI_WLANFW_WLAN_CFG_REQ_V01,
+ QMI_WLANFW_WLAN_CFG_REQ_MSG_V01_MAX_LEN,
+ qmi_wlanfw_wlan_cfg_req_msg_v01_ei, req);
+ if (ret < 0) {
+ qmi_txn_cancel(&txn);
+ ath11k_warn(ab, "failed to send wlan config request: %d\n",
+ ret);
+ goto out;
+ }
+
+ ret = qmi_txn_wait(&txn, msecs_to_jiffies(ATH11K_QMI_WLANFW_TIMEOUT_MS));
+ if (ret < 0) {
+ ath11k_warn(ab, "failed to wait wlan config request: %d\n", ret);
+ goto out;
+ }
+
+ if (resp.resp.result != QMI_RESULT_SUCCESS_V01) {
+ ath11k_warn(ab, "wlan config request failed: %d %d\n",
+ resp.resp.result, resp.resp.error);
+ ret = -EINVAL;
+ goto out;
+ }
+
+out:
+ kfree(req);
+ return ret;
+}
+
+static int ath11k_qmi_wlanfw_wlan_ini_send(struct ath11k_base *ab, bool enable)
+{
+ int ret;
+ struct qmi_txn txn;
+ struct qmi_wlanfw_wlan_ini_req_msg_v01 req = {};
+ struct qmi_wlanfw_wlan_ini_resp_msg_v01 resp = {};
+
+ req.enablefwlog_valid = true;
+ req.enablefwlog = enable ? 1 : 0;
+
+ ret = qmi_txn_init(&ab->qmi.handle, &txn,
+ qmi_wlanfw_wlan_ini_resp_msg_v01_ei, &resp);
+ if (ret < 0)
+ goto out;
+
+ ret = qmi_send_request(&ab->qmi.handle, NULL, &txn,
+ QMI_WLANFW_WLAN_INI_REQ_V01,
+ QMI_WLANFW_WLAN_INI_REQ_MSG_V01_MAX_LEN,
+ qmi_wlanfw_wlan_ini_req_msg_v01_ei, &req);
+ if (ret < 0) {
+ ath11k_warn(ab, "qmi failed to send wlan ini request, err = %d\n",
+ ret);
+ qmi_txn_cancel(&txn);
+ goto out;
+ }
+
+ ret = qmi_txn_wait(&txn, msecs_to_jiffies(ATH11K_QMI_WLANFW_TIMEOUT_MS));
+ if (ret < 0) {
+ ath11k_warn(ab, "qmi failed wlan ini request, err = %d\n", ret);
+ goto out;
+ }
+
+ if (resp.resp.result != QMI_RESULT_SUCCESS_V01) {
+ ath11k_warn(ab, "qmi wlan ini request failed, result: %d, err: %d\n",
+ resp.resp.result, resp.resp.error);
+ ret = -EINVAL;
+ }
+
+out:
+ return ret;
+}
+
+void ath11k_qmi_firmware_stop(struct ath11k_base *ab)
+{
+ int ret;
+
+ ath11k_dbg(ab, ATH11K_DBG_QMI, "firmware stop\n");
+
+ ret = ath11k_qmi_wlanfw_mode_send(ab, ATH11K_FIRMWARE_MODE_OFF);
+ if (ret < 0) {
+ ath11k_warn(ab, "qmi failed to send wlan mode off: %d\n", ret);
+ return;
+ }
+}
+
+int ath11k_qmi_firmware_start(struct ath11k_base *ab,
+ u32 mode)
+{
+ int ret;
+
+ ath11k_dbg(ab, ATH11K_DBG_QMI, "firmware start\n");
+
+ if (ab->hw_params.fw_wmi_diag_event) {
+ ret = ath11k_qmi_wlanfw_wlan_ini_send(ab, true);
+ if (ret < 0) {
+ ath11k_warn(ab, "qmi failed to send wlan fw ini:%d\n", ret);
+ return ret;
+ }
+ }
+
+ ret = ath11k_qmi_wlanfw_wlan_cfg_send(ab);
+ if (ret < 0) {
+ ath11k_warn(ab, "qmi failed to send wlan cfg: %d\n", ret);
+ return ret;
+ }
+
+ ret = ath11k_qmi_wlanfw_mode_send(ab, mode);
+ if (ret < 0) {
+ ath11k_warn(ab, "qmi failed to send wlan fw mode: %d\n", ret);
+ return ret;
+ }
+
+ return 0;
+}
+
+int ath11k_qmi_fwreset_from_cold_boot(struct ath11k_base *ab)
+{
+ int timeout;
+
+ if (!ath11k_core_coldboot_cal_support(ab) ||
+ ab->hw_params.cbcal_restart_fw == 0)
+ return 0;
+
+ ath11k_dbg(ab, ATH11K_DBG_QMI, "wait for cold boot done\n");
+
+ timeout = wait_event_timeout(ab->qmi.cold_boot_waitq,
+ (ab->qmi.cal_done == 1),
+ ATH11K_COLD_BOOT_FW_RESET_DELAY);
+
+ if (timeout <= 0) {
+ ath11k_warn(ab, "Coldboot Calibration timed out\n");
+ return -ETIMEDOUT;
+ }
+
+ /* reset the firmware */
+ ath11k_hif_power_down(ab);
+ ath11k_hif_power_up(ab);
+ ath11k_dbg(ab, ATH11K_DBG_QMI, "exit wait for cold boot done\n");
+ return 0;
+}
+EXPORT_SYMBOL(ath11k_qmi_fwreset_from_cold_boot);
+
+static int ath11k_qmi_process_coldboot_calibration(struct ath11k_base *ab)
+{
+ int timeout;
+ int ret;
+
+ ret = ath11k_qmi_wlanfw_mode_send(ab, ATH11K_FIRMWARE_MODE_COLD_BOOT);
+ if (ret < 0) {
+ ath11k_warn(ab, "qmi failed to send wlan fw mode: %d\n", ret);
+ return ret;
+ }
+
+ ath11k_dbg(ab, ATH11K_DBG_QMI, "Coldboot calibration wait started\n");
+
+ timeout = wait_event_timeout(ab->qmi.cold_boot_waitq,
+ (ab->qmi.cal_done == 1),
+ ATH11K_COLD_BOOT_FW_RESET_DELAY);
+ if (timeout <= 0) {
+ ath11k_warn(ab, "coldboot calibration timed out\n");
+ return 0;
+ }
+
+ ath11k_dbg(ab, ATH11K_DBG_QMI, "Coldboot calibration done\n");
+
+ return 0;
+}
+
+static int
+ath11k_qmi_driver_event_post(struct ath11k_qmi *qmi,
+ enum ath11k_qmi_event_type type,
+ void *data)
+{
+ struct ath11k_qmi_driver_event *event;
+
+ event = kzalloc(sizeof(*event), GFP_ATOMIC);
+ if (!event)
+ return -ENOMEM;
+
+ event->type = type;
+ event->data = data;
+
+ spin_lock(&qmi->event_lock);
+ list_add_tail(&event->list, &qmi->event_list);
+ spin_unlock(&qmi->event_lock);
+
+ queue_work(qmi->event_wq, &qmi->event_work);
+
+ return 0;
+}
+
+static int ath11k_qmi_event_mem_request(struct ath11k_qmi *qmi)
+{
+ struct ath11k_base *ab = qmi->ab;
+ int ret;
+
+ ret = ath11k_qmi_respond_fw_mem_request(ab);
+ if (ret < 0) {
+ ath11k_warn(ab, "qmi failed to respond fw mem req: %d\n", ret);
+ return ret;
+ }
+
+ return ret;
+}
+
+static int ath11k_qmi_event_load_bdf(struct ath11k_qmi *qmi)
+{
+ struct ath11k_base *ab = qmi->ab;
+ int ret;
+
+ ret = ath11k_qmi_request_target_cap(ab);
+ if (ret < 0) {
+ ath11k_warn(ab, "failed to request qmi target capabilities: %d\n",
+ ret);
+ return ret;
+ }
+
+ ret = ath11k_qmi_request_device_info(ab);
+ if (ret < 0) {
+ ath11k_warn(ab, "failed to request qmi device info: %d\n", ret);
+ return ret;
+ }
+
+ if (ab->hw_params.supports_regdb)
+ ath11k_qmi_load_bdf_qmi(ab, true);
+
+ ret = ath11k_qmi_load_bdf_qmi(ab, false);
+ if (ret < 0) {
+ ath11k_warn(ab, "failed to load board data file: %d\n", ret);
+ return ret;
+ }
+
+ return 0;
+}
+
+static int ath11k_qmi_event_server_arrive(struct ath11k_qmi *qmi)
+{
+ struct ath11k_base *ab = qmi->ab;
+ int ret;
+
+ ret = ath11k_qmi_fw_ind_register_send(ab);
+ if (ret < 0) {
+ ath11k_warn(ab, "failed to send qmi firmware indication: %d\n",
+ ret);
+ return ret;
+ }
+
+ ret = ath11k_qmi_host_cap_send(ab);
+ if (ret < 0) {
+ ath11k_warn(ab, "failed to send qmi host cap: %d\n", ret);
+ return ret;
+ }
+
+ if (!ab->hw_params.fixed_fw_mem)
+ return ret;
+
+ ret = ath11k_qmi_event_load_bdf(qmi);
+ if (ret < 0) {
+ ath11k_warn(ab, "qmi failed to download BDF:%d\n", ret);
+ return ret;
+ }
+
+ return ret;
+}
+
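+/* A sketch of the overall QMI bring-up sequence implemented by the
+ * handlers in this file (memory request/ready steps are skipped on
+ * fixed_fw_mem targets, which load the BDF right after host cap):
+ *
+ *   server arrive -> fw ind register -> host cap
+ *     -> FW request mem ind -> respond mem -> FW mem ready ind
+ *     -> target cap + device info -> regdb/BDF/caldata download
+ *     -> M3 info -> FW init done / FW ready
+ *     -> wlan ini + wlan cfg -> wlan mode ON
+ */
+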
+static void ath11k_qmi_msg_mem_request_cb(struct qmi_handle *qmi_hdl,
+ struct sockaddr_qrtr *sq,
+ struct qmi_txn *txn,
+ const void *data)
+{
+ struct ath11k_qmi *qmi = container_of(qmi_hdl, struct ath11k_qmi, handle);
+ struct ath11k_base *ab = qmi->ab;
+ const struct qmi_wlanfw_request_mem_ind_msg_v01 *msg = data;
+ int i, ret;
+
+ ath11k_dbg(ab, ATH11K_DBG_QMI, "firmware request memory request\n");
+
+ if (msg->mem_seg_len == 0 ||
+ msg->mem_seg_len > ATH11K_QMI_WLANFW_MAX_NUM_MEM_SEG_V01) {
+ ath11k_warn(ab, "invalid memory segment length: %u\n",
+ msg->mem_seg_len);
+ return;
+ }
+
+ ab->qmi.mem_seg_count = msg->mem_seg_len;
+
+ for (i = 0; i < qmi->mem_seg_count; i++) {
+ ab->qmi.target_mem[i].type = msg->mem_seg[i].type;
+ ab->qmi.target_mem[i].size = msg->mem_seg[i].size;
+ ath11k_dbg(ab, ATH11K_DBG_QMI, "mem seg type %d size %d\n",
+ msg->mem_seg[i].type, msg->mem_seg[i].size);
+ }
+
+ if (ab->hw_params.fixed_mem_region ||
+ test_bit(ATH11K_FLAG_FIXED_MEM_RGN, &ab->dev_flags)) {
+ ret = ath11k_qmi_assign_target_mem_chunk(ab);
+ if (ret) {
+ ath11k_warn(ab, "failed to assign qmi target memory: %d\n",
+ ret);
+ return;
+ }
+ } else {
+ ret = ath11k_qmi_alloc_target_mem_chunk(ab);
+ if (ret) {
+ ath11k_warn(ab, "failed to allocate qmi target memory: %d\n",
+ ret);
+ return;
+ }
+ }
+
+ ath11k_qmi_driver_event_post(qmi, ATH11K_QMI_EVENT_REQUEST_MEM, NULL);
+}
+
+static void ath11k_qmi_msg_mem_ready_cb(struct qmi_handle *qmi_hdl,
+ struct sockaddr_qrtr *sq,
+ struct qmi_txn *txn,
+ const void *decoded)
+{
+ struct ath11k_qmi *qmi = container_of(qmi_hdl, struct ath11k_qmi, handle);
+ struct ath11k_base *ab = qmi->ab;
+
+ ath11k_dbg(ab, ATH11K_DBG_QMI, "firmware memory ready indication\n");
+ ath11k_qmi_driver_event_post(qmi, ATH11K_QMI_EVENT_FW_MEM_READY, NULL);
+}
+
+static void ath11k_qmi_msg_fw_ready_cb(struct qmi_handle *qmi_hdl,
+ struct sockaddr_qrtr *sq,
+ struct qmi_txn *txn,
+ const void *decoded)
+{
+ struct ath11k_qmi *qmi = container_of(qmi_hdl, struct ath11k_qmi, handle);
+ struct ath11k_base *ab = qmi->ab;
+
+ ath11k_dbg(ab, ATH11K_DBG_QMI, "firmware ready\n");
+
+ if (!ab->qmi.cal_done) {
+ ab->qmi.cal_done = 1;
+ wake_up(&ab->qmi.cold_boot_waitq);
+ }
+
+ ath11k_qmi_driver_event_post(qmi, ATH11K_QMI_EVENT_FW_READY, NULL);
+}
+
+static void ath11k_qmi_msg_cold_boot_cal_done_cb(struct qmi_handle *qmi_hdl,
+ struct sockaddr_qrtr *sq,
+ struct qmi_txn *txn,
+ const void *decoded)
+{
+ struct ath11k_qmi *qmi = container_of(qmi_hdl,
+ struct ath11k_qmi, handle);
+ struct ath11k_base *ab = qmi->ab;
+
+ ab->qmi.cal_done = 1;
+ wake_up(&ab->qmi.cold_boot_waitq);
+ ath11k_dbg(ab, ATH11K_DBG_QMI, "cold boot calibration done\n");
+}
+
+static void ath11k_qmi_msg_fw_init_done_cb(struct qmi_handle *qmi_hdl,
+ struct sockaddr_qrtr *sq,
+ struct qmi_txn *txn,
+ const void *decoded)
+{
+ struct ath11k_qmi *qmi = container_of(qmi_hdl,
+ struct ath11k_qmi, handle);
+ struct ath11k_base *ab = qmi->ab;
+
+ ath11k_qmi_driver_event_post(qmi, ATH11K_QMI_EVENT_FW_INIT_DONE, NULL);
+ ath11k_dbg(ab, ATH11K_DBG_QMI, "firmware init done\n");
+}
+
+static const struct qmi_msg_handler ath11k_qmi_msg_handlers[] = {
+ {
+ .type = QMI_INDICATION,
+ .msg_id = QMI_WLFW_REQUEST_MEM_IND_V01,
+ .ei = qmi_wlanfw_request_mem_ind_msg_v01_ei,
+ .decoded_size = sizeof(struct qmi_wlanfw_request_mem_ind_msg_v01),
+ .fn = ath11k_qmi_msg_mem_request_cb,
+ },
+ {
+ .type = QMI_INDICATION,
+ .msg_id = QMI_WLFW_FW_MEM_READY_IND_V01,
+ .ei = qmi_wlanfw_mem_ready_ind_msg_v01_ei,
+ .decoded_size = sizeof(struct qmi_wlanfw_fw_mem_ready_ind_msg_v01),
+ .fn = ath11k_qmi_msg_mem_ready_cb,
+ },
+ {
+ .type = QMI_INDICATION,
+ .msg_id = QMI_WLFW_FW_READY_IND_V01,
+ .ei = qmi_wlanfw_fw_ready_ind_msg_v01_ei,
+ .decoded_size = sizeof(struct qmi_wlanfw_fw_ready_ind_msg_v01),
+ .fn = ath11k_qmi_msg_fw_ready_cb,
+ },
+ {
+ .type = QMI_INDICATION,
+ .msg_id = QMI_WLFW_COLD_BOOT_CAL_DONE_IND_V01,
+ .ei = qmi_wlanfw_cold_boot_cal_done_ind_msg_v01_ei,
+ .decoded_size =
+ sizeof(struct qmi_wlanfw_fw_cold_cal_done_ind_msg_v01),
+ .fn = ath11k_qmi_msg_cold_boot_cal_done_cb,
+ },
+ {
+ .type = QMI_INDICATION,
+ .msg_id = QMI_WLFW_FW_INIT_DONE_IND_V01,
+ .ei = qmi_wlfw_fw_init_done_ind_msg_v01_ei,
+ .decoded_size =
+ sizeof(struct qmi_wlfw_fw_init_done_ind_msg_v01),
+ .fn = ath11k_qmi_msg_fw_init_done_cb,
+ },
+
+ /* end of list */
+ {},
+};
+
+static int ath11k_qmi_ops_new_server(struct qmi_handle *qmi_hdl,
+ struct qmi_service *service)
+{
+ struct ath11k_qmi *qmi = container_of(qmi_hdl, struct ath11k_qmi, handle);
+ struct ath11k_base *ab = qmi->ab;
+ struct sockaddr_qrtr *sq = &qmi->sq;
+ int ret;
+
+ sq->sq_family = AF_QIPCRTR;
+ sq->sq_node = service->node;
+ sq->sq_port = service->port;
+
+ ret = kernel_connect(qmi_hdl->sock, (struct sockaddr *)sq,
+ sizeof(*sq), 0);
+ if (ret) {
+ ath11k_warn(ab, "failed to connect to qmi remote service: %d\n", ret);
+ return ret;
+ }
+
+ ath11k_dbg(ab, ATH11K_DBG_QMI, "wifi fw qmi service connected\n");
+ ath11k_qmi_driver_event_post(qmi, ATH11K_QMI_EVENT_SERVER_ARRIVE, NULL);
+
+ return ret;
+}
+
+static void ath11k_qmi_ops_del_server(struct qmi_handle *qmi_hdl,
+ struct qmi_service *service)
+{
+ struct ath11k_qmi *qmi = container_of(qmi_hdl, struct ath11k_qmi, handle);
+ struct ath11k_base *ab = qmi->ab;
+
+ ath11k_dbg(ab, ATH11K_DBG_QMI, "wifi fw del server\n");
+ ath11k_qmi_driver_event_post(qmi, ATH11K_QMI_EVENT_SERVER_EXIT, NULL);
+}
+
+static const struct qmi_ops ath11k_qmi_ops = {
+ .new_server = ath11k_qmi_ops_new_server,
+ .del_server = ath11k_qmi_ops_del_server,
+};
+
+static void ath11k_qmi_driver_event_work(struct work_struct *work)
+{
+ struct ath11k_qmi *qmi = container_of(work, struct ath11k_qmi,
+ event_work);
+ struct ath11k_qmi_driver_event *event;
+ struct ath11k_base *ab = qmi->ab;
+ int ret;
+
+ spin_lock(&qmi->event_lock);
+ while (!list_empty(&qmi->event_list)) {
+ event = list_first_entry(&qmi->event_list,
+ struct ath11k_qmi_driver_event, list);
+ list_del(&event->list);
+ spin_unlock(&qmi->event_lock);
+
+ if (test_bit(ATH11K_FLAG_UNREGISTERING, &ab->dev_flags)) {
+ kfree(event);
+ return;
+ }
+
+ switch (event->type) {
+ case ATH11K_QMI_EVENT_SERVER_ARRIVE:
+ ret = ath11k_qmi_event_server_arrive(qmi);
+ if (ret < 0)
+ set_bit(ATH11K_FLAG_QMI_FAIL, &ab->dev_flags);
+ break;
+ case ATH11K_QMI_EVENT_SERVER_EXIT:
+ set_bit(ATH11K_FLAG_CRASH_FLUSH, &ab->dev_flags);
+ set_bit(ATH11K_FLAG_RECOVERY, &ab->dev_flags);
+
+ if (!ab->is_reset)
+ ath11k_core_pre_reconfigure_recovery(ab);
+ break;
+ case ATH11K_QMI_EVENT_REQUEST_MEM:
+ ret = ath11k_qmi_event_mem_request(qmi);
+ if (ret < 0)
+ set_bit(ATH11K_FLAG_QMI_FAIL, &ab->dev_flags);
+ break;
+ case ATH11K_QMI_EVENT_FW_MEM_READY:
+ ret = ath11k_qmi_event_load_bdf(qmi);
+ if (ret < 0) {
+ set_bit(ATH11K_FLAG_QMI_FAIL, &ab->dev_flags);
+ break;
+ }
+
+ ret = ath11k_qmi_wlanfw_m3_info_send(ab);
+ if (ret < 0) {
+ ath11k_warn(ab,
+ "failed to send qmi m3 info req: %d\n", ret);
+ set_bit(ATH11K_FLAG_QMI_FAIL, &ab->dev_flags);
+ }
+
+ break;
+ case ATH11K_QMI_EVENT_FW_INIT_DONE:
+ clear_bit(ATH11K_FLAG_QMI_FAIL, &ab->dev_flags);
+ if (test_bit(ATH11K_FLAG_REGISTERED, &ab->dev_flags)) {
+ ath11k_hal_dump_srng_stats(ab);
+ queue_work(ab->workqueue, &ab->restart_work);
+ break;
+ }
+
+ if (ab->qmi.cal_done == 0 &&
+ ath11k_core_coldboot_cal_support(ab)) {
+ ath11k_qmi_process_coldboot_calibration(ab);
+ } else {
+ clear_bit(ATH11K_FLAG_CRASH_FLUSH,
+ &ab->dev_flags);
+ clear_bit(ATH11K_FLAG_RECOVERY, &ab->dev_flags);
+ ret = ath11k_core_qmi_firmware_ready(ab);
+ if (ret) {
+ set_bit(ATH11K_FLAG_QMI_FAIL, &ab->dev_flags);
+ break;
+ }
+ set_bit(ATH11K_FLAG_REGISTERED, &ab->dev_flags);
+ }
+
+ break;
+ case ATH11K_QMI_EVENT_FW_READY:
+ /* For targets requiring a FW restart upon cold
+ * boot completion, there is no need to process
+ * FW ready; such targets will receive FW init
+ * done message after FW restart.
+ */
+ if (ab->hw_params.cbcal_restart_fw)
+ break;
+
+ clear_bit(ATH11K_FLAG_CRASH_FLUSH,
+ &ab->dev_flags);
+ clear_bit(ATH11K_FLAG_RECOVERY, &ab->dev_flags);
+ ath11k_core_qmi_firmware_ready(ab);
+ set_bit(ATH11K_FLAG_REGISTERED, &ab->dev_flags);
+
+ break;
+ case ATH11K_QMI_EVENT_COLD_BOOT_CAL_DONE:
+ break;
+ default:
+ ath11k_warn(ab, "invalid qmi event type: %d", event->type);
+ break;
+ }
+ kfree(event);
+ spin_lock(&qmi->event_lock);
+ }
+ spin_unlock(&qmi->event_lock);
+}
+
+int ath11k_qmi_init_service(struct ath11k_base *ab)
+{
+ int ret;
+
+ memset(&ab->qmi.target, 0, sizeof(struct target_info));
+ memset(&ab->qmi.target_mem, 0, sizeof(ab->qmi.target_mem));
+ ab->qmi.ab = ab;
+
+ ab->qmi.target_mem_mode = ab->hw_params.fw_mem_mode;
+ ret = qmi_handle_init(&ab->qmi.handle, ATH11K_QMI_RESP_LEN_MAX,
+ &ath11k_qmi_ops, ath11k_qmi_msg_handlers);
+ if (ret < 0) {
+ ath11k_warn(ab, "failed to initialize qmi handle: %d\n", ret);
+ return ret;
+ }
+
+ ab->qmi.event_wq = alloc_ordered_workqueue("ath11k_qmi_driver_event", 0);
+ if (!ab->qmi.event_wq) {
+ ath11k_err(ab, "failed to allocate workqueue\n");
+ return -ENOMEM;
+ }
+
+ INIT_LIST_HEAD(&ab->qmi.event_list);
+ spin_lock_init(&ab->qmi.event_lock);
+ INIT_WORK(&ab->qmi.event_work, ath11k_qmi_driver_event_work);
+
+ ret = qmi_add_lookup(&ab->qmi.handle, ATH11K_QMI_WLFW_SERVICE_ID_V01,
+ ATH11K_QMI_WLFW_SERVICE_VERS_V01,
+ ab->qmi.service_ins_id);
+ if (ret < 0) {
+ ath11k_warn(ab, "failed to add qmi lookup: %d\n", ret);
+ destroy_workqueue(ab->qmi.event_wq);
+ return ret;
+ }
+
+ return ret;
+}
+
+void ath11k_qmi_deinit_service(struct ath11k_base *ab)
+{
+ qmi_handle_release(&ab->qmi.handle);
+ cancel_work_sync(&ab->qmi.event_work);
+ destroy_workqueue(ab->qmi.event_wq);
+ ath11k_qmi_m3_free(ab);
+ ath11k_qmi_free_target_mem_chunk(ab);
+}
+EXPORT_SYMBOL(ath11k_qmi_deinit_service);
+
+void ath11k_qmi_free_resource(struct ath11k_base *ab)
+{
+ ath11k_qmi_free_target_mem_chunk(ab);
+ ath11k_qmi_m3_free(ab);
+}
diff --git a/drivers/net/wireless/ath/ath11k/qmi.h b/drivers/net/wireless/ath/ath11k/qmi.h
new file mode 100644
index 0000000000..d477e2be81
--- /dev/null
+++ b/drivers/net/wireless/ath/ath11k/qmi.h
@@ -0,0 +1,522 @@
+/* SPDX-License-Identifier: BSD-3-Clause-Clear */
+/*
+ * Copyright (c) 2018-2019 The Linux Foundation. All rights reserved.
+ * Copyright (c) 2022, Qualcomm Innovation Center, Inc. All rights reserved.
+ */
+
+#ifndef ATH11K_QMI_H
+#define ATH11K_QMI_H
+
+#include <linux/mutex.h>
+#include <linux/soc/qcom/qmi.h>
+
+#define ATH11K_HOST_VERSION_STRING "WIN"
+#define ATH11K_QMI_WLANFW_TIMEOUT_MS 10000
+#define ATH11K_QMI_MAX_BDF_FILE_NAME_SIZE 64
+#define ATH11K_QMI_CALDB_ADDRESS 0x4BA00000
+#define ATH11K_QMI_WLANFW_MAX_BUILD_ID_LEN_V01 128
+#define ATH11K_QMI_WLFW_SERVICE_ID_V01 0x45
+#define ATH11K_QMI_WLFW_SERVICE_VERS_V01 0x01
+#define ATH11K_QMI_WLFW_SERVICE_INS_ID_V01 0x02
+#define ATH11K_QMI_WLFW_SERVICE_INS_ID_V01_QCA6390 0x01
+#define ATH11K_QMI_WLFW_SERVICE_INS_ID_V01_IPQ8074 0x02
+#define ATH11K_QMI_WLFW_SERVICE_INS_ID_V01_QCN9074 0x07
+#define ATH11K_QMI_WLFW_SERVICE_INS_ID_V01_WCN6750 0x03
+#define ATH11K_QMI_WLANFW_MAX_TIMESTAMP_LEN_V01 32
+#define ATH11K_QMI_RESP_LEN_MAX 8192
+#define ATH11K_QMI_WLANFW_MAX_NUM_MEM_SEG_V01 52
+#define ATH11K_QMI_CALDB_SIZE 0x480000
+#define ATH11K_QMI_BDF_EXT_STR_LENGTH 0x20
+#define ATH11K_QMI_FW_MEM_REQ_SEGMENT_CNT 5
+
+#define QMI_WLFW_REQUEST_MEM_IND_V01 0x0035
+#define QMI_WLFW_FW_MEM_READY_IND_V01 0x0037
+#define QMI_WLFW_COLD_BOOT_CAL_DONE_IND_V01 0x003E
+#define QMI_WLFW_FW_READY_IND_V01 0x0021
+#define QMI_WLFW_FW_INIT_DONE_IND_V01 0x0038
+
+#define QMI_WLANFW_MAX_DATA_SIZE_V01 6144
+#define ATH11K_FIRMWARE_MODE_OFF 4
+#define ATH11K_COLD_BOOT_FW_RESET_DELAY (60 * HZ)
+
+#define ATH11K_QMI_DEVICE_BAR_SIZE 0x200000
+
+struct ath11k_base;
+
+enum ath11k_qmi_file_type {
+ ATH11K_QMI_FILE_TYPE_BDF_GOLDEN,
+ ATH11K_QMI_FILE_TYPE_CALDATA = 2,
+ ATH11K_QMI_FILE_TYPE_EEPROM,
+ ATH11K_QMI_MAX_FILE_TYPE,
+};
+
+enum ath11k_qmi_bdf_type {
+ ATH11K_QMI_BDF_TYPE_BIN = 0,
+ ATH11K_QMI_BDF_TYPE_ELF = 1,
+ ATH11K_QMI_BDF_TYPE_REGDB = 4,
+};
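+
+/* The BDF type values are sent to the firmware as-is (note the gap
+ * before REGDB), so they must not be renumbered.
+ */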
+
+enum ath11k_qmi_event_type {
+ ATH11K_QMI_EVENT_SERVER_ARRIVE,
+ ATH11K_QMI_EVENT_SERVER_EXIT,
+ ATH11K_QMI_EVENT_REQUEST_MEM,
+ ATH11K_QMI_EVENT_FW_MEM_READY,
+ ATH11K_QMI_EVENT_FW_READY,
+ ATH11K_QMI_EVENT_COLD_BOOT_CAL_START,
+ ATH11K_QMI_EVENT_COLD_BOOT_CAL_DONE,
+ ATH11K_QMI_EVENT_REGISTER_DRIVER,
+ ATH11K_QMI_EVENT_UNREGISTER_DRIVER,
+ ATH11K_QMI_EVENT_RECOVERY,
+ ATH11K_QMI_EVENT_FORCE_FW_ASSERT,
+ ATH11K_QMI_EVENT_POWER_UP,
+ ATH11K_QMI_EVENT_POWER_DOWN,
+ ATH11K_QMI_EVENT_FW_INIT_DONE,
+ ATH11K_QMI_EVENT_MAX,
+};
+
+struct ath11k_qmi_driver_event {
+ struct list_head list;
+ enum ath11k_qmi_event_type type;
+ void *data;
+};
+
+struct ath11k_qmi_ce_cfg {
+ const struct ce_pipe_config *tgt_ce;
+ int tgt_ce_len;
+ const struct service_to_pipe *svc_to_ce_map;
+ int svc_to_ce_map_len;
+ const u8 *shadow_reg;
+ int shadow_reg_len;
+ u32 *shadow_reg_v2;
+ int shadow_reg_v2_len;
+};
+
+struct ath11k_qmi_event_msg {
+ struct list_head list;
+ enum ath11k_qmi_event_type type;
+};
+
+struct target_mem_chunk {
+ u32 size;
+ u32 type;
+ u32 prev_size;
+ u32 prev_type;
+ dma_addr_t paddr;
+ u32 *vaddr;
+ void __iomem *iaddr;
+};
+
+struct target_info {
+ u32 chip_id;
+ u32 chip_family;
+ u32 board_id;
+ u32 soc_id;
+ u32 fw_version;
+ u32 eeprom_caldata;
+ char fw_build_timestamp[ATH11K_QMI_WLANFW_MAX_TIMESTAMP_LEN_V01 + 1];
+ char fw_build_id[ATH11K_QMI_WLANFW_MAX_BUILD_ID_LEN_V01 + 1];
+ char bdf_ext[ATH11K_QMI_BDF_EXT_STR_LENGTH];
+};
+
+struct m3_mem_region {
+ u32 size;
+ dma_addr_t paddr;
+ void *vaddr;
+};
+
+struct ath11k_qmi {
+ struct ath11k_base *ab;
+ struct qmi_handle handle;
+ struct sockaddr_qrtr sq;
+ struct work_struct event_work;
+ struct workqueue_struct *event_wq;
+ struct list_head event_list;
+ spinlock_t event_lock; /* spinlock for qmi event list */
+ struct ath11k_qmi_ce_cfg ce_cfg;
+ struct target_mem_chunk target_mem[ATH11K_QMI_WLANFW_MAX_NUM_MEM_SEG_V01];
+ u32 mem_seg_count;
+ u32 target_mem_mode;
+ bool target_mem_delayed;
+ u8 cal_done;
+ struct target_info target;
+ struct m3_mem_region m3_mem;
+ unsigned int service_ins_id;
+ wait_queue_head_t cold_boot_waitq;
+};
+
+#define QMI_WLANFW_HOST_CAP_REQ_MSG_V01_MAX_LEN 261
+#define QMI_WLANFW_HOST_CAP_REQ_V01 0x0034
+#define QMI_WLANFW_HOST_CAP_RESP_MSG_V01_MAX_LEN 7
+#define QMI_WLFW_HOST_CAP_RESP_V01 0x0034
+#define QMI_WLFW_MAX_NUM_GPIO_V01 32
+#define QMI_IPQ8074_FW_MEM_MODE 0xFF
+#define HOST_DDR_REGION_TYPE 0x1
+#define BDF_MEM_REGION_TYPE 0x2
+#define M3_DUMP_REGION_TYPE 0x3
+#define CALDB_MEM_REGION_TYPE 0x4
+
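+/* In the QMI message structures below, each *_valid u8 indicates
+ * whether the optional field that follows it is encoded in the wire
+ * message, per the QMI optional-TLV convention.
+ */
+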
+struct qmi_wlanfw_host_cap_req_msg_v01 {
+ u8 num_clients_valid;
+ u32 num_clients;
+ u8 wake_msi_valid;
+ u32 wake_msi;
+ u8 gpios_valid;
+ u32 gpios_len;
+ u32 gpios[QMI_WLFW_MAX_NUM_GPIO_V01];
+ u8 nm_modem_valid;
+ u8 nm_modem;
+ u8 bdf_support_valid;
+ u8 bdf_support;
+ u8 bdf_cache_support_valid;
+ u8 bdf_cache_support;
+ u8 m3_support_valid;
+ u8 m3_support;
+ u8 m3_cache_support_valid;
+ u8 m3_cache_support;
+ u8 cal_filesys_support_valid;
+ u8 cal_filesys_support;
+ u8 cal_cache_support_valid;
+ u8 cal_cache_support;
+ u8 cal_done_valid;
+ u8 cal_done;
+ u8 mem_bucket_valid;
+ u32 mem_bucket;
+ u8 mem_cfg_mode_valid;
+ u8 mem_cfg_mode;
+};
+
+struct qmi_wlanfw_host_cap_resp_msg_v01 {
+ struct qmi_response_type_v01 resp;
+};
+
+#define QMI_WLANFW_IND_REGISTER_REQ_MSG_V01_MAX_LEN 54
+#define QMI_WLANFW_IND_REGISTER_REQ_V01 0x0020
+#define QMI_WLANFW_IND_REGISTER_RESP_MSG_V01_MAX_LEN 18
+#define QMI_WLANFW_IND_REGISTER_RESP_V01 0x0020
+#define QMI_WLANFW_CLIENT_ID 0x4b4e454c
+
+struct qmi_wlanfw_ind_register_req_msg_v01 {
+ u8 fw_ready_enable_valid;
+ u8 fw_ready_enable;
+ u8 initiate_cal_download_enable_valid;
+ u8 initiate_cal_download_enable;
+ u8 initiate_cal_update_enable_valid;
+ u8 initiate_cal_update_enable;
+ u8 msa_ready_enable_valid;
+ u8 msa_ready_enable;
+ u8 pin_connect_result_enable_valid;
+ u8 pin_connect_result_enable;
+ u8 client_id_valid;
+ u32 client_id;
+ u8 request_mem_enable_valid;
+ u8 request_mem_enable;
+ u8 fw_mem_ready_enable_valid;
+ u8 fw_mem_ready_enable;
+ u8 fw_init_done_enable_valid;
+ u8 fw_init_done_enable;
+ u8 rejuvenate_enable_valid;
+ u32 rejuvenate_enable;
+ u8 xo_cal_enable_valid;
+ u8 xo_cal_enable;
+ u8 cal_done_enable_valid;
+ u8 cal_done_enable;
+};
+
+struct qmi_wlanfw_ind_register_resp_msg_v01 {
+ struct qmi_response_type_v01 resp;
+ u8 fw_status_valid;
+ u64 fw_status;
+};
+
+#define QMI_WLANFW_REQUEST_MEM_IND_MSG_V01_MAX_LEN 1824
+#define QMI_WLANFW_RESPOND_MEM_REQ_MSG_V01_MAX_LEN 888
+#define QMI_WLANFW_RESPOND_MEM_RESP_MSG_V01_MAX_LEN 7
+#define QMI_WLANFW_REQUEST_MEM_IND_V01 0x0035
+#define QMI_WLANFW_RESPOND_MEM_REQ_V01 0x0036
+#define QMI_WLANFW_RESPOND_MEM_RESP_V01 0x0036
+#define QMI_WLANFW_MAX_NUM_MEM_CFG_V01 2
+
+struct qmi_wlanfw_mem_cfg_s_v01 {
+ u64 offset;
+ u32 size;
+ u8 secure_flag;
+};
+
+enum qmi_wlanfw_mem_type_enum_v01 {
+ WLANFW_MEM_TYPE_ENUM_MIN_VAL_V01 = INT_MIN,
+ QMI_WLANFW_MEM_TYPE_MSA_V01 = 0,
+ QMI_WLANFW_MEM_TYPE_DDR_V01 = 1,
+ QMI_WLANFW_MEM_BDF_V01 = 2,
+ QMI_WLANFW_MEM_M3_V01 = 3,
+ QMI_WLANFW_MEM_CAL_V01 = 4,
+ QMI_WLANFW_MEM_DPD_V01 = 5,
+ WLANFW_MEM_TYPE_ENUM_MAX_VAL_V01 = INT_MAX,
+};
+
+struct qmi_wlanfw_mem_seg_s_v01 {
+ u32 size;
+ enum qmi_wlanfw_mem_type_enum_v01 type;
+ u32 mem_cfg_len;
+ struct qmi_wlanfw_mem_cfg_s_v01 mem_cfg[QMI_WLANFW_MAX_NUM_MEM_CFG_V01];
+};
+
+struct qmi_wlanfw_request_mem_ind_msg_v01 {
+ u32 mem_seg_len;
+ struct qmi_wlanfw_mem_seg_s_v01 mem_seg[ATH11K_QMI_WLANFW_MAX_NUM_MEM_SEG_V01];
+};
+
+struct qmi_wlanfw_mem_seg_resp_s_v01 {
+ u64 addr;
+ u32 size;
+ enum qmi_wlanfw_mem_type_enum_v01 type;
+ u8 restore;
+};
+
+struct qmi_wlanfw_respond_mem_req_msg_v01 {
+ u32 mem_seg_len;
+ struct qmi_wlanfw_mem_seg_resp_s_v01 mem_seg[ATH11K_QMI_WLANFW_MAX_NUM_MEM_SEG_V01];
+};
+
+struct qmi_wlanfw_respond_mem_resp_msg_v01 {
+ struct qmi_response_type_v01 resp;
+};
+
+struct qmi_wlanfw_fw_mem_ready_ind_msg_v01 {
+ char placeholder;
+};
+
+struct qmi_wlanfw_fw_ready_ind_msg_v01 {
+ char placeholder;
+};
+
+struct qmi_wlanfw_fw_cold_cal_done_ind_msg_v01 {
+ char placeholder;
+};
+
+struct qmi_wlfw_fw_init_done_ind_msg_v01 {
+ char placeholder;
+};
+
+#define QMI_WLANFW_CAP_REQ_MSG_V01_MAX_LEN 0
+#define QMI_WLANFW_CAP_RESP_MSG_V01_MAX_LEN 235
+#define QMI_WLANFW_CAP_REQ_V01 0x0024
+#define QMI_WLANFW_CAP_RESP_V01 0x0024
+#define QMI_WLANFW_DEVICE_INFO_REQ_V01 0x004C
+#define QMI_WLANFW_DEVICE_INFO_REQ_MSG_V01_MAX_LEN 0
+
+enum qmi_wlanfw_pipedir_enum_v01 {
+ QMI_WLFW_PIPEDIR_NONE_V01 = 0,
+ QMI_WLFW_PIPEDIR_IN_V01 = 1,
+ QMI_WLFW_PIPEDIR_OUT_V01 = 2,
+ QMI_WLFW_PIPEDIR_INOUT_V01 = 3,
+};
+
+struct qmi_wlanfw_ce_tgt_pipe_cfg_s_v01 {
+ __le32 pipe_num;
+ __le32 pipe_dir;
+ __le32 nentries;
+ __le32 nbytes_max;
+ __le32 flags;
+};
+
+struct qmi_wlanfw_ce_svc_pipe_cfg_s_v01 {
+ __le32 service_id;
+ __le32 pipe_dir;
+ __le32 pipe_num;
+};
+
+struct qmi_wlanfw_shadow_reg_cfg_s_v01 {
+ u16 id;
+ u16 offset;
+};
+
+struct qmi_wlanfw_shadow_reg_v2_cfg_s_v01 {
+ u32 addr;
+};
+
+struct qmi_wlanfw_memory_region_info_s_v01 {
+ u64 region_addr;
+ u32 size;
+ u8 secure_flag;
+};
+
+struct qmi_wlanfw_rf_chip_info_s_v01 {
+ u32 chip_id;
+ u32 chip_family;
+};
+
+struct qmi_wlanfw_rf_board_info_s_v01 {
+ u32 board_id;
+};
+
+struct qmi_wlanfw_soc_info_s_v01 {
+ u32 soc_id;
+};
+
+struct qmi_wlanfw_fw_version_info_s_v01 {
+ u32 fw_version;
+ char fw_build_timestamp[ATH11K_QMI_WLANFW_MAX_TIMESTAMP_LEN_V01 + 1];
+};
+
+enum qmi_wlanfw_cal_temp_id_enum_v01 {
+ QMI_WLANFW_CAL_TEMP_IDX_0_V01 = 0,
+ QMI_WLANFW_CAL_TEMP_IDX_1_V01 = 1,
+ QMI_WLANFW_CAL_TEMP_IDX_2_V01 = 2,
+ QMI_WLANFW_CAL_TEMP_IDX_3_V01 = 3,
+ QMI_WLANFW_CAL_TEMP_IDX_4_V01 = 4,
+ QMI_WLANFW_CAL_TEMP_ID_MAX_V01 = 0xFF,
+};
+
+struct qmi_wlanfw_cap_resp_msg_v01 {
+ struct qmi_response_type_v01 resp;
+ u8 chip_info_valid;
+ struct qmi_wlanfw_rf_chip_info_s_v01 chip_info;
+ u8 board_info_valid;
+ struct qmi_wlanfw_rf_board_info_s_v01 board_info;
+ u8 soc_info_valid;
+ struct qmi_wlanfw_soc_info_s_v01 soc_info;
+ u8 fw_version_info_valid;
+ struct qmi_wlanfw_fw_version_info_s_v01 fw_version_info;
+ u8 fw_build_id_valid;
+ char fw_build_id[ATH11K_QMI_WLANFW_MAX_BUILD_ID_LEN_V01 + 1];
+ u8 num_macs_valid;
+ u8 num_macs;
+ u8 voltage_mv_valid;
+ u32 voltage_mv;
+ u8 time_freq_hz_valid;
+ u32 time_freq_hz;
+ u8 otp_version_valid;
+ u32 otp_version;
+ u8 eeprom_read_timeout_valid;
+ u32 eeprom_read_timeout;
+};
+
+struct qmi_wlanfw_cap_req_msg_v01 {
+ char placeholder;
+};
+
+struct qmi_wlanfw_device_info_req_msg_v01 {
+ char placeholder;
+};
+
+struct qmi_wlanfw_device_info_resp_msg_v01 {
+ struct qmi_response_type_v01 resp;
+ u64 bar_addr;
+ u32 bar_size;
+ u8 bar_addr_valid;
+ u8 bar_size_valid;
+};
+
+#define QMI_WLANFW_BDF_DOWNLOAD_REQ_MSG_V01_MAX_LEN 6182
+#define QMI_WLANFW_BDF_DOWNLOAD_RESP_MSG_V01_MAX_LEN 7
+#define QMI_WLANFW_BDF_DOWNLOAD_RESP_V01 0x0025
+#define QMI_WLANFW_BDF_DOWNLOAD_REQ_V01 0x0025
+/* TODO: Need to check with MCL and FW team that data can be pointer and
+ * can be last element in structure
+ */
+struct qmi_wlanfw_bdf_download_req_msg_v01 {
+ u8 valid;
+ u8 file_id_valid;
+ enum qmi_wlanfw_cal_temp_id_enum_v01 file_id;
+ u8 total_size_valid;
+ u32 total_size;
+ u8 seg_id_valid;
+ u32 seg_id;
+ u8 data_valid;
+ u32 data_len;
+ u8 data[QMI_WLANFW_MAX_DATA_SIZE_V01];
+ u8 end_valid;
+ u8 end;
+ u8 bdf_type_valid;
+ u8 bdf_type;
+
+};
+
+struct qmi_wlanfw_bdf_download_resp_msg_v01 {
+ struct qmi_response_type_v01 resp;
+};
+
+#define QMI_WLANFW_M3_INFO_REQ_MSG_V01_MAX_MSG_LEN 18
+#define QMI_WLANFW_M3_INFO_RESP_MSG_V01_MAX_MSG_LEN 7
+#define QMI_WLANFW_M3_INFO_RESP_V01 0x003C
+#define QMI_WLANFW_M3_INFO_REQ_V01 0x003C
+
+struct qmi_wlanfw_m3_info_req_msg_v01 {
+ u64 addr;
+ u32 size;
+};
+
+struct qmi_wlanfw_m3_info_resp_msg_v01 {
+ struct qmi_response_type_v01 resp;
+};
+
+#define QMI_WLANFW_WLAN_MODE_REQ_MSG_V01_MAX_LEN 11
+#define QMI_WLANFW_WLAN_MODE_RESP_MSG_V01_MAX_LEN 7
+#define QMI_WLANFW_WLAN_CFG_REQ_MSG_V01_MAX_LEN 803
+#define QMI_WLANFW_WLAN_CFG_RESP_MSG_V01_MAX_LEN 7
+#define QMI_WLANFW_WLAN_INI_REQ_MSG_V01_MAX_LEN 4
+#define QMI_WLANFW_WLAN_MODE_REQ_V01 0x0022
+#define QMI_WLANFW_WLAN_MODE_RESP_V01 0x0022
+#define QMI_WLANFW_WLAN_CFG_REQ_V01 0x0023
+#define QMI_WLANFW_WLAN_CFG_RESP_V01 0x0023
+#define QMI_WLANFW_WLAN_INI_REQ_V01 0x002F
+#define QMI_WLANFW_MAX_STR_LEN_V01 16
+#define QMI_WLANFW_MAX_NUM_CE_V01 12
+#define QMI_WLANFW_MAX_NUM_SVC_V01 24
+#define QMI_WLANFW_MAX_NUM_SHADOW_REG_V01 24
+#define QMI_WLANFW_MAX_NUM_SHADOW_REG_V2_V01 36
+
+struct qmi_wlanfw_wlan_mode_req_msg_v01 {
+ u32 mode;
+ u8 hw_debug_valid;
+ u8 hw_debug;
+};
+
+struct qmi_wlanfw_wlan_mode_resp_msg_v01 {
+ struct qmi_response_type_v01 resp;
+};
+
+struct qmi_wlanfw_wlan_cfg_req_msg_v01 {
+ u8 host_version_valid;
+ char host_version[QMI_WLANFW_MAX_STR_LEN_V01 + 1];
+ u8 tgt_cfg_valid;
+ u32 tgt_cfg_len;
+ struct qmi_wlanfw_ce_tgt_pipe_cfg_s_v01
+ tgt_cfg[QMI_WLANFW_MAX_NUM_CE_V01];
+ u8 svc_cfg_valid;
+ u32 svc_cfg_len;
+ struct qmi_wlanfw_ce_svc_pipe_cfg_s_v01
+ svc_cfg[QMI_WLANFW_MAX_NUM_SVC_V01];
+ u8 shadow_reg_valid;
+ u32 shadow_reg_len;
+ struct qmi_wlanfw_shadow_reg_cfg_s_v01
+ shadow_reg[QMI_WLANFW_MAX_NUM_SHADOW_REG_V01];
+ u8 shadow_reg_v2_valid;
+ u32 shadow_reg_v2_len;
+ struct qmi_wlanfw_shadow_reg_v2_cfg_s_v01
+ shadow_reg_v2[QMI_WLANFW_MAX_NUM_SHADOW_REG_V2_V01];
+};
+
+struct qmi_wlanfw_wlan_cfg_resp_msg_v01 {
+ struct qmi_response_type_v01 resp;
+};
+
+struct qmi_wlanfw_wlan_ini_req_msg_v01 {
+ /* Must be set to true if enablefwlog is being passed */
+ u8 enablefwlog_valid;
+ u8 enablefwlog;
+};
+
+struct qmi_wlanfw_wlan_ini_resp_msg_v01 {
+ struct qmi_response_type_v01 resp;
+};
+
+int ath11k_qmi_firmware_start(struct ath11k_base *ab,
+ u32 mode);
+void ath11k_qmi_firmware_stop(struct ath11k_base *ab);
+void ath11k_qmi_deinit_service(struct ath11k_base *ab);
+int ath11k_qmi_init_service(struct ath11k_base *ab);
+void ath11k_qmi_free_resource(struct ath11k_base *ab);
+int ath11k_qmi_fwreset_from_cold_boot(struct ath11k_base *ab);
+
+#endif
diff --git a/drivers/net/wireless/ath/ath11k/reg.c b/drivers/net/wireless/ath/ath11k/reg.c
new file mode 100644
index 0000000000..7f9fb968da
--- /dev/null
+++ b/drivers/net/wireless/ath/ath11k/reg.c
@@ -0,0 +1,780 @@
+// SPDX-License-Identifier: BSD-3-Clause-Clear
+/*
+ * Copyright (c) 2018-2019 The Linux Foundation. All rights reserved.
+ */
+#include <linux/rtnetlink.h>
+
+#include "core.h"
+#include "debug.h"
+
+/* World regdom to be used in case default regd from fw is unavailable */
+#define ATH11K_2GHZ_CH01_11 REG_RULE(2412 - 10, 2462 + 10, 40, 0, 20, 0)
+#define ATH11K_5GHZ_5150_5350 REG_RULE(5150 - 10, 5350 + 10, 80, 0, 30,\
+ NL80211_RRF_NO_IR)
+#define ATH11K_5GHZ_5725_5850 REG_RULE(5725 - 10, 5850 + 10, 80, 0, 30,\
+ NL80211_RRF_NO_IR)
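+
+/* Each REG_RULE() above expands to (start - 10 MHz, end + 10 MHz,
+ * max bandwidth, max antenna gain, max EIRP, flags); e.g. the 2 GHz
+ * rule covers 2402-2472 MHz with up to 40 MHz channels at 20 dBm.
+ */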
+
+#define ETSI_WEATHER_RADAR_BAND_LOW 5590
+#define ETSI_WEATHER_RADAR_BAND_HIGH 5650
+#define ETSI_WEATHER_RADAR_BAND_CAC_TIMEOUT 600000
+
+static const struct ieee80211_regdomain ath11k_world_regd = {
+ .n_reg_rules = 3,
+ .alpha2 = "00",
+ .reg_rules = {
+ ATH11K_2GHZ_CH01_11,
+ ATH11K_5GHZ_5150_5350,
+ ATH11K_5GHZ_5725_5850,
+ }
+};
+
+static bool ath11k_regdom_changes(struct ath11k *ar, char *alpha2)
+{
+ const struct ieee80211_regdomain *regd;
+
+ regd = rcu_dereference_rtnl(ar->hw->wiphy->regd);
+ /* This can happen during wiphy registration where the previous
+ * user request is received before we update the regd received
+ * from firmware.
+ */
+ if (!regd)
+ return true;
+
+ return memcmp(regd->alpha2, alpha2, 2) != 0;
+}
+
+static void
+ath11k_reg_notifier(struct wiphy *wiphy, struct regulatory_request *request)
+{
+ struct ieee80211_hw *hw = wiphy_to_ieee80211_hw(wiphy);
+ struct wmi_init_country_params init_country_param;
+ struct wmi_set_current_country_params set_current_param = {};
+ struct ath11k *ar = hw->priv;
+ int ret;
+
+ ath11k_dbg(ar->ab, ATH11K_DBG_REG,
+ "Regulatory Notification received for %s\n", wiphy_name(wiphy));
+
+ /* Currently supporting only general user hints; cell-based user
+ * hints are to be handled later. Hints from other sources like
+ * core and beacons are not expected for self-managed wiphys.
+ */
+ if (!(request->initiator == NL80211_REGDOM_SET_BY_USER &&
+ request->user_reg_hint_type == NL80211_USER_REG_HINT_USER)) {
+ ath11k_warn(ar->ab, "Unexpected Regulatory event for this wiphy\n");
+ return;
+ }
+
+ if (!IS_ENABLED(CONFIG_ATH_REG_DYNAMIC_USER_REG_HINTS)) {
+ ath11k_dbg(ar->ab, ATH11K_DBG_REG,
+ "Country Setting is not allowed\n");
+ return;
+ }
+
+ if (!ath11k_regdom_changes(ar, request->alpha2)) {
+ ath11k_dbg(ar->ab, ATH11K_DBG_REG, "Country is already set\n");
+ return;
+ }
+
+ /* Send the country code to the firmware; the driver will then
+ * receive a WMI_REG_CHAN_LIST_CC event to update the reg info.
+ */
+ if (ar->ab->hw_params.current_cc_support) {
+ memcpy(&set_current_param.alpha2, request->alpha2, 2);
+ memcpy(&ar->alpha2, &set_current_param.alpha2, 2);
+ ret = ath11k_wmi_send_set_current_country_cmd(ar, &set_current_param);
+ if (ret)
+ ath11k_warn(ar->ab,
+ "failed set current country code: %d\n", ret);
+ } else {
+ init_country_param.flags = ALPHA_IS_SET;
+ memcpy(&init_country_param.cc_info.alpha2, request->alpha2, 2);
+ init_country_param.cc_info.alpha2[2] = 0;
+
+ ret = ath11k_wmi_send_init_country_cmd(ar, init_country_param);
+ if (ret)
+ ath11k_warn(ar->ab,
+ "INIT Country code set to fw failed : %d\n", ret);
+ }
+
+ ath11k_mac_11d_scan_stop(ar);
+ ar->regdom_set_by_user = true;
+}
+
+int ath11k_reg_update_chan_list(struct ath11k *ar, bool wait)
+{
+ struct ieee80211_supported_band **bands;
+ struct scan_chan_list_params *params;
+ struct ieee80211_channel *channel;
+ struct ieee80211_hw *hw = ar->hw;
+ struct channel_param *ch;
+ enum nl80211_band band;
+ int num_channels = 0;
+ int i, ret, left;
+
+ if (wait && ar->state_11d != ATH11K_11D_IDLE) {
+ left = wait_for_completion_timeout(&ar->completed_11d_scan,
+ ATH11K_SCAN_TIMEOUT_HZ);
+ if (!left) {
+ ath11k_dbg(ar->ab, ATH11K_DBG_REG,
+ "failed to receive 11d scan complete: timed out\n");
+ ar->state_11d = ATH11K_11D_IDLE;
+ }
+ ath11k_dbg(ar->ab, ATH11K_DBG_REG,
+ "11d scan wait left time %d\n", left);
+ }
+
+ if (wait &&
+ (ar->scan.state == ATH11K_SCAN_STARTING ||
+ ar->scan.state == ATH11K_SCAN_RUNNING)) {
+ left = wait_for_completion_timeout(&ar->scan.completed,
+ ATH11K_SCAN_TIMEOUT_HZ);
+ if (!left)
+ ath11k_dbg(ar->ab, ATH11K_DBG_REG,
+ "failed to receive hw scan complete: timed out\n");
+
+ ath11k_dbg(ar->ab, ATH11K_DBG_REG,
+ "hw scan wait left time %d\n", left);
+ }
+
+ if (ar->state == ATH11K_STATE_RESTARTING)
+ return 0;
+
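+ /* Two passes over the band list: first count the enabled channels
+ * to size the WMI scan channel list, then fill in one entry per
+ * channel.
+ */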
+ bands = hw->wiphy->bands;
+ for (band = 0; band < NUM_NL80211_BANDS; band++) {
+ if (!bands[band])
+ continue;
+
+ for (i = 0; i < bands[band]->n_channels; i++) {
+ if (bands[band]->channels[i].flags &
+ IEEE80211_CHAN_DISABLED)
+ continue;
+
+ num_channels++;
+ }
+ }
+
+ if (WARN_ON(!num_channels))
+ return -EINVAL;
+
+ params = kzalloc(struct_size(params, ch_param, num_channels),
+ GFP_KERNEL);
+ if (!params)
+ return -ENOMEM;
+
+ params->pdev_id = ar->pdev->pdev_id;
+ params->nallchans = num_channels;
+
+ ch = params->ch_param;
+
+ for (band = 0; band < NUM_NL80211_BANDS; band++) {
+ if (!bands[band])
+ continue;
+
+ for (i = 0; i < bands[band]->n_channels; i++) {
+ channel = &bands[band]->channels[i];
+
+ if (channel->flags & IEEE80211_CHAN_DISABLED)
+ continue;
+
+ /* TODO: Set to true/false based on some condition? */
+ ch->allow_ht = true;
+ ch->allow_vht = true;
+ ch->allow_he = true;
+
+ ch->dfs_set =
+ !!(channel->flags & IEEE80211_CHAN_RADAR);
+ ch->is_chan_passive = !!(channel->flags &
+ IEEE80211_CHAN_NO_IR);
+ ch->is_chan_passive |= ch->dfs_set;
+ ch->mhz = channel->center_freq;
+ ch->cfreq1 = channel->center_freq;
+ ch->minpower = 0;
+ ch->maxpower = channel->max_power * 2;
+ ch->maxregpower = channel->max_reg_power * 2;
+ ch->antennamax = channel->max_antenna_gain * 2;
+
+ /* TODO: Use appropriate phymodes */
+ if (channel->band == NL80211_BAND_2GHZ)
+ ch->phy_mode = MODE_11G;
+ else
+ ch->phy_mode = MODE_11A;
+
+ if (channel->band == NL80211_BAND_6GHZ &&
+ cfg80211_channel_is_psc(channel))
+ ch->psc_channel = true;
+
+ ath11k_dbg(ar->ab, ATH11K_DBG_WMI,
+ "mac channel [%d/%d] freq %d maxpower %d regpower %d antenna %d mode %d\n",
+ i, params->nallchans,
+ ch->mhz, ch->maxpower, ch->maxregpower,
+ ch->antennamax, ch->phy_mode);
+
+ ch++;
+			/* TODO: use quarter/half rate, cfreq12, dfs_cfreq2
+ * set_agile, reg_class_idx
+ */
+ }
+ }
+
+ ret = ath11k_wmi_send_scan_chan_list_cmd(ar, params);
+ kfree(params);
+
+ return ret;
+}
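+
+/* Note (illustrative, not driver logic): scan_chan_list_params ends in a
+ * flexible ch_param[] array, so the allocation above uses struct_size(),
+ * which computes sizeof(*params) + num_channels * sizeof(params->ch_param[0])
+ * with integer-overflow checking, e.g.
+ *
+ *	params = kzalloc(struct_size(params, ch_param, num_channels),
+ *			 GFP_KERNEL);
+ */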
+
+static void ath11k_copy_regd(struct ieee80211_regdomain *regd_orig,
+ struct ieee80211_regdomain *regd_copy)
+{
+ u8 i;
+
+ /* The caller should have checked error conditions */
+ memcpy(regd_copy, regd_orig, sizeof(*regd_orig));
+
+ for (i = 0; i < regd_orig->n_reg_rules; i++)
+ memcpy(&regd_copy->reg_rules[i], &regd_orig->reg_rules[i],
+ sizeof(struct ieee80211_reg_rule));
+}
+
+int ath11k_regd_update(struct ath11k *ar)
+{
+ struct ieee80211_regdomain *regd, *regd_copy = NULL;
+ int ret, regd_len, pdev_id;
+ struct ath11k_base *ab;
+
+ ab = ar->ab;
+ pdev_id = ar->pdev_idx;
+
+ spin_lock_bh(&ab->base_lock);
+
+ /* Prefer the latest regd update over default if it's available */
+ if (ab->new_regd[pdev_id]) {
+ regd = ab->new_regd[pdev_id];
+ } else {
+ /* Apply the regd received during init through
+ * WMI_REG_CHAN_LIST_CC event. In case of failure to
+ * receive the regd, initialize with a default world
+ * regulatory.
+ */
+ if (ab->default_regd[pdev_id]) {
+ regd = ab->default_regd[pdev_id];
+ } else {
+ ath11k_warn(ab,
+ "failed to receive default regd during init\n");
+ regd = (struct ieee80211_regdomain *)&ath11k_world_regd;
+ }
+ }
+
+ if (!regd) {
+ ret = -EINVAL;
+ spin_unlock_bh(&ab->base_lock);
+ goto err;
+ }
+
+ regd_len = sizeof(*regd) + (regd->n_reg_rules *
+ sizeof(struct ieee80211_reg_rule));
+
+ regd_copy = kzalloc(regd_len, GFP_ATOMIC);
+ if (regd_copy)
+ ath11k_copy_regd(regd, regd_copy);
+
+ spin_unlock_bh(&ab->base_lock);
+
+ if (!regd_copy) {
+ ret = -ENOMEM;
+ goto err;
+ }
+
+ ret = regulatory_set_wiphy_regd(ar->hw->wiphy, regd_copy);
+
+ kfree(regd_copy);
+
+ if (ret)
+ goto err;
+
+ if (ar->state == ATH11K_STATE_ON) {
+ ret = ath11k_reg_update_chan_list(ar, true);
+ if (ret)
+ goto err;
+ }
+
+ return 0;
+err:
+	ath11k_warn(ab, "failed to perform regd update: %d\n", ret);
+ return ret;
+}
+
+static enum nl80211_dfs_regions
+ath11k_map_fw_dfs_region(enum ath11k_dfs_region dfs_region)
+{
+ switch (dfs_region) {
+ case ATH11K_DFS_REG_FCC:
+ case ATH11K_DFS_REG_CN:
+ return NL80211_DFS_FCC;
+ case ATH11K_DFS_REG_ETSI:
+ case ATH11K_DFS_REG_KR:
+ return NL80211_DFS_ETSI;
+ case ATH11K_DFS_REG_MKK:
+ case ATH11K_DFS_REG_MKK_N:
+ return NL80211_DFS_JP;
+ default:
+ return NL80211_DFS_UNSET;
+ }
+}
+
+static u32 ath11k_map_fw_reg_flags(u16 reg_flags)
+{
+ u32 flags = 0;
+
+ if (reg_flags & REGULATORY_CHAN_NO_IR)
+ flags = NL80211_RRF_NO_IR;
+
+ if (reg_flags & REGULATORY_CHAN_RADAR)
+ flags |= NL80211_RRF_DFS;
+
+ if (reg_flags & REGULATORY_CHAN_NO_OFDM)
+ flags |= NL80211_RRF_NO_OFDM;
+
+ if (reg_flags & REGULATORY_CHAN_INDOOR_ONLY)
+ flags |= NL80211_RRF_NO_OUTDOOR;
+
+ if (reg_flags & REGULATORY_CHAN_NO_HT40)
+ flags |= NL80211_RRF_NO_HT40;
+
+ if (reg_flags & REGULATORY_CHAN_NO_80MHZ)
+ flags |= NL80211_RRF_NO_80MHZ;
+
+ if (reg_flags & REGULATORY_CHAN_NO_160MHZ)
+ flags |= NL80211_RRF_NO_160MHZ;
+
+ return flags;
+}
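+
+/* Example (illustrative only): a firmware rule carrying
+ * REGULATORY_CHAN_RADAR | REGULATORY_CHAN_NO_IR maps to
+ *
+ *	ath11k_map_fw_reg_flags(REGULATORY_CHAN_RADAR |
+ *				REGULATORY_CHAN_NO_IR)
+ *
+ * == (NL80211_RRF_NO_IR | NL80211_RRF_DFS).
+ */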
+
+static bool
+ath11k_reg_can_intersect(struct ieee80211_reg_rule *rule1,
+ struct ieee80211_reg_rule *rule2)
+{
+ u32 start_freq1, end_freq1;
+ u32 start_freq2, end_freq2;
+
+ start_freq1 = rule1->freq_range.start_freq_khz;
+ start_freq2 = rule2->freq_range.start_freq_khz;
+
+ end_freq1 = rule1->freq_range.end_freq_khz;
+ end_freq2 = rule2->freq_range.end_freq_khz;
+
+ if ((start_freq1 >= start_freq2 &&
+ start_freq1 < end_freq2) ||
+ (start_freq2 > start_freq1 &&
+ start_freq2 < end_freq1))
+ return true;
+
+ /* TODO: Should we restrict intersection feasibility
+ * based on min bandwidth of the intersected region also,
+ * say the intersected rule should have a min bandwidth
+ * of 20MHz?
+ */
+
+ return false;
+}
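+
+/* Worked example (illustrative): rules covering 5170000-5250000 kHz and
+ * 5210000-5330000 kHz overlap in 5210000-5250000 kHz, so
+ * ath11k_reg_can_intersect() returns true. Rules that merely touch at a
+ * boundary (end_freq1 == start_freq2) return false, since a zero-width
+ * intersection is unusable.
+ */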
+
+static void ath11k_reg_intersect_rules(struct ieee80211_reg_rule *rule1,
+ struct ieee80211_reg_rule *rule2,
+ struct ieee80211_reg_rule *new_rule)
+{
+ u32 start_freq1, end_freq1;
+ u32 start_freq2, end_freq2;
+ u32 freq_diff, max_bw;
+
+ start_freq1 = rule1->freq_range.start_freq_khz;
+ start_freq2 = rule2->freq_range.start_freq_khz;
+
+ end_freq1 = rule1->freq_range.end_freq_khz;
+ end_freq2 = rule2->freq_range.end_freq_khz;
+
+ new_rule->freq_range.start_freq_khz = max_t(u32, start_freq1,
+ start_freq2);
+ new_rule->freq_range.end_freq_khz = min_t(u32, end_freq1, end_freq2);
+
+ freq_diff = new_rule->freq_range.end_freq_khz -
+ new_rule->freq_range.start_freq_khz;
+ max_bw = min_t(u32, rule1->freq_range.max_bandwidth_khz,
+ rule2->freq_range.max_bandwidth_khz);
+ new_rule->freq_range.max_bandwidth_khz = min_t(u32, max_bw, freq_diff);
+
+ new_rule->power_rule.max_antenna_gain =
+ min_t(u32, rule1->power_rule.max_antenna_gain,
+ rule2->power_rule.max_antenna_gain);
+
+ new_rule->power_rule.max_eirp = min_t(u32, rule1->power_rule.max_eirp,
+ rule2->power_rule.max_eirp);
+
+ /* Use the flags of both the rules */
+ new_rule->flags = rule1->flags | rule2->flags;
+
+	/* To be safe, let's use the max CAC timeout of both rules */
+ new_rule->dfs_cac_ms = max_t(u32, rule1->dfs_cac_ms,
+ rule2->dfs_cac_ms);
+}
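+
+/* Worked example (illustrative, frequencies in kHz as stored in the
+ * rules): intersecting 5170000-5250000 @ 80000 with 5210000-5330000
+ * @ 160000 yields the range 5210000-5250000 with max_bandwidth_khz =
+ * min(min(80000, 160000), 40000) = 40000, the smaller of the two EIRP
+ * and antenna gain limits, the OR of both flag sets and the larger DFS
+ * CAC timeout.
+ */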
+
+static struct ieee80211_regdomain *
+ath11k_regd_intersect(struct ieee80211_regdomain *default_regd,
+ struct ieee80211_regdomain *curr_regd)
+{
+ u8 num_old_regd_rules, num_curr_regd_rules, num_new_regd_rules;
+ struct ieee80211_reg_rule *old_rule, *curr_rule, *new_rule;
+ struct ieee80211_regdomain *new_regd = NULL;
+ u8 i, j, k;
+
+ num_old_regd_rules = default_regd->n_reg_rules;
+ num_curr_regd_rules = curr_regd->n_reg_rules;
+ num_new_regd_rules = 0;
+
+ /* Find the number of intersecting rules to allocate new regd memory */
+ for (i = 0; i < num_old_regd_rules; i++) {
+ old_rule = default_regd->reg_rules + i;
+ for (j = 0; j < num_curr_regd_rules; j++) {
+ curr_rule = curr_regd->reg_rules + j;
+
+ if (ath11k_reg_can_intersect(old_rule, curr_rule))
+ num_new_regd_rules++;
+ }
+ }
+
+ if (!num_new_regd_rules)
+ return NULL;
+
+ new_regd = kzalloc(sizeof(*new_regd) + (num_new_regd_rules *
+ sizeof(struct ieee80211_reg_rule)),
+ GFP_ATOMIC);
+
+ if (!new_regd)
+ return NULL;
+
+	/* We set the new country and DFS region directly and only trim
+	 * the frequency, power and antenna gain by intersecting with the
+	 * default regdomain. Also, the max of the DFS CAC timeouts is
+	 * selected.
+ */
+ new_regd->n_reg_rules = num_new_regd_rules;
+ memcpy(new_regd->alpha2, curr_regd->alpha2, sizeof(new_regd->alpha2));
+ new_regd->dfs_region = curr_regd->dfs_region;
+ new_rule = new_regd->reg_rules;
+
+ for (i = 0, k = 0; i < num_old_regd_rules; i++) {
+ old_rule = default_regd->reg_rules + i;
+ for (j = 0; j < num_curr_regd_rules; j++) {
+ curr_rule = curr_regd->reg_rules + j;
+
+ if (ath11k_reg_can_intersect(old_rule, curr_rule))
+ ath11k_reg_intersect_rules(old_rule, curr_rule,
+ (new_rule + k++));
+ }
+ }
+ return new_regd;
+}
+
+static const char *
+ath11k_reg_get_regdom_str(enum nl80211_dfs_regions dfs_region)
+{
+ switch (dfs_region) {
+ case NL80211_DFS_FCC:
+ return "FCC";
+ case NL80211_DFS_ETSI:
+ return "ETSI";
+ case NL80211_DFS_JP:
+ return "JP";
+ default:
+ return "UNSET";
+ }
+}
+
+static u16
+ath11k_reg_adjust_bw(u16 start_freq, u16 end_freq, u16 max_bw)
+{
+ u16 bw;
+
+ if (end_freq <= start_freq)
+ return 0;
+
+ bw = end_freq - start_freq;
+ bw = min_t(u16, bw, max_bw);
+
+ if (bw >= 80 && bw < 160)
+ bw = 80;
+ else if (bw >= 40 && bw < 80)
+ bw = 40;
+ else if (bw >= 20 && bw < 40)
+ bw = 20;
+ else
+ bw = 0;
+
+ return bw;
+}
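+
+/* Example (illustrative): ath11k_reg_adjust_bw(5470, 5600, 160) clamps
+ * the 130 MHz span to min(130, 160) = 130 and then rounds down to the
+ * nearest supported channel width, returning 80. Spans narrower than
+ * 20 MHz return 0, which callers treat as "no usable sub-rule".
+ */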
+
+static void
+ath11k_reg_update_rule(struct ieee80211_reg_rule *reg_rule, u32 start_freq,
+ u32 end_freq, u32 bw, u32 ant_gain, u32 reg_pwr,
+ u32 reg_flags)
+{
+ reg_rule->freq_range.start_freq_khz = MHZ_TO_KHZ(start_freq);
+ reg_rule->freq_range.end_freq_khz = MHZ_TO_KHZ(end_freq);
+ reg_rule->freq_range.max_bandwidth_khz = MHZ_TO_KHZ(bw);
+ reg_rule->power_rule.max_antenna_gain = DBI_TO_MBI(ant_gain);
+ reg_rule->power_rule.max_eirp = DBM_TO_MBM(reg_pwr);
+ reg_rule->flags = reg_flags;
+}
+
+static void
+ath11k_reg_update_weather_radar_band(struct ath11k_base *ab,
+ struct ieee80211_regdomain *regd,
+ struct cur_reg_rule *reg_rule,
+ u8 *rule_idx, u32 flags, u16 max_bw)
+{
+ u32 start_freq;
+ u32 end_freq;
+ u16 bw;
+ u8 i;
+
+ i = *rule_idx;
+
+ /* there might be situations when even the input rule must be dropped */
+ i--;
+
+ /* frequencies below weather radar */
+ bw = ath11k_reg_adjust_bw(reg_rule->start_freq,
+ ETSI_WEATHER_RADAR_BAND_LOW, max_bw);
+ if (bw > 0) {
+ i++;
+
+ ath11k_reg_update_rule(regd->reg_rules + i,
+ reg_rule->start_freq,
+ ETSI_WEATHER_RADAR_BAND_LOW, bw,
+ reg_rule->ant_gain, reg_rule->reg_power,
+ flags);
+
+ ath11k_dbg(ab, ATH11K_DBG_REG,
+ "\t%d. (%d - %d @ %d) (%d, %d) (%d ms) (FLAGS %d)\n",
+ i + 1, reg_rule->start_freq,
+ ETSI_WEATHER_RADAR_BAND_LOW, bw, reg_rule->ant_gain,
+ reg_rule->reg_power, regd->reg_rules[i].dfs_cac_ms,
+ flags);
+ }
+
+ /* weather radar frequencies */
+ start_freq = max_t(u32, reg_rule->start_freq,
+ ETSI_WEATHER_RADAR_BAND_LOW);
+ end_freq = min_t(u32, reg_rule->end_freq, ETSI_WEATHER_RADAR_BAND_HIGH);
+
+ bw = ath11k_reg_adjust_bw(start_freq, end_freq, max_bw);
+ if (bw > 0) {
+ i++;
+
+ ath11k_reg_update_rule(regd->reg_rules + i, start_freq,
+ end_freq, bw, reg_rule->ant_gain,
+ reg_rule->reg_power, flags);
+
+ regd->reg_rules[i].dfs_cac_ms = ETSI_WEATHER_RADAR_BAND_CAC_TIMEOUT;
+
+ ath11k_dbg(ab, ATH11K_DBG_REG,
+ "\t%d. (%d - %d @ %d) (%d, %d) (%d ms) (FLAGS %d)\n",
+ i + 1, start_freq, end_freq, bw,
+ reg_rule->ant_gain, reg_rule->reg_power,
+ regd->reg_rules[i].dfs_cac_ms, flags);
+ }
+
+ /* frequencies above weather radar */
+ bw = ath11k_reg_adjust_bw(ETSI_WEATHER_RADAR_BAND_HIGH,
+ reg_rule->end_freq, max_bw);
+ if (bw > 0) {
+ i++;
+
+ ath11k_reg_update_rule(regd->reg_rules + i,
+ ETSI_WEATHER_RADAR_BAND_HIGH,
+ reg_rule->end_freq, bw,
+ reg_rule->ant_gain, reg_rule->reg_power,
+ flags);
+
+ ath11k_dbg(ab, ATH11K_DBG_REG,
+ "\t%d. (%d - %d @ %d) (%d, %d) (%d ms) (FLAGS %d)\n",
+ i + 1, ETSI_WEATHER_RADAR_BAND_HIGH,
+ reg_rule->end_freq, bw, reg_rule->ant_gain,
+ reg_rule->reg_power, regd->reg_rules[i].dfs_cac_ms,
+ flags);
+ }
+
+ *rule_idx = i;
+}
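+
+/* Example (illustrative): an ETSI DFS rule spanning the weather radar
+ * band is split into up to three sub-rules: below
+ * ETSI_WEATHER_RADAR_BAND_LOW, inside the band (with the extended
+ * ETSI_WEATHER_RADAR_BAND_CAC_TIMEOUT) and above
+ * ETSI_WEATHER_RADAR_BAND_HIGH, with *rule_idx advanced past the
+ * sub-rules actually emitted.
+ */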
+
+struct ieee80211_regdomain *
+ath11k_reg_build_regd(struct ath11k_base *ab,
+ struct cur_regulatory_info *reg_info, bool intersect)
+{
+ struct ieee80211_regdomain *tmp_regd, *default_regd, *new_regd = NULL;
+ struct cur_reg_rule *reg_rule;
+ u8 i = 0, j = 0, k = 0;
+ u8 num_rules;
+ u16 max_bw;
+ u32 flags;
+ char alpha2[3];
+
+ num_rules = reg_info->num_5ghz_reg_rules + reg_info->num_2ghz_reg_rules;
+
+ /* FIXME: Currently taking reg rules for 6 GHz only from Indoor AP mode list.
+ * This can be updated after complete 6 GHz regulatory support is added.
+ */
+ if (reg_info->is_ext_reg_event)
+ num_rules += reg_info->num_6ghz_rules_ap[WMI_REG_INDOOR_AP];
+
+ if (!num_rules)
+ goto ret;
+
+ /* Add max additional rules to accommodate weather radar band */
+ if (reg_info->dfs_region == ATH11K_DFS_REG_ETSI)
+ num_rules += 2;
+
+ tmp_regd = kzalloc(sizeof(*tmp_regd) +
+ (num_rules * sizeof(struct ieee80211_reg_rule)),
+ GFP_ATOMIC);
+ if (!tmp_regd)
+ goto ret;
+
+ memcpy(tmp_regd->alpha2, reg_info->alpha2, REG_ALPHA2_LEN + 1);
+ memcpy(alpha2, reg_info->alpha2, REG_ALPHA2_LEN + 1);
+ alpha2[2] = '\0';
+ tmp_regd->dfs_region = ath11k_map_fw_dfs_region(reg_info->dfs_region);
+
+ ath11k_dbg(ab, ATH11K_DBG_REG,
+ "Country %s, CFG Regdomain %s FW Regdomain %d, num_reg_rules %d\n",
+ alpha2, ath11k_reg_get_regdom_str(tmp_regd->dfs_region),
+ reg_info->dfs_region, num_rules);
+ /* Update reg_rules[] below. Firmware is expected to
+	 * send these rules in order (2 GHz rules first, then 5 GHz).
+ */
+ for (; i < num_rules; i++) {
+ if (reg_info->num_2ghz_reg_rules &&
+ (i < reg_info->num_2ghz_reg_rules)) {
+ reg_rule = reg_info->reg_rules_2ghz_ptr + i;
+ max_bw = min_t(u16, reg_rule->max_bw,
+ reg_info->max_bw_2ghz);
+ flags = 0;
+ } else if (reg_info->num_5ghz_reg_rules &&
+ (j < reg_info->num_5ghz_reg_rules)) {
+ reg_rule = reg_info->reg_rules_5ghz_ptr + j++;
+ max_bw = min_t(u16, reg_rule->max_bw,
+ reg_info->max_bw_5ghz);
+
+			/* FW doesn't pass the NL80211_RRF_AUTO_BW flag for
+			 * BW auto correction, so enable it by default for
+			 * all 5 GHz rules here. The regulatory core performs
+			 * BW correction if required and applies flags as
+			 * per the other BW rule flags we pass from here.
+			 */
+ flags = NL80211_RRF_AUTO_BW;
+ } else if (reg_info->is_ext_reg_event &&
+ reg_info->num_6ghz_rules_ap[WMI_REG_INDOOR_AP] &&
+ (k < reg_info->num_6ghz_rules_ap[WMI_REG_INDOOR_AP])) {
+ reg_rule = reg_info->reg_rules_6ghz_ap_ptr[WMI_REG_INDOOR_AP] +
+ k++;
+ max_bw = min_t(u16, reg_rule->max_bw,
+ reg_info->max_bw_6ghz_ap[WMI_REG_INDOOR_AP]);
+ flags = NL80211_RRF_AUTO_BW;
+ } else {
+ break;
+ }
+
+ flags |= ath11k_map_fw_reg_flags(reg_rule->flags);
+
+ ath11k_reg_update_rule(tmp_regd->reg_rules + i,
+ reg_rule->start_freq,
+ reg_rule->end_freq, max_bw,
+ reg_rule->ant_gain, reg_rule->reg_power,
+ flags);
+
+ /* Update dfs cac timeout if the dfs domain is ETSI and the
+ * new rule covers weather radar band.
+ * Default value of '0' corresponds to 60s timeout, so no
+ * need to update that for other rules.
+ */
+ if (flags & NL80211_RRF_DFS &&
+ reg_info->dfs_region == ATH11K_DFS_REG_ETSI &&
+ (reg_rule->end_freq > ETSI_WEATHER_RADAR_BAND_LOW &&
+		    reg_rule->start_freq < ETSI_WEATHER_RADAR_BAND_HIGH)) {
+ ath11k_reg_update_weather_radar_band(ab, tmp_regd,
+ reg_rule, &i,
+ flags, max_bw);
+ continue;
+ }
+
+ if (reg_info->is_ext_reg_event) {
+ ath11k_dbg(ab, ATH11K_DBG_REG,
+ "\t%d. (%d - %d @ %d) (%d, %d) (%d ms) (FLAGS %d) (%d, %d)\n",
+ i + 1, reg_rule->start_freq, reg_rule->end_freq,
+ max_bw, reg_rule->ant_gain, reg_rule->reg_power,
+ tmp_regd->reg_rules[i].dfs_cac_ms, flags,
+ reg_rule->psd_flag, reg_rule->psd_eirp);
+ } else {
+ ath11k_dbg(ab, ATH11K_DBG_REG,
+ "\t%d. (%d - %d @ %d) (%d, %d) (%d ms) (FLAGS %d)\n",
+ i + 1, reg_rule->start_freq, reg_rule->end_freq,
+ max_bw, reg_rule->ant_gain, reg_rule->reg_power,
+ tmp_regd->reg_rules[i].dfs_cac_ms,
+ flags);
+ }
+ }
+
+ tmp_regd->n_reg_rules = i;
+
+ if (intersect) {
+ default_regd = ab->default_regd[reg_info->phy_id];
+
+ /* Get a new regd by intersecting the received regd with
+ * our default regd.
+ */
+ new_regd = ath11k_regd_intersect(default_regd, tmp_regd);
+ kfree(tmp_regd);
+ if (!new_regd) {
+ ath11k_warn(ab, "Unable to create intersected regdomain\n");
+ goto ret;
+ }
+ } else {
+ new_regd = tmp_regd;
+ }
+
+ret:
+ return new_regd;
+}
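+
+/* Typical flow (illustrative): the WMI_REG_CHAN_LIST_CC event handler
+ * builds a regdomain via ath11k_reg_build_regd(), stores it in
+ * ab->default_regd[] or ab->new_regd[], and ath11k_regd_update() later
+ * hands a copy to the regulatory core through regulatory_set_wiphy_regd().
+ */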
+
+void ath11k_regd_update_work(struct work_struct *work)
+{
+ struct ath11k *ar = container_of(work, struct ath11k,
+ regd_update_work);
+ int ret;
+
+ ret = ath11k_regd_update(ar);
+ if (ret) {
+		/* Firmware has already moved to the new regd. We need
+		 * to maintain channel consistency across FW, host driver
+		 * and userspace. Hence, as a fallback mechanism, we can
+		 * set the previous or default country code in the firmware.
+		 */
+ /* TODO: Implement Fallback Mechanism */
+ }
+}
+
+void ath11k_reg_init(struct ath11k *ar)
+{
+ ar->hw->wiphy->regulatory_flags = REGULATORY_WIPHY_SELF_MANAGED;
+ ar->hw->wiphy->reg_notifier = ath11k_reg_notifier;
+}
+
+void ath11k_reg_free(struct ath11k_base *ab)
+{
+ int i;
+
+ for (i = 0; i < ab->hw_params.max_radios; i++) {
+ kfree(ab->default_regd[i]);
+ kfree(ab->new_regd[i]);
+ }
+}
diff --git a/drivers/net/wireless/ath/ath11k/reg.h b/drivers/net/wireless/ath/ath11k/reg.h
new file mode 100644
index 0000000000..2f284f2637
--- /dev/null
+++ b/drivers/net/wireless/ath/ath11k/reg.h
@@ -0,0 +1,36 @@
+/* SPDX-License-Identifier: BSD-3-Clause-Clear */
+/*
+ * Copyright (c) 2019 The Linux Foundation. All rights reserved.
+ */
+
+#ifndef ATH11K_REG_H
+#define ATH11K_REG_H
+
+#include <linux/kernel.h>
+#include <net/regulatory.h>
+
+struct ath11k_base;
+struct ath11k;
+
+/* DFS regdomains supported by Firmware */
+enum ath11k_dfs_region {
+ ATH11K_DFS_REG_UNSET,
+ ATH11K_DFS_REG_FCC,
+ ATH11K_DFS_REG_ETSI,
+ ATH11K_DFS_REG_MKK,
+ ATH11K_DFS_REG_CN,
+ ATH11K_DFS_REG_KR,
+ ATH11K_DFS_REG_MKK_N,
+ ATH11K_DFS_REG_UNDEF,
+};
+
+/* ATH11K Regulatory APIs */
+void ath11k_reg_init(struct ath11k *ar);
+void ath11k_reg_free(struct ath11k_base *ab);
+void ath11k_regd_update_work(struct work_struct *work);
+struct ieee80211_regdomain *
+ath11k_reg_build_regd(struct ath11k_base *ab,
+ struct cur_regulatory_info *reg_info, bool intersect);
+int ath11k_regd_update(struct ath11k *ar);
+int ath11k_reg_update_chan_list(struct ath11k *ar, bool wait);
+#endif
diff --git a/drivers/net/wireless/ath/ath11k/rx_desc.h b/drivers/net/wireless/ath/ath11k/rx_desc.h
new file mode 100644
index 0000000000..786d5f36f5
--- /dev/null
+++ b/drivers/net/wireless/ath/ath11k/rx_desc.h
@@ -0,0 +1,1505 @@
+/* SPDX-License-Identifier: BSD-3-Clause-Clear */
+/*
+ * Copyright (c) 2018-2019 The Linux Foundation. All rights reserved.
+ */
+#ifndef ATH11K_RX_DESC_H
+#define ATH11K_RX_DESC_H
+
+enum rx_desc_rxpcu_filter {
+ RX_DESC_RXPCU_FILTER_PASS,
+ RX_DESC_RXPCU_FILTER_MONITOR_CLIENT,
+ RX_DESC_RXPCU_FILTER_MONITOR_OTHER,
+};
+
+/* rxpcu_filter_pass
+ * This MPDU passed the normal frame filter programming of rxpcu.
+ *
+ * rxpcu_filter_monitor_client
+ * This MPDU did not pass the regular frame filter and would
+ * have been dropped, were it not for the frame fitting into the
+ * 'monitor_client' category.
+ *
+ * rxpcu_filter_monitor_other
+ * This MPDU did not pass the regular frame filter and also did
+ * not pass the rxpcu_monitor_client filter. It would have been
+ *		dropped except that it did pass the 'monitor_other' category.
+ */
+
+#define RX_DESC_INFO0_RXPCU_MPDU_FITLER GENMASK(1, 0)
+#define RX_DESC_INFO0_SW_FRAME_GRP_ID GENMASK(8, 2)
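+
+/* Illustrative decode (not part of this header): fields packed with
+ * GENMASK() are extracted with FIELD_GET() from <linux/bitfield.h>,
+ * e.g., assuming a struct rx_attention pointer named attn:
+ *
+ *	u8 grp_id = FIELD_GET(RX_DESC_INFO0_SW_FRAME_GRP_ID,
+ *			      __le16_to_cpu(attn->info0));
+ */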
+
+enum rx_desc_sw_frame_grp_id {
+ RX_DESC_SW_FRAME_GRP_ID_NDP_FRAME,
+ RX_DESC_SW_FRAME_GRP_ID_MCAST_DATA,
+ RX_DESC_SW_FRAME_GRP_ID_UCAST_DATA,
+ RX_DESC_SW_FRAME_GRP_ID_NULL_DATA,
+ RX_DESC_SW_FRAME_GRP_ID_MGMT_0000,
+ RX_DESC_SW_FRAME_GRP_ID_MGMT_0001,
+ RX_DESC_SW_FRAME_GRP_ID_MGMT_0010,
+ RX_DESC_SW_FRAME_GRP_ID_MGMT_0011,
+ RX_DESC_SW_FRAME_GRP_ID_MGMT_0100,
+ RX_DESC_SW_FRAME_GRP_ID_MGMT_0101,
+ RX_DESC_SW_FRAME_GRP_ID_MGMT_0110,
+ RX_DESC_SW_FRAME_GRP_ID_MGMT_0111,
+ RX_DESC_SW_FRAME_GRP_ID_MGMT_1000,
+ RX_DESC_SW_FRAME_GRP_ID_MGMT_1001,
+ RX_DESC_SW_FRAME_GRP_ID_MGMT_1010,
+ RX_DESC_SW_FRAME_GRP_ID_MGMT_1011,
+ RX_DESC_SW_FRAME_GRP_ID_MGMT_1100,
+ RX_DESC_SW_FRAME_GRP_ID_MGMT_1101,
+ RX_DESC_SW_FRAME_GRP_ID_MGMT_1110,
+ RX_DESC_SW_FRAME_GRP_ID_MGMT_1111,
+ RX_DESC_SW_FRAME_GRP_ID_CTRL_0000,
+ RX_DESC_SW_FRAME_GRP_ID_CTRL_0001,
+ RX_DESC_SW_FRAME_GRP_ID_CTRL_0010,
+ RX_DESC_SW_FRAME_GRP_ID_CTRL_0011,
+ RX_DESC_SW_FRAME_GRP_ID_CTRL_0100,
+ RX_DESC_SW_FRAME_GRP_ID_CTRL_0101,
+ RX_DESC_SW_FRAME_GRP_ID_CTRL_0110,
+ RX_DESC_SW_FRAME_GRP_ID_CTRL_0111,
+ RX_DESC_SW_FRAME_GRP_ID_CTRL_1000,
+ RX_DESC_SW_FRAME_GRP_ID_CTRL_1001,
+ RX_DESC_SW_FRAME_GRP_ID_CTRL_1010,
+ RX_DESC_SW_FRAME_GRP_ID_CTRL_1011,
+ RX_DESC_SW_FRAME_GRP_ID_CTRL_1100,
+ RX_DESC_SW_FRAME_GRP_ID_CTRL_1101,
+ RX_DESC_SW_FRAME_GRP_ID_CTRL_1110,
+ RX_DESC_SW_FRAME_GRP_ID_CTRL_1111,
+ RX_DESC_SW_FRAME_GRP_ID_UNSUPPORTED,
+ RX_DESC_SW_FRAME_GRP_ID_PHY_ERR,
+};
+
+enum rx_desc_decap_type {
+ RX_DESC_DECAP_TYPE_RAW,
+ RX_DESC_DECAP_TYPE_NATIVE_WIFI,
+ RX_DESC_DECAP_TYPE_ETHERNET2_DIX,
+ RX_DESC_DECAP_TYPE_8023,
+};
+
+enum rx_desc_decrypt_status_code {
+ RX_DESC_DECRYPT_STATUS_CODE_OK,
+ RX_DESC_DECRYPT_STATUS_CODE_UNPROTECTED_FRAME,
+ RX_DESC_DECRYPT_STATUS_CODE_DATA_ERR,
+ RX_DESC_DECRYPT_STATUS_CODE_KEY_INVALID,
+ RX_DESC_DECRYPT_STATUS_CODE_PEER_ENTRY_INVALID,
+ RX_DESC_DECRYPT_STATUS_CODE_OTHER,
+};
+
+#define RX_ATTENTION_INFO1_FIRST_MPDU BIT(0)
+#define RX_ATTENTION_INFO1_RSVD_1A BIT(1)
+#define RX_ATTENTION_INFO1_MCAST_BCAST BIT(2)
+#define RX_ATTENTION_INFO1_AST_IDX_NOT_FOUND BIT(3)
+#define RX_ATTENTION_INFO1_AST_IDX_TIMEDOUT BIT(4)
+#define RX_ATTENTION_INFO1_POWER_MGMT BIT(5)
+#define RX_ATTENTION_INFO1_NON_QOS BIT(6)
+#define RX_ATTENTION_INFO1_NULL_DATA BIT(7)
+#define RX_ATTENTION_INFO1_MGMT_TYPE BIT(8)
+#define RX_ATTENTION_INFO1_CTRL_TYPE BIT(9)
+#define RX_ATTENTION_INFO1_MORE_DATA BIT(10)
+#define RX_ATTENTION_INFO1_EOSP BIT(11)
+#define RX_ATTENTION_INFO1_A_MSDU_ERROR BIT(12)
+#define RX_ATTENTION_INFO1_FRAGMENT BIT(13)
+#define RX_ATTENTION_INFO1_ORDER BIT(14)
+#define RX_ATTENTION_INFO1_CCE_MATCH BIT(15)
+#define RX_ATTENTION_INFO1_OVERFLOW_ERR BIT(16)
+#define RX_ATTENTION_INFO1_MSDU_LEN_ERR BIT(17)
+#define RX_ATTENTION_INFO1_TCP_UDP_CKSUM_FAIL BIT(18)
+#define RX_ATTENTION_INFO1_IP_CKSUM_FAIL BIT(19)
+#define RX_ATTENTION_INFO1_SA_IDX_INVALID BIT(20)
+#define RX_ATTENTION_INFO1_DA_IDX_INVALID BIT(21)
+#define RX_ATTENTION_INFO1_RSVD_1B BIT(22)
+#define RX_ATTENTION_INFO1_RX_IN_TX_DECRYPT_BYP BIT(23)
+#define RX_ATTENTION_INFO1_ENCRYPT_REQUIRED BIT(24)
+#define RX_ATTENTION_INFO1_DIRECTED BIT(25)
+#define RX_ATTENTION_INFO1_BUFFER_FRAGMENT BIT(26)
+#define RX_ATTENTION_INFO1_MPDU_LEN_ERR BIT(27)
+#define RX_ATTENTION_INFO1_TKIP_MIC_ERR BIT(28)
+#define RX_ATTENTION_INFO1_DECRYPT_ERR BIT(29)
+#define RX_ATTENTION_INFO1_UNDECRYPT_FRAME_ERR BIT(30)
+#define RX_ATTENTION_INFO1_FCS_ERR BIT(31)
+
+#define RX_ATTENTION_INFO2_FLOW_IDX_TIMEOUT BIT(0)
+#define RX_ATTENTION_INFO2_FLOW_IDX_INVALID BIT(1)
+#define RX_ATTENTION_INFO2_WIFI_PARSER_ERR BIT(2)
+#define RX_ATTENTION_INFO2_AMSDU_PARSER_ERR BIT(3)
+#define RX_ATTENTION_INFO2_SA_IDX_TIMEOUT BIT(4)
+#define RX_ATTENTION_INFO2_DA_IDX_TIMEOUT BIT(5)
+#define RX_ATTENTION_INFO2_MSDU_LIMIT_ERR BIT(6)
+#define RX_ATTENTION_INFO2_DA_IS_VALID BIT(7)
+#define RX_ATTENTION_INFO2_DA_IS_MCBC BIT(8)
+#define RX_ATTENTION_INFO2_SA_IS_VALID BIT(9)
+#define RX_ATTENTION_INFO2_DCRYPT_STATUS_CODE GENMASK(12, 10)
+#define RX_ATTENTION_INFO2_RX_BITMAP_NOT_UPDED BIT(13)
+#define RX_ATTENTION_INFO2_MSDU_DONE BIT(31)
+
+struct rx_attention {
+ __le16 info0;
+ __le16 phy_ppdu_id;
+ __le32 info1;
+ __le32 info2;
+} __packed;
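+
+/* Illustrative only: software typically verifies msdu_done before trusting
+ * the rest of the descriptor; a hypothetical check could be
+ *
+ *	if (!(__le32_to_cpu(attn->info2) & RX_ATTENTION_INFO2_MSDU_DONE))
+ *		return -EINPROGRESS;
+ */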
+
+/* rx_attention
+ *
+ * rxpcu_mpdu_filter_in_category
+ * Field indicates what the reason was that this mpdu frame
+ * was allowed to come into the receive path by rxpcu. Values
+ * are defined in enum %RX_DESC_RXPCU_FILTER_*.
+ *
+ * sw_frame_group_id
+ * SW processes frames based on certain classifications. Values
+ * are defined in enum %RX_DESC_SW_FRAME_GRP_ID_*.
+ *
+ * phy_ppdu_id
+ * A ppdu counter value that PHY increments for every PPDU
+ * received. The counter value wraps around.
+ *
+ * first_mpdu
+ * Indicates the first MSDU of the PPDU. If both first_mpdu
+ *		and last_mpdu are set in the MSDU then this is not an
+ *		A-MPDU frame but a stand-alone MPDU. Interior MPDUs in an
+ * A-MPDU shall have both first_mpdu and last_mpdu bits set to
+ * 0. The PPDU start status will only be valid when this bit
+ * is set.
+ *
+ * mcast_bcast
+ * Multicast / broadcast indicator. Only set when the MAC
+ * address 1 bit 0 is set indicating mcast/bcast and the BSSID
+ * matches one of the 4 BSSID registers. Only set when
+ * first_msdu is set.
+ *
+ * ast_index_not_found
+ * Only valid when first_msdu is set. Indicates no AST matching
+ * entries within the max search count.
+ *
+ * ast_index_timeout
+ * Only valid when first_msdu is set. Indicates an unsuccessful
+ * search in the address search table due to timeout.
+ *
+ * power_mgmt
+ * Power management bit set in the 802.11 header. Only set
+ * when first_msdu is set.
+ *
+ * non_qos
+ *		Set if packet is not a QoS data frame. Only set when
+ * first_msdu is set.
+ *
+ * null_data
+ * Set if frame type indicates either null data or QoS null
+ * data format. Only set when first_msdu is set.
+ *
+ * mgmt_type
+ * Set if packet is a management packet. Only set when
+ * first_msdu is set.
+ *
+ * ctrl_type
+ * Set if packet is a control packet. Only set when first_msdu
+ * is set.
+ *
+ * more_data
+ * Set if more bit in frame control is set. Only set when
+ * first_msdu is set.
+ *
+ * eosp
+ * Set if the EOSP (end of service period) bit in the QoS
+ * control field is set. Only set when first_msdu is set.
+ *
+ * a_msdu_error
+ * Set if number of MSDUs in A-MSDU is above a threshold or if the
+ * size of the MSDU is invalid. This receive buffer will contain
+ * all of the remainder of MSDUs in this MPDU w/o decapsulation.
+ *
+ * fragment
+ * Indicates that this is an 802.11 fragment frame. This is
+ * set when either the more_frag bit is set in the frame
+ * control or the fragment number is not zero. Only set when
+ * first_msdu is set.
+ *
+ * order
+ * Set if the order bit in the frame control is set. Only set
+ * when first_msdu is set.
+ *
+ * cce_match
+ * Indicates that this status has a corresponding MSDU that
+ * requires FW processing. The OLE will have classification
+ * ring mask registers which will indicate the ring(s) for
+ * packets and descriptors which need FW attention.
+ *
+ * overflow_err
+ * PCU Receive FIFO does not have enough space to store the
+ *		full receive packet. Enough space is reserved in the
+ *		receive FIFO only for the status to be written. The
+ *		remaining packets of this MPDU in the PPDU will be filtered
+ *		and no Ack response will be transmitted.
+ *
+ * msdu_length_err
+ * Indicates that the MSDU length from the 802.3 encapsulated
+ * length field extends beyond the MPDU boundary.
+ *
+ * tcp_udp_chksum_fail
+ * Indicates that the computed checksum (tcp_udp_chksum) did
+ * not match the checksum in the TCP/UDP header.
+ *
+ * ip_chksum_fail
+ * Indicates that the computed checksum did not match the
+ * checksum in the IP header.
+ *
+ * sa_idx_invalid
+ * Indicates no matching entry was found in the address search
+ * table for the source MAC address.
+ *
+ * da_idx_invalid
+ * Indicates no matching entry was found in the address search
+ * table for the destination MAC address.
+ *
+ * rx_in_tx_decrypt_byp
+ * Indicates that RX packet is not decrypted as Crypto is busy
+ * with TX packet processing.
+ *
+ * encrypt_required
+ * Indicates that this data type frame is not encrypted even if
+ * the policy for this MPDU requires encryption as indicated in
+ * the peer table key type.
+ *
+ * directed
+ * MPDU is a directed packet which means that the RA matched
+ * our STA addresses. In proxySTA it means that the TA matched
+ * an entry in our address search table with the corresponding
+ *		'no_ack' bit in the address search entry cleared.
+ *
+ * buffer_fragment
+ * Indicates that at least one of the rx buffers has been
+ * fragmented. If set the FW should look at the rx_frag_info
+ * descriptor described below.
+ *
+ * mpdu_length_err
+ *		Indicates that the MPDU was prematurely terminated
+ * resulting in a truncated MPDU. Don't trust the MPDU length
+ * field.
+ *
+ * tkip_mic_err
+ * Indicates that the MPDU Michael integrity check failed
+ *
+ * decrypt_err
+ * Indicates that the MPDU decrypt integrity check failed
+ *
+ * fcs_err
+ * Indicates that the MPDU FCS check failed
+ *
+ * flow_idx_timeout
+ * Indicates an unsuccessful flow search due to the expiring of
+ * the search timer.
+ *
+ * flow_idx_invalid
+ * flow id is not valid.
+ *
+ * amsdu_parser_error
+ *		A-MSDU could not be properly de-aggregated.
+ *
+ * sa_idx_timeout
+ * Indicates an unsuccessful search for the source MAC address
+ * due to the expiring of the search timer.
+ *
+ * da_idx_timeout
+ * Indicates an unsuccessful search for the destination MAC
+ * address due to the expiring of the search timer.
+ *
+ * msdu_limit_error
+ * Indicates that the MSDU threshold was exceeded and thus
+ * all the rest of the MSDUs will not be scattered and will not
+ *		be decapsulated but will be DMA'ed in RAW format as a single
+ * MSDU buffer.
+ *
+ * da_is_valid
+ * Indicates that OLE found a valid DA entry.
+ *
+ * da_is_mcbc
+ *		Field only valid if da_is_valid is set. Indicates the DA address
+ * was a Multicast or Broadcast address.
+ *
+ * sa_is_valid
+ * Indicates that OLE found a valid SA entry.
+ *
+ * decrypt_status_code
+ * Field provides insight into the decryption performed. Values are
+ * defined in enum %RX_DESC_DECRYPT_STATUS_CODE*.
+ *
+ * rx_bitmap_not_updated
+ * Frame is received, but RXPCU could not update the receive bitmap
+ * due to (temporary) fifo constraints.
+ *
+ * msdu_done
+ * If set indicates that the RX packet data, RX header data, RX
+ * PPDU start descriptor, RX MPDU start/end descriptor, RX MSDU
+ * start/end descriptors and RX Attention descriptor are all
+ * valid. This bit must be in the last octet of the
+ * descriptor.
+ */
+
+#define RX_MPDU_START_INFO0_NDP_FRAME BIT(9)
+#define RX_MPDU_START_INFO0_PHY_ERR BIT(10)
+#define RX_MPDU_START_INFO0_PHY_ERR_MPDU_HDR BIT(11)
+#define RX_MPDU_START_INFO0_PROTO_VER_ERR BIT(12)
+#define RX_MPDU_START_INFO0_AST_LOOKUP_VALID BIT(13)
+
+#define RX_MPDU_START_INFO1_MPDU_FCTRL_VALID BIT(0)
+#define RX_MPDU_START_INFO1_MPDU_DUR_VALID BIT(1)
+#define RX_MPDU_START_INFO1_MAC_ADDR1_VALID BIT(2)
+#define RX_MPDU_START_INFO1_MAC_ADDR2_VALID BIT(3)
+#define RX_MPDU_START_INFO1_MAC_ADDR3_VALID BIT(4)
+#define RX_MPDU_START_INFO1_MAC_ADDR4_VALID BIT(5)
+#define RX_MPDU_START_INFO1_MPDU_SEQ_CTRL_VALID BIT(6)
+#define RX_MPDU_START_INFO1_MPDU_QOS_CTRL_VALID BIT(7)
+#define RX_MPDU_START_INFO1_MPDU_HT_CTRL_VALID BIT(8)
+#define RX_MPDU_START_INFO1_ENCRYPT_INFO_VALID BIT(9)
+#define RX_MPDU_START_INFO1_MPDU_FRAG_NUMBER GENMASK(13, 10)
+#define RX_MPDU_START_INFO1_MORE_FRAG_FLAG BIT(14)
+#define RX_MPDU_START_INFO1_FROM_DS BIT(16)
+#define RX_MPDU_START_INFO1_TO_DS BIT(17)
+#define RX_MPDU_START_INFO1_ENCRYPTED BIT(18)
+#define RX_MPDU_START_INFO1_MPDU_RETRY BIT(19)
+#define RX_MPDU_START_INFO1_MPDU_SEQ_NUM GENMASK(31, 20)
+
+#define RX_MPDU_START_INFO2_EPD_EN BIT(0)
+#define RX_MPDU_START_INFO2_ALL_FRAME_ENCPD BIT(1)
+#define RX_MPDU_START_INFO2_ENC_TYPE GENMASK(5, 2)
+#define RX_MPDU_START_INFO2_VAR_WEP_KEY_WIDTH GENMASK(7, 6)
+#define RX_MPDU_START_INFO2_MESH_STA BIT(8)
+#define RX_MPDU_START_INFO2_BSSID_HIT BIT(9)
+#define RX_MPDU_START_INFO2_BSSID_NUM GENMASK(13, 10)
+#define RX_MPDU_START_INFO2_TID GENMASK(17, 14)
+#define RX_MPDU_START_INFO2_TID_WCN6855 GENMASK(18, 15)
+
+#define RX_MPDU_START_INFO3_REO_DEST_IND GENMASK(4, 0)
+#define RX_MPDU_START_INFO3_FLOW_ID_TOEPLITZ BIT(7)
+#define RX_MPDU_START_INFO3_PKT_SEL_FP_UCAST_DATA BIT(8)
+#define RX_MPDU_START_INFO3_PKT_SEL_FP_MCAST_DATA BIT(9)
+#define RX_MPDU_START_INFO3_PKT_SEL_FP_CTRL_BAR BIT(10)
+#define RX_MPDU_START_INFO3_RXDMA0_SRC_RING_SEL GENMASK(12, 11)
+#define RX_MPDU_START_INFO3_RXDMA0_DST_RING_SEL GENMASK(14, 13)
+
+#define RX_MPDU_START_INFO4_REO_QUEUE_DESC_HI GENMASK(7, 0)
+#define RX_MPDU_START_INFO4_RECV_QUEUE_NUM GENMASK(23, 8)
+#define RX_MPDU_START_INFO4_PRE_DELIM_ERR_WARN BIT(24)
+#define RX_MPDU_START_INFO4_FIRST_DELIM_ERR BIT(25)
+
+#define RX_MPDU_START_INFO5_KEY_ID GENMASK(7, 0)
+#define RX_MPDU_START_INFO5_NEW_PEER_ENTRY BIT(8)
+#define RX_MPDU_START_INFO5_DECRYPT_NEEDED BIT(9)
+#define RX_MPDU_START_INFO5_DECAP_TYPE GENMASK(11, 10)
+#define RX_MPDU_START_INFO5_VLAN_TAG_C_PADDING BIT(12)
+#define RX_MPDU_START_INFO5_VLAN_TAG_S_PADDING BIT(13)
+#define RX_MPDU_START_INFO5_STRIP_VLAN_TAG_C BIT(14)
+#define RX_MPDU_START_INFO5_STRIP_VLAN_TAG_S BIT(15)
+#define RX_MPDU_START_INFO5_PRE_DELIM_COUNT GENMASK(27, 16)
+#define RX_MPDU_START_INFO5_AMPDU_FLAG BIT(28)
+#define RX_MPDU_START_INFO5_BAR_FRAME BIT(29)
+
+#define RX_MPDU_START_INFO6_MPDU_LEN GENMASK(13, 0)
+#define RX_MPDU_START_INFO6_FIRST_MPDU BIT(14)
+#define RX_MPDU_START_INFO6_MCAST_BCAST BIT(15)
+#define RX_MPDU_START_INFO6_AST_IDX_NOT_FOUND BIT(16)
+#define RX_MPDU_START_INFO6_AST_IDX_TIMEOUT BIT(17)
+#define RX_MPDU_START_INFO6_POWER_MGMT BIT(18)
+#define RX_MPDU_START_INFO6_NON_QOS BIT(19)
+#define RX_MPDU_START_INFO6_NULL_DATA BIT(20)
+#define RX_MPDU_START_INFO6_MGMT_TYPE BIT(21)
+#define RX_MPDU_START_INFO6_CTRL_TYPE BIT(22)
+#define RX_MPDU_START_INFO6_MORE_DATA BIT(23)
+#define RX_MPDU_START_INFO6_EOSP BIT(24)
+#define RX_MPDU_START_INFO6_FRAGMENT BIT(25)
+#define RX_MPDU_START_INFO6_ORDER BIT(26)
+#define RX_MPDU_START_INFO6_UAPSD_TRIGGER BIT(27)
+#define RX_MPDU_START_INFO6_ENCRYPT_REQUIRED BIT(28)
+#define RX_MPDU_START_INFO6_DIRECTED BIT(29)
+
+#define RX_MPDU_START_RAW_MPDU BIT(0)
+
+struct rx_mpdu_start_ipq8074 {
+ __le16 info0;
+ __le16 phy_ppdu_id;
+ __le16 ast_index;
+ __le16 sw_peer_id;
+ __le32 info1;
+ __le32 info2;
+ __le32 pn[4];
+ __le32 peer_meta_data;
+ __le32 info3;
+ __le32 reo_queue_desc_lo;
+ __le32 info4;
+ __le32 info5;
+ __le32 info6;
+ __le16 frame_ctrl;
+ __le16 duration;
+ u8 addr1[ETH_ALEN];
+ u8 addr2[ETH_ALEN];
+ u8 addr3[ETH_ALEN];
+ __le16 seq_ctrl;
+ u8 addr4[ETH_ALEN];
+ __le16 qos_ctrl;
+ __le32 ht_ctrl;
+ __le32 raw;
+} __packed;
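+
+/* Illustrative decode (hypothetical, not part of this header): e.g. the
+ * 12-bit sequence number packed into info1 of rx_mpdu_start_ipq8074:
+ *
+ *	u16 seq_no = FIELD_GET(RX_MPDU_START_INFO1_MPDU_SEQ_NUM,
+ *			       __le32_to_cpu(mpdu_start->info1));
+ */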
+
+#define RX_MPDU_START_INFO7_REO_DEST_IND GENMASK(4, 0)
+#define RX_MPDU_START_INFO7_LMAC_PEER_ID_MSB GENMASK(6, 5)
+#define RX_MPDU_START_INFO7_FLOW_ID_TOEPLITZ BIT(7)
+#define RX_MPDU_START_INFO7_PKT_SEL_FP_UCAST_DATA BIT(8)
+#define RX_MPDU_START_INFO7_PKT_SEL_FP_MCAST_DATA BIT(9)
+#define RX_MPDU_START_INFO7_PKT_SEL_FP_CTRL_BAR BIT(10)
+#define RX_MPDU_START_INFO7_RXDMA0_SRC_RING_SEL GENMASK(12, 11)
+#define RX_MPDU_START_INFO7_RXDMA0_DST_RING_SEL GENMASK(14, 13)
+
+#define RX_MPDU_START_INFO8_REO_QUEUE_DESC_HI GENMASK(7, 0)
+#define RX_MPDU_START_INFO8_RECV_QUEUE_NUM GENMASK(23, 8)
+#define RX_MPDU_START_INFO8_PRE_DELIM_ERR_WARN BIT(24)
+#define RX_MPDU_START_INFO8_FIRST_DELIM_ERR BIT(25)
+
+#define RX_MPDU_START_INFO9_EPD_EN BIT(0)
+#define RX_MPDU_START_INFO9_ALL_FRAME_ENCPD BIT(1)
+#define RX_MPDU_START_INFO9_ENC_TYPE GENMASK(5, 2)
+#define RX_MPDU_START_INFO9_VAR_WEP_KEY_WIDTH GENMASK(7, 6)
+#define RX_MPDU_START_INFO9_MESH_STA GENMASK(9, 8)
+#define RX_MPDU_START_INFO9_BSSID_HIT BIT(10)
+#define RX_MPDU_START_INFO9_BSSID_NUM GENMASK(14, 11)
+#define RX_MPDU_START_INFO9_TID GENMASK(18, 15)
+
+#define RX_MPDU_START_INFO10_RXPCU_MPDU_FLTR GENMASK(1, 0)
+#define RX_MPDU_START_INFO10_SW_FRAME_GRP_ID GENMASK(8, 2)
+#define RX_MPDU_START_INFO10_NDP_FRAME BIT(9)
+#define RX_MPDU_START_INFO10_PHY_ERR BIT(10)
+#define RX_MPDU_START_INFO10_PHY_ERR_MPDU_HDR BIT(11)
+#define RX_MPDU_START_INFO10_PROTO_VER_ERR BIT(12)
+#define RX_MPDU_START_INFO10_AST_LOOKUP_VALID BIT(13)
+
+#define RX_MPDU_START_INFO11_MPDU_FCTRL_VALID BIT(0)
+#define RX_MPDU_START_INFO11_MPDU_DUR_VALID BIT(1)
+#define RX_MPDU_START_INFO11_MAC_ADDR1_VALID BIT(2)
+#define RX_MPDU_START_INFO11_MAC_ADDR2_VALID BIT(3)
+#define RX_MPDU_START_INFO11_MAC_ADDR3_VALID BIT(4)
+#define RX_MPDU_START_INFO11_MAC_ADDR4_VALID BIT(5)
+#define RX_MPDU_START_INFO11_MPDU_SEQ_CTRL_VALID BIT(6)
+#define RX_MPDU_START_INFO11_MPDU_QOS_CTRL_VALID BIT(7)
+#define RX_MPDU_START_INFO11_MPDU_HT_CTRL_VALID BIT(8)
+#define RX_MPDU_START_INFO11_ENCRYPT_INFO_VALID BIT(9)
+#define RX_MPDU_START_INFO11_MPDU_FRAG_NUMBER GENMASK(13, 10)
+#define RX_MPDU_START_INFO11_MORE_FRAG_FLAG BIT(14)
+#define RX_MPDU_START_INFO11_FROM_DS BIT(16)
+#define RX_MPDU_START_INFO11_TO_DS BIT(17)
+#define RX_MPDU_START_INFO11_ENCRYPTED BIT(18)
+#define RX_MPDU_START_INFO11_MPDU_RETRY BIT(19)
+#define RX_MPDU_START_INFO11_MPDU_SEQ_NUM GENMASK(31, 20)
+
+#define RX_MPDU_START_INFO12_KEY_ID GENMASK(7, 0)
+#define RX_MPDU_START_INFO12_NEW_PEER_ENTRY BIT(8)
+#define RX_MPDU_START_INFO12_DECRYPT_NEEDED BIT(9)
+#define RX_MPDU_START_INFO12_DECAP_TYPE GENMASK(11, 10)
+#define RX_MPDU_START_INFO12_VLAN_TAG_C_PADDING BIT(12)
+#define RX_MPDU_START_INFO12_VLAN_TAG_S_PADDING BIT(13)
+#define RX_MPDU_START_INFO12_STRIP_VLAN_TAG_C BIT(14)
+#define RX_MPDU_START_INFO12_STRIP_VLAN_TAG_S BIT(15)
+#define RX_MPDU_START_INFO12_PRE_DELIM_COUNT GENMASK(27, 16)
+#define RX_MPDU_START_INFO12_AMPDU_FLAG BIT(28)
+#define RX_MPDU_START_INFO12_BAR_FRAME BIT(29)
+#define RX_MPDU_START_INFO12_RAW_MPDU BIT(30)
+
+#define RX_MPDU_START_INFO13_MPDU_LEN GENMASK(13, 0)
+#define RX_MPDU_START_INFO13_FIRST_MPDU BIT(14)
+#define RX_MPDU_START_INFO13_MCAST_BCAST BIT(15)
+#define RX_MPDU_START_INFO13_AST_IDX_NOT_FOUND BIT(16)
+#define RX_MPDU_START_INFO13_AST_IDX_TIMEOUT BIT(17)
+#define RX_MPDU_START_INFO13_POWER_MGMT BIT(18)
+#define RX_MPDU_START_INFO13_NON_QOS BIT(19)
+#define RX_MPDU_START_INFO13_NULL_DATA BIT(20)
+#define RX_MPDU_START_INFO13_MGMT_TYPE BIT(21)
+#define RX_MPDU_START_INFO13_CTRL_TYPE BIT(22)
+#define RX_MPDU_START_INFO13_MORE_DATA BIT(23)
+#define RX_MPDU_START_INFO13_EOSP BIT(24)
+#define RX_MPDU_START_INFO13_FRAGMENT BIT(25)
+#define RX_MPDU_START_INFO13_ORDER BIT(26)
+#define RX_MPDU_START_INFO13_UAPSD_TRIGGER BIT(27)
+#define RX_MPDU_START_INFO13_ENCRYPT_REQUIRED BIT(28)
+#define RX_MPDU_START_INFO13_DIRECTED BIT(29)
+#define RX_MPDU_START_INFO13_AMSDU_PRESENT BIT(30)
+
+struct rx_mpdu_start_qcn9074 {
+ __le32 info7;
+ __le32 reo_queue_desc_lo;
+ __le32 info8;
+ __le32 pn[4];
+ __le32 info9;
+ __le32 peer_meta_data;
+ __le16 info10;
+ __le16 phy_ppdu_id;
+ __le16 ast_index;
+ __le16 sw_peer_id;
+ __le32 info11;
+ __le32 info12;
+ __le32 info13;
+ __le16 frame_ctrl;
+ __le16 duration;
+ u8 addr1[ETH_ALEN];
+ u8 addr2[ETH_ALEN];
+ u8 addr3[ETH_ALEN];
+ __le16 seq_ctrl;
+ u8 addr4[ETH_ALEN];
+ __le16 qos_ctrl;
+ __le32 ht_ctrl;
+} __packed;
+
+struct rx_mpdu_start_wcn6855 {
+ __le32 info3;
+ __le32 reo_queue_desc_lo;
+ __le32 info4;
+ __le32 pn[4];
+ __le32 info2;
+ __le32 peer_meta_data;
+ __le16 info0;
+ __le16 phy_ppdu_id;
+ __le16 ast_index;
+ __le16 sw_peer_id;
+ __le32 info1;
+ __le32 info5;
+ __le32 info6;
+ __le16 frame_ctrl;
+ __le16 duration;
+ u8 addr1[ETH_ALEN];
+ u8 addr2[ETH_ALEN];
+ u8 addr3[ETH_ALEN];
+ __le16 seq_ctrl;
+ u8 addr4[ETH_ALEN];
+ __le16 qos_ctrl;
+ __le32 ht_ctrl;
+} __packed;
+
+/* rx_mpdu_start
+ *
+ * rxpcu_mpdu_filter_in_category
+ * Field indicates what the reason was that this mpdu frame
+ * was allowed to come into the receive path by rxpcu. Values
+ * are defined in enum %RX_DESC_RXPCU_FILTER_*.
+ * Note: for ndp frame, if it was expected because the preceding
+ * NDPA was filter_pass, the setting rxpcu_filter_pass will be
+ * used. This setting will also be used for every ndp frame in
+ * case Promiscuous mode is enabled.
+ *
+ * sw_frame_group_id
+ * SW processes frames based on certain classifications. Values
+ * are defined in enum %RX_DESC_SW_FRAME_GRP_ID_*.
+ *
+ * ndp_frame
+ * Indicates that the received frame was an NDP frame.
+ *
+ * phy_err
+ * Indicates that PHY error was received before MAC received data.
+ *
+ * phy_err_during_mpdu_header
+ * PHY error was received before MAC received the complete MPDU
+ * header which was needed for proper decoding.
+ *
+ * protocol_version_err
+ * RXPCU detected a version error in the frame control field.
+ *
+ * ast_based_lookup_valid
+ * AST based lookup for this frame has found a valid result.
+ *
+ * phy_ppdu_id
+ * A ppdu counter value that PHY increments for every PPDU
+ * received. The counter value wraps around.
+ *
+ * ast_index
+ * This field indicates the index of the AST entry corresponding
+ * to this MPDU. It is provided by the GSE module instantiated in
+ * RXPCU. A value of 0xFFFF indicates an invalid AST index.
+ *
+ * sw_peer_id
+ * This field indicates a unique peer identifier. It is set equal
+ * to field 'sw_peer_id' from the AST entry.
+ *
+ * mpdu_frame_control_valid, mpdu_duration_valid, mpdu_qos_control_valid,
+ * mpdu_ht_control_valid, frame_encryption_info_valid
+ *		Indicates that the corresponding fields contain valid entries.
+ *
+ * mac_addr_adx_valid
+ * Corresponding mac_addr_adx_{lo/hi} has valid entries.
+ *
+ * from_ds, to_ds
+ *		Valid only when mpdu_frame_control_valid is set. Indicate
+ *		that the frame was received from the DS or is being sent to
+ *		the DS, respectively.
+ *
+ * encrypted
+ * Protected bit from the frame control.
+ *
+ * mpdu_retry
+ * Retry bit from frame control. Only valid when first_msdu is set.
+ *
+ * mpdu_sequence_number
+ * The sequence number from the 802.11 header.
+ *
+ * epd_en
+ * If set, use EPD instead of LPD.
+ *
+ * all_frames_shall_be_encrypted
+ * If set, all frames (data only?) shall be encrypted. If not,
+ * RX CRYPTO shall set an error flag.
+ *
+ * encrypt_type
+ * Values are defined in enum %HAL_ENCRYPT_TYPE_.
+ *
+ * mesh_sta
+ * Indicates a Mesh (11s) STA.
+ *
+ * bssid_hit
+ * BSSID of the incoming frame matched one of the 8 BSSID
+ * register values.
+ *
+ * bssid_number
+ * This number indicates which one out of the 8 BSSID register
+ * values matched the incoming frame.
+ *
+ * tid
+ * TID field in the QoS control field
+ *
+ * pn
+ * The PN number.
+ *
+ * peer_meta_data
+ * Meta data that SW has programmed in the Peer table entry
+ * of the transmitting STA.
+ *
+ * rx_reo_queue_desc_addr_lo
+ * Address (lower 32 bits) of the REO queue descriptor.
+ *
+ * rx_reo_queue_desc_addr_hi
+ * Address (upper 8 bits) of the REO queue descriptor.
+ *
+ * receive_queue_number
+ * Indicates the MPDU queue ID to which this MPDU link
+ * descriptor belongs.
+ *
+ * pre_delim_err_warning
+ * Indicates that a delimiter FCS error was found in between the
+ * previous MPDU and this MPDU. Note that this is just a warning,
+ * and does not mean that this MPDU is corrupted in any way. If
+ * it is, there will be other errors indicated such as FCS or
+ * decrypt errors.
+ *
+ * first_delim_err
+ * Indicates that the first delimiter had a FCS failure.
+ *
+ * key_id
+ * The key ID octet from the IV.
+ *
+ * new_peer_entry
+ * Set if new RX_PEER_ENTRY TLV follows. If clear, RX_PEER_ENTRY
+ *		doesn't follow, so the RX DECRYPTION module either uses the
+ *		old peer entry or does not decrypt.
+ *
+ * decrypt_needed
+ *		When RXPCU sets 'ast_index_not_found' or 'ast_index_timeout',
+ *		RXPCU will also ensure that this bit is NOT set. CRYPTO for
+ *		that reason only needs to evaluate this bit and none of the
+ *		other ones.
+ *
+ * decap_type
+ * Used by the OLE during decapsulation. Values are defined in
+ * enum %MPDU_START_DECAP_TYPE_*.
+ *
+ * rx_insert_vlan_c_tag_padding
+ * rx_insert_vlan_s_tag_padding
+ * Insert 4 byte of all zeros as VLAN tag or double VLAN tag if
+ * the rx payload does not have VLAN.
+ *
+ * strip_vlan_c_tag_decap
+ * strip_vlan_s_tag_decap
+ * Strip VLAN or double VLAN during decapsulation.
+ *
+ * pre_delim_count
+ * The number of delimiters before this MPDU. Note that this
+ * number is cleared at PPDU start. If this MPDU is the first
+ * received MPDU in the PPDU and this MPDU gets filtered-in,
+ * this field will indicate the number of delimiters located
+ * after the last MPDU in the previous PPDU.
+ *
+ * If this MPDU is located after the first received MPDU in
+ * an PPDU, this field will indicate the number of delimiters
+ * located between the previous MPDU and this MPDU.
+ *
+ * ampdu_flag
+ * Received frame was part of an A-MPDU.
+ *
+ * bar_frame
+ * Received frame is a BAR frame
+ *
+ * mpdu_length
+ * MPDU length before decapsulation.
+ *
+ * first_mpdu..directed
+ * See definition in RX attention descriptor
+ *
+ */
+
+enum rx_msdu_start_pkt_type {
+ RX_MSDU_START_PKT_TYPE_11A,
+ RX_MSDU_START_PKT_TYPE_11B,
+ RX_MSDU_START_PKT_TYPE_11N,
+ RX_MSDU_START_PKT_TYPE_11AC,
+ RX_MSDU_START_PKT_TYPE_11AX,
+};
+
+enum rx_msdu_start_sgi {
+ RX_MSDU_START_SGI_0_8_US,
+ RX_MSDU_START_SGI_0_4_US,
+ RX_MSDU_START_SGI_1_6_US,
+ RX_MSDU_START_SGI_3_2_US,
+};
+
+enum rx_msdu_start_recv_bw {
+ RX_MSDU_START_RECV_BW_20MHZ,
+ RX_MSDU_START_RECV_BW_40MHZ,
+ RX_MSDU_START_RECV_BW_80MHZ,
+ RX_MSDU_START_RECV_BW_160MHZ,
+};
+
+enum rx_msdu_start_reception_type {
+ RX_MSDU_START_RECEPTION_TYPE_SU,
+ RX_MSDU_START_RECEPTION_TYPE_DL_MU_MIMO,
+ RX_MSDU_START_RECEPTION_TYPE_DL_MU_OFDMA,
+ RX_MSDU_START_RECEPTION_TYPE_DL_MU_OFDMA_MIMO,
+ RX_MSDU_START_RECEPTION_TYPE_UL_MU_MIMO,
+ RX_MSDU_START_RECEPTION_TYPE_UL_MU_OFDMA,
+ RX_MSDU_START_RECEPTION_TYPE_UL_MU_OFDMA_MIMO,
+};
+
+#define RX_MSDU_START_INFO1_MSDU_LENGTH GENMASK(13, 0)
+#define RX_MSDU_START_INFO1_RSVD_1A BIT(14)
+#define RX_MSDU_START_INFO1_IPSEC_ESP BIT(15)
+#define RX_MSDU_START_INFO1_L3_OFFSET GENMASK(22, 16)
+#define RX_MSDU_START_INFO1_IPSEC_AH BIT(23)
+#define RX_MSDU_START_INFO1_L4_OFFSET GENMASK(31, 24)
+
+#define RX_MSDU_START_INFO2_MSDU_NUMBER GENMASK(7, 0)
+#define RX_MSDU_START_INFO2_DECAP_TYPE GENMASK(9, 8)
+#define RX_MSDU_START_INFO2_IPV4 BIT(10)
+#define RX_MSDU_START_INFO2_IPV6 BIT(11)
+#define RX_MSDU_START_INFO2_TCP BIT(12)
+#define RX_MSDU_START_INFO2_UDP BIT(13)
+#define RX_MSDU_START_INFO2_IP_FRAG BIT(14)
+#define RX_MSDU_START_INFO2_TCP_ONLY_ACK BIT(15)
+#define RX_MSDU_START_INFO2_DA_IS_BCAST_MCAST BIT(16)
+#define RX_MSDU_START_INFO2_SELECTED_TOEPLITZ_HASH GENMASK(18, 17)
+#define RX_MSDU_START_INFO2_IP_FIXED_HDR_VALID BIT(19)
+#define RX_MSDU_START_INFO2_IP_EXTN_HDR_VALID BIT(20)
+#define RX_MSDU_START_INFO2_IP_TCP_UDP_HDR_VALID BIT(21)
+#define RX_MSDU_START_INFO2_MESH_CTRL_PRESENT BIT(22)
+#define RX_MSDU_START_INFO2_LDPC BIT(23)
+#define RX_MSDU_START_INFO2_IP4_IP6_NXT_HDR GENMASK(31, 24)
+#define RX_MSDU_START_INFO2_DECAP_FORMAT GENMASK(9, 8)
+
+#define RX_MSDU_START_INFO3_USER_RSSI GENMASK(7, 0)
+#define RX_MSDU_START_INFO3_PKT_TYPE GENMASK(11, 8)
+#define RX_MSDU_START_INFO3_STBC BIT(12)
+#define RX_MSDU_START_INFO3_SGI GENMASK(14, 13)
+#define RX_MSDU_START_INFO3_RATE_MCS GENMASK(18, 15)
+#define RX_MSDU_START_INFO3_RECV_BW GENMASK(20, 19)
+#define RX_MSDU_START_INFO3_RECEPTION_TYPE GENMASK(23, 21)
+#define RX_MSDU_START_INFO3_MIMO_SS_BITMAP GENMASK(31, 24)
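+
+/* Illustrative decode (hypothetical variable names, not part of this
+ * header):
+ *
+ *	u32 info3 = __le32_to_cpu(msdu_start->info3);
+ *	u8 rssi = FIELD_GET(RX_MSDU_START_INFO3_USER_RSSI, info3);
+ *	u8 bw = FIELD_GET(RX_MSDU_START_INFO3_RECV_BW, info3);
+ *	u8 nss = hweight8(FIELD_GET(RX_MSDU_START_INFO3_MIMO_SS_BITMAP,
+ *				    info3));
+ */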
+
+struct rx_msdu_start_ipq8074 {
+ __le16 info0;
+ __le16 phy_ppdu_id;
+ __le32 info1;
+ __le32 info2;
+ __le32 toeplitz_hash;
+ __le32 flow_id_toeplitz;
+ __le32 info3;
+ __le32 ppdu_start_timestamp;
+ __le32 phy_meta_data;
+} __packed;
+
+struct rx_msdu_start_qcn9074 {
+ __le16 info0;
+ __le16 phy_ppdu_id;
+ __le32 info1;
+ __le32 info2;
+ __le32 toeplitz_hash;
+ __le32 flow_id_toeplitz;
+ __le32 info3;
+ __le32 ppdu_start_timestamp;
+ __le32 phy_meta_data;
+ __le16 vlan_ctag_c1;
+ __le16 vlan_stag_c1;
+} __packed;
+
+struct rx_msdu_start_wcn6855 {
+ __le16 info0;
+ __le16 phy_ppdu_id;
+ __le32 info1;
+ __le32 info2;
+ __le32 toeplitz_hash;
+ __le32 flow_id_toeplitz;
+ __le32 info3;
+ __le32 ppdu_start_timestamp;
+ __le32 phy_meta_data;
+ __le16 vlan_ctag_ci;
+ __le16 vlan_stag_ci;
+} __packed;
+
+/* rx_msdu_start
+ *
+ * rxpcu_mpdu_filter_in_category
+ * Field indicates what the reason was that this mpdu frame
+ * was allowed to come into the receive path by rxpcu. Values
+ * are defined in enum %RX_DESC_RXPCU_FILTER_*.
+ *
+ * sw_frame_group_id
+ * SW processes frames based on certain classifications. Values
+ * are defined in enum %RX_DESC_SW_FRAME_GRP_ID_*.
+ *
+ * phy_ppdu_id
+ * A ppdu counter value that PHY increments for every PPDU
+ * received. The counter value wraps around.
+ *
+ * msdu_length
+ * MSDU length in bytes after decapsulation.
+ *
+ * ipsec_esp
+ * Set if IPv4/v6 packet is using IPsec ESP.
+ *
+ * l3_offset
+ * Depending upon mode bit, this field either indicates the
+ * L3 offset in bytes from the start of the RX_HEADER or the IP
+ * offset in bytes from the start of the packet after
+ * decapsulation. The latter is only valid if ipv4_proto or
+ * ipv6_proto is set.
+ *
+ * ipsec_ah
+ * Set if IPv4/v6 packet is using IPsec AH
+ *
+ * l4_offset
+ * Depending upon mode bit, this field either indicates the
+ * L4 offset in bytes from the start of RX_HEADER (only valid
+ * if either ipv4_proto or ipv6_proto is set to 1) or indicates
+ * the offset in bytes to the start of TCP or UDP header from
+ * the start of the IP header after decapsulation (Only valid if
+ * tcp_proto or udp_proto is set). The value 0 indicates that
+ * the offset is longer than 127 bytes.
+ *
+ * msdu_number
+ *		Indicates the MSDU number within an MPDU. This value is
+ *		reset to zero at the start of each MPDU. If the number of
+ *		MSDUs exceeds 255 this number will wrap using modulo 256.
+ *
+ * decap_type
+ * Indicates the format after decapsulation. Values are defined in
+ * enum %MPDU_START_DECAP_TYPE_*.
+ *
+ * ipv4_proto
+ * Set if L2 layer indicates IPv4 protocol.
+ *
+ * ipv6_proto
+ * Set if L2 layer indicates IPv6 protocol.
+ *
+ * tcp_proto
+ * Set if the ipv4_proto or ipv6_proto are set and the IP protocol
+ * indicates TCP.
+ *
+ * udp_proto
+ * Set if the ipv4_proto or ipv6_proto are set and the IP protocol
+ * indicates UDP.
+ *
+ * ip_frag
+ *		Indicates that either the IP 'More Fragments' bit is set or
+ *		the IP fragment number is non-zero. If set, this is a
+ *		fragmented IP packet.
+ *
+ * tcp_only_ack
+ * Set if only the TCP Ack bit is set in the TCP flags and if
+ * the TCP payload is 0.
+ *
+ * da_is_bcast_mcast
+ * The destination address is broadcast or multicast.
+ *
+ * toeplitz_hash
+ * Actual chosen Hash.
+ * 0 - Toeplitz hash of 2-tuple (IP source address, IP
+ * destination address)
+ * 1 - Toeplitz hash of 4-tuple (IP source address,
+ * IP destination address, L4 (TCP/UDP) source port,
+ * L4 (TCP/UDP) destination port)
+ * 2 - Toeplitz of flow_id
+ * 3 - Zero is used
+ *
+ * ip_fixed_header_valid
+ * Fixed 20-byte IPv4 header or 40-byte IPv6 header parsed
+ * fully within first 256 bytes of the packet
+ *
+ * ip_extn_header_valid
+ *		IPv4/IPv6 header, including IPv4 options and
+ *		recognizable IPv6 extension headers, parsed fully within the
+ *		first 256 bytes of the packet
+ *
+ * tcp_udp_header_valid
+ * Fixed 20-byte TCP (excluding TCP options) or 8-byte UDP
+ * header parsed fully within first 256 bytes of the packet
+ *
+ * mesh_control_present
+ * When set, this MSDU includes the 'Mesh Control' field
+ *
+ * ldpc
+ *
+ * ip4_protocol_ip6_next_header
+ *		For IPv4, this is the 8 bit protocol field. For IPv6 this
+ * is the 8 bit next_header field.
+ *
+ * toeplitz_hash_2_or_4
+ * Controlled by RxOLE register - If register bit set to 0,
+ * Toeplitz hash is computed over 2-tuple IPv4 or IPv6 src/dest
+ * addresses; otherwise, toeplitz hash is computed over 4-tuple
+ * IPv4 or IPv6 src/dest addresses and src/dest ports.
+ *
+ * flow_id_toeplitz
+ * Toeplitz hash of 5-tuple
+ * {IP source address, IP destination address, IP source port, IP
+ * destination port, L4 protocol} in case of non-IPSec.
+ *
+ * In case of IPSec - Toeplitz hash of 4-tuple
+ * {IP source address, IP destination address, SPI, L4 protocol}
+ *
+ * The relevant Toeplitz key registers are provided in RxOLE's
+ * instance of common parser module. These registers are separate
+ * from the Toeplitz keys used by ASE/FSE modules inside RxOLE.
+ * The actual value will be passed on from common parser module
+ * to RxOLE in one of the WHO_* TLVs.
+ *
+ * user_rssi
+ * RSSI for this user
+ *
+ * pkt_type
+ * Values are defined in enum %RX_MSDU_START_PKT_TYPE_*.
+ *
+ * stbc
+ * When set, use STBC transmission rates.
+ *
+ * sgi
+ * Field only valid when pkt type is HT, VHT or HE. Values are
+ * defined in enum %RX_MSDU_START_SGI_*.
+ *
+ * rate_mcs
+ * MCS Rate used.
+ *
+ * receive_bandwidth
+ * Full receive Bandwidth. Values are defined in enum
+ * %RX_MSDU_START_RECV_*.
+ *
+ * reception_type
+ * Indicates what type of reception this is and defined in enum
+ * %RX_MSDU_START_RECEPTION_TYPE_*.
+ *
+ * mimo_ss_bitmap
+ * Field only valid when
+ * Reception_type is RX_MSDU_START_RECEPTION_TYPE_DL_MU_MIMO or
+ * RX_MSDU_START_RECEPTION_TYPE_DL_MU_OFDMA_MIMO.
+ *
+ * Bitmap, with each bit indicating if the related spatial
+ * stream is used for this STA
+ *
+ * LSB related to SS 0
+ *
+ * 0 - spatial stream not used for this reception
+ * 1 - spatial stream used for this reception
+ *
+ * ppdu_start_timestamp
+ * Timestamp that indicates when the PPDU that contained this MPDU
+ * started on the medium.
+ *
+ * phy_meta_data
+ * SW programmed Meta data provided by the PHY. Can be used for SW
+ * to indicate the channel the device is on.
+ */
+
+#define RX_MSDU_END_INFO0_RXPCU_MPDU_FITLER GENMASK(1, 0)
+#define RX_MSDU_END_INFO0_SW_FRAME_GRP_ID GENMASK(8, 2)
+
+#define RX_MSDU_END_INFO1_KEY_ID GENMASK(7, 0)
+#define RX_MSDU_END_INFO1_CCE_SUPER_RULE GENMASK(13, 8)
+#define RX_MSDU_END_INFO1_CCND_TRUNCATE BIT(14)
+#define RX_MSDU_END_INFO1_CCND_CCE_DIS BIT(15)
+#define RX_MSDU_END_INFO1_EXT_WAPI_PN GENMASK(31, 16)
+
+#define RX_MSDU_END_INFO2_REPORTED_MPDU_LEN GENMASK(13, 0)
+#define RX_MSDU_END_INFO2_FIRST_MSDU BIT(14)
+#define RX_MSDU_END_INFO2_FIRST_MSDU_WCN6855 BIT(28)
+#define RX_MSDU_END_INFO2_LAST_MSDU BIT(15)
+#define RX_MSDU_END_INFO2_LAST_MSDU_WCN6855 BIT(29)
+#define RX_MSDU_END_INFO2_SA_IDX_TIMEOUT BIT(16)
+#define RX_MSDU_END_INFO2_DA_IDX_TIMEOUT BIT(17)
+#define RX_MSDU_END_INFO2_MSDU_LIMIT_ERR BIT(18)
+#define RX_MSDU_END_INFO2_FLOW_IDX_TIMEOUT BIT(19)
+#define RX_MSDU_END_INFO2_FLOW_IDX_INVALID BIT(20)
+#define RX_MSDU_END_INFO2_WIFI_PARSER_ERR BIT(21)
+#define RX_MSDU_END_INFO2_AMSDU_PARSET_ERR BIT(22)
+#define RX_MSDU_END_INFO2_SA_IS_VALID BIT(23)
+#define RX_MSDU_END_INFO2_DA_IS_VALID BIT(24)
+#define RX_MSDU_END_INFO2_DA_IS_MCBC BIT(25)
+#define RX_MSDU_END_INFO2_L3_HDR_PADDING GENMASK(27, 26)
+
+#define RX_MSDU_END_INFO3_TCP_FLAG GENMASK(8, 0)
+#define RX_MSDU_END_INFO3_LRO_ELIGIBLE BIT(9)
+
+#define RX_MSDU_END_INFO4_DA_OFFSET GENMASK(5, 0)
+#define RX_MSDU_END_INFO4_SA_OFFSET GENMASK(11, 6)
+#define RX_MSDU_END_INFO4_DA_OFFSET_VALID BIT(12)
+#define RX_MSDU_END_INFO4_SA_OFFSET_VALID BIT(13)
+#define RX_MSDU_END_INFO4_L3_TYPE GENMASK(31, 16)
+
+#define RX_MSDU_END_INFO5_MSDU_DROP BIT(0)
+#define RX_MSDU_END_INFO5_REO_DEST_IND GENMASK(5, 1)
+#define RX_MSDU_END_INFO5_FLOW_IDX GENMASK(25, 6)
+
+struct rx_msdu_end_ipq8074 {
+ __le16 info0;
+ __le16 phy_ppdu_id;
+ __le16 ip_hdr_cksum;
+ __le16 tcp_udp_cksum;
+ __le32 info1;
+ __le32 ext_wapi_pn[2];
+ __le32 info2;
+ __le32 ipv6_options_crc;
+ __le32 tcp_seq_num;
+ __le32 tcp_ack_num;
+ __le16 info3;
+ __le16 window_size;
+ __le32 info4;
+ __le32 rule_indication[2];
+ __le16 sa_idx;
+ __le16 da_idx;
+ __le32 info5;
+ __le32 fse_metadata;
+ __le16 cce_metadata;
+ __le16 sa_sw_peer_id;
+} __packed;
+
+struct rx_msdu_end_wcn6855 {
+ __le16 info0;
+ __le16 phy_ppdu_id;
+ __le16 ip_hdr_cksum;
+ __le16 reported_mpdu_len;
+ __le32 info1;
+ __le32 ext_wapi_pn[2];
+ __le32 info4;
+ __le32 ipv6_options_crc;
+ __le32 tcp_seq_num;
+ __le32 tcp_ack_num;
+ __le16 info3;
+ __le16 window_size;
+ __le32 info2;
+ __le16 sa_idx;
+ __le16 da_idx;
+ __le32 info5;
+ __le32 fse_metadata;
+ __le16 cce_metadata;
+ __le16 sa_sw_peer_id;
+ __le32 rule_indication[2];
+ __le32 info6;
+ __le32 info7;
+} __packed;
+
+#define RX_MSDU_END_MPDU_LENGTH_INFO GENMASK(13, 0)
+
+#define RX_MSDU_END_INFO2_DA_OFFSET GENMASK(5, 0)
+#define RX_MSDU_END_INFO2_SA_OFFSET GENMASK(11, 6)
+#define RX_MSDU_END_INFO2_DA_OFFSET_VALID BIT(12)
+#define RX_MSDU_END_INFO2_SA_OFFSET_VALID BIT(13)
+#define RX_MSDU_END_INFO2_L3_TYPE GENMASK(31, 16)
+
+#define RX_MSDU_END_INFO4_SA_IDX_TIMEOUT BIT(0)
+#define RX_MSDU_END_INFO4_DA_IDX_TIMEOUT BIT(1)
+#define RX_MSDU_END_INFO4_MSDU_LIMIT_ERR BIT(2)
+#define RX_MSDU_END_INFO4_FLOW_IDX_TIMEOUT BIT(3)
+#define RX_MSDU_END_INFO4_FLOW_IDX_INVALID BIT(4)
+#define RX_MSDU_END_INFO4_WIFI_PARSER_ERR BIT(5)
+#define RX_MSDU_END_INFO4_AMSDU_PARSER_ERR BIT(6)
+#define RX_MSDU_END_INFO4_SA_IS_VALID BIT(7)
+#define RX_MSDU_END_INFO4_DA_IS_VALID BIT(8)
+#define RX_MSDU_END_INFO4_DA_IS_MCBC BIT(9)
+#define RX_MSDU_END_INFO4_L3_HDR_PADDING GENMASK(11, 10)
+#define RX_MSDU_END_INFO4_FIRST_MSDU BIT(12)
+#define RX_MSDU_END_INFO4_LAST_MSDU BIT(13)
+
+#define RX_MSDU_END_INFO6_AGGR_COUNT GENMASK(7, 0)
+#define RX_MSDU_END_INFO6_FLOW_AGGR_CONTN BIT(8)
+#define RX_MSDU_END_INFO6_FISA_TIMEOUT BIT(9)
+
+struct rx_msdu_end_qcn9074 {
+ __le16 info0;
+ __le16 phy_ppdu_id;
+ __le16 ip_hdr_cksum;
+ __le16 mpdu_length_info;
+ __le32 info1;
+ __le32 rule_indication[2];
+ __le32 info2;
+ __le32 ipv6_options_crc;
+ __le32 tcp_seq_num;
+ __le32 tcp_ack_num;
+ __le16 info3;
+ __le16 window_size;
+ __le16 tcp_udp_cksum;
+ __le16 info4;
+ __le16 sa_idx;
+ __le16 da_idx;
+ __le32 info5;
+ __le32 fse_metadata;
+ __le16 cce_metadata;
+ __le16 sa_sw_peer_id;
+ __le32 info6;
+ __le16 cum_l4_cksum;
+ __le16 cum_ip_length;
+} __packed;
+
+/* rx_msdu_end
+ *
+ * rxpcu_mpdu_filter_in_category
+ * Field indicates what the reason was that this mpdu frame
+ * was allowed to come into the receive path by rxpcu. Values
+ * are defined in enum %RX_DESC_RXPCU_FILTER_*.
+ *
+ * sw_frame_group_id
+ * SW processes frames based on certain classifications. Values
+ * are defined in enum %RX_DESC_SW_FRAME_GRP_ID_*.
+ *
+ * phy_ppdu_id
+ * A ppdu counter value that PHY increments for every PPDU
+ * received. The counter value wraps around.
+ *
+ * ip_hdr_cksum
+ * This can include the IP header checksum or the pseudo
+ * header checksum used by TCP/UDP checksum.
+ *
+ * tcp_udp_cksum
+ * The value of the computed TCP/UDP checksum. A mode bit
+ * selects whether this checksum is the full checksum or the
+ * partial checksum which does not include the pseudo header.
+ *
+ * key_id
+ * The key ID octet from the IV. Only valid when first_msdu is set.
+ *
+ * cce_super_rule
+ * Indicates the super filter rule.
+ *
+ * cce_classify_not_done_truncate
+ * Classification failed due to truncated frame.
+ *
+ * cce_classify_not_done_cce_dis
+ * Classification failed due to CCE being globally disabled.
+ *
+ * ext_wapi_pn*
+ * Extension PN (packet number) which is only used by WAPI.
+ *
+ * reported_mpdu_length
+ * MPDU length before decapsulation. Only valid when first_msdu is
+ * set. This field is taken directly from the length field of the
+ * A-MPDU delimiter or the preamble length field for non-A-MPDU
+ * frames.
+ *
+ * first_msdu
+ * Indicates the first MSDU of A-MSDU. If both first_msdu and
+ * last_msdu are set in the MSDU then this is a non-aggregated MSDU
+ * frame: normal MPDU. Interior MSDUs in an A-MSDU shall have both
+ * first_msdu and last_msdu bits set to 0.
+ *
+ * last_msdu
+ * Indicates the last MSDU of the A-MSDU. MPDU end status is only
+ * valid when last_msdu is set.
+ *
+ * sa_idx_timeout
+ * Indicates an unsuccessful MAC source address search due to the
+ * search timer expiring.
+ *
+ * da_idx_timeout
+ * Indicates an unsuccessful MAC destination address search due to
+ * the search timer expiring.
+ *
+ * msdu_limit_error
+ * Indicates that the MSDU threshold was exceeded and thus all the
+ * rest of the MSDUs will not be scattered and will not be
+ * decapsulated but will be DMA'ed in RAW format as a single MSDU.
+ *
+ * flow_idx_timeout
+ * Indicates an unsuccessful flow search due to the search timer
+ * expiring.
+ *
+ * flow_idx_invalid
+ * flow id is not valid.
+ *
+ * amsdu_parser_error
+ * A-MSDU could not be properly de-aggregated.
+ *
+ * sa_is_valid
+ * Indicates that OLE found a valid SA entry.
+ *
+ * da_is_valid
+ * Indicates that OLE found a valid DA entry.
+ *
+ * da_is_mcbc
+ * Field only valid if da_is_valid is set. Indicates that the DA
+ * address was a multicast or broadcast address.
+ *
+ * l3_header_padding
+ * Number of bytes padded to make sure that the L3 header will
+ * always start at a Dword boundary.
+ *
+ * ipv6_options_crc
+ * 32 bit CRC computed out of IP v6 extension headers.
+ *
+ * tcp_seq_number
+ * TCP sequence number.
+ *
+ * tcp_ack_number
+ * TCP acknowledge number.
+ *
+ * tcp_flag
+ * TCP flags {NS, CWR, ECE, URG, ACK, PSH, RST, SYN, FIN}.
+ *
+ * lro_eligible
+ * Computed out of TCP and IP fields to indicate that this
+ * MSDU is eligible for LRO.
+ *
+ * window_size
+ * TCP receive window size.
+ *
+ * da_offset
+ * Offset into MSDU buffer for DA.
+ *
+ * sa_offset
+ * Offset into MSDU buffer for SA.
+ *
+ * da_offset_valid
+ * da_offset field is valid. This will be set to 0 in case
+ * of a dynamic A-MSDU when DA is compressed.
+ *
+ * sa_offset_valid
+ * sa_offset field is valid. This will be set to 0 in case
+ * of a dynamic A-MSDU when SA is compressed.
+ *
+ * l3_type
+ * The 16-bit type value indicating the type of the L3 layer
+ * extracted from LLC/SNAP, set to zero if SNAP is not
+ * available.
+ *
+ * rule_indication
+ * Bitmap indicating which of the rules have matched.
+ *
+ * sa_idx
+ * The offset in the address table which matches the MAC source
+ * address.
+ *
+ * da_idx
+ * The offset in the address table which matches MAC destination
+ * address.
+ *
+ * msdu_drop
+ * REO shall drop this MSDU and not forward it to any other ring.
+ *
+ * reo_destination_indication
+ * The id of the reo exit ring to which the msdu frame shall be
+ * pushed after (MPDU level) reordering has finished. Values are
+ * defined in enum %HAL_RX_MSDU_DESC_REO_DEST_IND_.
+ *
+ * flow_idx
+ * Flow table index.
+ *
+ * fse_metadata
+ * FSE related meta data.
+ *
+ * cce_metadata
+ * CCE related meta data.
+ *
+ * sa_sw_peer_id
+ * sw_peer_id from the address search entry corresponding to the
+ * source address of the MSDU.
+ */
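+
+/* Illustrative sketch, not part of the upstream driver: reading the
+ * first_msdu flag from a received descriptor with the masks above.
+ * Note that WCN6855 keeps the flag at a different bit position
+ * (bit 28 of info2), which is why the driver abstracts such accesses
+ * per chip. FIELD_GET() comes from <linux/bitfield.h>; the example_
+ * helpers are hypothetical and only show how the masks are applied.
+ */
+static inline bool example_msdu_end_first_msdu(const struct rx_msdu_end_ipq8074 *msdu)
+{
+	return !!FIELD_GET(RX_MSDU_END_INFO2_FIRST_MSDU,
+			   __le32_to_cpu(msdu->info2));
+}
+
+static inline bool example_msdu_end_first_msdu_wcn6855(const struct rx_msdu_end_wcn6855 *msdu)
+{
+	return !!FIELD_GET(RX_MSDU_END_INFO2_FIRST_MSDU_WCN6855,
+			   __le32_to_cpu(msdu->info2));
+}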
+
+enum rx_mpdu_end_rxdma_dest_ring {
+ RX_MPDU_END_RXDMA_DEST_RING_RELEASE,
+ RX_MPDU_END_RXDMA_DEST_RING_FW,
+ RX_MPDU_END_RXDMA_DEST_RING_SW,
+ RX_MPDU_END_RXDMA_DEST_RING_REO,
+};
+
+#define RX_MPDU_END_INFO1_UNSUP_KTYPE_SHORT_FRAME BIT(11)
+#define RX_MPDU_END_INFO1_RX_IN_TX_DECRYPT_BYT BIT(12)
+#define RX_MPDU_END_INFO1_OVERFLOW_ERR BIT(13)
+#define RX_MPDU_END_INFO1_MPDU_LEN_ERR BIT(14)
+#define RX_MPDU_END_INFO1_TKIP_MIC_ERR BIT(15)
+#define RX_MPDU_END_INFO1_DECRYPT_ERR BIT(16)
+#define RX_MPDU_END_INFO1_UNENCRYPTED_FRAME_ERR BIT(17)
+#define RX_MPDU_END_INFO1_PN_FIELDS_VALID BIT(18)
+#define RX_MPDU_END_INFO1_FCS_ERR BIT(19)
+#define RX_MPDU_END_INFO1_MSDU_LEN_ERR BIT(20)
+#define RX_MPDU_END_INFO1_RXDMA0_DEST_RING GENMASK(22, 21)
+#define RX_MPDU_END_INFO1_RXDMA1_DEST_RING GENMASK(24, 23)
+#define RX_MPDU_END_INFO1_DECRYPT_STATUS_CODE GENMASK(27, 25)
+#define RX_MPDU_END_INFO1_RX_BITMAP_NOT_UPD BIT(28)
+
+struct rx_mpdu_end {
+ __le16 info0;
+ __le16 phy_ppdu_id;
+ __le32 info1;
+} __packed;
+
+/* rx_mpdu_end
+ *
+ * rxpcu_mpdu_filter_in_category
+ * Field indicates what the reason was that this mpdu frame
+ * was allowed to come into the receive path by rxpcu. Values
+ * are defined in enum %RX_DESC_RXPCU_FILTER_*.
+ *
+ * sw_frame_group_id
+ * SW processes frames based on certain classifications. Values
+ * are defined in enum %RX_DESC_SW_FRAME_GRP_ID_*.
+ *
+ * phy_ppdu_id
+ * A ppdu counter value that PHY increments for every PPDU
+ * received. The counter value wraps around.
+ *
+ * unsup_ktype_short_frame
+ * This bit will be '1' when a WEP, TKIP or WAPI key type is
+ * received for an 11ah short frame. Crypto will bypass the received
+ * packet without decryption to RxOLE after setting this bit.
+ *
+ * rx_in_tx_decrypt_byp
+ * Indicates that RX packet is not decrypted as Crypto is
+ * busy with TX packet processing.
+ *
+ * overflow_err
+ * RXPCU Receive FIFO ran out of space to receive the full MPDU.
+ * Therefore this MPDU is terminated early and is thus corrupted.
+ *
+ * This MPDU will not be ACKed.
+ *
+ * RXPCU might still be able to correctly receive the following
+ * MPDUs in the PPDU if enough fifo space became available in time.
+ *
+ * mpdu_length_err
+ * Set by RXPCU if the expected MPDU length does not correspond
+ * with the actually received number of bytes in the MPDU.
+ *
+ * tkip_mic_err
+ * Set by Rx crypto when crypto detected a TKIP MIC error for
+ * this MPDU.
+ *
+ * decrypt_err
+ * Set by RX CRYPTO when CRYPTO detected a decrypt error for this
+ * MPDU or CRYPTO received an encrypted frame, but did not get a
+ * valid corresponding key id in the peer entry.
+ *
+ * unencrypted_frame_err
+ * Set by RX CRYPTO when CRYPTO detected an unencrypted frame while
+ * the peer entry field 'All_frames_shall_be_encrypted' is set.
+ *
+ * pn_fields_contain_valid_info
+ * Set by RX CRYPTO to indicate that there is a valid PN field
+ * present in this MPDU.
+ *
+ * fcs_err
+ * Set by RXPCU when there is an FCS error detected for this MPDU.
+ *
+ * msdu_length_err
+ * Set by RXOLE when there is an msdu length error detected
+ * in at least 1 of the MSDUs embedded within the MPDU.
+ *
+ * rxdma0_destination_ring
+ * rxdma1_destination_ring
+ * The ring to which RXDMA0/1 shall push the frame, assuming
+ * no MPDU level errors are detected. In case of MPDU level
+ * errors, RXDMA0/1 might change the RXDMA0/1 destination. Values
+ * are defined in %enum RX_MPDU_END_RXDMA_DEST_RING_*.
+ *
+ * decrypt_status_code
+ * Field provides insight into the decryption performed. Values
+ * are defined in enum %RX_DESC_DECRYPT_STATUS_CODE_*.
+ *
+ * rx_bitmap_not_updated
+ * Frame is received, but RXPCU could not update the receive bitmap
+ * due to (temporary) fifo constraints.
+ */
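+
+/* Illustrative sketch, not part of the upstream driver: a helper that
+ * tests the common MPDU-level error bits documented above. The
+ * example_ helper is hypothetical; it assumes a fully received,
+ * little-endian descriptor.
+ */
+static inline bool example_mpdu_end_has_error(const struct rx_mpdu_end *mpdu_end)
+{
+	u32 info1 = __le32_to_cpu(mpdu_end->info1);
+
+	return !!(info1 & (RX_MPDU_END_INFO1_FCS_ERR |
+			   RX_MPDU_END_INFO1_DECRYPT_ERR |
+			   RX_MPDU_END_INFO1_TKIP_MIC_ERR |
+			   RX_MPDU_END_INFO1_MPDU_LEN_ERR |
+			   RX_MPDU_END_INFO1_MSDU_LEN_ERR |
+			   RX_MPDU_END_INFO1_OVERFLOW_ERR));
+}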
+
+/* Padding bytes to avoid TLVs spanning across a 128 byte boundary */
+#define HAL_RX_DESC_PADDING0_BYTES 4
+#define HAL_RX_DESC_PADDING1_BYTES 16
+
+#define HAL_RX_DESC_HDR_STATUS_LEN 120
+
+struct hal_rx_desc_ipq8074 {
+ __le32 msdu_end_tag;
+ struct rx_msdu_end_ipq8074 msdu_end;
+ __le32 rx_attn_tag;
+ struct rx_attention attention;
+ __le32 msdu_start_tag;
+ struct rx_msdu_start_ipq8074 msdu_start;
+ u8 rx_padding0[HAL_RX_DESC_PADDING0_BYTES];
+ __le32 mpdu_start_tag;
+ struct rx_mpdu_start_ipq8074 mpdu_start;
+ __le32 mpdu_end_tag;
+ struct rx_mpdu_end mpdu_end;
+ u8 rx_padding1[HAL_RX_DESC_PADDING1_BYTES];
+ __le32 hdr_status_tag;
+ __le32 phy_ppdu_id;
+ u8 hdr_status[HAL_RX_DESC_HDR_STATUS_LEN];
+ u8 msdu_payload[];
+} __packed;
+
+struct hal_rx_desc_qcn9074 {
+ __le32 msdu_end_tag;
+ struct rx_msdu_end_qcn9074 msdu_end;
+ __le32 rx_attn_tag;
+ struct rx_attention attention;
+ __le32 msdu_start_tag;
+ struct rx_msdu_start_qcn9074 msdu_start;
+ u8 rx_padding0[HAL_RX_DESC_PADDING0_BYTES];
+ __le32 mpdu_start_tag;
+ struct rx_mpdu_start_qcn9074 mpdu_start;
+ __le32 mpdu_end_tag;
+ struct rx_mpdu_end mpdu_end;
+ u8 rx_padding1[HAL_RX_DESC_PADDING1_BYTES];
+ __le32 hdr_status_tag;
+ __le32 phy_ppdu_id;
+ u8 hdr_status[HAL_RX_DESC_HDR_STATUS_LEN];
+ u8 msdu_payload[];
+} __packed;
+
+struct hal_rx_desc_wcn6855 {
+ __le32 msdu_end_tag;
+ struct rx_msdu_end_wcn6855 msdu_end;
+ __le32 rx_attn_tag;
+ struct rx_attention attention;
+ __le32 msdu_start_tag;
+ struct rx_msdu_start_wcn6855 msdu_start;
+ u8 rx_padding0[HAL_RX_DESC_PADDING0_BYTES];
+ __le32 mpdu_start_tag;
+ struct rx_mpdu_start_wcn6855 mpdu_start;
+ __le32 mpdu_end_tag;
+ struct rx_mpdu_end mpdu_end;
+ u8 rx_padding1[HAL_RX_DESC_PADDING1_BYTES];
+ __le32 hdr_status_tag;
+ __le32 phy_ppdu_id;
+ u8 hdr_status[HAL_RX_DESC_HDR_STATUS_LEN];
+ u8 msdu_payload[];
+} __packed;
+
+struct hal_rx_desc {
+ union {
+ struct hal_rx_desc_ipq8074 ipq8074;
+ struct hal_rx_desc_qcn9074 qcn9074;
+ struct hal_rx_desc_wcn6855 wcn6855;
+ } u;
+} __packed;
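+
+/* Illustrative sketch, not part of the upstream driver: the union above
+ * lets one hal_rx_desc pointer cover every supported chip, with the
+ * matching view selected at runtime. The example_ accessor below is
+ * hypothetical; the real driver routes such accesses through per-chip
+ * hw_params ops rather than a flag.
+ */
+static inline __le16 example_rx_desc_phy_ppdu_id(const struct hal_rx_desc *desc,
+						 bool is_wcn6855)
+{
+	if (is_wcn6855)
+		return desc->u.wcn6855.msdu_end.phy_ppdu_id;
+
+	return desc->u.ipq8074.msdu_end.phy_ppdu_id;
+}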
+
+#define HAL_RX_RU_ALLOC_TYPE_MAX 6
+#define RU_26 1
+#define RU_52 2
+#define RU_106 4
+#define RU_242 9
+#define RU_484 18
+#define RU_996 37
+
+#endif /* ATH11K_RX_DESC_H */
diff --git a/drivers/net/wireless/ath/ath11k/spectral.c b/drivers/net/wireless/ath/ath11k/spectral.c
new file mode 100644
index 0000000000..705868198d
--- /dev/null
+++ b/drivers/net/wireless/ath/ath11k/spectral.c
@@ -0,0 +1,1064 @@
+// SPDX-License-Identifier: BSD-3-Clause-Clear
+/*
+ * Copyright (c) 2019-2020 The Linux Foundation. All rights reserved.
+ */
+
+#include <linux/relay.h>
+#include "core.h"
+#include "debug.h"
+
+#define ATH11K_SPECTRAL_NUM_RESP_PER_EVENT 2
+#define ATH11K_SPECTRAL_EVENT_TIMEOUT_MS 1
+
+#define ATH11K_SPECTRAL_DWORD_SIZE 4
+#define ATH11K_SPECTRAL_MIN_BINS 32
+#define ATH11K_SPECTRAL_MIN_IB_BINS (ATH11K_SPECTRAL_MIN_BINS >> 1)
+#define ATH11K_SPECTRAL_MAX_IB_BINS(x) ((x)->hw_params.spectral.max_fft_bins >> 1)
+
+#define ATH11K_SPECTRAL_SCAN_COUNT_MAX 4095
+
+/* Max channel count, computed as the sum of the 2 GHz and 5 GHz band channels */
+#define ATH11K_SPECTRAL_TOTAL_CHANNEL 41
+#define ATH11K_SPECTRAL_SAMPLES_PER_CHANNEL 70
+#define ATH11K_SPECTRAL_PER_SAMPLE_SIZE(x) (sizeof(struct fft_sample_ath11k) + \
+ ATH11K_SPECTRAL_MAX_IB_BINS(x))
+#define ATH11K_SPECTRAL_TOTAL_SAMPLE (ATH11K_SPECTRAL_TOTAL_CHANNEL * \
+ ATH11K_SPECTRAL_SAMPLES_PER_CHANNEL)
+#define ATH11K_SPECTRAL_SUB_BUFF_SIZE(x) ATH11K_SPECTRAL_PER_SAMPLE_SIZE(x)
+#define ATH11K_SPECTRAL_NUM_SUB_BUF ATH11K_SPECTRAL_TOTAL_SAMPLE
+
+#define ATH11K_SPECTRAL_20MHZ 20
+#define ATH11K_SPECTRAL_40MHZ 40
+#define ATH11K_SPECTRAL_80MHZ 80
+#define ATH11K_SPECTRAL_160MHZ 160
+
+#define ATH11K_SPECTRAL_SIGNATURE 0xFA
+
+#define ATH11K_SPECTRAL_TAG_RADAR_SUMMARY 0x0
+#define ATH11K_SPECTRAL_TAG_RADAR_FFT 0x1
+#define ATH11K_SPECTRAL_TAG_SCAN_SUMMARY 0x2
+#define ATH11K_SPECTRAL_TAG_SCAN_SEARCH 0x3
+
+#define SPECTRAL_TLV_HDR_LEN GENMASK(15, 0)
+#define SPECTRAL_TLV_HDR_TAG GENMASK(23, 16)
+#define SPECTRAL_TLV_HDR_SIGN GENMASK(31, 24)
+
+#define SPECTRAL_SUMMARY_INFO0_AGC_TOTAL_GAIN GENMASK(7, 0)
+#define SPECTRAL_SUMMARY_INFO0_OB_FLAG BIT(8)
+#define SPECTRAL_SUMMARY_INFO0_GRP_IDX GENMASK(16, 9)
+#define SPECTRAL_SUMMARY_INFO0_RECENT_RFSAT BIT(17)
+#define SPECTRAL_SUMMARY_INFO0_INBAND_PWR_DB GENMASK(27, 18)
+#define SPECTRAL_SUMMARY_INFO0_FALSE_SCAN BIT(28)
+#define SPECTRAL_SUMMARY_INFO0_DETECTOR_ID GENMASK(30, 29)
+#define SPECTRAL_SUMMARY_INFO0_PRI80 BIT(31)
+
+#define SPECTRAL_SUMMARY_INFO2_PEAK_SIGNED_IDX GENMASK(11, 0)
+#define SPECTRAL_SUMMARY_INFO2_PEAK_MAGNITUDE GENMASK(21, 12)
+#define SPECTRAL_SUMMARY_INFO2_NARROWBAND_MASK GENMASK(29, 22)
+#define SPECTRAL_SUMMARY_INFO2_GAIN_CHANGE BIT(30)
+
+struct spectral_tlv {
+ __le32 timestamp;
+ __le32 header;
+} __packed;
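+
+/* Illustrative sketch, not part of the upstream driver: decoding one
+ * spectral TLV header with the masks above. The length field counts
+ * Dwords, so it is scaled by ATH11K_SPECTRAL_DWORD_SIZE, and the
+ * signature byte must match ATH11K_SPECTRAL_SIGNATURE. The example_
+ * helper is hypothetical.
+ */
+static inline int example_spectral_tlv_len(const struct spectral_tlv *tlv)
+{
+	u32 header = __le32_to_cpu(tlv->header);
+
+	if (FIELD_GET(SPECTRAL_TLV_HDR_SIGN, header) !=
+	    ATH11K_SPECTRAL_SIGNATURE)
+		return -EINVAL;
+
+	return FIELD_GET(SPECTRAL_TLV_HDR_LEN, header) *
+	       ATH11K_SPECTRAL_DWORD_SIZE;
+}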
+
+struct spectral_summary_fft_report {
+ __le32 timestamp;
+ __le32 tlv_header;
+ __le32 info0;
+ __le32 reserve0;
+ __le32 info2;
+ __le32 reserve1;
+} __packed;
+
+struct ath11k_spectral_summary_report {
+ struct wmi_dma_buf_release_meta_data meta;
+ u32 timestamp;
+ u8 agc_total_gain;
+ u8 grp_idx;
+ u16 inb_pwr_db;
+ s16 peak_idx;
+ u16 peak_mag;
+ u8 detector_id;
+ bool out_of_band_flag;
+ bool rf_saturation;
+ bool primary80;
+ bool gain_change;
+ bool false_scan;
+};
+
+#define SPECTRAL_FFT_REPORT_INFO0_DETECTOR_ID GENMASK(1, 0)
+#define SPECTRAL_FFT_REPORT_INFO0_FFT_NUM GENMASK(4, 2)
+#define SPECTRAL_FFT_REPORT_INFO0_RADAR_CHECK GENMASK(16, 5)
+#define SPECTRAL_FFT_REPORT_INFO0_PEAK_SIGNED_IDX GENMASK(27, 17)
+#define SPECTRAL_FFT_REPORT_INFO0_CHAIN_IDX GENMASK(30, 28)
+
+#define SPECTRAL_FFT_REPORT_INFO1_BASE_PWR_DB GENMASK(8, 0)
+#define SPECTRAL_FFT_REPORT_INFO1_TOTAL_GAIN_DB GENMASK(16, 9)
+
+#define SPECTRAL_FFT_REPORT_INFO2_NUM_STRONG_BINS GENMASK(7, 0)
+#define SPECTRAL_FFT_REPORT_INFO2_PEAK_MAGNITUDE GENMASK(17, 8)
+#define SPECTRAL_FFT_REPORT_INFO2_AVG_PWR_DB GENMASK(24, 18)
+#define SPECTRAL_FFT_REPORT_INFO2_REL_PWR_DB GENMASK(31, 25)
+
+struct spectral_search_fft_report {
+ __le32 timestamp;
+ __le32 tlv_header;
+ __le32 info0;
+ __le32 info1;
+ __le32 info2;
+ __le32 reserve0;
+ u8 bins[];
+} __packed;
+
+struct ath11k_spectral_search_report {
+ u32 timestamp;
+ u8 detector_id;
+ u8 fft_count;
+ u16 radar_check;
+ s16 peak_idx;
+ u8 chain_idx;
+ u16 base_pwr_db;
+ u8 total_gain_db;
+ u8 strong_bin_count;
+ u16 peak_mag;
+ u8 avg_pwr_db;
+ u8 rel_pwr_db;
+};
+
+static struct dentry *create_buf_file_handler(const char *filename,
+ struct dentry *parent,
+ umode_t mode,
+ struct rchan_buf *buf,
+ int *is_global)
+{
+ struct dentry *buf_file;
+
+ buf_file = debugfs_create_file(filename, mode, parent, buf,
+ &relay_file_operations);
+ *is_global = 1;
+ return buf_file;
+}
+
+static int remove_buf_file_handler(struct dentry *dentry)
+{
+ debugfs_remove(dentry);
+
+ return 0;
+}
+
+static const struct rchan_callbacks rfs_scan_cb = {
+ .create_buf_file = create_buf_file_handler,
+ .remove_buf_file = remove_buf_file_handler,
+};
+
+static struct ath11k_vif *ath11k_spectral_get_vdev(struct ath11k *ar)
+{
+ struct ath11k_vif *arvif;
+
+ lockdep_assert_held(&ar->conf_mutex);
+
+ if (list_empty(&ar->arvifs))
+ return NULL;
+
+ /* if there already is a vif doing spectral, return that. */
+ list_for_each_entry(arvif, &ar->arvifs, list)
+ if (arvif->spectral_enabled)
+ return arvif;
+
+ /* otherwise, return the first vif. */
+ return list_first_entry(&ar->arvifs, typeof(*arvif), list);
+}
+
+static int ath11k_spectral_scan_trigger(struct ath11k *ar)
+{
+ struct ath11k_vif *arvif;
+ int ret;
+
+ lockdep_assert_held(&ar->conf_mutex);
+
+ arvif = ath11k_spectral_get_vdev(ar);
+ if (!arvif)
+ return -ENODEV;
+
+ if (ar->spectral.mode == ATH11K_SPECTRAL_DISABLED)
+ return 0;
+
+ ar->spectral.is_primary = true;
+
+ ret = ath11k_wmi_vdev_spectral_enable(ar, arvif->vdev_id,
+ ATH11K_WMI_SPECTRAL_TRIGGER_CMD_CLEAR,
+ ATH11K_WMI_SPECTRAL_ENABLE_CMD_ENABLE);
+ if (ret)
+ return ret;
+
+ ret = ath11k_wmi_vdev_spectral_enable(ar, arvif->vdev_id,
+ ATH11K_WMI_SPECTRAL_TRIGGER_CMD_TRIGGER,
+ ATH11K_WMI_SPECTRAL_ENABLE_CMD_ENABLE);
+ if (ret)
+ return ret;
+
+ return 0;
+}
+
+static int ath11k_spectral_scan_config(struct ath11k *ar,
+ enum ath11k_spectral_mode mode)
+{
+ struct ath11k_wmi_vdev_spectral_conf_param param = { 0 };
+ struct ath11k_vif *arvif;
+ int ret, count;
+
+ lockdep_assert_held(&ar->conf_mutex);
+
+ arvif = ath11k_spectral_get_vdev(ar);
+ if (!arvif)
+ return -ENODEV;
+
+ arvif->spectral_enabled = (mode != ATH11K_SPECTRAL_DISABLED);
+
+ spin_lock_bh(&ar->spectral.lock);
+ ar->spectral.mode = mode;
+ spin_unlock_bh(&ar->spectral.lock);
+
+ ret = ath11k_wmi_vdev_spectral_enable(ar, arvif->vdev_id,
+ ATH11K_WMI_SPECTRAL_TRIGGER_CMD_CLEAR,
+ ATH11K_WMI_SPECTRAL_ENABLE_CMD_DISABLE);
+ if (ret) {
+ ath11k_warn(ar->ab, "failed to enable spectral scan: %d\n", ret);
+ return ret;
+ }
+
+ if (mode == ATH11K_SPECTRAL_DISABLED)
+ return 0;
+
+ if (mode == ATH11K_SPECTRAL_BACKGROUND)
+ count = ATH11K_WMI_SPECTRAL_COUNT_DEFAULT;
+ else
+ count = max_t(u16, 1, ar->spectral.count);
+
+ param.vdev_id = arvif->vdev_id;
+ param.scan_count = count;
+ param.scan_fft_size = ar->spectral.fft_size;
+ param.scan_period = ATH11K_WMI_SPECTRAL_PERIOD_DEFAULT;
+ param.scan_priority = ATH11K_WMI_SPECTRAL_PRIORITY_DEFAULT;
+ param.scan_gc_ena = ATH11K_WMI_SPECTRAL_GC_ENA_DEFAULT;
+ param.scan_restart_ena = ATH11K_WMI_SPECTRAL_RESTART_ENA_DEFAULT;
+ param.scan_noise_floor_ref = ATH11K_WMI_SPECTRAL_NOISE_FLOOR_REF_DEFAULT;
+ param.scan_init_delay = ATH11K_WMI_SPECTRAL_INIT_DELAY_DEFAULT;
+ param.scan_nb_tone_thr = ATH11K_WMI_SPECTRAL_NB_TONE_THR_DEFAULT;
+ param.scan_str_bin_thr = ATH11K_WMI_SPECTRAL_STR_BIN_THR_DEFAULT;
+ param.scan_wb_rpt_mode = ATH11K_WMI_SPECTRAL_WB_RPT_MODE_DEFAULT;
+ param.scan_rssi_rpt_mode = ATH11K_WMI_SPECTRAL_RSSI_RPT_MODE_DEFAULT;
+ param.scan_rssi_thr = ATH11K_WMI_SPECTRAL_RSSI_THR_DEFAULT;
+ param.scan_pwr_format = ATH11K_WMI_SPECTRAL_PWR_FORMAT_DEFAULT;
+ param.scan_rpt_mode = ATH11K_WMI_SPECTRAL_RPT_MODE_DEFAULT;
+ param.scan_bin_scale = ATH11K_WMI_SPECTRAL_BIN_SCALE_DEFAULT;
+ param.scan_dbm_adj = ATH11K_WMI_SPECTRAL_DBM_ADJ_DEFAULT;
+ param.scan_chn_mask = ATH11K_WMI_SPECTRAL_CHN_MASK_DEFAULT;
+
+ ret = ath11k_wmi_vdev_spectral_conf(ar, &param);
+ if (ret) {
+ ath11k_warn(ar->ab, "failed to configure spectral scan: %d\n", ret);
+ return ret;
+ }
+
+ return 0;
+}
+
+static ssize_t ath11k_read_file_spec_scan_ctl(struct file *file,
+ char __user *user_buf,
+ size_t count, loff_t *ppos)
+{
+ struct ath11k *ar = file->private_data;
+ char *mode = "";
+ size_t len;
+ enum ath11k_spectral_mode spectral_mode;
+
+ mutex_lock(&ar->conf_mutex);
+ spectral_mode = ar->spectral.mode;
+ mutex_unlock(&ar->conf_mutex);
+
+ switch (spectral_mode) {
+ case ATH11K_SPECTRAL_DISABLED:
+ mode = "disable";
+ break;
+ case ATH11K_SPECTRAL_BACKGROUND:
+ mode = "background";
+ break;
+ case ATH11K_SPECTRAL_MANUAL:
+ mode = "manual";
+ break;
+ }
+
+ len = strlen(mode);
+ return simple_read_from_buffer(user_buf, count, ppos, mode, len);
+}
+
+static ssize_t ath11k_write_file_spec_scan_ctl(struct file *file,
+ const char __user *user_buf,
+ size_t count, loff_t *ppos)
+{
+ struct ath11k *ar = file->private_data;
+ char buf[32];
+ ssize_t len;
+ int ret;
+
+ len = min(count, sizeof(buf) - 1);
+ if (copy_from_user(buf, user_buf, len))
+ return -EFAULT;
+
+ buf[len] = '\0';
+
+ mutex_lock(&ar->conf_mutex);
+
+ if (strncmp("trigger", buf, 7) == 0) {
+ if (ar->spectral.mode == ATH11K_SPECTRAL_MANUAL ||
+ ar->spectral.mode == ATH11K_SPECTRAL_BACKGROUND) {
+ /* reset the configuration to adopt possibly changed
+ * debugfs parameters
+ */
+ ret = ath11k_spectral_scan_config(ar, ar->spectral.mode);
+ if (ret) {
+ ath11k_warn(ar->ab, "failed to reconfigure spectral scan: %d\n",
+ ret);
+ goto unlock;
+ }
+
+ ret = ath11k_spectral_scan_trigger(ar);
+ if (ret) {
+ ath11k_warn(ar->ab, "failed to trigger spectral scan: %d\n",
+ ret);
+ }
+ } else {
+ ret = -EINVAL;
+ }
+ } else if (strncmp("background", buf, 10) == 0) {
+ ret = ath11k_spectral_scan_config(ar, ATH11K_SPECTRAL_BACKGROUND);
+ } else if (strncmp("manual", buf, 6) == 0) {
+ ret = ath11k_spectral_scan_config(ar, ATH11K_SPECTRAL_MANUAL);
+ } else if (strncmp("disable", buf, 7) == 0) {
+ ret = ath11k_spectral_scan_config(ar, ATH11K_SPECTRAL_DISABLED);
+ } else {
+ ret = -EINVAL;
+ }
+
+unlock:
+ mutex_unlock(&ar->conf_mutex);
+
+ if (ret)
+ return ret;
+
+ return count;
+}
+
+static const struct file_operations fops_scan_ctl = {
+ .read = ath11k_read_file_spec_scan_ctl,
+ .write = ath11k_write_file_spec_scan_ctl,
+ .open = simple_open,
+ .owner = THIS_MODULE,
+ .llseek = default_llseek,
+};
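+
+/* Usage sketch from user space (illustrative; the exact debugfs path
+ * depends on the debugfs mount point and the device/pdev naming):
+ *
+ *   echo background > /sys/kernel/debug/ath11k/<dev>/<pdev>/spectral_scan_ctl
+ *   echo trigger > /sys/kernel/debug/ath11k/<dev>/<pdev>/spectral_scan_ctl
+ *   cat /sys/kernel/debug/ath11k/<dev>/<pdev>/spectral_scan0 > samples
+ *   echo disable > /sys/kernel/debug/ath11k/<dev>/<pdev>/spectral_scan_ctl
+ */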
+
+static ssize_t ath11k_read_file_spectral_count(struct file *file,
+ char __user *user_buf,
+ size_t count, loff_t *ppos)
+{
+ struct ath11k *ar = file->private_data;
+ char buf[32];
+ size_t len;
+ u16 spectral_count;
+
+ mutex_lock(&ar->conf_mutex);
+ spectral_count = ar->spectral.count;
+ mutex_unlock(&ar->conf_mutex);
+
+ len = sprintf(buf, "%d\n", spectral_count);
+ return simple_read_from_buffer(user_buf, count, ppos, buf, len);
+}
+
+static ssize_t ath11k_write_file_spectral_count(struct file *file,
+ const char __user *user_buf,
+ size_t count, loff_t *ppos)
+{
+ struct ath11k *ar = file->private_data;
+ unsigned long val;
+ char buf[32];
+ ssize_t len;
+
+ len = min(count, sizeof(buf) - 1);
+ if (copy_from_user(buf, user_buf, len))
+ return -EFAULT;
+
+ buf[len] = '\0';
+ if (kstrtoul(buf, 0, &val))
+ return -EINVAL;
+
+ if (val > ATH11K_SPECTRAL_SCAN_COUNT_MAX)
+ return -EINVAL;
+
+ mutex_lock(&ar->conf_mutex);
+ ar->spectral.count = val;
+ mutex_unlock(&ar->conf_mutex);
+
+ return count;
+}
+
+static const struct file_operations fops_scan_count = {
+ .read = ath11k_read_file_spectral_count,
+ .write = ath11k_write_file_spectral_count,
+ .open = simple_open,
+ .owner = THIS_MODULE,
+ .llseek = default_llseek,
+};
+
+static ssize_t ath11k_read_file_spectral_bins(struct file *file,
+ char __user *user_buf,
+ size_t count, loff_t *ppos)
+{
+ struct ath11k *ar = file->private_data;
+ char buf[32];
+ unsigned int bins, fft_size;
+ size_t len;
+
+ mutex_lock(&ar->conf_mutex);
+
+ fft_size = ar->spectral.fft_size;
+ bins = 1 << fft_size;
+
+ mutex_unlock(&ar->conf_mutex);
+
+ len = sprintf(buf, "%d\n", bins);
+ return simple_read_from_buffer(user_buf, count, ppos, buf, len);
+}
+
+static ssize_t ath11k_write_file_spectral_bins(struct file *file,
+ const char __user *user_buf,
+ size_t count, loff_t *ppos)
+{
+ struct ath11k *ar = file->private_data;
+ unsigned long val;
+ char buf[32];
+ ssize_t len;
+
+ len = min(count, sizeof(buf) - 1);
+ if (copy_from_user(buf, user_buf, len))
+ return -EFAULT;
+
+ buf[len] = '\0';
+ if (kstrtoul(buf, 0, &val))
+ return -EINVAL;
+
+ if (val < ATH11K_SPECTRAL_MIN_BINS ||
+ val > ar->ab->hw_params.spectral.max_fft_bins)
+ return -EINVAL;
+
+ if (!is_power_of_2(val))
+ return -EINVAL;
+
+ mutex_lock(&ar->conf_mutex);
+ ar->spectral.fft_size = ilog2(val);
+ mutex_unlock(&ar->conf_mutex);
+
+ return count;
+}
+
+static const struct file_operations fops_scan_bins = {
+ .read = ath11k_read_file_spectral_bins,
+ .write = ath11k_write_file_spectral_bins,
+ .open = simple_open,
+ .owner = THIS_MODULE,
+ .llseek = default_llseek,
+};
+
+static int ath11k_spectral_pull_summary(struct ath11k *ar,
+ struct wmi_dma_buf_release_meta_data *meta,
+ struct spectral_summary_fft_report *summary,
+ struct ath11k_spectral_summary_report *report)
+{
+ report->timestamp = __le32_to_cpu(summary->timestamp);
+ report->agc_total_gain = FIELD_GET(SPECTRAL_SUMMARY_INFO0_AGC_TOTAL_GAIN,
+ __le32_to_cpu(summary->info0));
+ report->out_of_band_flag = FIELD_GET(SPECTRAL_SUMMARY_INFO0_OB_FLAG,
+ __le32_to_cpu(summary->info0));
+ report->grp_idx = FIELD_GET(SPECTRAL_SUMMARY_INFO0_GRP_IDX,
+ __le32_to_cpu(summary->info0));
+ report->rf_saturation = FIELD_GET(SPECTRAL_SUMMARY_INFO0_RECENT_RFSAT,
+ __le32_to_cpu(summary->info0));
+ report->inb_pwr_db = FIELD_GET(SPECTRAL_SUMMARY_INFO0_INBAND_PWR_DB,
+ __le32_to_cpu(summary->info0));
+ report->false_scan = FIELD_GET(SPECTRAL_SUMMARY_INFO0_FALSE_SCAN,
+ __le32_to_cpu(summary->info0));
+ report->detector_id = FIELD_GET(SPECTRAL_SUMMARY_INFO0_DETECTOR_ID,
+ __le32_to_cpu(summary->info0));
+ report->primary80 = FIELD_GET(SPECTRAL_SUMMARY_INFO0_PRI80,
+ __le32_to_cpu(summary->info0));
+ report->peak_idx = FIELD_GET(SPECTRAL_SUMMARY_INFO2_PEAK_SIGNED_IDX,
+ __le32_to_cpu(summary->info2));
+ report->peak_mag = FIELD_GET(SPECTRAL_SUMMARY_INFO2_PEAK_MAGNITUDE,
+ __le32_to_cpu(summary->info2));
+ report->gain_change = FIELD_GET(SPECTRAL_SUMMARY_INFO2_GAIN_CHANGE,
+ __le32_to_cpu(summary->info2));
+
+ memcpy(&report->meta, meta, sizeof(*meta));
+
+ return 0;
+}
+
+static int ath11k_spectral_pull_search(struct ath11k *ar,
+ struct spectral_search_fft_report *search,
+ struct ath11k_spectral_search_report *report)
+{
+ report->timestamp = __le32_to_cpu(search->timestamp);
+ report->detector_id = FIELD_GET(SPECTRAL_FFT_REPORT_INFO0_DETECTOR_ID,
+ __le32_to_cpu(search->info0));
+ report->fft_count = FIELD_GET(SPECTRAL_FFT_REPORT_INFO0_FFT_NUM,
+ __le32_to_cpu(search->info0));
+ report->radar_check = FIELD_GET(SPECTRAL_FFT_REPORT_INFO0_RADAR_CHECK,
+ __le32_to_cpu(search->info0));
+ report->peak_idx = FIELD_GET(SPECTRAL_FFT_REPORT_INFO0_PEAK_SIGNED_IDX,
+ __le32_to_cpu(search->info0));
+ report->chain_idx = FIELD_GET(SPECTRAL_FFT_REPORT_INFO0_CHAIN_IDX,
+ __le32_to_cpu(search->info0));
+ report->base_pwr_db = FIELD_GET(SPECTRAL_FFT_REPORT_INFO1_BASE_PWR_DB,
+ __le32_to_cpu(search->info1));
+ report->total_gain_db = FIELD_GET(SPECTRAL_FFT_REPORT_INFO1_TOTAL_GAIN_DB,
+ __le32_to_cpu(search->info1));
+ report->strong_bin_count = FIELD_GET(SPECTRAL_FFT_REPORT_INFO2_NUM_STRONG_BINS,
+ __le32_to_cpu(search->info2));
+ report->peak_mag = FIELD_GET(SPECTRAL_FFT_REPORT_INFO2_PEAK_MAGNITUDE,
+ __le32_to_cpu(search->info2));
+ report->avg_pwr_db = FIELD_GET(SPECTRAL_FFT_REPORT_INFO2_AVG_PWR_DB,
+ __le32_to_cpu(search->info2));
+ report->rel_pwr_db = FIELD_GET(SPECTRAL_FFT_REPORT_INFO2_REL_PWR_DB,
+ __le32_to_cpu(search->info2));
+
+ return 0;
+}
+
+static u8 ath11k_spectral_get_max_exp(s8 max_index, u8 max_magnitude,
+ int bin_len, u8 *bins)
+{
+ int dc_pos;
+ u8 max_exp;
+
+ dc_pos = bin_len / 2;
+
+ /* peak index outside of bins */
+ if (dc_pos <= max_index || -dc_pos >= max_index)
+ return 0;
+
+ for (max_exp = 0; max_exp < 8; max_exp++) {
+ if (bins[dc_pos + max_index] == (max_magnitude >> max_exp))
+ break;
+ }
+
+ /* max_exp not found */
+ if (bins[dc_pos + max_index] != (max_magnitude >> max_exp))
+ return 0;
+
+ return max_exp;
+}
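+
+/* Worked example (illustrative): if the reported peak magnitude is 960
+ * and the bin at the peak index holds 120, then 960 >> 3 == 120, so
+ * max_exp is 3, i.e. the hardware right-shifted the bins by 3 bits.
+ */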
+
+static void ath11k_spectral_parse_fft(u8 *outbins, u8 *inbins, int num_bins, u8 fft_sz)
+{
+ int i, j;
+
+ i = 0;
+ j = 0;
+ while (i < num_bins) {
+ outbins[i] = inbins[j];
+ i++;
+ j += fft_sz;
+ }
+}
+
+static
+int ath11k_spectral_process_fft(struct ath11k *ar,
+ struct ath11k_spectral_summary_report *summary,
+ void *data,
+ struct fft_sample_ath11k *fft_sample,
+ u32 data_len)
+{
+ struct ath11k_base *ab = ar->ab;
+ struct spectral_search_fft_report *fft_report = data;
+ struct ath11k_spectral_search_report search;
+ struct spectral_tlv *tlv;
+ int tlv_len, bin_len, num_bins;
+ u16 length, freq;
+ u8 chan_width_mhz, bin_sz;
+ int ret;
+ u32 check_length;
+ bool fragment_sample = false;
+
+ lockdep_assert_held(&ar->spectral.lock);
+
+ if (!ab->hw_params.spectral.fft_sz) {
+ ath11k_warn(ab, "invalid bin size type for hw rev %d\n",
+ ab->hw_rev);
+ return -EINVAL;
+ }
+
+ tlv = (struct spectral_tlv *)data;
+ tlv_len = FIELD_GET(SPECTRAL_TLV_HDR_LEN, __le32_to_cpu(tlv->header));
+	/* convert Dword count into bytes */
+ tlv_len *= ATH11K_SPECTRAL_DWORD_SIZE;
+ bin_len = tlv_len - ab->hw_params.spectral.fft_hdr_len;
+
+ if (data_len < (bin_len + sizeof(*fft_report))) {
+ ath11k_warn(ab, "mismatch in expected bin len %d and data len %d\n",
+ bin_len, data_len);
+ return -EINVAL;
+ }
+
+ bin_sz = ab->hw_params.spectral.fft_sz + ab->hw_params.spectral.fft_pad_sz;
+ num_bins = bin_len / bin_sz;
+	/* Only in-band bins are useful for the user to visualize */
+ num_bins >>= 1;
+
+ if (num_bins < ATH11K_SPECTRAL_MIN_IB_BINS ||
+ num_bins > ATH11K_SPECTRAL_MAX_IB_BINS(ab) ||
+ !is_power_of_2(num_bins)) {
+ ath11k_warn(ab, "Invalid num of bins %d\n", num_bins);
+ return -EINVAL;
+ }
+
+ check_length = sizeof(*fft_report) + (num_bins * ab->hw_params.spectral.fft_sz);
+ ret = ath11k_dbring_validate_buffer(ar, data, check_length);
+ if (ret) {
+ ath11k_warn(ar->ab, "found magic value in fft data, dropping\n");
+ return ret;
+ }
+
+ ret = ath11k_spectral_pull_search(ar, data, &search);
+ if (ret) {
+ ath11k_warn(ab, "failed to pull search report %d\n", ret);
+ return ret;
+ }
+
+ chan_width_mhz = summary->meta.ch_width;
+
+ switch (chan_width_mhz) {
+ case ATH11K_SPECTRAL_20MHZ:
+ case ATH11K_SPECTRAL_40MHZ:
+ case ATH11K_SPECTRAL_80MHZ:
+ fft_sample->chan_width_mhz = chan_width_mhz;
+ break;
+ case ATH11K_SPECTRAL_160MHZ:
+ if (ab->hw_params.spectral.fragment_160mhz) {
+ chan_width_mhz /= 2;
+ fragment_sample = true;
+ }
+ fft_sample->chan_width_mhz = chan_width_mhz;
+ break;
+ default:
+ ath11k_warn(ab, "invalid channel width %d\n", chan_width_mhz);
+ return -EINVAL;
+ }
+
+ length = sizeof(*fft_sample) - sizeof(struct fft_sample_tlv) + num_bins;
+ fft_sample->tlv.type = ATH_FFT_SAMPLE_ATH11K;
+ fft_sample->tlv.length = __cpu_to_be16(length);
+
+ fft_sample->tsf = __cpu_to_be32(search.timestamp);
+ fft_sample->max_magnitude = __cpu_to_be16(search.peak_mag);
+ fft_sample->max_index = FIELD_GET(SPECTRAL_FFT_REPORT_INFO0_PEAK_SIGNED_IDX,
+ __le32_to_cpu(fft_report->info0));
+
+ summary->inb_pwr_db >>= 1;
+ fft_sample->rssi = __cpu_to_be16(summary->inb_pwr_db);
+ fft_sample->noise = __cpu_to_be32(summary->meta.noise_floor[search.chain_idx]);
+
+ freq = summary->meta.freq1;
+ fft_sample->freq1 = __cpu_to_be16(freq);
+
+ freq = summary->meta.freq2;
+ fft_sample->freq2 = __cpu_to_be16(freq);
+
+	/* If freq2 is available then the spectral scan results are
+	 * fragmented as primary and secondary segments.
+	 */
+ if (fragment_sample && freq) {
+ if (!ar->spectral.is_primary)
+ fft_sample->freq1 = cpu_to_be16(freq);
+
+		/* Toggle is_primary to handle the next report */
+ ar->spectral.is_primary = !ar->spectral.is_primary;
+ }
+
+ ath11k_spectral_parse_fft(fft_sample->data, fft_report->bins, num_bins,
+ ab->hw_params.spectral.fft_sz);
+
+ fft_sample->max_exp = ath11k_spectral_get_max_exp(fft_sample->max_index,
+ search.peak_mag,
+ num_bins,
+ fft_sample->data);
+
+ if (ar->spectral.rfs_scan)
+ relay_write(ar->spectral.rfs_scan, fft_sample,
+ length + sizeof(struct fft_sample_tlv));
+
+ return 0;
+}
+
+static int ath11k_spectral_process_data(struct ath11k *ar,
+ struct ath11k_dbring_data *param)
+{
+ struct ath11k_base *ab = ar->ab;
+ struct spectral_tlv *tlv;
+ struct spectral_summary_fft_report *summary = NULL;
+ struct ath11k_spectral_summary_report summ_rpt;
+ struct fft_sample_ath11k *fft_sample = NULL;
+ u8 *data;
+ u32 data_len, i;
+ u8 sign, tag;
+ int tlv_len, sample_sz;
+ int ret;
+ bool quit = false;
+
+ spin_lock_bh(&ar->spectral.lock);
+
+ if (!ar->spectral.enabled) {
+ ret = -EINVAL;
+ goto unlock;
+ }
+
+ sample_sz = sizeof(*fft_sample) + ATH11K_SPECTRAL_MAX_IB_BINS(ab);
+ fft_sample = kmalloc(sample_sz, GFP_ATOMIC);
+ if (!fft_sample) {
+ ret = -ENOBUFS;
+ goto unlock;
+ }
+
+ data = param->data;
+ data_len = param->data_sz;
+ i = 0;
+ while (!quit && (i < data_len)) {
+ if ((i + sizeof(*tlv)) > data_len) {
+ ath11k_warn(ab, "failed to parse spectral tlv hdr at bytes %d\n",
+ i);
+ ret = -EINVAL;
+ goto err;
+ }
+
+ tlv = (struct spectral_tlv *)&data[i];
+ sign = FIELD_GET(SPECTRAL_TLV_HDR_SIGN,
+ __le32_to_cpu(tlv->header));
+ if (sign != ATH11K_SPECTRAL_SIGNATURE) {
+ ath11k_warn(ab, "Invalid sign 0x%x at bytes %d\n",
+ sign, i);
+ ret = -EINVAL;
+ goto err;
+ }
+
+ tlv_len = FIELD_GET(SPECTRAL_TLV_HDR_LEN,
+ __le32_to_cpu(tlv->header));
+		/* convert Dword count into bytes */
+ tlv_len *= ATH11K_SPECTRAL_DWORD_SIZE;
+ if ((i + sizeof(*tlv) + tlv_len) > data_len) {
+ ath11k_warn(ab, "failed to parse spectral tlv payload at bytes %d tlv_len:%d data_len:%d\n",
+ i, tlv_len, data_len);
+ ret = -EINVAL;
+ goto err;
+ }
+
+ tag = FIELD_GET(SPECTRAL_TLV_HDR_TAG,
+ __le32_to_cpu(tlv->header));
+ switch (tag) {
+ case ATH11K_SPECTRAL_TAG_SCAN_SUMMARY:
+			/* HW bug in the TLV length of the summary report:
+			 * the HW reports a 3 Dword size but the data payload
+			 * is 4 Dwords (16 bytes).
+			 * Remove this workaround once the HW bug is fixed.
+			 */
+ tlv_len = sizeof(*summary) - sizeof(*tlv) +
+ ab->hw_params.spectral.summary_pad_sz;
+
+ if (tlv_len < (sizeof(*summary) - sizeof(*tlv))) {
+ ath11k_warn(ab, "failed to parse spectral summary at bytes %d tlv_len:%d\n",
+ i, tlv_len);
+ ret = -EINVAL;
+ goto err;
+ }
+
+ ret = ath11k_dbring_validate_buffer(ar, data, tlv_len);
+ if (ret) {
+ ath11k_warn(ar->ab, "found magic value in spectral summary, dropping\n");
+ goto err;
+ }
+
+ summary = (struct spectral_summary_fft_report *)tlv;
+ ath11k_spectral_pull_summary(ar, &param->meta,
+ summary, &summ_rpt);
+ break;
+ case ATH11K_SPECTRAL_TAG_SCAN_SEARCH:
+ if (tlv_len < (sizeof(struct spectral_search_fft_report) -
+ sizeof(*tlv))) {
+ ath11k_warn(ab, "failed to parse spectral search fft at bytes %d\n",
+ i);
+ ret = -EINVAL;
+ goto err;
+ }
+
+ memset(fft_sample, 0, sample_sz);
+ ret = ath11k_spectral_process_fft(ar, &summ_rpt, tlv,
+ fft_sample,
+ data_len - i);
+ if (ret) {
+ ath11k_warn(ab, "failed to process spectral fft at bytes %d\n",
+ i);
+ goto err;
+ }
+ quit = true;
+ break;
+ }
+
+ i += sizeof(*tlv) + tlv_len;
+ }
+
+ ret = 0;
+
+err:
+ kfree(fft_sample);
+unlock:
+ spin_unlock_bh(&ar->spectral.lock);
+ return ret;
+}
+
+static int ath11k_spectral_ring_alloc(struct ath11k *ar,
+ struct ath11k_dbring_cap *db_cap)
+{
+ struct ath11k_spectral *sp = &ar->spectral;
+ int ret;
+
+ ret = ath11k_dbring_srng_setup(ar, &sp->rx_ring,
+ 0, db_cap->min_elem);
+ if (ret) {
+ ath11k_warn(ar->ab, "failed to setup db ring\n");
+ return ret;
+ }
+
+ ath11k_dbring_set_cfg(ar, &sp->rx_ring,
+ ATH11K_SPECTRAL_NUM_RESP_PER_EVENT,
+ ATH11K_SPECTRAL_EVENT_TIMEOUT_MS,
+ ath11k_spectral_process_data);
+
+ ret = ath11k_dbring_buf_setup(ar, &sp->rx_ring, db_cap);
+ if (ret) {
+ ath11k_warn(ar->ab, "failed to setup db ring buffer\n");
+ goto srng_cleanup;
+ }
+
+ ret = ath11k_dbring_wmi_cfg_setup(ar, &sp->rx_ring,
+ WMI_DIRECT_BUF_SPECTRAL);
+ if (ret) {
+ ath11k_warn(ar->ab, "failed to setup db ring cfg\n");
+ goto buffer_cleanup;
+ }
+
+ return 0;
+
+buffer_cleanup:
+ ath11k_dbring_buf_cleanup(ar, &sp->rx_ring);
+srng_cleanup:
+ ath11k_dbring_srng_cleanup(ar, &sp->rx_ring);
+ return ret;
+}
+
+static inline void ath11k_spectral_ring_free(struct ath11k *ar)
+{
+ struct ath11k_spectral *sp = &ar->spectral;
+
+ ath11k_dbring_srng_cleanup(ar, &sp->rx_ring);
+ ath11k_dbring_buf_cleanup(ar, &sp->rx_ring);
+}
+
+static inline void ath11k_spectral_debug_unregister(struct ath11k *ar)
+{
+ debugfs_remove(ar->spectral.scan_bins);
+ ar->spectral.scan_bins = NULL;
+
+ debugfs_remove(ar->spectral.scan_count);
+ ar->spectral.scan_count = NULL;
+
+ debugfs_remove(ar->spectral.scan_ctl);
+ ar->spectral.scan_ctl = NULL;
+
+ if (ar->spectral.rfs_scan) {
+ relay_close(ar->spectral.rfs_scan);
+ ar->spectral.rfs_scan = NULL;
+ }
+}
+
+int ath11k_spectral_vif_stop(struct ath11k_vif *arvif)
+{
+ if (!arvif->spectral_enabled)
+ return 0;
+
+ return ath11k_spectral_scan_config(arvif->ar, ATH11K_SPECTRAL_DISABLED);
+}
+
+void ath11k_spectral_reset_buffer(struct ath11k *ar)
+{
+ if (!ar->spectral.enabled)
+ return;
+
+ if (ar->spectral.rfs_scan)
+ relay_reset(ar->spectral.rfs_scan);
+}
+
+void ath11k_spectral_deinit(struct ath11k_base *ab)
+{
+ struct ath11k *ar;
+ struct ath11k_spectral *sp;
+ int i;
+
+ for (i = 0; i < ab->num_radios; i++) {
+ ar = ab->pdevs[i].ar;
+ sp = &ar->spectral;
+
+ if (!sp->enabled)
+ continue;
+
+ mutex_lock(&ar->conf_mutex);
+ ath11k_spectral_scan_config(ar, ATH11K_SPECTRAL_DISABLED);
+ mutex_unlock(&ar->conf_mutex);
+
+ spin_lock_bh(&sp->lock);
+ sp->enabled = false;
+ spin_unlock_bh(&sp->lock);
+
+ ath11k_spectral_debug_unregister(ar);
+ ath11k_spectral_ring_free(ar);
+ }
+}
+
+static inline int ath11k_spectral_debug_register(struct ath11k *ar)
+{
+ int ret;
+
+ ar->spectral.rfs_scan = relay_open("spectral_scan",
+ ar->debug.debugfs_pdev,
+ ATH11K_SPECTRAL_SUB_BUFF_SIZE(ar->ab),
+ ATH11K_SPECTRAL_NUM_SUB_BUF,
+ &rfs_scan_cb, NULL);
+ if (!ar->spectral.rfs_scan) {
+ ath11k_warn(ar->ab, "failed to open relay in pdev %d\n",
+ ar->pdev_idx);
+ return -EINVAL;
+ }
+
+ ar->spectral.scan_ctl = debugfs_create_file("spectral_scan_ctl",
+ 0600,
+ ar->debug.debugfs_pdev, ar,
+ &fops_scan_ctl);
+ if (!ar->spectral.scan_ctl) {
+ ath11k_warn(ar->ab, "failed to open debugfs in pdev %d\n",
+ ar->pdev_idx);
+ ret = -EINVAL;
+ goto debug_unregister;
+ }
+
+ ar->spectral.scan_count = debugfs_create_file("spectral_count",
+ 0600,
+ ar->debug.debugfs_pdev, ar,
+ &fops_scan_count);
+ if (!ar->spectral.scan_count) {
+ ath11k_warn(ar->ab, "failed to open debugfs in pdev %d\n",
+ ar->pdev_idx);
+ ret = -EINVAL;
+ goto debug_unregister;
+ }
+
+ ar->spectral.scan_bins = debugfs_create_file("spectral_bins",
+ 0600,
+ ar->debug.debugfs_pdev, ar,
+ &fops_scan_bins);
+ if (!ar->spectral.scan_bins) {
+ ath11k_warn(ar->ab, "failed to open debugfs in pdev %d\n",
+ ar->pdev_idx);
+ ret = -EINVAL;
+ goto debug_unregister;
+ }
+
+ return 0;
+
+debug_unregister:
+ ath11k_spectral_debug_unregister(ar);
+ return ret;
+}
+
+int ath11k_spectral_init(struct ath11k_base *ab)
+{
+ struct ath11k *ar;
+ struct ath11k_spectral *sp;
+ struct ath11k_dbring_cap db_cap;
+ int ret;
+ int i;
+
+ if (!test_bit(WMI_TLV_SERVICE_FREQINFO_IN_METADATA,
+ ab->wmi_ab.svc_map))
+ return 0;
+
+ if (!ab->hw_params.spectral.fft_sz)
+ return 0;
+
+ for (i = 0; i < ab->num_radios; i++) {
+ ar = ab->pdevs[i].ar;
+ sp = &ar->spectral;
+
+ ret = ath11k_dbring_get_cap(ar->ab, ar->pdev_idx,
+ WMI_DIRECT_BUF_SPECTRAL,
+ &db_cap);
+ if (ret)
+ continue;
+
+ idr_init(&sp->rx_ring.bufs_idr);
+ spin_lock_init(&sp->rx_ring.idr_lock);
+ spin_lock_init(&sp->lock);
+
+ ret = ath11k_spectral_ring_alloc(ar, &db_cap);
+ if (ret) {
+ ath11k_warn(ab, "failed to init spectral ring for pdev %d\n",
+ i);
+ goto deinit;
+ }
+
+ spin_lock_bh(&sp->lock);
+
+ sp->mode = ATH11K_SPECTRAL_DISABLED;
+ sp->count = ATH11K_WMI_SPECTRAL_COUNT_DEFAULT;
+ sp->fft_size = ATH11K_WMI_SPECTRAL_FFT_SIZE_DEFAULT;
+ sp->enabled = true;
+
+ spin_unlock_bh(&sp->lock);
+
+ ret = ath11k_spectral_debug_register(ar);
+ if (ret) {
+ ath11k_warn(ab, "failed to register spectral for pdev %d\n",
+ i);
+ goto deinit;
+ }
+ }
+
+ return 0;
+
+deinit:
+ ath11k_spectral_deinit(ab);
+ return ret;
+}
+
+enum ath11k_spectral_mode ath11k_spectral_get_mode(struct ath11k *ar)
+{
+ if (ar->spectral.enabled)
+ return ar->spectral.mode;
+ else
+ return ATH11K_SPECTRAL_DISABLED;
+}
+
+struct ath11k_dbring *ath11k_spectral_get_dbring(struct ath11k *ar)
+{
+ if (ar->spectral.enabled)
+ return &ar->spectral.rx_ring;
+ else
+ return NULL;
+}
diff --git a/drivers/net/wireless/ath/ath11k/spectral.h b/drivers/net/wireless/ath/ath11k/spectral.h
new file mode 100644
index 0000000000..96bfa16e18
--- /dev/null
+++ b/drivers/net/wireless/ath/ath11k/spectral.h
@@ -0,0 +1,83 @@
+/* SPDX-License-Identifier: BSD-3-Clause-Clear */
+/*
+ * Copyright (c) 2019-2020 The Linux Foundation. All rights reserved.
+ */
+
+#ifndef ATH11K_SPECTRAL_H
+#define ATH11K_SPECTRAL_H
+
+#include "../spectral_common.h"
+#include "dbring.h"
+
+/* enum ath11k_spectral_mode:
+ *
+ * @ATH11K_SPECTRAL_DISABLED: spectral mode is disabled
+ * @ATH11K_SPECTRAL_BACKGROUND: hardware sends samples when it is not busy
+ *	with something else.
+ * @ATH11K_SPECTRAL_MANUAL: spectral scan is enabled, triggering for samples
+ *	is performed manually.
+ */
+enum ath11k_spectral_mode {
+ ATH11K_SPECTRAL_DISABLED = 0,
+ ATH11K_SPECTRAL_BACKGROUND,
+ ATH11K_SPECTRAL_MANUAL,
+};
+
+struct ath11k_spectral {
+ struct ath11k_dbring rx_ring;
+ /* Protects enabled */
+ spinlock_t lock;
+ struct rchan *rfs_scan; /* relay(fs) channel for spectral scan */
+ struct dentry *scan_ctl;
+ struct dentry *scan_count;
+ struct dentry *scan_bins;
+ enum ath11k_spectral_mode mode;
+ u16 count;
+ u8 fft_size;
+ bool enabled;
+ bool is_primary;
+};
+
+#ifdef CONFIG_ATH11K_SPECTRAL
+
+int ath11k_spectral_init(struct ath11k_base *ab);
+void ath11k_spectral_deinit(struct ath11k_base *ab);
+int ath11k_spectral_vif_stop(struct ath11k_vif *arvif);
+void ath11k_spectral_reset_buffer(struct ath11k *ar);
+enum ath11k_spectral_mode ath11k_spectral_get_mode(struct ath11k *ar);
+struct ath11k_dbring *ath11k_spectral_get_dbring(struct ath11k *ar);
+
+#else
+
+static inline int ath11k_spectral_init(struct ath11k_base *ab)
+{
+ return 0;
+}
+
+static inline void ath11k_spectral_deinit(struct ath11k_base *ab)
+{
+}
+
+static inline int ath11k_spectral_vif_stop(struct ath11k_vif *arvif)
+{
+ return 0;
+}
+
+static inline void ath11k_spectral_reset_buffer(struct ath11k *ar)
+{
+}
+
+static inline
+enum ath11k_spectral_mode ath11k_spectral_get_mode(struct ath11k *ar)
+{
+ return ATH11K_SPECTRAL_DISABLED;
+}
+
+static inline
+struct ath11k_dbring *ath11k_spectral_get_dbring(struct ath11k *ar)
+{
+ return NULL;
+}
+
+#endif /* CONFIG_ATH11K_SPECTRAL */
+#endif /* ATH11K_SPECTRAL_H */
diff --git a/drivers/net/wireless/ath/ath11k/testmode.c b/drivers/net/wireless/ath/ath11k/testmode.c
new file mode 100644
index 0000000000..43bb23265d
--- /dev/null
+++ b/drivers/net/wireless/ath/ath11k/testmode.c
@@ -0,0 +1,502 @@
+// SPDX-License-Identifier: BSD-3-Clause-Clear
+/*
+ * Copyright (c) 2018-2019 The Linux Foundation. All rights reserved.
+ * Copyright (c) 2023 Qualcomm Innovation Center, Inc. All rights reserved.
+ */
+
+#include "testmode.h"
+#include <net/netlink.h>
+#include "debug.h"
+#include "wmi.h"
+#include "hw.h"
+#include "core.h"
+#include "testmode_i.h"
+
+#define ATH11K_FTM_SEGHDR_CURRENT_SEQ GENMASK(3, 0)
+#define ATH11K_FTM_SEGHDR_TOTAL_SEGMENTS GENMASK(7, 4)
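+
+/* Illustrative sketch, not part of the upstream driver: packing the
+ * 8-bit segment info carried in the FTM segment header. The low nibble
+ * holds the current sequence number and the high nibble the total
+ * segment count, as done in ath11k_tm_cmd_wmi_ftm() below. The
+ * example_ helper is hypothetical.
+ */
+static inline u8 example_ftm_seginfo_pack(u8 total_segments, u8 current_seq)
+{
+	return FIELD_PREP(ATH11K_FTM_SEGHDR_TOTAL_SEGMENTS, total_segments) |
+	       FIELD_PREP(ATH11K_FTM_SEGHDR_CURRENT_SEQ, current_seq);
+}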
+
+static const struct nla_policy ath11k_tm_policy[ATH11K_TM_ATTR_MAX + 1] = {
+ [ATH11K_TM_ATTR_CMD] = { .type = NLA_U32 },
+ [ATH11K_TM_ATTR_DATA] = { .type = NLA_BINARY,
+ .len = ATH11K_TM_DATA_MAX_LEN },
+ [ATH11K_TM_ATTR_WMI_CMDID] = { .type = NLA_U32 },
+ [ATH11K_TM_ATTR_VERSION_MAJOR] = { .type = NLA_U32 },
+ [ATH11K_TM_ATTR_VERSION_MINOR] = { .type = NLA_U32 },
+};
+
+static struct ath11k *ath11k_tm_get_ar(struct ath11k_base *ab)
+{
+ struct ath11k_pdev *pdev;
+ struct ath11k *ar = NULL;
+ int i;
+
+ for (i = 0; i < ab->num_radios; i++) {
+ pdev = &ab->pdevs[i];
+ ar = pdev->ar;
+
+ if (ar && ar->state == ATH11K_STATE_FTM)
+ break;
+ }
+
+ return ar;
+}
+
+/* This function handles unsegmented events. Data from various events is
+ * aggregated in the application layer; from the host's perspective such
+ * an event is unsegmented.
+ */
+static void ath11k_tm_wmi_event_unsegmented(struct ath11k_base *ab, u32 cmd_id,
+ struct sk_buff *skb)
+{
+ struct sk_buff *nl_skb;
+ struct ath11k *ar;
+
+ ath11k_dbg(ab, ATH11K_DBG_TESTMODE,
+ "event wmi cmd_id %d skb length %d\n",
+ cmd_id, skb->len);
+ ath11k_dbg_dump(ab, ATH11K_DBG_TESTMODE, NULL, "", skb->data, skb->len);
+
+ ar = ath11k_tm_get_ar(ab);
+ if (!ar) {
+ ath11k_warn(ab, "testmode event not handled due to invalid pdev\n");
+ return;
+ }
+
+ spin_lock_bh(&ar->data_lock);
+
+ nl_skb = cfg80211_testmode_alloc_event_skb(ar->hw->wiphy,
+ 2 * nla_total_size(sizeof(u32)) +
+ nla_total_size(skb->len),
+ GFP_ATOMIC);
+ if (!nl_skb) {
+ ath11k_warn(ab,
+ "failed to allocate skb for unsegmented testmode wmi event\n");
+ goto out;
+ }
+
+ if (nla_put_u32(nl_skb, ATH11K_TM_ATTR_CMD, ATH11K_TM_CMD_WMI) ||
+ nla_put_u32(nl_skb, ATH11K_TM_ATTR_WMI_CMDID, cmd_id) ||
+ nla_put(nl_skb, ATH11K_TM_ATTR_DATA, skb->len, skb->data)) {
+ ath11k_warn(ab, "failed to populate testmode unsegmented event\n");
+ kfree_skb(nl_skb);
+ goto out;
+ }
+
+ cfg80211_testmode_event(nl_skb, GFP_ATOMIC);
+ spin_unlock_bh(&ar->data_lock);
+ return;
+
+out:
+ spin_unlock_bh(&ar->data_lock);
+ ath11k_warn(ab, "Failed to send testmode event to higher layers\n");
+}
+
+/* This function handles segmented events. Data from various events
+ * received from firmware is aggregated and sent to the application layer.
+ */
+static int ath11k_tm_process_event(struct ath11k_base *ab, u32 cmd_id,
+ const struct wmi_ftm_event_msg *ftm_msg,
+ u16 length)
+{
+ struct sk_buff *nl_skb;
+ int ret = 0;
+ struct ath11k *ar;
+ u8 const *buf_pos;
+ u16 datalen;
+ u8 total_segments, current_seq;
+ u32 data_pos;
+ u32 pdev_id;
+
+ ath11k_dbg(ab, ATH11K_DBG_TESTMODE,
+ "event wmi cmd_id %d ftm event msg %pK datalen %d\n",
+ cmd_id, ftm_msg, length);
+ ath11k_dbg_dump(ab, ATH11K_DBG_TESTMODE, NULL, "", ftm_msg, length);
+ pdev_id = DP_HW2SW_MACID(ftm_msg->seg_hdr.pdev_id);
+
+ if (pdev_id >= ab->num_radios) {
+ ath11k_warn(ab, "testmode event not handled due to invalid pdev id: %d\n",
+ pdev_id);
+ return -EINVAL;
+ }
+
+ ar = ab->pdevs[pdev_id].ar;
+ if (!ar) {
+ ath11k_warn(ab, "testmode event not handled due to absence of pdev\n");
+ return -ENODEV;
+ }
+
+ current_seq = FIELD_GET(ATH11K_FTM_SEGHDR_CURRENT_SEQ,
+ ftm_msg->seg_hdr.segmentinfo);
+ total_segments = FIELD_GET(ATH11K_FTM_SEGHDR_TOTAL_SEGMENTS,
+ ftm_msg->seg_hdr.segmentinfo);
+ datalen = length - (sizeof(struct wmi_ftm_seg_hdr));
+ buf_pos = ftm_msg->data;
+
+ spin_lock_bh(&ar->data_lock);
+
+ if (current_seq == 0) {
+ ab->testmode.expected_seq = 0;
+ ab->testmode.data_pos = 0;
+ }
+
+ data_pos = ab->testmode.data_pos;
+
+ if ((data_pos + datalen) > ATH11K_FTM_EVENT_MAX_BUF_LENGTH) {
+ ath11k_warn(ab, "Invalid ftm event length at %d: %d\n",
+ data_pos, datalen);
+ ret = -EINVAL;
+ goto out;
+ }
+
+ memcpy(&ab->testmode.eventdata[data_pos], buf_pos, datalen);
+ data_pos += datalen;
+
+ if (++ab->testmode.expected_seq != total_segments) {
+ ab->testmode.data_pos = data_pos;
+ ath11k_dbg(ab, ATH11K_DBG_TESTMODE,
+ "partial data received current_seq %d total_seg %d\n",
+ current_seq, total_segments);
+ goto out;
+ }
+
+ ath11k_dbg(ab, ATH11K_DBG_TESTMODE,
+ "total data length pos %d len %d\n",
+ data_pos, ftm_msg->seg_hdr.len);
+ nl_skb = cfg80211_testmode_alloc_event_skb(ar->hw->wiphy,
+ 2 * nla_total_size(sizeof(u32)) +
+ nla_total_size(data_pos),
+ GFP_ATOMIC);
+ if (!nl_skb) {
+ ath11k_warn(ab,
+ "failed to allocate skb for segmented testmode wmi event\n");
+ ret = -ENOMEM;
+ goto out;
+ }
+
+ if (nla_put_u32(nl_skb, ATH11K_TM_ATTR_CMD,
+ ATH11K_TM_CMD_WMI_FTM) ||
+ nla_put_u32(nl_skb, ATH11K_TM_ATTR_WMI_CMDID, cmd_id) ||
+ nla_put(nl_skb, ATH11K_TM_ATTR_DATA, data_pos,
+ &ab->testmode.eventdata[0])) {
+ ath11k_warn(ab, "failed to populate segmented testmode event");
+ kfree_skb(nl_skb);
+ ret = -ENOBUFS;
+ goto out;
+ }
+
+ cfg80211_testmode_event(nl_skb, GFP_ATOMIC);
+
+out:
+ spin_unlock_bh(&ar->data_lock);
+ return ret;
+}
+
+static void ath11k_tm_wmi_event_segmented(struct ath11k_base *ab, u32 cmd_id,
+ struct sk_buff *skb)
+{
+ const void **tb;
+ const struct wmi_ftm_event_msg *ev;
+ u16 length;
+ int ret;
+
+ tb = ath11k_wmi_tlv_parse_alloc(ab, skb->data, skb->len, GFP_ATOMIC);
+ if (IS_ERR(tb)) {
+ ret = PTR_ERR(tb);
+ ath11k_warn(ab, "failed to parse ftm event tlv: %d\n", ret);
+ return;
+ }
+
+ ev = tb[WMI_TAG_ARRAY_BYTE];
+ if (!ev) {
+ ath11k_warn(ab, "failed to fetch ftm msg\n");
+ kfree(tb);
+ return;
+ }
+
+ length = skb->len - TLV_HDR_SIZE;
+ ret = ath11k_tm_process_event(ab, cmd_id, ev, length);
+ if (ret)
+ ath11k_warn(ab, "Failed to process ftm event\n");
+
+ kfree(tb);
+}
+
+void ath11k_tm_wmi_event(struct ath11k_base *ab, u32 cmd_id, struct sk_buff *skb)
+{
+ if (test_bit(ATH11K_FLAG_FTM_SEGMENTED, &ab->dev_flags))
+ ath11k_tm_wmi_event_segmented(ab, cmd_id, skb);
+ else
+ ath11k_tm_wmi_event_unsegmented(ab, cmd_id, skb);
+}
+
+static int ath11k_tm_cmd_get_version(struct ath11k *ar, struct nlattr *tb[])
+{
+ struct sk_buff *skb;
+ int ret;
+
+ ath11k_dbg(ar->ab, ATH11K_DBG_TESTMODE,
+ "cmd get version_major %d version_minor %d\n",
+ ATH11K_TESTMODE_VERSION_MAJOR,
+ ATH11K_TESTMODE_VERSION_MINOR);
+
+ skb = cfg80211_testmode_alloc_reply_skb(ar->hw->wiphy,
+ nla_total_size(sizeof(u32)));
+ if (!skb)
+ return -ENOMEM;
+
+ ret = nla_put_u32(skb, ATH11K_TM_ATTR_VERSION_MAJOR,
+ ATH11K_TESTMODE_VERSION_MAJOR);
+ if (ret) {
+ kfree_skb(skb);
+ return ret;
+ }
+
+ ret = nla_put_u32(skb, ATH11K_TM_ATTR_VERSION_MINOR,
+ ATH11K_TESTMODE_VERSION_MINOR);
+ if (ret) {
+ kfree_skb(skb);
+ return ret;
+ }
+
+ return cfg80211_testmode_reply(skb);
+}
+
+static int ath11k_tm_cmd_testmode_start(struct ath11k *ar, struct nlattr *tb[])
+{
+ int ret;
+
+ mutex_lock(&ar->conf_mutex);
+
+ if (ar->state == ATH11K_STATE_FTM) {
+ ret = -EALREADY;
+ goto err;
+ }
+
+	/* start UTF only when the driver is not in use */
+ if (ar->state != ATH11K_STATE_OFF) {
+ ret = -EBUSY;
+ goto err;
+ }
+
+ ar->ab->testmode.eventdata = kzalloc(ATH11K_FTM_EVENT_MAX_BUF_LENGTH,
+ GFP_KERNEL);
+ if (!ar->ab->testmode.eventdata) {
+ ret = -ENOMEM;
+ goto err;
+ }
+
+ ar->state = ATH11K_STATE_FTM;
+ ar->ftm_msgref = 0;
+
+ mutex_unlock(&ar->conf_mutex);
+
+ ath11k_dbg(ar->ab, ATH11K_DBG_TESTMODE, "cmd start\n");
+ return 0;
+
+err:
+ mutex_unlock(&ar->conf_mutex);
+ return ret;
+}
+
+static int ath11k_tm_cmd_wmi(struct ath11k *ar, struct nlattr *tb[],
+ struct ieee80211_vif *vif)
+{
+ struct ath11k_pdev_wmi *wmi = ar->wmi;
+ struct sk_buff *skb;
+ struct ath11k_vif *arvif;
+ u32 cmd_id, buf_len;
+ int ret, tag;
+ void *buf;
+ u32 *ptr;
+
+ mutex_lock(&ar->conf_mutex);
+
+ if (!tb[ATH11K_TM_ATTR_DATA]) {
+ ret = -EINVAL;
+ goto out;
+ }
+
+ if (!tb[ATH11K_TM_ATTR_WMI_CMDID]) {
+ ret = -EINVAL;
+ goto out;
+ }
+
+ buf = nla_data(tb[ATH11K_TM_ATTR_DATA]);
+ buf_len = nla_len(tb[ATH11K_TM_ATTR_DATA]);
+ if (!buf_len) {
+ ath11k_warn(ar->ab, "No data present in testmode wmi command\n");
+ ret = -EINVAL;
+ goto out;
+ }
+
+ cmd_id = nla_get_u32(tb[ATH11K_TM_ATTR_WMI_CMDID]);
+
+	/* Make sure that the buffer length is long enough to
+	 * hold the TLV header and the pdev/vdev id.
+	 */
+ if (buf_len < sizeof(struct wmi_tlv) + sizeof(u32)) {
+ ret = -EINVAL;
+ goto out;
+ }
+
+ ptr = buf;
+ tag = FIELD_GET(WMI_TLV_TAG, *ptr);
+
+	/* the pdev/vdev id starts right after the TLV header */
+ ptr++;
+
+ if (tag == WMI_TAG_PDEV_SET_PARAM_CMD)
+ *ptr = ar->pdev->pdev_id;
+
+ if (ar->ab->fw_mode != ATH11K_FIRMWARE_MODE_FTM &&
+ (tag == WMI_TAG_VDEV_SET_PARAM_CMD || tag == WMI_TAG_UNIT_TEST_CMD)) {
+ if (vif) {
+ arvif = ath11k_vif_to_arvif(vif);
+ *ptr = arvif->vdev_id;
+ } else {
+ ret = -EINVAL;
+ goto out;
+ }
+ }
+
+ ath11k_dbg(ar->ab, ATH11K_DBG_TESTMODE,
+ "cmd wmi cmd_id %d buf length %d\n",
+ cmd_id, buf_len);
+
+ ath11k_dbg_dump(ar->ab, ATH11K_DBG_TESTMODE, NULL, "", buf, buf_len);
+
+ skb = ath11k_wmi_alloc_skb(wmi->wmi_ab, buf_len);
+ if (!skb) {
+ ret = -ENOMEM;
+ goto out;
+ }
+
+ memcpy(skb->data, buf, buf_len);
+
+ ret = ath11k_wmi_cmd_send(wmi, skb, cmd_id);
+ if (ret) {
+ dev_kfree_skb(skb);
+ ath11k_warn(ar->ab, "failed to transmit wmi command (testmode): %d\n",
+ ret);
+ goto out;
+ }
+
+ ret = 0;
+
+out:
+ mutex_unlock(&ar->conf_mutex);
+ return ret;
+}
+
+static int ath11k_tm_cmd_wmi_ftm(struct ath11k *ar, struct nlattr *tb[])
+{
+ struct ath11k_pdev_wmi *wmi = ar->wmi;
+ struct ath11k_base *ab = ar->ab;
+ struct sk_buff *skb;
+ u32 cmd_id, buf_len, hdr_info;
+ int ret;
+ void *buf;
+ u8 segnumber = 0, seginfo;
+ u16 chunk_len, total_bytes, num_segments;
+ u8 *bufpos;
+ struct wmi_ftm_cmd *ftm_cmd;
+
+ set_bit(ATH11K_FLAG_FTM_SEGMENTED, &ab->dev_flags);
+
+ mutex_lock(&ar->conf_mutex);
+
+ if (ar->state != ATH11K_STATE_FTM) {
+ ret = -ENETDOWN;
+ goto out;
+ }
+
+ if (!tb[ATH11K_TM_ATTR_DATA]) {
+ ret = -EINVAL;
+ goto out;
+ }
+
+ buf = nla_data(tb[ATH11K_TM_ATTR_DATA]);
+ buf_len = nla_len(tb[ATH11K_TM_ATTR_DATA]);
+ cmd_id = WMI_PDEV_UTF_CMDID;
+
+ ath11k_dbg(ar->ab, ATH11K_DBG_TESTMODE,
+ "cmd wmi ftm cmd_id %d buffer length %d\n",
+ cmd_id, buf_len);
+ ath11k_dbg_dump(ar->ab, ATH11K_DBG_TESTMODE, NULL, "", buf, buf_len);
+
+ bufpos = buf;
+ total_bytes = buf_len;
+ num_segments = total_bytes / MAX_WMI_UTF_LEN;
+
+ if (buf_len - (num_segments * MAX_WMI_UTF_LEN))
+ num_segments++;
+
+ while (buf_len) {
+ chunk_len = min_t(u16, buf_len, MAX_WMI_UTF_LEN);
+
+ skb = ath11k_wmi_alloc_skb(wmi->wmi_ab, (chunk_len +
+ sizeof(struct wmi_ftm_cmd)));
+ if (!skb) {
+ ret = -ENOMEM;
+ goto out;
+ }
+
+ ftm_cmd = (struct wmi_ftm_cmd *)skb->data;
+ hdr_info = FIELD_PREP(WMI_TLV_TAG, WMI_TAG_ARRAY_BYTE) |
+ FIELD_PREP(WMI_TLV_LEN, (chunk_len +
+ sizeof(struct wmi_ftm_seg_hdr)));
+ ftm_cmd->tlv_header = hdr_info;
+ ftm_cmd->seg_hdr.len = total_bytes;
+ ftm_cmd->seg_hdr.msgref = ar->ftm_msgref;
+ seginfo = FIELD_PREP(ATH11K_FTM_SEGHDR_TOTAL_SEGMENTS, num_segments) |
+ FIELD_PREP(ATH11K_FTM_SEGHDR_CURRENT_SEQ, segnumber);
+ ftm_cmd->seg_hdr.segmentinfo = seginfo;
+ segnumber++;
+
+ memcpy(&ftm_cmd->data, bufpos, chunk_len);
+
+ ret = ath11k_wmi_cmd_send(wmi, skb, cmd_id);
+ if (ret) {
+ ath11k_warn(ar->ab, "failed to send wmi ftm command: %d\n", ret);
+ goto out;
+ }
+
+ buf_len -= chunk_len;
+ bufpos += chunk_len;
+ }
+
+ ar->ftm_msgref++;
+ ret = 0;
+
+out:
+ mutex_unlock(&ar->conf_mutex);
+ return ret;
+}
+
+int ath11k_tm_cmd(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
+ void *data, int len)
+{
+ struct ath11k *ar = hw->priv;
+ struct nlattr *tb[ATH11K_TM_ATTR_MAX + 1];
+ int ret;
+
+ ret = nla_parse(tb, ATH11K_TM_ATTR_MAX, data, len, ath11k_tm_policy,
+ NULL);
+ if (ret)
+ return ret;
+
+ if (!tb[ATH11K_TM_ATTR_CMD])
+ return -EINVAL;
+
+ switch (nla_get_u32(tb[ATH11K_TM_ATTR_CMD])) {
+ case ATH11K_TM_CMD_GET_VERSION:
+ return ath11k_tm_cmd_get_version(ar, tb);
+ case ATH11K_TM_CMD_WMI:
+ return ath11k_tm_cmd_wmi(ar, tb, vif);
+ case ATH11K_TM_CMD_TESTMODE_START:
+ return ath11k_tm_cmd_testmode_start(ar, tb);
+ case ATH11K_TM_CMD_WMI_FTM:
+ return ath11k_tm_cmd_wmi_ftm(ar, tb);
+ default:
+ return -EOPNOTSUPP;
+ }
+}
diff --git a/drivers/net/wireless/ath/ath11k/testmode.h b/drivers/net/wireless/ath/ath11k/testmode.h
new file mode 100644
index 0000000000..2f62f2c442
--- /dev/null
+++ b/drivers/net/wireless/ath/ath11k/testmode.h
@@ -0,0 +1,29 @@
+/* SPDX-License-Identifier: BSD-3-Clause-Clear */
+/*
+ * Copyright (c) 2018-2019 The Linux Foundation. All rights reserved.
+ * Copyright (c) 2023 Qualcomm Innovation Center, Inc. All rights reserved.
+ */
+
+#include "core.h"
+
+#ifdef CONFIG_NL80211_TESTMODE
+
+void ath11k_tm_wmi_event(struct ath11k_base *ab, u32 cmd_id, struct sk_buff *skb);
+int ath11k_tm_cmd(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
+ void *data, int len);
+
+#else
+
+static inline void ath11k_tm_wmi_event(struct ath11k_base *ab, u32 cmd_id,
+ struct sk_buff *skb)
+{
+}
+
+static inline int ath11k_tm_cmd(struct ieee80211_hw *hw,
+ struct ieee80211_vif *vif,
+ void *data, int len)
+{
+ return 0;
+}
+
+#endif
diff --git a/drivers/net/wireless/ath/ath11k/testmode_i.h b/drivers/net/wireless/ath/ath11k/testmode_i.h
new file mode 100644
index 0000000000..91b83873d6
--- /dev/null
+++ b/drivers/net/wireless/ath/ath11k/testmode_i.h
@@ -0,0 +1,66 @@
+/* SPDX-License-Identifier: BSD-3-Clause-Clear */
+/*
+ * Copyright (c) 2018-2019 The Linux Foundation. All rights reserved.
+ * Copyright (c) 2023 Qualcomm Innovation Center, Inc. All rights reserved.
+ */
+
+/* "API" level of the ath11k testmode interface. Bump it after every
+ * incompatible interface change.
+ */
+#define ATH11K_TESTMODE_VERSION_MAJOR 1
+
+/* Bump this after every _compatible_ interface change, for example
+ * addition of a new command or an attribute.
+ */
+#define ATH11K_TESTMODE_VERSION_MINOR 1
+
+#define ATH11K_TM_DATA_MAX_LEN 5000
+#define ATH11K_FTM_EVENT_MAX_BUF_LENGTH 2048
+
+enum ath11k_tm_attr {
+ __ATH11K_TM_ATTR_INVALID = 0,
+ ATH11K_TM_ATTR_CMD = 1,
+ ATH11K_TM_ATTR_DATA = 2,
+ ATH11K_TM_ATTR_WMI_CMDID = 3,
+ ATH11K_TM_ATTR_VERSION_MAJOR = 4,
+ ATH11K_TM_ATTR_VERSION_MINOR = 5,
+ ATH11K_TM_ATTR_WMI_OP_VERSION = 6,
+
+ /* keep last */
+ __ATH11K_TM_ATTR_AFTER_LAST,
+ ATH11K_TM_ATTR_MAX = __ATH11K_TM_ATTR_AFTER_LAST - 1,
+};
+
+/* All ath11k testmode interface commands specified in
+ * ATH11K_TM_ATTR_CMD
+ */
+enum ath11k_tm_cmd {
+	/* Returns the supported ath11k testmode interface version in
+	 * ATH11K_TM_ATTR_VERSION_MAJOR/MINOR. Always guaranteed to
+	 * work. User space uses this to verify that it is using the
+	 * correct version of the testmode interface.
+	 */
+ ATH11K_TM_CMD_GET_VERSION = 0,
+
+	/* The command used to transmit a WMI command to the firmware and
+	 * the event used to receive WMI events from the firmware. The
+	 * payload is carried without the struct wmi_cmd_hdr header, i.e.
+	 * it is the bare WMI payload. The command id is provided in
+	 * ATH11K_TM_ATTR_WMI_CMDID and the payload in
+	 * ATH11K_TM_ATTR_DATA.
+	 */
+ ATH11K_TM_CMD_WMI = 1,
+
+	/* Boots the UTF firmware; the netdev interface must be down at
+	 * the time.
+	 */
+ ATH11K_TM_CMD_TESTMODE_START = 2,
+
+	/* The command used to transmit an FTM WMI command to the firmware
+	 * and the event used to receive WMI events from the firmware. The
+	 * data received contains only the payload; the driver needs to
+	 * add the TLV header and send the command to the firmware with
+	 * command id WMI_PDEV_UTF_CMDID. The payload can be large, in
+	 * which case the driver sends it to the firmware in segments.
+	 */
+ ATH11K_TM_CMD_WMI_FTM = 3,
+};
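+
+/* A minimal sketch of the nla_policy that pairs with the attributes
+ * above (the driver's actual policy lives in testmode.c as
+ * ath11k_tm_policy; the exact attribute types here are an assumption
+ * based on how the attributes are documented):
+ *
+ *	static const struct nla_policy
+ *	ath11k_tm_policy[ATH11K_TM_ATTR_MAX + 1] = {
+ *		[ATH11K_TM_ATTR_CMD]		= { .type = NLA_U32 },
+ *		[ATH11K_TM_ATTR_DATA]		= { .type = NLA_BINARY,
+ *						    .len = ATH11K_TM_DATA_MAX_LEN },
+ *		[ATH11K_TM_ATTR_WMI_CMDID]	= { .type = NLA_U32 },
+ *		[ATH11K_TM_ATTR_VERSION_MAJOR]	= { .type = NLA_U32 },
+ *		[ATH11K_TM_ATTR_VERSION_MINOR]	= { .type = NLA_U32 },
+ *	};
+ */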
diff --git a/drivers/net/wireless/ath/ath11k/thermal.c b/drivers/net/wireless/ath/ath11k/thermal.c
new file mode 100644
index 0000000000..23ed01bd44
--- /dev/null
+++ b/drivers/net/wireless/ath/ath11k/thermal.c
@@ -0,0 +1,226 @@
+// SPDX-License-Identifier: BSD-3-Clause-Clear
+/*
+ * Copyright (c) 2020 The Linux Foundation. All rights reserved.
+ */
+
+#include <linux/device.h>
+#include <linux/sysfs.h>
+#include <linux/thermal.h>
+#include <linux/hwmon.h>
+#include <linux/hwmon-sysfs.h>
+#include "core.h"
+#include "debug.h"
+
+static int
+ath11k_thermal_get_max_throttle_state(struct thermal_cooling_device *cdev,
+ unsigned long *state)
+{
+ *state = ATH11K_THERMAL_THROTTLE_MAX;
+
+ return 0;
+}
+
+static int
+ath11k_thermal_get_cur_throttle_state(struct thermal_cooling_device *cdev,
+ unsigned long *state)
+{
+ struct ath11k *ar = cdev->devdata;
+
+ mutex_lock(&ar->conf_mutex);
+ *state = ar->thermal.throttle_state;
+ mutex_unlock(&ar->conf_mutex);
+
+ return 0;
+}
+
+static int
+ath11k_thermal_set_cur_throttle_state(struct thermal_cooling_device *cdev,
+ unsigned long throttle_state)
+{
+ struct ath11k *ar = cdev->devdata;
+ int ret;
+
+ if (throttle_state > ATH11K_THERMAL_THROTTLE_MAX) {
+		ath11k_warn(ar->ab, "throttle state %ld exceeds the limit %d\n",
+			    throttle_state, ATH11K_THERMAL_THROTTLE_MAX);
+ return -EINVAL;
+ }
+ mutex_lock(&ar->conf_mutex);
+ ret = ath11k_thermal_set_throttling(ar, throttle_state);
+ if (ret == 0)
+ ar->thermal.throttle_state = throttle_state;
+ mutex_unlock(&ar->conf_mutex);
+ return ret;
+}
+
+static const struct thermal_cooling_device_ops ath11k_thermal_ops = {
+ .get_max_state = ath11k_thermal_get_max_throttle_state,
+ .get_cur_state = ath11k_thermal_get_cur_throttle_state,
+ .set_cur_state = ath11k_thermal_set_cur_throttle_state,
+};
+
+static ssize_t ath11k_thermal_show_temp(struct device *dev,
+ struct device_attribute *attr,
+ char *buf)
+{
+ struct ath11k *ar = dev_get_drvdata(dev);
+ int ret, temperature;
+ unsigned long time_left;
+
+ mutex_lock(&ar->conf_mutex);
+
+ /* Can't get temperature when the card is off */
+ if (ar->state != ATH11K_STATE_ON) {
+ ret = -ENETDOWN;
+ goto out;
+ }
+
+ reinit_completion(&ar->thermal.wmi_sync);
+ ret = ath11k_wmi_send_pdev_temperature_cmd(ar);
+ if (ret) {
+ ath11k_warn(ar->ab, "failed to read temperature %d\n", ret);
+ goto out;
+ }
+
+ if (test_bit(ATH11K_FLAG_CRASH_FLUSH, &ar->ab->dev_flags)) {
+ ret = -ESHUTDOWN;
+ goto out;
+ }
+
+ time_left = wait_for_completion_timeout(&ar->thermal.wmi_sync,
+ ATH11K_THERMAL_SYNC_TIMEOUT_HZ);
+ if (!time_left) {
+ ath11k_warn(ar->ab, "failed to synchronize thermal read\n");
+ ret = -ETIMEDOUT;
+ goto out;
+ }
+
+ spin_lock_bh(&ar->data_lock);
+ temperature = ar->thermal.temperature;
+ spin_unlock_bh(&ar->data_lock);
+
+ /* display in millidegree Celsius */
+ ret = snprintf(buf, PAGE_SIZE, "%d\n", temperature * 1000);
+out:
+ mutex_unlock(&ar->conf_mutex);
+ return ret;
+}
+
+void ath11k_thermal_event_temperature(struct ath11k *ar, int temperature)
+{
+ spin_lock_bh(&ar->data_lock);
+ ar->thermal.temperature = temperature;
+ spin_unlock_bh(&ar->data_lock);
+ complete(&ar->thermal.wmi_sync);
+}
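+
+/* The two functions above form a request/response pair: the sysfs
+ * reader arms ar->thermal.wmi_sync and fires the WMI temperature
+ * query, and the WMI event path hands the result back via complete().
+ * In outline:
+ *
+ *	reinit_completion(&ar->thermal.wmi_sync);
+ *	ath11k_wmi_send_pdev_temperature_cmd(ar);
+ *	wait_for_completion_timeout(&ar->thermal.wmi_sync,
+ *				    ATH11K_THERMAL_SYNC_TIMEOUT_HZ);
+ *	...the event path then delivers the value...
+ *	ath11k_thermal_event_temperature(ar, temperature);
+ */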
+
+static SENSOR_DEVICE_ATTR(temp1_input, 0444, ath11k_thermal_show_temp,
+ NULL, 0);
+
+static struct attribute *ath11k_hwmon_attrs[] = {
+ &sensor_dev_attr_temp1_input.dev_attr.attr,
+ NULL,
+};
+ATTRIBUTE_GROUPS(ath11k_hwmon);
+
+int ath11k_thermal_set_throttling(struct ath11k *ar, u32 throttle_state)
+{
+ struct ath11k_base *sc = ar->ab;
+ struct thermal_mitigation_params param;
+ int ret = 0;
+
+ lockdep_assert_held(&ar->conf_mutex);
+
+ if (ar->state != ATH11K_STATE_ON)
+ return 0;
+
+ memset(&param, 0, sizeof(param));
+ param.pdev_id = ar->pdev->pdev_id;
+ param.enable = throttle_state ? 1 : 0;
+ param.dc = ATH11K_THERMAL_DEFAULT_DUTY_CYCLE;
+ param.dc_per_event = 0xFFFFFFFF;
+
+ param.levelconf[0].tmplwm = ATH11K_THERMAL_TEMP_LOW_MARK;
+ param.levelconf[0].tmphwm = ATH11K_THERMAL_TEMP_HIGH_MARK;
+ param.levelconf[0].dcoffpercent = throttle_state;
+ param.levelconf[0].priority = 0; /* disable all data tx queues */
+
+ ret = ath11k_wmi_send_thermal_mitigation_param_cmd(ar, &param);
+ if (ret) {
+ ath11k_warn(sc, "failed to send thermal mitigation duty cycle %u ret %d\n",
+ throttle_state, ret);
+ }
+
+ return ret;
+}
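+
+/* A worked example of the mapping above, on the assumption that the
+ * duty cycle is expressed in milliseconds: cooling state 40 programs
+ * dcoffpercent = 40 against a 100 ms duty cycle, i.e. tx is forced
+ * off for roughly 40 ms out of every 100 ms window.
+ */
+static inline u32 ath11k_thermal_tx_off_time(u32 throttle_state)
+{
+	return ATH11K_THERMAL_DEFAULT_DUTY_CYCLE * throttle_state / 100;
+}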
+
+int ath11k_thermal_register(struct ath11k_base *sc)
+{
+ struct thermal_cooling_device *cdev;
+ struct device *hwmon_dev;
+ struct ath11k *ar;
+ struct ath11k_pdev *pdev;
+ int i, ret;
+
+ for (i = 0; i < sc->num_radios; i++) {
+ pdev = &sc->pdevs[i];
+ ar = pdev->ar;
+ if (!ar)
+ continue;
+
+ cdev = thermal_cooling_device_register("ath11k_thermal", ar,
+ &ath11k_thermal_ops);
+
+ if (IS_ERR(cdev)) {
+ ath11k_err(sc, "failed to setup thermal device result: %ld\n",
+ PTR_ERR(cdev));
+ ret = -EINVAL;
+ goto err_thermal_destroy;
+ }
+
+ ar->thermal.cdev = cdev;
+
+ ret = sysfs_create_link(&ar->hw->wiphy->dev.kobj, &cdev->device.kobj,
+ "cooling_device");
+ if (ret) {
+ ath11k_err(sc, "failed to create cooling device symlink\n");
+ goto err_thermal_destroy;
+ }
+
+ if (!IS_REACHABLE(CONFIG_HWMON))
+ return 0;
+
+ hwmon_dev = devm_hwmon_device_register_with_groups(&ar->hw->wiphy->dev,
+ "ath11k_hwmon", ar,
+ ath11k_hwmon_groups);
+ if (IS_ERR(hwmon_dev)) {
+ ath11k_err(ar->ab, "failed to register hwmon device: %ld\n",
+ PTR_ERR(hwmon_dev));
+ ret = -EINVAL;
+ goto err_thermal_destroy;
+ }
+ }
+
+ return 0;
+
+err_thermal_destroy:
+ ath11k_thermal_unregister(sc);
+ return ret;
+}
+
+void ath11k_thermal_unregister(struct ath11k_base *sc)
+{
+ struct ath11k *ar;
+ struct ath11k_pdev *pdev;
+ int i;
+
+ for (i = 0; i < sc->num_radios; i++) {
+ pdev = &sc->pdevs[i];
+ ar = pdev->ar;
+ if (!ar)
+ continue;
+
+ sysfs_remove_link(&ar->hw->wiphy->dev.kobj, "cooling_device");
+ thermal_cooling_device_unregister(ar->thermal.cdev);
+ }
+}
diff --git a/drivers/net/wireless/ath/ath11k/thermal.h b/drivers/net/wireless/ath/ath11k/thermal.h
new file mode 100644
index 0000000000..3e39675ef7
--- /dev/null
+++ b/drivers/net/wireless/ath/ath11k/thermal.h
@@ -0,0 +1,54 @@
+/* SPDX-License-Identifier: BSD-3-Clause-Clear */
+/*
+ * Copyright (c) 2020 The Linux Foundation. All rights reserved.
+ */
+
+#ifndef _ATH11K_THERMAL_
+#define _ATH11K_THERMAL_
+
+#define ATH11K_THERMAL_TEMP_LOW_MARK -100
+#define ATH11K_THERMAL_TEMP_HIGH_MARK 150
+#define ATH11K_THERMAL_THROTTLE_MAX 100
+#define ATH11K_THERMAL_DEFAULT_DUTY_CYCLE 100
+#define ATH11K_HWMON_NAME_LEN 15
+#define ATH11K_THERMAL_SYNC_TIMEOUT_HZ (5 * HZ)
+
+struct ath11k_thermal {
+ struct thermal_cooling_device *cdev;
+ struct completion wmi_sync;
+
+ /* protected by conf_mutex */
+ u32 throttle_state;
+	/* temperature value in degrees Celsius,
+	 * protected by data_lock
+	 */
+ int temperature;
+};
+
+#if IS_REACHABLE(CONFIG_THERMAL)
+int ath11k_thermal_register(struct ath11k_base *sc);
+void ath11k_thermal_unregister(struct ath11k_base *sc);
+int ath11k_thermal_set_throttling(struct ath11k *ar, u32 throttle_state);
+void ath11k_thermal_event_temperature(struct ath11k *ar, int temperature);
+#else
+static inline int ath11k_thermal_register(struct ath11k_base *sc)
+{
+ return 0;
+}
+
+static inline void ath11k_thermal_unregister(struct ath11k_base *sc)
+{
+}
+
+static inline int ath11k_thermal_set_throttling(struct ath11k *ar, u32 throttle_state)
+{
+ return 0;
+}
+
+static inline void ath11k_thermal_event_temperature(struct ath11k *ar,
+ int temperature)
+{
+}
+
+#endif
+#endif /* _ATH11K_THERMAL_ */
diff --git a/drivers/net/wireless/ath/ath11k/trace.c b/drivers/net/wireless/ath/ath11k/trace.c
new file mode 100644
index 0000000000..6620650d78
--- /dev/null
+++ b/drivers/net/wireless/ath/ath11k/trace.c
@@ -0,0 +1,10 @@
+// SPDX-License-Identifier: BSD-3-Clause-Clear
+/*
+ * Copyright (c) 2019 The Linux Foundation. All rights reserved.
+ */
+
+#include <linux/module.h>
+
+#define CREATE_TRACE_POINTS
+#include "trace.h"
+EXPORT_SYMBOL(__tracepoint_ath11k_log_dbg);
diff --git a/drivers/net/wireless/ath/ath11k/trace.h b/drivers/net/wireless/ath/ath11k/trace.h
new file mode 100644
index 0000000000..9535745fe0
--- /dev/null
+++ b/drivers/net/wireless/ath/ath11k/trace.h
@@ -0,0 +1,345 @@
+/* SPDX-License-Identifier: BSD-3-Clause-Clear */
+/*
+ * Copyright (c) 2019 The Linux Foundation. All rights reserved.
+ */
+
+#if !defined(_TRACE_H_) || defined(TRACE_HEADER_MULTI_READ)
+
+#include <linux/tracepoint.h>
+#include "core.h"
+
+#define _TRACE_H_
+
+/* create empty functions when tracing is disabled */
+#if !defined(CONFIG_ATH11K_TRACING)
+#undef TRACE_EVENT
+#define TRACE_EVENT(name, proto, ...) \
+static inline void trace_ ## name(proto) {} \
+static inline bool trace_##name##_enabled(void) \
+{ \
+ return false; \
+}
+
+#undef DECLARE_EVENT_CLASS
+#define DECLARE_EVENT_CLASS(...)
+#undef DEFINE_EVENT
+#define DEFINE_EVENT(evt_class, name, proto, ...) \
+static inline void trace_ ## name(proto) {}
+#endif /* !CONFIG_ATH11K_TRACING */
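+
+/* With the stubs above, call sites compile unchanged whether tracing
+ * is enabled or not, e.g.:
+ *
+ *	trace_ath11k_wmi_cmd(ab, cmd_id, skb->data, skb->len);
+ *	if (trace_ath11k_log_dbg_enabled())
+ *		...only do expensive formatting when the tracepoint is on...
+ */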
+
+#undef TRACE_SYSTEM
+#define TRACE_SYSTEM ath11k
+
+#define ATH11K_MSG_MAX 400
+
+TRACE_EVENT(ath11k_htt_pktlog,
+ TP_PROTO(struct ath11k *ar, const void *buf, u16 buf_len,
+ u32 pktlog_checksum),
+
+ TP_ARGS(ar, buf, buf_len, pktlog_checksum),
+
+ TP_STRUCT__entry(
+ __string(device, dev_name(ar->ab->dev))
+ __string(driver, dev_driver_string(ar->ab->dev))
+ __field(u16, buf_len)
+ __field(u32, pktlog_checksum)
+ __dynamic_array(u8, pktlog, buf_len)
+ ),
+
+ TP_fast_assign(
+ __assign_str(device, dev_name(ar->ab->dev));
+ __assign_str(driver, dev_driver_string(ar->ab->dev));
+ __entry->buf_len = buf_len;
+ __entry->pktlog_checksum = pktlog_checksum;
+ memcpy(__get_dynamic_array(pktlog), buf, buf_len);
+ ),
+
+ TP_printk(
+ "%s %s size %u pktlog_checksum %d",
+ __get_str(driver),
+ __get_str(device),
+ __entry->buf_len,
+ __entry->pktlog_checksum
+ )
+);
+
+TRACE_EVENT(ath11k_htt_ppdu_stats,
+ TP_PROTO(struct ath11k *ar, const void *data, size_t len),
+
+ TP_ARGS(ar, data, len),
+
+ TP_STRUCT__entry(
+ __string(device, dev_name(ar->ab->dev))
+ __string(driver, dev_driver_string(ar->ab->dev))
+ __field(u16, len)
+ __dynamic_array(u8, ppdu, len)
+ ),
+
+ TP_fast_assign(
+ __assign_str(device, dev_name(ar->ab->dev));
+ __assign_str(driver, dev_driver_string(ar->ab->dev));
+ __entry->len = len;
+ memcpy(__get_dynamic_array(ppdu), data, len);
+ ),
+
+ TP_printk(
+ "%s %s ppdu len %d",
+ __get_str(driver),
+ __get_str(device),
+ __entry->len
+ )
+);
+
+TRACE_EVENT(ath11k_htt_rxdesc,
+ TP_PROTO(struct ath11k *ar, const void *data, size_t log_type, size_t len),
+
+ TP_ARGS(ar, data, log_type, len),
+
+ TP_STRUCT__entry(
+ __string(device, dev_name(ar->ab->dev))
+ __string(driver, dev_driver_string(ar->ab->dev))
+ __field(u16, len)
+ __field(u16, log_type)
+ __dynamic_array(u8, rxdesc, len)
+ ),
+
+ TP_fast_assign(
+ __assign_str(device, dev_name(ar->ab->dev));
+ __assign_str(driver, dev_driver_string(ar->ab->dev));
+ __entry->len = len;
+ __entry->log_type = log_type;
+ memcpy(__get_dynamic_array(rxdesc), data, len);
+ ),
+
+ TP_printk(
+ "%s %s rxdesc len %d type %d",
+ __get_str(driver),
+ __get_str(device),
+ __entry->len,
+ __entry->log_type
+ )
+);
+
+DECLARE_EVENT_CLASS(ath11k_log_event,
+ TP_PROTO(struct ath11k_base *ab, struct va_format *vaf),
+ TP_ARGS(ab, vaf),
+ TP_STRUCT__entry(
+ __string(device, dev_name(ab->dev))
+ __string(driver, dev_driver_string(ab->dev))
+ __vstring(msg, vaf->fmt, vaf->va)
+ ),
+ TP_fast_assign(
+ __assign_str(device, dev_name(ab->dev));
+ __assign_str(driver, dev_driver_string(ab->dev));
+ __assign_vstr(msg, vaf->fmt, vaf->va);
+ ),
+ TP_printk(
+ "%s %s %s",
+ __get_str(driver),
+ __get_str(device),
+ __get_str(msg)
+ )
+);
+
+DEFINE_EVENT(ath11k_log_event, ath11k_log_err,
+ TP_PROTO(struct ath11k_base *ab, struct va_format *vaf),
+ TP_ARGS(ab, vaf)
+);
+
+DEFINE_EVENT(ath11k_log_event, ath11k_log_warn,
+ TP_PROTO(struct ath11k_base *ab, struct va_format *vaf),
+ TP_ARGS(ab, vaf)
+);
+
+DEFINE_EVENT(ath11k_log_event, ath11k_log_info,
+ TP_PROTO(struct ath11k_base *ab, struct va_format *vaf),
+ TP_ARGS(ab, vaf)
+);
+
+TRACE_EVENT(ath11k_wmi_cmd,
+ TP_PROTO(struct ath11k_base *ab, int id, const void *buf, size_t buf_len),
+
+ TP_ARGS(ab, id, buf, buf_len),
+
+ TP_STRUCT__entry(
+ __string(device, dev_name(ab->dev))
+ __string(driver, dev_driver_string(ab->dev))
+ __field(unsigned int, id)
+ __field(size_t, buf_len)
+ __dynamic_array(u8, buf, buf_len)
+ ),
+
+ TP_fast_assign(
+ __assign_str(device, dev_name(ab->dev));
+ __assign_str(driver, dev_driver_string(ab->dev));
+ __entry->id = id;
+ __entry->buf_len = buf_len;
+ memcpy(__get_dynamic_array(buf), buf, buf_len);
+ ),
+
+ TP_printk(
+ "%s %s id %d len %zu",
+ __get_str(driver),
+ __get_str(device),
+ __entry->id,
+ __entry->buf_len
+ )
+);
+
+TRACE_EVENT(ath11k_wmi_event,
+ TP_PROTO(struct ath11k_base *ab, int id, const void *buf, size_t buf_len),
+
+ TP_ARGS(ab, id, buf, buf_len),
+
+ TP_STRUCT__entry(
+ __string(device, dev_name(ab->dev))
+ __string(driver, dev_driver_string(ab->dev))
+ __field(unsigned int, id)
+ __field(size_t, buf_len)
+ __dynamic_array(u8, buf, buf_len)
+ ),
+
+ TP_fast_assign(
+ __assign_str(device, dev_name(ab->dev));
+ __assign_str(driver, dev_driver_string(ab->dev));
+ __entry->id = id;
+ __entry->buf_len = buf_len;
+ memcpy(__get_dynamic_array(buf), buf, buf_len);
+ ),
+
+ TP_printk(
+ "%s %s id %d len %zu",
+ __get_str(driver),
+ __get_str(device),
+ __entry->id,
+ __entry->buf_len
+ )
+);
+
+TRACE_EVENT(ath11k_log_dbg,
+ TP_PROTO(struct ath11k_base *ab, unsigned int level, struct va_format *vaf),
+
+ TP_ARGS(ab, level, vaf),
+
+ TP_STRUCT__entry(
+ __string(device, dev_name(ab->dev))
+ __string(driver, dev_driver_string(ab->dev))
+ __field(unsigned int, level)
+ __dynamic_array(char, msg, ATH11K_MSG_MAX)
+ ),
+
+ TP_fast_assign(
+ __assign_str(device, dev_name(ab->dev));
+ __assign_str(driver, dev_driver_string(ab->dev));
+ __entry->level = level;
+ WARN_ON_ONCE(vsnprintf(__get_dynamic_array(msg),
+ ATH11K_MSG_MAX, vaf->fmt,
+ *vaf->va) >= ATH11K_MSG_MAX);
+ ),
+
+ TP_printk(
+ "%s %s %s",
+ __get_str(driver),
+ __get_str(device),
+ __get_str(msg)
+ )
+);
+
+TRACE_EVENT(ath11k_log_dbg_dump,
+ TP_PROTO(struct ath11k_base *ab, const char *msg, const char *prefix,
+ const void *buf, size_t buf_len),
+
+ TP_ARGS(ab, msg, prefix, buf, buf_len),
+
+ TP_STRUCT__entry(
+ __string(device, dev_name(ab->dev))
+ __string(driver, dev_driver_string(ab->dev))
+ __string(msg, msg)
+ __string(prefix, prefix)
+ __field(size_t, buf_len)
+ __dynamic_array(u8, buf, buf_len)
+ ),
+
+ TP_fast_assign(
+ __assign_str(device, dev_name(ab->dev));
+ __assign_str(driver, dev_driver_string(ab->dev));
+ __assign_str(msg, msg);
+ __assign_str(prefix, prefix);
+ __entry->buf_len = buf_len;
+ memcpy(__get_dynamic_array(buf), buf, buf_len);
+ ),
+
+ TP_printk(
+ "%s %s %s/%s\n",
+ __get_str(driver),
+ __get_str(device),
+ __get_str(prefix),
+ __get_str(msg)
+ )
+);
+
+TRACE_EVENT(ath11k_wmi_diag,
+ TP_PROTO(struct ath11k_base *ab, const void *data, size_t len),
+
+ TP_ARGS(ab, data, len),
+
+ TP_STRUCT__entry(
+ __string(device, dev_name(ab->dev))
+ __string(driver, dev_driver_string(ab->dev))
+ __field(u16, len)
+ __dynamic_array(u8, data, len)
+ ),
+
+ TP_fast_assign(
+ __assign_str(device, dev_name(ab->dev));
+ __assign_str(driver, dev_driver_string(ab->dev));
+ __entry->len = len;
+ memcpy(__get_dynamic_array(data), data, len);
+ ),
+
+ TP_printk(
+ "%s %s tlv diag len %d",
+ __get_str(driver),
+ __get_str(device),
+ __entry->len
+ )
+);
+
+TRACE_EVENT(ath11k_ps_timekeeper,
+ TP_PROTO(struct ath11k *ar, const void *peer_addr,
+ u32 peer_ps_timestamp, u8 peer_ps_state),
+ TP_ARGS(ar, peer_addr, peer_ps_timestamp, peer_ps_state),
+
+ TP_STRUCT__entry(__string(device, dev_name(ar->ab->dev))
+ __string(driver, dev_driver_string(ar->ab->dev))
+ __dynamic_array(u8, peer_addr, ETH_ALEN)
+ __field(u8, peer_ps_state)
+ __field(u32, peer_ps_timestamp)
+ ),
+
+ TP_fast_assign(__assign_str(device, dev_name(ar->ab->dev));
+ __assign_str(driver, dev_driver_string(ar->ab->dev));
+ memcpy(__get_dynamic_array(peer_addr), peer_addr,
+ ETH_ALEN);
+ __entry->peer_ps_state = peer_ps_state;
+ __entry->peer_ps_timestamp = peer_ps_timestamp;
+ ),
+
+ TP_printk("%s %s %u %u",
+ __get_str(driver),
+ __get_str(device),
+ __entry->peer_ps_state,
+ __entry->peer_ps_timestamp
+ )
+);
+
+#endif /* _TRACE_H_ || TRACE_HEADER_MULTI_READ */
+
+/* we don't want to use include/trace/events */
+#undef TRACE_INCLUDE_PATH
+#define TRACE_INCLUDE_PATH .
+#undef TRACE_INCLUDE_FILE
+#define TRACE_INCLUDE_FILE trace
+
+/* This part must be outside protection */
+#include <trace/define_trace.h>
diff --git a/drivers/net/wireless/ath/ath11k/wmi.c b/drivers/net/wireless/ath/ath11k/wmi.c
new file mode 100644
index 0000000000..1c07f55c25
--- /dev/null
+++ b/drivers/net/wireless/ath/ath11k/wmi.c
@@ -0,0 +1,9796 @@
+// SPDX-License-Identifier: BSD-3-Clause-Clear
+/*
+ * Copyright (c) 2018-2019 The Linux Foundation. All rights reserved.
+ * Copyright (c) 2021, 2023 Qualcomm Innovation Center, Inc. All rights reserved.
+ */
+#include <linux/skbuff.h>
+#include <linux/ctype.h>
+#include <net/mac80211.h>
+#include <net/cfg80211.h>
+#include <linux/completion.h>
+#include <linux/if_ether.h>
+#include <linux/types.h>
+#include <linux/pci.h>
+#include <linux/uuid.h>
+#include <linux/time.h>
+#include <linux/of.h>
+#include "core.h"
+#include "debug.h"
+#include "mac.h"
+#include "hw.h"
+#include "peer.h"
+#include "testmode.h"
+
+struct wmi_tlv_policy {
+ size_t min_len;
+};
+
+struct wmi_tlv_svc_ready_parse {
+ bool wmi_svc_bitmap_done;
+};
+
+struct wmi_tlv_dma_ring_caps_parse {
+ struct wmi_dma_ring_capabilities *dma_ring_caps;
+ u32 n_dma_ring_caps;
+};
+
+struct wmi_tlv_svc_rdy_ext_parse {
+ struct ath11k_service_ext_param param;
+ struct wmi_soc_mac_phy_hw_mode_caps *hw_caps;
+ struct wmi_hw_mode_capabilities *hw_mode_caps;
+ u32 n_hw_mode_caps;
+ u32 tot_phy_id;
+ struct wmi_hw_mode_capabilities pref_hw_mode_caps;
+ struct wmi_mac_phy_capabilities *mac_phy_caps;
+ u32 n_mac_phy_caps;
+ struct wmi_soc_hal_reg_capabilities *soc_hal_reg_caps;
+ struct wmi_hal_reg_capabilities_ext *ext_hal_reg_caps;
+ u32 n_ext_hal_reg_caps;
+ struct wmi_tlv_dma_ring_caps_parse dma_caps_parse;
+ bool hw_mode_done;
+ bool mac_phy_done;
+ bool ext_hal_reg_done;
+ bool mac_phy_chainmask_combo_done;
+ bool mac_phy_chainmask_cap_done;
+ bool oem_dma_ring_cap_done;
+ bool dma_ring_cap_done;
+};
+
+struct wmi_tlv_svc_rdy_ext2_parse {
+ struct wmi_tlv_dma_ring_caps_parse dma_caps_parse;
+ bool dma_ring_cap_done;
+};
+
+struct wmi_tlv_rdy_parse {
+ u32 num_extra_mac_addr;
+};
+
+struct wmi_tlv_dma_buf_release_parse {
+ struct ath11k_wmi_dma_buf_release_fixed_param fixed;
+ struct wmi_dma_buf_release_entry *buf_entry;
+ struct wmi_dma_buf_release_meta_data *meta_data;
+ u32 num_buf_entry;
+ u32 num_meta;
+ bool buf_entry_done;
+ bool meta_data_done;
+};
+
+struct wmi_tlv_fw_stats_parse {
+ const struct wmi_stats_event *ev;
+ const struct wmi_per_chain_rssi_stats *rssi;
+ struct ath11k_fw_stats *stats;
+ int rssi_num;
+ bool chain_rssi_done;
+};
+
+struct wmi_tlv_mgmt_rx_parse {
+ const struct wmi_mgmt_rx_hdr *fixed;
+ const u8 *frame_buf;
+ bool frame_buf_done;
+};
+
+static const struct wmi_tlv_policy wmi_tlv_policies[] = {
+ [WMI_TAG_ARRAY_BYTE]
+ = { .min_len = 0 },
+ [WMI_TAG_ARRAY_UINT32]
+ = { .min_len = 0 },
+ [WMI_TAG_SERVICE_READY_EVENT]
+ = { .min_len = sizeof(struct wmi_service_ready_event) },
+ [WMI_TAG_SERVICE_READY_EXT_EVENT]
+ = { .min_len = sizeof(struct wmi_service_ready_ext_event) },
+ [WMI_TAG_SOC_MAC_PHY_HW_MODE_CAPS]
+ = { .min_len = sizeof(struct wmi_soc_mac_phy_hw_mode_caps) },
+ [WMI_TAG_SOC_HAL_REG_CAPABILITIES]
+ = { .min_len = sizeof(struct wmi_soc_hal_reg_capabilities) },
+ [WMI_TAG_VDEV_START_RESPONSE_EVENT]
+ = { .min_len = sizeof(struct wmi_vdev_start_resp_event) },
+ [WMI_TAG_PEER_DELETE_RESP_EVENT]
+ = { .min_len = sizeof(struct wmi_peer_delete_resp_event) },
+ [WMI_TAG_OFFLOAD_BCN_TX_STATUS_EVENT]
+ = { .min_len = sizeof(struct wmi_bcn_tx_status_event) },
+ [WMI_TAG_VDEV_STOPPED_EVENT]
+ = { .min_len = sizeof(struct wmi_vdev_stopped_event) },
+ [WMI_TAG_REG_CHAN_LIST_CC_EVENT]
+ = { .min_len = sizeof(struct wmi_reg_chan_list_cc_event) },
+ [WMI_TAG_REG_CHAN_LIST_CC_EXT_EVENT]
+ = { .min_len = sizeof(struct wmi_reg_chan_list_cc_ext_event) },
+ [WMI_TAG_MGMT_RX_HDR]
+ = { .min_len = sizeof(struct wmi_mgmt_rx_hdr) },
+ [WMI_TAG_MGMT_TX_COMPL_EVENT]
+ = { .min_len = sizeof(struct wmi_mgmt_tx_compl_event) },
+ [WMI_TAG_SCAN_EVENT]
+ = { .min_len = sizeof(struct wmi_scan_event) },
+ [WMI_TAG_PEER_STA_KICKOUT_EVENT]
+ = { .min_len = sizeof(struct wmi_peer_sta_kickout_event) },
+ [WMI_TAG_ROAM_EVENT]
+ = { .min_len = sizeof(struct wmi_roam_event) },
+ [WMI_TAG_CHAN_INFO_EVENT]
+ = { .min_len = sizeof(struct wmi_chan_info_event) },
+ [WMI_TAG_PDEV_BSS_CHAN_INFO_EVENT]
+ = { .min_len = sizeof(struct wmi_pdev_bss_chan_info_event) },
+ [WMI_TAG_VDEV_INSTALL_KEY_COMPLETE_EVENT]
+ = { .min_len = sizeof(struct wmi_vdev_install_key_compl_event) },
+ [WMI_TAG_READY_EVENT] = {
+ .min_len = sizeof(struct wmi_ready_event_min) },
+ [WMI_TAG_SERVICE_AVAILABLE_EVENT]
+ = {.min_len = sizeof(struct wmi_service_available_event) },
+ [WMI_TAG_PEER_ASSOC_CONF_EVENT]
+ = { .min_len = sizeof(struct wmi_peer_assoc_conf_event) },
+ [WMI_TAG_STATS_EVENT]
+ = { .min_len = sizeof(struct wmi_stats_event) },
+ [WMI_TAG_PDEV_CTL_FAILSAFE_CHECK_EVENT]
+ = { .min_len = sizeof(struct wmi_pdev_ctl_failsafe_chk_event) },
+ [WMI_TAG_HOST_SWFDA_EVENT] = {
+ .min_len = sizeof(struct wmi_fils_discovery_event) },
+ [WMI_TAG_OFFLOAD_PRB_RSP_TX_STATUS_EVENT] = {
+ .min_len = sizeof(struct wmi_probe_resp_tx_status_event) },
+ [WMI_TAG_VDEV_DELETE_RESP_EVENT] = {
+ .min_len = sizeof(struct wmi_vdev_delete_resp_event) },
+ [WMI_TAG_OBSS_COLOR_COLLISION_EVT] = {
+ .min_len = sizeof(struct wmi_obss_color_collision_event) },
+ [WMI_TAG_11D_NEW_COUNTRY_EVENT] = {
+ .min_len = sizeof(struct wmi_11d_new_cc_ev) },
+ [WMI_TAG_PER_CHAIN_RSSI_STATS] = {
+ .min_len = sizeof(struct wmi_per_chain_rssi_stats) },
+ [WMI_TAG_TWT_ADD_DIALOG_COMPLETE_EVENT] = {
+ .min_len = sizeof(struct wmi_twt_add_dialog_event) },
+};
+
+#define PRIMAP(_hw_mode_) \
+ [_hw_mode_] = _hw_mode_##_PRI
+
+static const int ath11k_hw_mode_pri_map[] = {
+ PRIMAP(WMI_HOST_HW_MODE_SINGLE),
+ PRIMAP(WMI_HOST_HW_MODE_DBS),
+ PRIMAP(WMI_HOST_HW_MODE_SBS_PASSIVE),
+ PRIMAP(WMI_HOST_HW_MODE_SBS),
+ PRIMAP(WMI_HOST_HW_MODE_DBS_SBS),
+ PRIMAP(WMI_HOST_HW_MODE_DBS_OR_SBS),
+ /* keep last */
+ PRIMAP(WMI_HOST_HW_MODE_MAX),
+};
+
+static int
+ath11k_wmi_tlv_iter(struct ath11k_base *ab, const void *ptr, size_t len,
+ int (*iter)(struct ath11k_base *ab, u16 tag, u16 len,
+ const void *ptr, void *data),
+ void *data)
+{
+ const void *begin = ptr;
+ const struct wmi_tlv *tlv;
+ u16 tlv_tag, tlv_len;
+ int ret;
+
+ while (len > 0) {
+ if (len < sizeof(*tlv)) {
+ ath11k_err(ab, "wmi tlv parse failure at byte %zd (%zu bytes left, %zu expected)\n",
+ ptr - begin, len, sizeof(*tlv));
+ return -EINVAL;
+ }
+
+ tlv = ptr;
+ tlv_tag = FIELD_GET(WMI_TLV_TAG, tlv->header);
+ tlv_len = FIELD_GET(WMI_TLV_LEN, tlv->header);
+ ptr += sizeof(*tlv);
+ len -= sizeof(*tlv);
+
+ if (tlv_len > len) {
+ ath11k_err(ab, "wmi tlv parse failure of tag %u at byte %zd (%zu bytes left, %u expected)\n",
+ tlv_tag, ptr - begin, len, tlv_len);
+ return -EINVAL;
+ }
+
+ if (tlv_tag < ARRAY_SIZE(wmi_tlv_policies) &&
+ wmi_tlv_policies[tlv_tag].min_len &&
+ wmi_tlv_policies[tlv_tag].min_len > tlv_len) {
+ ath11k_err(ab, "wmi tlv parse failure of tag %u at byte %zd (%u bytes is less than min length %zu)\n",
+ tlv_tag, ptr - begin, tlv_len,
+ wmi_tlv_policies[tlv_tag].min_len);
+ return -EINVAL;
+ }
+
+ ret = iter(ab, tlv_tag, tlv_len, ptr, data);
+ if (ret)
+ return ret;
+
+ ptr += tlv_len;
+ len -= tlv_len;
+ }
+
+ return 0;
+}
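+
+/* A minimal sketch of the inverse operation, building one TLV element
+ * in the stream format walked above (the helper name is illustrative;
+ * the driver open-codes this FIELD_PREP pattern at each call site):
+ */
+static inline void *ath11k_wmi_tlv_put_sketch(void *ptr, u16 tag, u16 len)
+{
+	struct wmi_tlv *tlv = ptr;
+
+	tlv->header = FIELD_PREP(WMI_TLV_TAG, tag) |
+		      FIELD_PREP(WMI_TLV_LEN, len);
+
+	/* value bytes follow the 4-byte header; return the next slot */
+	return ptr + sizeof(*tlv) + len;
+}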
+
+static int ath11k_wmi_tlv_iter_parse(struct ath11k_base *ab, u16 tag, u16 len,
+ const void *ptr, void *data)
+{
+ const void **tb = data;
+
+ if (tag < WMI_TAG_MAX)
+ tb[tag] = ptr;
+
+ return 0;
+}
+
+static int ath11k_wmi_tlv_parse(struct ath11k_base *ar, const void **tb,
+ const void *ptr, size_t len)
+{
+ return ath11k_wmi_tlv_iter(ar, ptr, len, ath11k_wmi_tlv_iter_parse,
+ (void *)tb);
+}
+
+const void **ath11k_wmi_tlv_parse_alloc(struct ath11k_base *ab, const void *ptr,
+ size_t len, gfp_t gfp)
+{
+ const void **tb;
+ int ret;
+
+ tb = kcalloc(WMI_TAG_MAX, sizeof(*tb), gfp);
+ if (!tb)
+ return ERR_PTR(-ENOMEM);
+
+ ret = ath11k_wmi_tlv_parse(ab, tb, ptr, len);
+ if (ret) {
+ kfree(tb);
+ return ERR_PTR(ret);
+ }
+
+ return tb;
+}
+
+static int ath11k_wmi_cmd_send_nowait(struct ath11k_pdev_wmi *wmi, struct sk_buff *skb,
+ u32 cmd_id)
+{
+ struct ath11k_skb_cb *skb_cb = ATH11K_SKB_CB(skb);
+ struct ath11k_base *ab = wmi->wmi_ab->ab;
+ struct wmi_cmd_hdr *cmd_hdr;
+ int ret;
+ u32 cmd = 0;
+
+ if (skb_push(skb, sizeof(struct wmi_cmd_hdr)) == NULL)
+ return -ENOMEM;
+
+ cmd |= FIELD_PREP(WMI_CMD_HDR_CMD_ID, cmd_id);
+
+ cmd_hdr = (struct wmi_cmd_hdr *)skb->data;
+ cmd_hdr->cmd_id = cmd;
+
+ trace_ath11k_wmi_cmd(ab, cmd_id, skb->data, skb->len);
+
+ memset(skb_cb, 0, sizeof(*skb_cb));
+ ret = ath11k_htc_send(&ab->htc, wmi->eid, skb);
+
+ if (ret)
+ goto err_pull;
+
+ return 0;
+
+err_pull:
+ skb_pull(skb, sizeof(struct wmi_cmd_hdr));
+ return ret;
+}
+
+int ath11k_wmi_cmd_send(struct ath11k_pdev_wmi *wmi, struct sk_buff *skb,
+ u32 cmd_id)
+{
+ struct ath11k_wmi_base *wmi_sc = wmi->wmi_ab;
+ int ret = -EOPNOTSUPP;
+ struct ath11k_base *ab = wmi_sc->ab;
+
+ might_sleep();
+
+ if (ab->hw_params.credit_flow) {
+ wait_event_timeout(wmi_sc->tx_credits_wq, ({
+ ret = ath11k_wmi_cmd_send_nowait(wmi, skb, cmd_id);
+
+ if (ret && test_bit(ATH11K_FLAG_CRASH_FLUSH,
+ &wmi_sc->ab->dev_flags))
+ ret = -ESHUTDOWN;
+
+ (ret != -EAGAIN);
+ }), WMI_SEND_TIMEOUT_HZ);
+ } else {
+ wait_event_timeout(wmi->tx_ce_desc_wq, ({
+ ret = ath11k_wmi_cmd_send_nowait(wmi, skb, cmd_id);
+
+ if (ret && test_bit(ATH11K_FLAG_CRASH_FLUSH,
+ &wmi_sc->ab->dev_flags))
+ ret = -ESHUTDOWN;
+
+ (ret != -ENOBUFS);
+ }), WMI_SEND_TIMEOUT_HZ);
+ }
+
+ if (ret == -EAGAIN)
+ ath11k_warn(wmi_sc->ab, "wmi command %d timeout\n", cmd_id);
+
+ if (ret == -ENOBUFS)
+ ath11k_warn(wmi_sc->ab, "ce desc not available for wmi command %d\n",
+ cmd_id);
+
+ return ret;
+}
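+
+/* Note on the pattern above: wait_event_timeout() re-evaluates the
+ * statement-expression condition on every wakeup, so the send is
+ * retried until it stops failing with -EAGAIN (out of tx credits) or
+ * -ENOBUFS (no CE descriptors), succeeds, or the timeout expires with
+ * the last error left in ret.
+ */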
+
+static int ath11k_pull_svc_ready_ext(struct ath11k_pdev_wmi *wmi_handle,
+ const void *ptr,
+ struct ath11k_service_ext_param *param)
+{
+ const struct wmi_service_ready_ext_event *ev = ptr;
+
+ if (!ev)
+ return -EINVAL;
+
+	/* Move this to a host-based bitmap */
+ param->default_conc_scan_config_bits = ev->default_conc_scan_config_bits;
+ param->default_fw_config_bits = ev->default_fw_config_bits;
+ param->he_cap_info = ev->he_cap_info;
+ param->mpdu_density = ev->mpdu_density;
+ param->max_bssid_rx_filters = ev->max_bssid_rx_filters;
+ memcpy(&param->ppet, &ev->ppet, sizeof(param->ppet));
+
+ return 0;
+}
+
+static int
+ath11k_pull_mac_phy_cap_svc_ready_ext(struct ath11k_pdev_wmi *wmi_handle,
+ struct wmi_soc_mac_phy_hw_mode_caps *hw_caps,
+ struct wmi_hw_mode_capabilities *wmi_hw_mode_caps,
+ struct wmi_soc_hal_reg_capabilities *hal_reg_caps,
+ struct wmi_mac_phy_capabilities *wmi_mac_phy_caps,
+ u8 hw_mode_id, u8 phy_id,
+ struct ath11k_pdev *pdev)
+{
+ struct wmi_mac_phy_capabilities *mac_phy_caps;
+ struct ath11k_base *ab = wmi_handle->wmi_ab->ab;
+ struct ath11k_band_cap *cap_band;
+ struct ath11k_pdev_cap *pdev_cap = &pdev->cap;
+ u32 phy_map;
+ u32 hw_idx, phy_idx = 0;
+
+ if (!hw_caps || !wmi_hw_mode_caps || !hal_reg_caps)
+ return -EINVAL;
+
+ for (hw_idx = 0; hw_idx < hw_caps->num_hw_modes; hw_idx++) {
+ if (hw_mode_id == wmi_hw_mode_caps[hw_idx].hw_mode_id)
+ break;
+
+ phy_map = wmi_hw_mode_caps[hw_idx].phy_id_map;
+ while (phy_map) {
+ phy_map >>= 1;
+ phy_idx++;
+ }
+ }
+
+ if (hw_idx == hw_caps->num_hw_modes)
+ return -EINVAL;
+
+ phy_idx += phy_id;
+ if (phy_id >= hal_reg_caps->num_phy)
+ return -EINVAL;
+
+ mac_phy_caps = wmi_mac_phy_caps + phy_idx;
+
+ pdev->pdev_id = mac_phy_caps->pdev_id;
+ pdev_cap->supported_bands |= mac_phy_caps->supported_bands;
+ pdev_cap->ampdu_density = mac_phy_caps->ampdu_density;
+ ab->target_pdev_ids[ab->target_pdev_count].supported_bands =
+ mac_phy_caps->supported_bands;
+ ab->target_pdev_ids[ab->target_pdev_count].pdev_id = mac_phy_caps->pdev_id;
+ ab->target_pdev_count++;
+
+ if (!(mac_phy_caps->supported_bands & WMI_HOST_WLAN_2G_CAP) &&
+ !(mac_phy_caps->supported_bands & WMI_HOST_WLAN_5G_CAP))
+ return -EINVAL;
+
+	/* Take the non-zero tx/rx chainmask. If the tx/rx chainmask
+	 * differs from band to band for a single radio, it remains to be
+	 * seen how this should be handled.
+	 */
+ if (mac_phy_caps->supported_bands & WMI_HOST_WLAN_2G_CAP) {
+ pdev_cap->tx_chain_mask = mac_phy_caps->tx_chain_mask_2g;
+ pdev_cap->rx_chain_mask = mac_phy_caps->rx_chain_mask_2g;
+ }
+
+ if (mac_phy_caps->supported_bands & WMI_HOST_WLAN_5G_CAP) {
+ pdev_cap->vht_cap = mac_phy_caps->vht_cap_info_5g;
+ pdev_cap->vht_mcs = mac_phy_caps->vht_supp_mcs_5g;
+ pdev_cap->he_mcs = mac_phy_caps->he_supp_mcs_5g;
+ pdev_cap->tx_chain_mask = mac_phy_caps->tx_chain_mask_5g;
+ pdev_cap->rx_chain_mask = mac_phy_caps->rx_chain_mask_5g;
+ pdev_cap->nss_ratio_enabled =
+ WMI_NSS_RATIO_ENABLE_DISABLE_GET(mac_phy_caps->nss_ratio);
+ pdev_cap->nss_ratio_info =
+ WMI_NSS_RATIO_INFO_GET(mac_phy_caps->nss_ratio);
+ }
+
+	/* The tx/rx chainmask reported by firmware depends on the actual
+	 * hw chains used. For example, for 4x4-capable macphys, the first
+	 * 4 chains can be used for the first mac and the remaining 4
+	 * chains for the second mac, or vice versa. In this case, tx/rx
+	 * chainmask 0xf will be advertised for the first mac and 0xf0 for
+	 * the second mac, or vice versa. Compute the shift value for the
+	 * tx/rx chainmask, which will be used to advertise the supported
+	 * ht/vht rates to mac80211.
+	 */
+ pdev_cap->tx_chain_mask_shift =
+ find_first_bit((unsigned long *)&pdev_cap->tx_chain_mask, 32);
+ pdev_cap->rx_chain_mask_shift =
+ find_first_bit((unsigned long *)&pdev_cap->rx_chain_mask, 32);
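+
+	/* Worked example: a second mac advertising rx_chain_mask 0xf0
+	 * yields rx_chain_mask_shift = 4, so per-chain fields can be
+	 * shifted down to a chain-0-based view before being reported
+	 * to mac80211.
+	 */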
+
+ if (mac_phy_caps->supported_bands & WMI_HOST_WLAN_2G_CAP) {
+ cap_band = &pdev_cap->band[NL80211_BAND_2GHZ];
+ cap_band->phy_id = mac_phy_caps->phy_id;
+ cap_band->max_bw_supported = mac_phy_caps->max_bw_supported_2g;
+ cap_band->ht_cap_info = mac_phy_caps->ht_cap_info_2g;
+ cap_band->he_cap_info[0] = mac_phy_caps->he_cap_info_2g;
+ cap_band->he_cap_info[1] = mac_phy_caps->he_cap_info_2g_ext;
+ cap_band->he_mcs = mac_phy_caps->he_supp_mcs_2g;
+ memcpy(cap_band->he_cap_phy_info, &mac_phy_caps->he_cap_phy_info_2g,
+ sizeof(u32) * PSOC_HOST_MAX_PHY_SIZE);
+ memcpy(&cap_band->he_ppet, &mac_phy_caps->he_ppet2g,
+ sizeof(struct ath11k_ppe_threshold));
+ }
+
+ if (mac_phy_caps->supported_bands & WMI_HOST_WLAN_5G_CAP) {
+ cap_band = &pdev_cap->band[NL80211_BAND_5GHZ];
+ cap_band->phy_id = mac_phy_caps->phy_id;
+ cap_band->max_bw_supported = mac_phy_caps->max_bw_supported_5g;
+ cap_band->ht_cap_info = mac_phy_caps->ht_cap_info_5g;
+ cap_band->he_cap_info[0] = mac_phy_caps->he_cap_info_5g;
+ cap_band->he_cap_info[1] = mac_phy_caps->he_cap_info_5g_ext;
+ cap_band->he_mcs = mac_phy_caps->he_supp_mcs_5g;
+ memcpy(cap_band->he_cap_phy_info, &mac_phy_caps->he_cap_phy_info_5g,
+ sizeof(u32) * PSOC_HOST_MAX_PHY_SIZE);
+ memcpy(&cap_band->he_ppet, &mac_phy_caps->he_ppet5g,
+ sizeof(struct ath11k_ppe_threshold));
+
+ cap_band = &pdev_cap->band[NL80211_BAND_6GHZ];
+ cap_band->max_bw_supported = mac_phy_caps->max_bw_supported_5g;
+ cap_band->ht_cap_info = mac_phy_caps->ht_cap_info_5g;
+ cap_band->he_cap_info[0] = mac_phy_caps->he_cap_info_5g;
+ cap_band->he_cap_info[1] = mac_phy_caps->he_cap_info_5g_ext;
+ cap_band->he_mcs = mac_phy_caps->he_supp_mcs_5g;
+ memcpy(cap_band->he_cap_phy_info, &mac_phy_caps->he_cap_phy_info_5g,
+ sizeof(u32) * PSOC_HOST_MAX_PHY_SIZE);
+ memcpy(&cap_band->he_ppet, &mac_phy_caps->he_ppet5g,
+ sizeof(struct ath11k_ppe_threshold));
+ }
+
+ return 0;
+}
+
+static int
+ath11k_pull_reg_cap_svc_rdy_ext(struct ath11k_pdev_wmi *wmi_handle,
+ struct wmi_soc_hal_reg_capabilities *reg_caps,
+ struct wmi_hal_reg_capabilities_ext *wmi_ext_reg_cap,
+ u8 phy_idx,
+ struct ath11k_hal_reg_capabilities_ext *param)
+{
+ struct wmi_hal_reg_capabilities_ext *ext_reg_cap;
+
+ if (!reg_caps || !wmi_ext_reg_cap)
+ return -EINVAL;
+
+ if (phy_idx >= reg_caps->num_phy)
+ return -EINVAL;
+
+ ext_reg_cap = &wmi_ext_reg_cap[phy_idx];
+
+ param->phy_id = ext_reg_cap->phy_id;
+ param->eeprom_reg_domain = ext_reg_cap->eeprom_reg_domain;
+ param->eeprom_reg_domain_ext =
+ ext_reg_cap->eeprom_reg_domain_ext;
+ param->regcap1 = ext_reg_cap->regcap1;
+ param->regcap2 = ext_reg_cap->regcap2;
+ /* check if param->wireless_mode is needed */
+ param->low_2ghz_chan = ext_reg_cap->low_2ghz_chan;
+ param->high_2ghz_chan = ext_reg_cap->high_2ghz_chan;
+ param->low_5ghz_chan = ext_reg_cap->low_5ghz_chan;
+ param->high_5ghz_chan = ext_reg_cap->high_5ghz_chan;
+
+ return 0;
+}
+
+static int ath11k_pull_service_ready_tlv(struct ath11k_base *ab,
+ const void *evt_buf,
+ struct ath11k_targ_cap *cap)
+{
+ const struct wmi_service_ready_event *ev = evt_buf;
+
+ if (!ev) {
+		ath11k_err(ab, "%s: failed due to NULL param\n",
+			   __func__);
+ return -EINVAL;
+ }
+
+ cap->phy_capability = ev->phy_capability;
+ cap->max_frag_entry = ev->max_frag_entry;
+ cap->num_rf_chains = ev->num_rf_chains;
+ cap->ht_cap_info = ev->ht_cap_info;
+ cap->vht_cap_info = ev->vht_cap_info;
+ cap->vht_supp_mcs = ev->vht_supp_mcs;
+ cap->hw_min_tx_power = ev->hw_min_tx_power;
+ cap->hw_max_tx_power = ev->hw_max_tx_power;
+ cap->sys_cap_info = ev->sys_cap_info;
+ cap->min_pkt_size_enable = ev->min_pkt_size_enable;
+ cap->max_bcn_ie_size = ev->max_bcn_ie_size;
+ cap->max_num_scan_channels = ev->max_num_scan_channels;
+ cap->max_supported_macs = ev->max_supported_macs;
+ cap->wmi_fw_sub_feat_caps = ev->wmi_fw_sub_feat_caps;
+ cap->txrx_chainmask = ev->txrx_chainmask;
+ cap->default_dbs_hw_mode_index = ev->default_dbs_hw_mode_index;
+ cap->num_msdu_desc = ev->num_msdu_desc;
+
+ return 0;
+}
+
+/* Save the wmi_service_bitmap into a linear bitmap. The wmi_services in
+ * wmi_service ready event are advertised in b0-b3 (LSB 4-bits) of each
+ * 4-byte word.
+ */
+static void ath11k_wmi_service_bitmap_copy(struct ath11k_pdev_wmi *wmi,
+ const u32 *wmi_svc_bm)
+{
+ int i, j;
+
+ for (i = 0, j = 0; i < WMI_SERVICE_BM_SIZE && j < WMI_MAX_SERVICE; i++) {
+ do {
+ if (wmi_svc_bm[i] & BIT(j % WMI_SERVICE_BITS_IN_SIZE32))
+ set_bit(j, wmi->wmi_ab->svc_map);
+ } while (++j % WMI_SERVICE_BITS_IN_SIZE32);
+ }
+}
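+
+/* Worked example, assuming WMI_SERVICE_BITS_IN_SIZE32 is 4 (as the
+ * b0-b3 note above implies): service j is found at bit (j % 4) of
+ * word (j / 4), so service 9 lives at bit 1 of wmi_svc_bm[2].
+ */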
+
+static int ath11k_wmi_tlv_svc_rdy_parse(struct ath11k_base *ab, u16 tag, u16 len,
+ const void *ptr, void *data)
+{
+ struct wmi_tlv_svc_ready_parse *svc_ready = data;
+ struct ath11k_pdev_wmi *wmi_handle = &ab->wmi_ab.wmi[0];
+ u16 expect_len;
+
+ switch (tag) {
+ case WMI_TAG_SERVICE_READY_EVENT:
+ if (ath11k_pull_service_ready_tlv(ab, ptr, &ab->target_caps))
+ return -EINVAL;
+ break;
+
+ case WMI_TAG_ARRAY_UINT32:
+ if (!svc_ready->wmi_svc_bitmap_done) {
+ expect_len = WMI_SERVICE_BM_SIZE * sizeof(u32);
+ if (len < expect_len) {
+ ath11k_warn(ab, "invalid len %d for the tag 0x%x\n",
+ len, tag);
+ return -EINVAL;
+ }
+
+ ath11k_wmi_service_bitmap_copy(wmi_handle, ptr);
+
+ svc_ready->wmi_svc_bitmap_done = true;
+ }
+ break;
+ default:
+ break;
+ }
+
+ return 0;
+}
+
+static int ath11k_service_ready_event(struct ath11k_base *ab, struct sk_buff *skb)
+{
+ struct wmi_tlv_svc_ready_parse svc_ready = { };
+ int ret;
+
+ ret = ath11k_wmi_tlv_iter(ab, skb->data, skb->len,
+ ath11k_wmi_tlv_svc_rdy_parse,
+ &svc_ready);
+ if (ret) {
+ ath11k_warn(ab, "failed to parse tlv %d\n", ret);
+ return ret;
+ }
+
+ ath11k_dbg(ab, ATH11K_DBG_WMI, "event service ready");
+
+ return 0;
+}
+
+struct sk_buff *ath11k_wmi_alloc_skb(struct ath11k_wmi_base *wmi_sc, u32 len)
+{
+ struct sk_buff *skb;
+ struct ath11k_base *ab = wmi_sc->ab;
+ u32 round_len = roundup(len, 4);
+
+ skb = ath11k_htc_alloc_skb(ab, WMI_SKB_HEADROOM + round_len);
+ if (!skb)
+ return NULL;
+
+ skb_reserve(skb, WMI_SKB_HEADROOM);
+ if (!IS_ALIGNED((unsigned long)skb->data, 4))
+ ath11k_warn(ab, "unaligned WMI skb data\n");
+
+ skb_put(skb, round_len);
+ memset(skb->data, 0, round_len);
+
+ return skb;
+}
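+
+/* Example of the sizing above: a 6-byte request rounds up to 8 zeroed
+ * bytes, so trailing TLV pad bytes are deterministic, and
+ * WMI_SKB_HEADROOM leaves room for the wmi_cmd_hdr that
+ * ath11k_wmi_cmd_send_nowait() pushes in front of the payload later.
+ */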
+
+static u32 ath11k_wmi_mgmt_get_freq(struct ath11k *ar,
+ struct ieee80211_tx_info *info)
+{
+ struct ath11k_base *ab = ar->ab;
+ u32 freq = 0;
+
+ if (ab->hw_params.support_off_channel_tx &&
+ ar->scan.is_roc &&
+ (info->flags & IEEE80211_TX_CTL_TX_OFFCHAN))
+ freq = ar->scan.roc_freq;
+
+ return freq;
+}
+
+int ath11k_wmi_mgmt_send(struct ath11k *ar, u32 vdev_id, u32 buf_id,
+ struct sk_buff *frame)
+{
+ struct ath11k_pdev_wmi *wmi = ar->wmi;
+ struct ieee80211_tx_info *info = IEEE80211_SKB_CB(frame);
+ struct wmi_mgmt_send_cmd *cmd;
+ struct wmi_tlv *frame_tlv;
+ struct sk_buff *skb;
+ u32 buf_len;
+ int ret, len;
+
+ buf_len = frame->len < WMI_MGMT_SEND_DOWNLD_LEN ?
+ frame->len : WMI_MGMT_SEND_DOWNLD_LEN;
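+
+	/* Only up to WMI_MGMT_SEND_DOWNLD_LEN bytes are copied inline
+	 * into the command; the full frame is handed to the firmware by
+	 * DMA address (paddr_lo/hi below), which is presumably why
+	 * frame_len and buf_len are carried separately.
+	 */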
+
+ len = sizeof(*cmd) + sizeof(*frame_tlv) + roundup(buf_len, 4);
+
+ skb = ath11k_wmi_alloc_skb(wmi->wmi_ab, len);
+ if (!skb)
+ return -ENOMEM;
+
+ cmd = (struct wmi_mgmt_send_cmd *)skb->data;
+ cmd->tlv_header = FIELD_PREP(WMI_TLV_TAG, WMI_TAG_MGMT_TX_SEND_CMD) |
+ FIELD_PREP(WMI_TLV_LEN, sizeof(*cmd) - TLV_HDR_SIZE);
+ cmd->vdev_id = vdev_id;
+ cmd->desc_id = buf_id;
+ cmd->chanfreq = ath11k_wmi_mgmt_get_freq(ar, info);
+ cmd->paddr_lo = lower_32_bits(ATH11K_SKB_CB(frame)->paddr);
+ cmd->paddr_hi = upper_32_bits(ATH11K_SKB_CB(frame)->paddr);
+ cmd->frame_len = frame->len;
+ cmd->buf_len = buf_len;
+ cmd->tx_params_valid = 0;
+
+ frame_tlv = (struct wmi_tlv *)(skb->data + sizeof(*cmd));
+ frame_tlv->header = FIELD_PREP(WMI_TLV_TAG, WMI_TAG_ARRAY_BYTE) |
+ FIELD_PREP(WMI_TLV_LEN, buf_len);
+
+ memcpy(frame_tlv->value, frame->data, buf_len);
+
+ ath11k_ce_byte_swap(frame_tlv->value, buf_len);
+
+ ret = ath11k_wmi_cmd_send(wmi, skb, WMI_MGMT_TX_SEND_CMDID);
+ if (ret) {
+ ath11k_warn(ar->ab,
+ "failed to submit WMI_MGMT_TX_SEND_CMDID cmd\n");
+ dev_kfree_skb(skb);
+ }
+
+ ath11k_dbg(ar->ab, ATH11K_DBG_WMI, "cmd mgmt tx send");
+
+ return ret;
+}
+
+int ath11k_wmi_vdev_create(struct ath11k *ar, u8 *macaddr,
+ struct vdev_create_params *param)
+{
+ struct ath11k_pdev_wmi *wmi = ar->wmi;
+ struct wmi_vdev_create_cmd *cmd;
+ struct sk_buff *skb;
+ struct wmi_vdev_txrx_streams *txrx_streams;
+ struct wmi_tlv *tlv;
+ int ret, len;
+ void *ptr;
+
+	/* It can be optimized by sending the tx/rx chain configuration
+	 * only for the supported bands instead of always sending it for
+	 * both bands.
+	 */
+ len = sizeof(*cmd) + TLV_HDR_SIZE +
+ (WMI_NUM_SUPPORTED_BAND_MAX * sizeof(*txrx_streams));
+
+ skb = ath11k_wmi_alloc_skb(wmi->wmi_ab, len);
+ if (!skb)
+ return -ENOMEM;
+
+ cmd = (struct wmi_vdev_create_cmd *)skb->data;
+ cmd->tlv_header = FIELD_PREP(WMI_TLV_TAG, WMI_TAG_VDEV_CREATE_CMD) |
+ FIELD_PREP(WMI_TLV_LEN, sizeof(*cmd) - TLV_HDR_SIZE);
+
+ cmd->vdev_id = param->if_id;
+ cmd->vdev_type = param->type;
+ cmd->vdev_subtype = param->subtype;
+ cmd->num_cfg_txrx_streams = WMI_NUM_SUPPORTED_BAND_MAX;
+ cmd->pdev_id = param->pdev_id;
+ cmd->mbssid_flags = param->mbssid_flags;
+ cmd->mbssid_tx_vdev_id = param->mbssid_tx_vdev_id;
+
+ ether_addr_copy(cmd->vdev_macaddr.addr, macaddr);
+
+ ptr = skb->data + sizeof(*cmd);
+ len = WMI_NUM_SUPPORTED_BAND_MAX * sizeof(*txrx_streams);
+
+ tlv = ptr;
+ tlv->header = FIELD_PREP(WMI_TLV_TAG, WMI_TAG_ARRAY_STRUCT) |
+ FIELD_PREP(WMI_TLV_LEN, len);
+
+ ptr += TLV_HDR_SIZE;
+ txrx_streams = ptr;
+ len = sizeof(*txrx_streams);
+ txrx_streams->tlv_header =
+ FIELD_PREP(WMI_TLV_TAG, WMI_TAG_VDEV_TXRX_STREAMS) |
+ FIELD_PREP(WMI_TLV_LEN, len - TLV_HDR_SIZE);
+ txrx_streams->band = WMI_TPC_CHAINMASK_CONFIG_BAND_2G;
+ txrx_streams->supported_tx_streams =
+ param->chains[NL80211_BAND_2GHZ].tx;
+ txrx_streams->supported_rx_streams =
+ param->chains[NL80211_BAND_2GHZ].rx;
+
+ txrx_streams++;
+ txrx_streams->tlv_header =
+ FIELD_PREP(WMI_TLV_TAG, WMI_TAG_VDEV_TXRX_STREAMS) |
+ FIELD_PREP(WMI_TLV_LEN, len - TLV_HDR_SIZE);
+ txrx_streams->band = WMI_TPC_CHAINMASK_CONFIG_BAND_5G;
+ txrx_streams->supported_tx_streams =
+ param->chains[NL80211_BAND_5GHZ].tx;
+ txrx_streams->supported_rx_streams =
+ param->chains[NL80211_BAND_5GHZ].rx;
+
+ ret = ath11k_wmi_cmd_send(wmi, skb, WMI_VDEV_CREATE_CMDID);
+ if (ret) {
+ ath11k_warn(ar->ab,
+ "failed to submit WMI_VDEV_CREATE_CMDID\n");
+ dev_kfree_skb(skb);
+ }
+
+ ath11k_dbg(ar->ab, ATH11K_DBG_WMI,
+ "cmd vdev create id %d type %d subtype %d macaddr %pM pdevid %d\n",
+ param->if_id, param->type, param->subtype,
+ macaddr, param->pdev_id);
+
+ return ret;
+}
+
+int ath11k_wmi_vdev_delete(struct ath11k *ar, u8 vdev_id)
+{
+ struct ath11k_pdev_wmi *wmi = ar->wmi;
+ struct wmi_vdev_delete_cmd *cmd;
+ struct sk_buff *skb;
+ int ret;
+
+ skb = ath11k_wmi_alloc_skb(wmi->wmi_ab, sizeof(*cmd));
+ if (!skb)
+ return -ENOMEM;
+
+ cmd = (struct wmi_vdev_delete_cmd *)skb->data;
+ cmd->tlv_header = FIELD_PREP(WMI_TLV_TAG, WMI_TAG_VDEV_DELETE_CMD) |
+ FIELD_PREP(WMI_TLV_LEN, sizeof(*cmd) - TLV_HDR_SIZE);
+ cmd->vdev_id = vdev_id;
+
+ ret = ath11k_wmi_cmd_send(wmi, skb, WMI_VDEV_DELETE_CMDID);
+ if (ret) {
+ ath11k_warn(ar->ab, "failed to submit WMI_VDEV_DELETE_CMDID\n");
+ dev_kfree_skb(skb);
+ }
+
+ ath11k_dbg(ar->ab, ATH11K_DBG_WMI, "cmd vdev delete id %d\n", vdev_id);
+
+ return ret;
+}
+
+int ath11k_wmi_vdev_stop(struct ath11k *ar, u8 vdev_id)
+{
+ struct ath11k_pdev_wmi *wmi = ar->wmi;
+ struct wmi_vdev_stop_cmd *cmd;
+ struct sk_buff *skb;
+ int ret;
+
+ skb = ath11k_wmi_alloc_skb(wmi->wmi_ab, sizeof(*cmd));
+ if (!skb)
+ return -ENOMEM;
+
+ cmd = (struct wmi_vdev_stop_cmd *)skb->data;
+
+ cmd->tlv_header = FIELD_PREP(WMI_TLV_TAG, WMI_TAG_VDEV_STOP_CMD) |
+ FIELD_PREP(WMI_TLV_LEN, sizeof(*cmd) - TLV_HDR_SIZE);
+ cmd->vdev_id = vdev_id;
+
+ ret = ath11k_wmi_cmd_send(wmi, skb, WMI_VDEV_STOP_CMDID);
+ if (ret) {
+ ath11k_warn(ar->ab, "failed to submit WMI_VDEV_STOP cmd\n");
+ dev_kfree_skb(skb);
+ }
+
+ ath11k_dbg(ar->ab, ATH11K_DBG_WMI, "cmd vdev stop id 0x%x\n", vdev_id);
+
+ return ret;
+}
+
+int ath11k_wmi_vdev_down(struct ath11k *ar, u8 vdev_id)
+{
+ struct ath11k_pdev_wmi *wmi = ar->wmi;
+ struct wmi_vdev_down_cmd *cmd;
+ struct sk_buff *skb;
+ int ret;
+
+ skb = ath11k_wmi_alloc_skb(wmi->wmi_ab, sizeof(*cmd));
+ if (!skb)
+ return -ENOMEM;
+
+ cmd = (struct wmi_vdev_down_cmd *)skb->data;
+
+ cmd->tlv_header = FIELD_PREP(WMI_TLV_TAG, WMI_TAG_VDEV_DOWN_CMD) |
+ FIELD_PREP(WMI_TLV_LEN, sizeof(*cmd) - TLV_HDR_SIZE);
+ cmd->vdev_id = vdev_id;
+
+ ret = ath11k_wmi_cmd_send(wmi, skb, WMI_VDEV_DOWN_CMDID);
+ if (ret) {
+ ath11k_warn(ar->ab, "failed to submit WMI_VDEV_DOWN cmd\n");
+ dev_kfree_skb(skb);
+ }
+
+ ath11k_dbg(ar->ab, ATH11K_DBG_WMI, "cmd vdev down id 0x%x\n", vdev_id);
+
+ return ret;
+}
+
+static void ath11k_wmi_put_wmi_channel(struct wmi_channel *chan,
+ struct wmi_vdev_start_req_arg *arg)
+{
+ u32 center_freq1 = arg->channel.band_center_freq1;
+
+ memset(chan, 0, sizeof(*chan));
+
+ chan->mhz = arg->channel.freq;
+ chan->band_center_freq1 = arg->channel.band_center_freq1;
+
+ if (arg->channel.mode == MODE_11AX_HE160) {
+ if (arg->channel.freq > arg->channel.band_center_freq1)
+ chan->band_center_freq1 = center_freq1 + 40;
+ else
+ chan->band_center_freq1 = center_freq1 - 40;
+
+ chan->band_center_freq2 = arg->channel.band_center_freq1;
+
+ } else if ((arg->channel.mode == MODE_11AC_VHT80_80) ||
+ (arg->channel.mode == MODE_11AX_HE80_80)) {
+ chan->band_center_freq2 = arg->channel.band_center_freq2;
+ } else {
+ chan->band_center_freq2 = 0;
+ }
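+
+	/* Worked example for the HE160 case: primary freq 5500 with a
+	 * 160 MHz center of 5570 gives freq < center, so freq1 becomes
+	 * 5570 - 40 = 5530 (the center of the 80 MHz segment holding
+	 * the primary) while freq2 carries the 160 MHz center 5570.
+	 */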
+
+ chan->info |= FIELD_PREP(WMI_CHAN_INFO_MODE, arg->channel.mode);
+ if (arg->channel.passive)
+ chan->info |= WMI_CHAN_INFO_PASSIVE;
+ if (arg->channel.allow_ibss)
+ chan->info |= WMI_CHAN_INFO_ADHOC_ALLOWED;
+ if (arg->channel.allow_ht)
+ chan->info |= WMI_CHAN_INFO_ALLOW_HT;
+ if (arg->channel.allow_vht)
+ chan->info |= WMI_CHAN_INFO_ALLOW_VHT;
+ if (arg->channel.allow_he)
+ chan->info |= WMI_CHAN_INFO_ALLOW_HE;
+ if (arg->channel.ht40plus)
+ chan->info |= WMI_CHAN_INFO_HT40_PLUS;
+ if (arg->channel.chan_radar)
+ chan->info |= WMI_CHAN_INFO_DFS;
+ if (arg->channel.freq2_radar)
+ chan->info |= WMI_CHAN_INFO_DFS_FREQ2;
+
+ chan->reg_info_1 = FIELD_PREP(WMI_CHAN_REG_INFO1_MAX_PWR,
+ arg->channel.max_power) |
+ FIELD_PREP(WMI_CHAN_REG_INFO1_MAX_REG_PWR,
+ arg->channel.max_reg_power);
+
+ chan->reg_info_2 = FIELD_PREP(WMI_CHAN_REG_INFO2_ANT_MAX,
+ arg->channel.max_antenna_gain) |
+ FIELD_PREP(WMI_CHAN_REG_INFO2_MAX_TX_PWR,
+ arg->channel.max_power);
+}
+
+int ath11k_wmi_vdev_start(struct ath11k *ar, struct wmi_vdev_start_req_arg *arg,
+ bool restart)
+{
+ struct ath11k_pdev_wmi *wmi = ar->wmi;
+ struct wmi_vdev_start_request_cmd *cmd;
+ struct sk_buff *skb;
+ struct wmi_channel *chan;
+ struct wmi_tlv *tlv;
+ void *ptr;
+ int ret, len;
+
+ if (WARN_ON(arg->ssid_len > sizeof(cmd->ssid.ssid)))
+ return -EINVAL;
+
+ len = sizeof(*cmd) + sizeof(*chan) + TLV_HDR_SIZE;
+
+ skb = ath11k_wmi_alloc_skb(wmi->wmi_ab, len);
+ if (!skb)
+ return -ENOMEM;
+
+ cmd = (struct wmi_vdev_start_request_cmd *)skb->data;
+ cmd->tlv_header = FIELD_PREP(WMI_TLV_TAG,
+ WMI_TAG_VDEV_START_REQUEST_CMD) |
+ FIELD_PREP(WMI_TLV_LEN, sizeof(*cmd) - TLV_HDR_SIZE);
+ cmd->vdev_id = arg->vdev_id;
+ cmd->beacon_interval = arg->bcn_intval;
+ cmd->bcn_tx_rate = arg->bcn_tx_rate;
+ cmd->dtim_period = arg->dtim_period;
+ cmd->num_noa_descriptors = arg->num_noa_descriptors;
+ cmd->preferred_rx_streams = arg->pref_rx_streams;
+ cmd->preferred_tx_streams = arg->pref_tx_streams;
+ cmd->cac_duration_ms = arg->cac_duration_ms;
+ cmd->regdomain = arg->regdomain;
+ cmd->he_ops = arg->he_ops;
+ cmd->mbssid_flags = arg->mbssid_flags;
+ cmd->mbssid_tx_vdev_id = arg->mbssid_tx_vdev_id;
+
+ if (!restart) {
+ if (arg->ssid) {
+ cmd->ssid.ssid_len = arg->ssid_len;
+ memcpy(cmd->ssid.ssid, arg->ssid, arg->ssid_len);
+ }
+ if (arg->hidden_ssid)
+ cmd->flags |= WMI_VDEV_START_HIDDEN_SSID;
+ if (arg->pmf_enabled)
+ cmd->flags |= WMI_VDEV_START_PMF_ENABLED;
+ }
+
+ cmd->flags |= WMI_VDEV_START_LDPC_RX_ENABLED;
+ if (test_bit(ATH11K_FLAG_HW_CRYPTO_DISABLED, &ar->ab->dev_flags))
+ cmd->flags |= WMI_VDEV_START_HW_ENCRYPTION_DISABLED;
+
+ ptr = skb->data + sizeof(*cmd);
+ chan = ptr;
+
+ ath11k_wmi_put_wmi_channel(chan, arg);
+
+ chan->tlv_header = FIELD_PREP(WMI_TLV_TAG, WMI_TAG_CHANNEL) |
+ FIELD_PREP(WMI_TLV_LEN,
+ sizeof(*chan) - TLV_HDR_SIZE);
+ ptr += sizeof(*chan);
+
+ tlv = ptr;
+ tlv->header = FIELD_PREP(WMI_TLV_TAG, WMI_TAG_ARRAY_STRUCT) |
+ FIELD_PREP(WMI_TLV_LEN, 0);
+
+ /* Note: This is a nested TLV containing:
+ * [wmi_tlv][wmi_p2p_noa_descriptor][wmi_tlv]..
+ */
+
+ ptr += sizeof(*tlv);
+
+ if (restart)
+ ret = ath11k_wmi_cmd_send(wmi, skb,
+ WMI_VDEV_RESTART_REQUEST_CMDID);
+ else
+ ret = ath11k_wmi_cmd_send(wmi, skb,
+ WMI_VDEV_START_REQUEST_CMDID);
+ if (ret) {
+ ath11k_warn(ar->ab, "failed to submit vdev_%s cmd\n",
+ restart ? "restart" : "start");
+ dev_kfree_skb(skb);
+ }
+
+ ath11k_dbg(ar->ab, ATH11K_DBG_WMI, "cmd vdev %s id 0x%x freq 0x%x mode 0x%x\n",
+ restart ? "restart" : "start", arg->vdev_id,
+ arg->channel.freq, arg->channel.mode);
+
+ return ret;
+}
+
+int ath11k_wmi_vdev_up(struct ath11k *ar, u32 vdev_id, u32 aid, const u8 *bssid,
+ u8 *tx_bssid, u32 nontx_profile_idx, u32 nontx_profile_cnt)
+{
+ struct ath11k_pdev_wmi *wmi = ar->wmi;
+ struct wmi_vdev_up_cmd *cmd;
+ struct ieee80211_bss_conf *bss_conf;
+ struct ath11k_vif *arvif;
+ struct sk_buff *skb;
+ int ret;
+
+ arvif = ath11k_mac_get_arvif(ar, vdev_id);
+
+ skb = ath11k_wmi_alloc_skb(wmi->wmi_ab, sizeof(*cmd));
+ if (!skb)
+ return -ENOMEM;
+
+ cmd = (struct wmi_vdev_up_cmd *)skb->data;
+
+ cmd->tlv_header = FIELD_PREP(WMI_TLV_TAG, WMI_TAG_VDEV_UP_CMD) |
+ FIELD_PREP(WMI_TLV_LEN, sizeof(*cmd) - TLV_HDR_SIZE);
+ cmd->vdev_id = vdev_id;
+ cmd->vdev_assoc_id = aid;
+
+ ether_addr_copy(cmd->vdev_bssid.addr, bssid);
+
+ cmd->nontx_profile_idx = nontx_profile_idx;
+ cmd->nontx_profile_cnt = nontx_profile_cnt;
+ if (tx_bssid)
+ ether_addr_copy(cmd->tx_vdev_bssid.addr, tx_bssid);
+
+ if (arvif && arvif->vif->type == NL80211_IFTYPE_STATION) {
+ bss_conf = &arvif->vif->bss_conf;
+
+ if (bss_conf->nontransmitted) {
+ ether_addr_copy(cmd->tx_vdev_bssid.addr,
+ bss_conf->transmitter_bssid);
+ cmd->nontx_profile_idx = bss_conf->bssid_index;
+ cmd->nontx_profile_cnt = bss_conf->bssid_indicator;
+ }
+ }
+
+ ret = ath11k_wmi_cmd_send(wmi, skb, WMI_VDEV_UP_CMDID);
+ if (ret) {
+ ath11k_warn(ar->ab, "failed to submit WMI_VDEV_UP cmd\n");
+ dev_kfree_skb(skb);
+ }
+
+ ath11k_dbg(ar->ab, ATH11K_DBG_WMI,
+ "cmd vdev up id 0x%x assoc id %d bssid %pM\n",
+ vdev_id, aid, bssid);
+
+ return ret;
+}
+
+int ath11k_wmi_send_peer_create_cmd(struct ath11k *ar,
+ struct peer_create_params *param)
+{
+ struct ath11k_pdev_wmi *wmi = ar->wmi;
+ struct wmi_peer_create_cmd *cmd;
+ struct sk_buff *skb;
+ int ret;
+
+ skb = ath11k_wmi_alloc_skb(wmi->wmi_ab, sizeof(*cmd));
+ if (!skb)
+ return -ENOMEM;
+
+ cmd = (struct wmi_peer_create_cmd *)skb->data;
+ cmd->tlv_header = FIELD_PREP(WMI_TLV_TAG, WMI_TAG_PEER_CREATE_CMD) |
+ FIELD_PREP(WMI_TLV_LEN, sizeof(*cmd) - TLV_HDR_SIZE);
+
+ ether_addr_copy(cmd->peer_macaddr.addr, param->peer_addr);
+ cmd->peer_type = param->peer_type;
+ cmd->vdev_id = param->vdev_id;
+
+ ret = ath11k_wmi_cmd_send(wmi, skb, WMI_PEER_CREATE_CMDID);
+ if (ret) {
+ ath11k_warn(ar->ab, "failed to submit WMI_PEER_CREATE cmd\n");
+ dev_kfree_skb(skb);
+ }
+
+ ath11k_dbg(ar->ab, ATH11K_DBG_WMI,
+ "cmd peer create vdev_id %d peer_addr %pM\n",
+ param->vdev_id, param->peer_addr);
+
+ return ret;
+}
+
+int ath11k_wmi_send_peer_delete_cmd(struct ath11k *ar,
+ const u8 *peer_addr, u8 vdev_id)
+{
+ struct ath11k_pdev_wmi *wmi = ar->wmi;
+ struct wmi_peer_delete_cmd *cmd;
+ struct sk_buff *skb;
+ int ret;
+
+ skb = ath11k_wmi_alloc_skb(wmi->wmi_ab, sizeof(*cmd));
+ if (!skb)
+ return -ENOMEM;
+
+ cmd = (struct wmi_peer_delete_cmd *)skb->data;
+ cmd->tlv_header = FIELD_PREP(WMI_TLV_TAG, WMI_TAG_PEER_DELETE_CMD) |
+ FIELD_PREP(WMI_TLV_LEN, sizeof(*cmd) - TLV_HDR_SIZE);
+
+ ether_addr_copy(cmd->peer_macaddr.addr, peer_addr);
+ cmd->vdev_id = vdev_id;
+
+ ret = ath11k_wmi_cmd_send(wmi, skb, WMI_PEER_DELETE_CMDID);
+ if (ret) {
+ ath11k_warn(ar->ab, "failed to send WMI_PEER_DELETE cmd\n");
+ dev_kfree_skb(skb);
+ }
+
+ ath11k_dbg(ar->ab, ATH11K_DBG_WMI,
+ "cmd peer delete vdev_id %d peer_addr %pM\n",
+ vdev_id, peer_addr);
+
+ return ret;
+}
+
+int ath11k_wmi_send_pdev_set_regdomain(struct ath11k *ar,
+ struct pdev_set_regdomain_params *param)
+{
+ struct ath11k_pdev_wmi *wmi = ar->wmi;
+ struct wmi_pdev_set_regdomain_cmd *cmd;
+ struct sk_buff *skb;
+ int ret;
+
+ skb = ath11k_wmi_alloc_skb(wmi->wmi_ab, sizeof(*cmd));
+ if (!skb)
+ return -ENOMEM;
+
+ cmd = (struct wmi_pdev_set_regdomain_cmd *)skb->data;
+ cmd->tlv_header = FIELD_PREP(WMI_TLV_TAG,
+ WMI_TAG_PDEV_SET_REGDOMAIN_CMD) |
+ FIELD_PREP(WMI_TLV_LEN, sizeof(*cmd) - TLV_HDR_SIZE);
+
+ cmd->reg_domain = param->current_rd_in_use;
+ cmd->reg_domain_2g = param->current_rd_2g;
+ cmd->reg_domain_5g = param->current_rd_5g;
+ cmd->conformance_test_limit_2g = param->ctl_2g;
+ cmd->conformance_test_limit_5g = param->ctl_5g;
+ cmd->dfs_domain = param->dfs_domain;
+ cmd->pdev_id = param->pdev_id;
+
+ ret = ath11k_wmi_cmd_send(wmi, skb, WMI_PDEV_SET_REGDOMAIN_CMDID);
+ if (ret) {
+ ath11k_warn(ar->ab,
+ "failed to send WMI_PDEV_SET_REGDOMAIN cmd\n");
+ dev_kfree_skb(skb);
+ }
+
+ ath11k_dbg(ar->ab, ATH11K_DBG_WMI,
+ "cmd pdev regd rd %d rd2g %d rd5g %d domain %d pdev id %d\n",
+ param->current_rd_in_use, param->current_rd_2g,
+ param->current_rd_5g, param->dfs_domain, param->pdev_id);
+
+ return ret;
+}
+
+int ath11k_wmi_set_peer_param(struct ath11k *ar, const u8 *peer_addr,
+ u32 vdev_id, u32 param_id, u32 param_val)
+{
+ struct ath11k_pdev_wmi *wmi = ar->wmi;
+ struct wmi_peer_set_param_cmd *cmd;
+ struct sk_buff *skb;
+ int ret;
+
+ skb = ath11k_wmi_alloc_skb(wmi->wmi_ab, sizeof(*cmd));
+ if (!skb)
+ return -ENOMEM;
+
+ cmd = (struct wmi_peer_set_param_cmd *)skb->data;
+ cmd->tlv_header = FIELD_PREP(WMI_TLV_TAG, WMI_TAG_PEER_SET_PARAM_CMD) |
+ FIELD_PREP(WMI_TLV_LEN, sizeof(*cmd) - TLV_HDR_SIZE);
+ ether_addr_copy(cmd->peer_macaddr.addr, peer_addr);
+ cmd->vdev_id = vdev_id;
+ cmd->param_id = param_id;
+ cmd->param_value = param_val;
+
+ ret = ath11k_wmi_cmd_send(wmi, skb, WMI_PEER_SET_PARAM_CMDID);
+ if (ret) {
+ ath11k_warn(ar->ab, "failed to send WMI_PEER_SET_PARAM cmd\n");
+ dev_kfree_skb(skb);
+ }
+
+ ath11k_dbg(ar->ab, ATH11K_DBG_WMI,
+ "cmd peer set param vdev %d peer 0x%pM set param %d value %d\n",
+ vdev_id, peer_addr, param_id, param_val);
+
+ return ret;
+}
+
+int ath11k_wmi_send_peer_flush_tids_cmd(struct ath11k *ar,
+ u8 peer_addr[ETH_ALEN],
+ struct peer_flush_params *param)
+{
+ struct ath11k_pdev_wmi *wmi = ar->wmi;
+ struct wmi_peer_flush_tids_cmd *cmd;
+ struct sk_buff *skb;
+ int ret;
+
+ skb = ath11k_wmi_alloc_skb(wmi->wmi_ab, sizeof(*cmd));
+ if (!skb)
+ return -ENOMEM;
+
+ cmd = (struct wmi_peer_flush_tids_cmd *)skb->data;
+ cmd->tlv_header = FIELD_PREP(WMI_TLV_TAG, WMI_TAG_PEER_FLUSH_TIDS_CMD) |
+ FIELD_PREP(WMI_TLV_LEN, sizeof(*cmd) - TLV_HDR_SIZE);
+
+ ether_addr_copy(cmd->peer_macaddr.addr, peer_addr);
+ cmd->peer_tid_bitmap = param->peer_tid_bitmap;
+ cmd->vdev_id = param->vdev_id;
+
+ ret = ath11k_wmi_cmd_send(wmi, skb, WMI_PEER_FLUSH_TIDS_CMDID);
+ if (ret) {
+ ath11k_warn(ar->ab,
+ "failed to send WMI_PEER_FLUSH_TIDS cmd\n");
+ dev_kfree_skb(skb);
+ }
+
+ ath11k_dbg(ar->ab, ATH11K_DBG_WMI,
+ "cmd peer flush tids vdev_id %d peer_addr %pM tids %08x\n",
+ param->vdev_id, peer_addr, param->peer_tid_bitmap);
+
+ return ret;
+}
+
+int ath11k_wmi_peer_rx_reorder_queue_setup(struct ath11k *ar,
+ int vdev_id, const u8 *addr,
+ dma_addr_t paddr, u8 tid,
+ u8 ba_window_size_valid,
+ u32 ba_window_size)
+{
+ struct wmi_peer_reorder_queue_setup_cmd *cmd;
+ struct sk_buff *skb;
+ int ret;
+
+ skb = ath11k_wmi_alloc_skb(ar->wmi->wmi_ab, sizeof(*cmd));
+ if (!skb)
+ return -ENOMEM;
+
+ cmd = (struct wmi_peer_reorder_queue_setup_cmd *)skb->data;
+ cmd->tlv_header = FIELD_PREP(WMI_TLV_TAG,
+ WMI_TAG_REORDER_QUEUE_SETUP_CMD) |
+ FIELD_PREP(WMI_TLV_LEN, sizeof(*cmd) - TLV_HDR_SIZE);
+
+ ether_addr_copy(cmd->peer_macaddr.addr, addr);
+ cmd->vdev_id = vdev_id;
+ cmd->tid = tid;
+ cmd->queue_ptr_lo = lower_32_bits(paddr);
+ cmd->queue_ptr_hi = upper_32_bits(paddr);
+ cmd->queue_no = tid;
+ cmd->ba_window_size_valid = ba_window_size_valid;
+ cmd->ba_window_size = ba_window_size;
+
+ ret = ath11k_wmi_cmd_send(ar->wmi, skb,
+ WMI_PEER_REORDER_QUEUE_SETUP_CMDID);
+ if (ret) {
+ ath11k_warn(ar->ab,
+ "failed to send WMI_PEER_REORDER_QUEUE_SETUP\n");
+ dev_kfree_skb(skb);
+ }
+
+ ath11k_dbg(ar->ab, ATH11K_DBG_WMI,
+ "cmd peer reorder queue setup addr %pM vdev_id %d tid %d\n",
+ addr, vdev_id, tid);
+
+ return ret;
+}
+
+int
+ath11k_wmi_rx_reord_queue_remove(struct ath11k *ar,
+ struct rx_reorder_queue_remove_params *param)
+{
+ struct ath11k_pdev_wmi *wmi = ar->wmi;
+ struct wmi_peer_reorder_queue_remove_cmd *cmd;
+ struct sk_buff *skb;
+ int ret;
+
+ skb = ath11k_wmi_alloc_skb(wmi->wmi_ab, sizeof(*cmd));
+ if (!skb)
+ return -ENOMEM;
+
+ cmd = (struct wmi_peer_reorder_queue_remove_cmd *)skb->data;
+ cmd->tlv_header = FIELD_PREP(WMI_TLV_TAG,
+ WMI_TAG_REORDER_QUEUE_REMOVE_CMD) |
+ FIELD_PREP(WMI_TLV_LEN, sizeof(*cmd) - TLV_HDR_SIZE);
+
+ ether_addr_copy(cmd->peer_macaddr.addr, param->peer_macaddr);
+ cmd->vdev_id = param->vdev_id;
+ cmd->tid_mask = param->peer_tid_bitmap;
+
+ ret = ath11k_wmi_cmd_send(wmi, skb,
+ WMI_PEER_REORDER_QUEUE_REMOVE_CMDID);
+ if (ret) {
+ ath11k_warn(ar->ab,
+ "failed to send WMI_PEER_REORDER_QUEUE_REMOVE_CMDID");
+ dev_kfree_skb(skb);
+ }
+
+ ath11k_dbg(ar->ab, ATH11K_DBG_WMI,
+ "cmd peer reorder queue remove peer_macaddr %pM vdev_id %d tid_map %d",
+ param->peer_macaddr, param->vdev_id, param->peer_tid_bitmap);
+
+ return ret;
+}
+
+int ath11k_wmi_pdev_set_param(struct ath11k *ar, u32 param_id,
+ u32 param_value, u8 pdev_id)
+{
+ struct ath11k_pdev_wmi *wmi = ar->wmi;
+ struct wmi_pdev_set_param_cmd *cmd;
+ struct sk_buff *skb;
+ int ret;
+
+ skb = ath11k_wmi_alloc_skb(wmi->wmi_ab, sizeof(*cmd));
+ if (!skb)
+ return -ENOMEM;
+
+ cmd = (struct wmi_pdev_set_param_cmd *)skb->data;
+ cmd->tlv_header = FIELD_PREP(WMI_TLV_TAG, WMI_TAG_PDEV_SET_PARAM_CMD) |
+ FIELD_PREP(WMI_TLV_LEN, sizeof(*cmd) - TLV_HDR_SIZE);
+ cmd->pdev_id = pdev_id;
+ cmd->param_id = param_id;
+ cmd->param_value = param_value;
+
+ ret = ath11k_wmi_cmd_send(wmi, skb, WMI_PDEV_SET_PARAM_CMDID);
+ if (ret) {
+ ath11k_warn(ar->ab, "failed to send WMI_PDEV_SET_PARAM cmd\n");
+ dev_kfree_skb(skb);
+ }
+
+ ath11k_dbg(ar->ab, ATH11K_DBG_WMI,
+ "cmd pdev set param %d pdev id %d value %d\n",
+ param_id, pdev_id, param_value);
+
+ return ret;
+}
+
+int ath11k_wmi_pdev_set_ps_mode(struct ath11k *ar, int vdev_id,
+ enum wmi_sta_ps_mode psmode)
+{
+ struct ath11k_pdev_wmi *wmi = ar->wmi;
+ struct wmi_pdev_set_ps_mode_cmd *cmd;
+ struct sk_buff *skb;
+ int ret;
+
+ skb = ath11k_wmi_alloc_skb(wmi->wmi_ab, sizeof(*cmd));
+ if (!skb)
+ return -ENOMEM;
+
+ cmd = (struct wmi_pdev_set_ps_mode_cmd *)skb->data;
+ cmd->tlv_header = FIELD_PREP(WMI_TLV_TAG, WMI_TAG_STA_POWERSAVE_MODE_CMD) |
+ FIELD_PREP(WMI_TLV_LEN, sizeof(*cmd) - TLV_HDR_SIZE);
+ cmd->vdev_id = vdev_id;
+ cmd->sta_ps_mode = psmode;
+
+ ret = ath11k_wmi_cmd_send(wmi, skb, WMI_STA_POWERSAVE_MODE_CMDID);
+ if (ret) {
+ ath11k_warn(ar->ab, "failed to send WMI_PDEV_SET_PARAM cmd\n");
+ dev_kfree_skb(skb);
+ }
+
+ ath11k_dbg(ar->ab, ATH11K_DBG_WMI,
+ "cmd sta powersave mode psmode %d vdev id %d\n",
+ psmode, vdev_id);
+
+ return ret;
+}
+
+int ath11k_wmi_pdev_suspend(struct ath11k *ar, u32 suspend_opt,
+ u32 pdev_id)
+{
+ struct ath11k_pdev_wmi *wmi = ar->wmi;
+ struct wmi_pdev_suspend_cmd *cmd;
+ struct sk_buff *skb;
+ int ret;
+
+ skb = ath11k_wmi_alloc_skb(wmi->wmi_ab, sizeof(*cmd));
+ if (!skb)
+ return -ENOMEM;
+
+ cmd = (struct wmi_pdev_suspend_cmd *)skb->data;
+
+ cmd->tlv_header = FIELD_PREP(WMI_TLV_TAG, WMI_TAG_PDEV_SUSPEND_CMD) |
+ FIELD_PREP(WMI_TLV_LEN, sizeof(*cmd) - TLV_HDR_SIZE);
+
+ cmd->suspend_opt = suspend_opt;
+ cmd->pdev_id = pdev_id;
+
+ ret = ath11k_wmi_cmd_send(wmi, skb, WMI_PDEV_SUSPEND_CMDID);
+ if (ret) {
+ ath11k_warn(ar->ab, "failed to send WMI_PDEV_SUSPEND cmd\n");
+ dev_kfree_skb(skb);
+ }
+
+ ath11k_dbg(ar->ab, ATH11K_DBG_WMI,
+ "cmd pdev suspend pdev_id %d\n", pdev_id);
+
+ return ret;
+}
+
+int ath11k_wmi_pdev_resume(struct ath11k *ar, u32 pdev_id)
+{
+ struct ath11k_pdev_wmi *wmi = ar->wmi;
+ struct wmi_pdev_resume_cmd *cmd;
+ struct sk_buff *skb;
+ int ret;
+
+ skb = ath11k_wmi_alloc_skb(wmi->wmi_ab, sizeof(*cmd));
+ if (!skb)
+ return -ENOMEM;
+
+ cmd = (struct wmi_pdev_resume_cmd *)skb->data;
+
+ cmd->tlv_header = FIELD_PREP(WMI_TLV_TAG, WMI_TAG_PDEV_RESUME_CMD) |
+ FIELD_PREP(WMI_TLV_LEN, sizeof(*cmd) - TLV_HDR_SIZE);
+ cmd->pdev_id = pdev_id;
+
+ ret = ath11k_wmi_cmd_send(wmi, skb, WMI_PDEV_RESUME_CMDID);
+ if (ret) {
+ ath11k_warn(ar->ab, "failed to send WMI_PDEV_RESUME cmd\n");
+ dev_kfree_skb(skb);
+ }
+
+ ath11k_dbg(ar->ab, ATH11K_DBG_WMI,
+ "cmd pdev resume pdev id %d\n", pdev_id);
+
+ return ret;
+}
+
+/* TODO: FW support for this cmd is not available yet.
+ * It can be tested once the command and the corresponding
+ * event are implemented in FW.
+ */
+int ath11k_wmi_pdev_bss_chan_info_request(struct ath11k *ar,
+ enum wmi_bss_chan_info_req_type type)
+{
+ struct ath11k_pdev_wmi *wmi = ar->wmi;
+ struct wmi_pdev_bss_chan_info_req_cmd *cmd;
+ struct sk_buff *skb;
+ int ret;
+
+ skb = ath11k_wmi_alloc_skb(wmi->wmi_ab, sizeof(*cmd));
+ if (!skb)
+ return -ENOMEM;
+
+ cmd = (struct wmi_pdev_bss_chan_info_req_cmd *)skb->data;
+
+ cmd->tlv_header = FIELD_PREP(WMI_TLV_TAG,
+ WMI_TAG_PDEV_BSS_CHAN_INFO_REQUEST) |
+ FIELD_PREP(WMI_TLV_LEN, sizeof(*cmd) - TLV_HDR_SIZE);
+ cmd->req_type = type;
+ cmd->pdev_id = ar->pdev->pdev_id;
+
+ ret = ath11k_wmi_cmd_send(wmi, skb,
+ WMI_PDEV_BSS_CHAN_INFO_REQUEST_CMDID);
+ if (ret) {
+ ath11k_warn(ar->ab,
+ "failed to send WMI_PDEV_BSS_CHAN_INFO_REQUEST cmd\n");
+ dev_kfree_skb(skb);
+ }
+
+ ath11k_dbg(ar->ab, ATH11K_DBG_WMI,
+ "cmd pdev bss chan info request type %d\n", type);
+
+ return ret;
+}
+
+int ath11k_wmi_send_set_ap_ps_param_cmd(struct ath11k *ar, u8 *peer_addr,
+ struct ap_ps_params *param)
+{
+ struct ath11k_pdev_wmi *wmi = ar->wmi;
+ struct wmi_ap_ps_peer_cmd *cmd;
+ struct sk_buff *skb;
+ int ret;
+
+ skb = ath11k_wmi_alloc_skb(wmi->wmi_ab, sizeof(*cmd));
+ if (!skb)
+ return -ENOMEM;
+
+ cmd = (struct wmi_ap_ps_peer_cmd *)skb->data;
+ cmd->tlv_header = FIELD_PREP(WMI_TLV_TAG, WMI_TAG_AP_PS_PEER_CMD) |
+ FIELD_PREP(WMI_TLV_LEN, sizeof(*cmd) - TLV_HDR_SIZE);
+
+ cmd->vdev_id = param->vdev_id;
+ ether_addr_copy(cmd->peer_macaddr.addr, peer_addr);
+ cmd->param = param->param;
+ cmd->value = param->value;
+
+ ret = ath11k_wmi_cmd_send(wmi, skb, WMI_AP_PS_PEER_PARAM_CMDID);
+ if (ret) {
+ ath11k_warn(ar->ab,
+ "failed to send WMI_AP_PS_PEER_PARAM_CMDID\n");
+ dev_kfree_skb(skb);
+ }
+
+ ath11k_dbg(ar->ab, ATH11K_DBG_WMI,
+ "cmd ap ps peer param vdev id %d peer %pM param %d value %d\n",
+ param->vdev_id, peer_addr, param->param, param->value);
+
+ return ret;
+}
+
+int ath11k_wmi_set_sta_ps_param(struct ath11k *ar, u32 vdev_id,
+ u32 param, u32 param_value)
+{
+ struct ath11k_pdev_wmi *wmi = ar->wmi;
+ struct wmi_sta_powersave_param_cmd *cmd;
+ struct sk_buff *skb;
+ int ret;
+
+ skb = ath11k_wmi_alloc_skb(wmi->wmi_ab, sizeof(*cmd));
+ if (!skb)
+ return -ENOMEM;
+
+ cmd = (struct wmi_sta_powersave_param_cmd *)skb->data;
+ cmd->tlv_header = FIELD_PREP(WMI_TLV_TAG,
+ WMI_TAG_STA_POWERSAVE_PARAM_CMD) |
+ FIELD_PREP(WMI_TLV_LEN, sizeof(*cmd) - TLV_HDR_SIZE);
+
+ cmd->vdev_id = vdev_id;
+ cmd->param = param;
+ cmd->value = param_value;
+
+ ret = ath11k_wmi_cmd_send(wmi, skb, WMI_STA_POWERSAVE_PARAM_CMDID);
+ if (ret) {
+ ath11k_warn(ar->ab, "failed to send WMI_STA_POWERSAVE_PARAM_CMDID");
+ dev_kfree_skb(skb);
+ }
+
+ ath11k_dbg(ar->ab, ATH11K_DBG_WMI,
+ "cmd set powersave param vdev_id %d param %d value %d\n",
+ vdev_id, param, param_value);
+
+ return ret;
+}
+
+int ath11k_wmi_force_fw_hang_cmd(struct ath11k *ar, u32 type, u32 delay_time_ms)
+{
+ struct ath11k_pdev_wmi *wmi = ar->wmi;
+ struct wmi_force_fw_hang_cmd *cmd;
+ struct sk_buff *skb;
+ int ret, len;
+
+ len = sizeof(*cmd);
+
+ skb = ath11k_wmi_alloc_skb(wmi->wmi_ab, len);
+ if (!skb)
+ return -ENOMEM;
+
+ cmd = (struct wmi_force_fw_hang_cmd *)skb->data;
+ cmd->tlv_header = FIELD_PREP(WMI_TLV_TAG, WMI_TAG_FORCE_FW_HANG_CMD) |
+ FIELD_PREP(WMI_TLV_LEN, len - TLV_HDR_SIZE);
+
+ cmd->type = type;
+ cmd->delay_time_ms = delay_time_ms;
+
+ ret = ath11k_wmi_cmd_send(wmi, skb, WMI_FORCE_FW_HANG_CMDID);
+
+ if (ret) {
+ ath11k_warn(ar->ab, "Failed to send WMI_FORCE_FW_HANG_CMDID");
+ dev_kfree_skb(skb);
+ }
+
+ ath11k_dbg(ar->ab, ATH11K_DBG_WMI, "cmd force fw hang");
+
+ return ret;
+}
+
+int ath11k_wmi_vdev_set_param_cmd(struct ath11k *ar, u32 vdev_id,
+ u32 param_id, u32 param_value)
+{
+ struct ath11k_pdev_wmi *wmi = ar->wmi;
+ struct wmi_vdev_set_param_cmd *cmd;
+ struct sk_buff *skb;
+ int ret;
+
+ skb = ath11k_wmi_alloc_skb(wmi->wmi_ab, sizeof(*cmd));
+ if (!skb)
+ return -ENOMEM;
+
+ cmd = (struct wmi_vdev_set_param_cmd *)skb->data;
+ cmd->tlv_header = FIELD_PREP(WMI_TLV_TAG, WMI_TAG_VDEV_SET_PARAM_CMD) |
+ FIELD_PREP(WMI_TLV_LEN, sizeof(*cmd) - TLV_HDR_SIZE);
+
+ cmd->vdev_id = vdev_id;
+ cmd->param_id = param_id;
+ cmd->param_value = param_value;
+
+ ret = ath11k_wmi_cmd_send(wmi, skb, WMI_VDEV_SET_PARAM_CMDID);
+ if (ret) {
+ ath11k_warn(ar->ab,
+ "failed to send WMI_VDEV_SET_PARAM_CMDID\n");
+ dev_kfree_skb(skb);
+ }
+
+ ath11k_dbg(ar->ab, ATH11K_DBG_WMI,
+ "cmd vdev set param vdev 0x%x param %d value %d\n",
+ vdev_id, param_id, param_value);
+
+ return ret;
+}
+
+int ath11k_wmi_send_stats_request_cmd(struct ath11k *ar,
+ struct stats_request_params *param)
+{
+ struct ath11k_pdev_wmi *wmi = ar->wmi;
+ struct wmi_request_stats_cmd *cmd;
+ struct sk_buff *skb;
+ int ret;
+
+ skb = ath11k_wmi_alloc_skb(wmi->wmi_ab, sizeof(*cmd));
+ if (!skb)
+ return -ENOMEM;
+
+ cmd = (struct wmi_request_stats_cmd *)skb->data;
+ cmd->tlv_header = FIELD_PREP(WMI_TLV_TAG, WMI_TAG_REQUEST_STATS_CMD) |
+ FIELD_PREP(WMI_TLV_LEN, sizeof(*cmd) - TLV_HDR_SIZE);
+
+ cmd->stats_id = param->stats_id;
+ cmd->vdev_id = param->vdev_id;
+ cmd->pdev_id = param->pdev_id;
+
+ ret = ath11k_wmi_cmd_send(wmi, skb, WMI_REQUEST_STATS_CMDID);
+ if (ret) {
+ ath11k_warn(ar->ab, "failed to send WMI_REQUEST_STATS cmd\n");
+ dev_kfree_skb(skb);
+ }
+
+ ath11k_dbg(ar->ab, ATH11K_DBG_WMI,
+ "cmd request stats 0x%x vdev id %d pdev id %d\n",
+ param->stats_id, param->vdev_id, param->pdev_id);
+
+ return ret;
+}
+
+int ath11k_wmi_send_pdev_temperature_cmd(struct ath11k *ar)
+{
+ struct ath11k_pdev_wmi *wmi = ar->wmi;
+ struct wmi_get_pdev_temperature_cmd *cmd;
+ struct sk_buff *skb;
+ int ret;
+
+ skb = ath11k_wmi_alloc_skb(wmi->wmi_ab, sizeof(*cmd));
+ if (!skb)
+ return -ENOMEM;
+
+ cmd = (struct wmi_get_pdev_temperature_cmd *)skb->data;
+ cmd->tlv_header = FIELD_PREP(WMI_TLV_TAG, WMI_TAG_PDEV_GET_TEMPERATURE_CMD) |
+ FIELD_PREP(WMI_TLV_LEN, sizeof(*cmd) - TLV_HDR_SIZE);
+ cmd->pdev_id = ar->pdev->pdev_id;
+
+ ret = ath11k_wmi_cmd_send(wmi, skb, WMI_PDEV_GET_TEMPERATURE_CMDID);
+ if (ret) {
+ ath11k_warn(ar->ab, "failed to send WMI_PDEV_GET_TEMPERATURE cmd\n");
+ dev_kfree_skb(skb);
+ }
+
+ ath11k_dbg(ar->ab, ATH11K_DBG_WMI,
+ "cmd pdev get temperature for pdev_id %d\n", ar->pdev->pdev_id);
+
+ return ret;
+}
+
+int ath11k_wmi_send_bcn_offload_control_cmd(struct ath11k *ar,
+ u32 vdev_id, u32 bcn_ctrl_op)
+{
+ struct ath11k_pdev_wmi *wmi = ar->wmi;
+ struct wmi_bcn_offload_ctrl_cmd *cmd;
+ struct sk_buff *skb;
+ int ret;
+
+ skb = ath11k_wmi_alloc_skb(wmi->wmi_ab, sizeof(*cmd));
+ if (!skb)
+ return -ENOMEM;
+
+ cmd = (struct wmi_bcn_offload_ctrl_cmd *)skb->data;
+ cmd->tlv_header = FIELD_PREP(WMI_TLV_TAG,
+ WMI_TAG_BCN_OFFLOAD_CTRL_CMD) |
+ FIELD_PREP(WMI_TLV_LEN, sizeof(*cmd) - TLV_HDR_SIZE);
+
+ cmd->vdev_id = vdev_id;
+ cmd->bcn_ctrl_op = bcn_ctrl_op;
+
+ ret = ath11k_wmi_cmd_send(wmi, skb, WMI_BCN_OFFLOAD_CTRL_CMDID);
+ if (ret) {
+ ath11k_warn(ar->ab,
+ "failed to send WMI_BCN_OFFLOAD_CTRL_CMDID\n");
+ dev_kfree_skb(skb);
+ }
+
+ ath11k_dbg(ar->ab, ATH11K_DBG_WMI,
+ "cmd bcn offload ctrl vdev id %d ctrl_op %d\n",
+ vdev_id, bcn_ctrl_op);
+
+ return ret;
+}
+
+int ath11k_wmi_bcn_tmpl(struct ath11k *ar, u32 vdev_id,
+ struct ieee80211_mutable_offsets *offs,
+ struct sk_buff *bcn, u32 ema_params)
+{
+ struct ath11k_pdev_wmi *wmi = ar->wmi;
+ struct wmi_bcn_tmpl_cmd *cmd;
+ struct wmi_bcn_prb_info *bcn_prb_info;
+ struct wmi_tlv *tlv;
+ struct sk_buff *skb;
+ void *ptr;
+ int ret, len;
+ size_t aligned_len = roundup(bcn->len, 4);
+ struct ieee80211_vif *vif;
+ struct ath11k_vif *arvif = ath11k_mac_get_arvif(ar, vdev_id);
+
+ if (!arvif) {
+ ath11k_warn(ar->ab, "failed to find arvif with vdev id %d\n", vdev_id);
+ return -EINVAL;
+ }
+
+ vif = arvif->vif;
+
+ len = sizeof(*cmd) + sizeof(*bcn_prb_info) + TLV_HDR_SIZE + aligned_len;
+
+ skb = ath11k_wmi_alloc_skb(wmi->wmi_ab, len);
+ if (!skb)
+ return -ENOMEM;
+
+ cmd = (struct wmi_bcn_tmpl_cmd *)skb->data;
+ cmd->tlv_header = FIELD_PREP(WMI_TLV_TAG, WMI_TAG_BCN_TMPL_CMD) |
+ FIELD_PREP(WMI_TLV_LEN, sizeof(*cmd) - TLV_HDR_SIZE);
+ cmd->vdev_id = vdev_id;
+ cmd->tim_ie_offset = offs->tim_offset;
+
+ if (vif->bss_conf.csa_active) {
+ cmd->csa_switch_count_offset = offs->cntdwn_counter_offs[0];
+ cmd->ext_csa_switch_count_offset = offs->cntdwn_counter_offs[1];
+ }
+
+ cmd->buf_len = bcn->len;
+ cmd->mbssid_ie_offset = offs->mbssid_off;
+ cmd->ema_params = ema_params;
+
+ ptr = skb->data + sizeof(*cmd);
+
+ bcn_prb_info = ptr;
+ len = sizeof(*bcn_prb_info);
+ bcn_prb_info->tlv_header = FIELD_PREP(WMI_TLV_TAG,
+ WMI_TAG_BCN_PRB_INFO) |
+ FIELD_PREP(WMI_TLV_LEN, len - TLV_HDR_SIZE);
+ bcn_prb_info->caps = 0;
+ bcn_prb_info->erp = 0;
+
+ ptr += sizeof(*bcn_prb_info);
+
+ tlv = ptr;
+ tlv->header = FIELD_PREP(WMI_TLV_TAG, WMI_TAG_ARRAY_BYTE) |
+ FIELD_PREP(WMI_TLV_LEN, aligned_len);
+ memcpy(tlv->value, bcn->data, bcn->len);
+
+ ret = ath11k_wmi_cmd_send(wmi, skb, WMI_BCN_TMPL_CMDID);
+ if (ret) {
+ ath11k_warn(ar->ab, "failed to send WMI_BCN_TMPL_CMDID\n");
+ dev_kfree_skb(skb);
+ }
+
+ ath11k_dbg(ar->ab, ATH11K_DBG_WMI, "cmd bcn tmpl");
+
+ return ret;
+}
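+
+ /* Layout of the template message built above (sketch):
+ *
+ *	[ wmi_bcn_tmpl_cmd ][ wmi_bcn_prb_info ]
+ *	[ ARRAY_BYTE TLV hdr | bcn->len beacon bytes ]
+ *
+ * Only bcn->len bytes are copied; the TLV length is the 4-byte
+ * padded size (aligned_len).
+ */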
+
+int ath11k_wmi_vdev_install_key(struct ath11k *ar,
+ struct wmi_vdev_install_key_arg *arg)
+{
+ struct ath11k_pdev_wmi *wmi = ar->wmi;
+ struct wmi_vdev_install_key_cmd *cmd;
+ struct wmi_tlv *tlv;
+ struct sk_buff *skb;
+ int ret, len;
+ int key_len_aligned = roundup(arg->key_len, sizeof(uint32_t));
+
+ len = sizeof(*cmd) + TLV_HDR_SIZE + key_len_aligned;
+
+ skb = ath11k_wmi_alloc_skb(wmi->wmi_ab, len);
+ if (!skb)
+ return -ENOMEM;
+
+ cmd = (struct wmi_vdev_install_key_cmd *)skb->data;
+ cmd->tlv_header = FIELD_PREP(WMI_TLV_TAG, WMI_TAG_VDEV_INSTALL_KEY_CMD) |
+ FIELD_PREP(WMI_TLV_LEN, sizeof(*cmd) - TLV_HDR_SIZE);
+ cmd->vdev_id = arg->vdev_id;
+ ether_addr_copy(cmd->peer_macaddr.addr, arg->macaddr);
+ cmd->key_idx = arg->key_idx;
+ cmd->key_flags = arg->key_flags;
+ cmd->key_cipher = arg->key_cipher;
+ cmd->key_len = arg->key_len;
+ cmd->key_txmic_len = arg->key_txmic_len;
+ cmd->key_rxmic_len = arg->key_rxmic_len;
+
+ if (arg->key_rsc_counter)
+ memcpy(&cmd->key_rsc_counter, &arg->key_rsc_counter,
+ sizeof(struct wmi_key_seq_counter));
+
+ tlv = (struct wmi_tlv *)(skb->data + sizeof(*cmd));
+ tlv->header = FIELD_PREP(WMI_TLV_TAG, WMI_TAG_ARRAY_BYTE) |
+ FIELD_PREP(WMI_TLV_LEN, key_len_aligned);
+ if (arg->key_data)
+ memcpy(tlv->value, (u8 *)arg->key_data, key_len_aligned);
+
+ ret = ath11k_wmi_cmd_send(wmi, skb, WMI_VDEV_INSTALL_KEY_CMDID);
+ if (ret) {
+ ath11k_warn(ar->ab,
+ "failed to send WMI_VDEV_INSTALL_KEY cmd\n");
+ dev_kfree_skb(skb);
+ }
+
+ ath11k_dbg(ar->ab, ATH11K_DBG_WMI,
+ "cmd vdev install key idx %d cipher %d len %d\n",
+ arg->key_idx, arg->key_cipher, arg->key_len);
+
+ return ret;
+}
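+
+ /* Note (illustrative): the key bytes are carried in an ARRAY_BYTE TLV
+ * whose length is rounded up to a u32 boundary, e.g. a 13-byte
+ * WEP-104 key gives key_len_aligned = 16 while cmd->key_len stays 13.
+ * Since the memcpy above copies key_len_aligned bytes, callers are
+ * assumed to provide key_data in a buffer padded to that size.
+ */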
+
+static inline void
+ath11k_wmi_copy_peer_flags(struct wmi_peer_assoc_complete_cmd *cmd,
+ struct peer_assoc_params *param,
+ bool hw_crypto_disabled)
+{
+ cmd->peer_flags = 0;
+
+ if (param->is_wme_set) {
+ if (param->qos_flag)
+ cmd->peer_flags |= WMI_PEER_QOS;
+ if (param->apsd_flag)
+ cmd->peer_flags |= WMI_PEER_APSD;
+ if (param->ht_flag)
+ cmd->peer_flags |= WMI_PEER_HT;
+ if (param->bw_40)
+ cmd->peer_flags |= WMI_PEER_40MHZ;
+ if (param->bw_80)
+ cmd->peer_flags |= WMI_PEER_80MHZ;
+ if (param->bw_160)
+ cmd->peer_flags |= WMI_PEER_160MHZ;
+
+ /* Typically if STBC is enabled for VHT it should be enabled
+ * for HT as well
+ */
+ if (param->stbc_flag)
+ cmd->peer_flags |= WMI_PEER_STBC;
+
+ /* Typically if LDPC is enabled for VHT it should be enabled
+ * for HT as well
+ */
+ if (param->ldpc_flag)
+ cmd->peer_flags |= WMI_PEER_LDPC;
+
+ if (param->static_mimops_flag)
+ cmd->peer_flags |= WMI_PEER_STATIC_MIMOPS;
+ if (param->dynamic_mimops_flag)
+ cmd->peer_flags |= WMI_PEER_DYN_MIMOPS;
+ if (param->spatial_mux_flag)
+ cmd->peer_flags |= WMI_PEER_SPATIAL_MUX;
+ if (param->vht_flag)
+ cmd->peer_flags |= WMI_PEER_VHT;
+ if (param->he_flag)
+ cmd->peer_flags |= WMI_PEER_HE;
+ if (param->twt_requester)
+ cmd->peer_flags |= WMI_PEER_TWT_REQ;
+ if (param->twt_responder)
+ cmd->peer_flags |= WMI_PEER_TWT_RESP;
+ }
+
+ /* Suppress authorization for all AUTH modes that need 4-way handshake
+ * (during re-association).
+ * Authorization will be done for these modes on key installation.
+ */
+ if (param->auth_flag)
+ cmd->peer_flags |= WMI_PEER_AUTH;
+ if (param->need_ptk_4_way) {
+ cmd->peer_flags |= WMI_PEER_NEED_PTK_4_WAY;
+ if (!hw_crypto_disabled && param->is_assoc)
+ cmd->peer_flags &= ~WMI_PEER_AUTH;
+ }
+ if (param->need_gtk_2_way)
+ cmd->peer_flags |= WMI_PEER_NEED_GTK_2_WAY;
+ /* safe mode bypasses the 4-way handshake */
+ if (param->safe_mode_enabled)
+ cmd->peer_flags &= ~(WMI_PEER_NEED_PTK_4_WAY |
+ WMI_PEER_NEED_GTK_2_WAY);
+
+ if (param->is_pmf_enabled)
+ cmd->peer_flags |= WMI_PEER_PMF;
+
+ /* Disable A-MSDU for station transmit, and for AP transmit to 11n
+ * stations, if the user configures it:
+ * if (param->amsdu_disable) - add once FW support is available
+ */
+
+ /* Target asserts if the node is marked HT while all MCS rates are
+ * set to 0. Mark the node as non-HT if all the MCS rates are
+ * disabled through iwpriv.
+ */
+ if (param->peer_ht_rates.num_rates == 0)
+ cmd->peer_flags &= ~WMI_PEER_HT;
+}
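+
+ /* Worked example (illustrative): for a WPA2-PSK station associating
+ * with hardware crypto in use, need_ptk_4_way is set, so
+ * WMI_PEER_NEED_PTK_4_WAY is added and, when is_assoc is also true,
+ * WMI_PEER_AUTH is cleared again; the peer is only authorized later,
+ * once the PTK is installed.
+ */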
+
+int ath11k_wmi_send_peer_assoc_cmd(struct ath11k *ar,
+ struct peer_assoc_params *param)
+{
+ struct ath11k_pdev_wmi *wmi = ar->wmi;
+ struct wmi_peer_assoc_complete_cmd *cmd;
+ struct wmi_vht_rate_set *mcs;
+ struct wmi_he_rate_set *he_mcs;
+ struct sk_buff *skb;
+ struct wmi_tlv *tlv;
+ void *ptr;
+ u32 peer_legacy_rates_align;
+ u32 peer_ht_rates_align;
+ int i, ret, len;
+
+ peer_legacy_rates_align = roundup(param->peer_legacy_rates.num_rates,
+ sizeof(u32));
+ peer_ht_rates_align = roundup(param->peer_ht_rates.num_rates,
+ sizeof(u32));
+
+ len = sizeof(*cmd) +
+ TLV_HDR_SIZE + (peer_legacy_rates_align * sizeof(u8)) +
+ TLV_HDR_SIZE + (peer_ht_rates_align * sizeof(u8)) +
+ sizeof(*mcs) + TLV_HDR_SIZE +
+ (sizeof(*he_mcs) * param->peer_he_mcs_count);
+
+ skb = ath11k_wmi_alloc_skb(wmi->wmi_ab, len);
+ if (!skb)
+ return -ENOMEM;
+
+ ptr = skb->data;
+
+ cmd = ptr;
+ cmd->tlv_header = FIELD_PREP(WMI_TLV_TAG,
+ WMI_TAG_PEER_ASSOC_COMPLETE_CMD) |
+ FIELD_PREP(WMI_TLV_LEN, sizeof(*cmd) - TLV_HDR_SIZE);
+
+ cmd->vdev_id = param->vdev_id;
+
+ cmd->peer_new_assoc = param->peer_new_assoc;
+ cmd->peer_associd = param->peer_associd;
+
+ ath11k_wmi_copy_peer_flags(cmd, param,
+ test_bit(ATH11K_FLAG_HW_CRYPTO_DISABLED,
+ &ar->ab->dev_flags));
+
+ ether_addr_copy(cmd->peer_macaddr.addr, param->peer_mac);
+
+ cmd->peer_rate_caps = param->peer_rate_caps;
+ cmd->peer_caps = param->peer_caps;
+ cmd->peer_listen_intval = param->peer_listen_intval;
+ cmd->peer_ht_caps = param->peer_ht_caps;
+ cmd->peer_max_mpdu = param->peer_max_mpdu;
+ cmd->peer_mpdu_density = param->peer_mpdu_density;
+ cmd->peer_vht_caps = param->peer_vht_caps;
+ cmd->peer_phymode = param->peer_phymode;
+
+ /* Update 11ax capabilities */
+ cmd->peer_he_cap_info = param->peer_he_cap_macinfo[0];
+ cmd->peer_he_cap_info_ext = param->peer_he_cap_macinfo[1];
+ cmd->peer_he_cap_info_internal = param->peer_he_cap_macinfo_internal;
+ cmd->peer_he_caps_6ghz = param->peer_he_caps_6ghz;
+ cmd->peer_he_ops = param->peer_he_ops;
+ memcpy(&cmd->peer_he_cap_phy, &param->peer_he_cap_phyinfo,
+ sizeof(param->peer_he_cap_phyinfo));
+ memcpy(&cmd->peer_ppet, &param->peer_ppet,
+ sizeof(param->peer_ppet));
+
+ /* Update peer legacy rate information */
+ ptr += sizeof(*cmd);
+
+ tlv = ptr;
+ tlv->header = FIELD_PREP(WMI_TLV_TAG, WMI_TAG_ARRAY_BYTE) |
+ FIELD_PREP(WMI_TLV_LEN, peer_legacy_rates_align);
+
+ ptr += TLV_HDR_SIZE;
+
+ cmd->num_peer_legacy_rates = param->peer_legacy_rates.num_rates;
+ memcpy(ptr, param->peer_legacy_rates.rates,
+ param->peer_legacy_rates.num_rates);
+
+ /* Update peer HT rate information */
+ ptr += peer_legacy_rates_align;
+
+ tlv = ptr;
+ tlv->header = FIELD_PREP(WMI_TLV_TAG, WMI_TAG_ARRAY_BYTE) |
+ FIELD_PREP(WMI_TLV_LEN, peer_ht_rates_align);
+ ptr += TLV_HDR_SIZE;
+ cmd->num_peer_ht_rates = param->peer_ht_rates.num_rates;
+ memcpy(ptr, param->peer_ht_rates.rates,
+ param->peer_ht_rates.num_rates);
+
+ /* VHT Rates */
+ ptr += peer_ht_rates_align;
+
+ mcs = ptr;
+
+ mcs->tlv_header = FIELD_PREP(WMI_TLV_TAG, WMI_TAG_VHT_RATE_SET) |
+ FIELD_PREP(WMI_TLV_LEN, sizeof(*mcs) - TLV_HDR_SIZE);
+
+ cmd->peer_nss = param->peer_nss;
+
+ /* Update bandwidth-NSS mapping */
+ cmd->peer_bw_rxnss_override = 0;
+ cmd->peer_bw_rxnss_override |= param->peer_bw_rxnss_override;
+
+ if (param->vht_capable) {
+ mcs->rx_max_rate = param->rx_max_rate;
+ mcs->rx_mcs_set = param->rx_mcs_set;
+ mcs->tx_max_rate = param->tx_max_rate;
+ mcs->tx_mcs_set = param->tx_mcs_set;
+ }
+
+ /* HE Rates */
+ cmd->peer_he_mcs = param->peer_he_mcs_count;
+ cmd->min_data_rate = param->min_data_rate;
+
+ ptr += sizeof(*mcs);
+
+ len = param->peer_he_mcs_count * sizeof(*he_mcs);
+
+ tlv = ptr;
+ tlv->header = FIELD_PREP(WMI_TLV_TAG, WMI_TAG_ARRAY_STRUCT) |
+ FIELD_PREP(WMI_TLV_LEN, len);
+ ptr += TLV_HDR_SIZE;
+
+ /* Loop through the HE rate set */
+ for (i = 0; i < param->peer_he_mcs_count; i++) {
+ he_mcs = ptr;
+ he_mcs->tlv_header = FIELD_PREP(WMI_TLV_TAG,
+ WMI_TAG_HE_RATE_SET) |
+ FIELD_PREP(WMI_TLV_LEN,
+ sizeof(*he_mcs) - TLV_HDR_SIZE);
+
+ he_mcs->rx_mcs_set = param->peer_he_tx_mcs_set[i];
+ he_mcs->tx_mcs_set = param->peer_he_rx_mcs_set[i];
+ ptr += sizeof(*he_mcs);
+ }
+
+ ret = ath11k_wmi_cmd_send(wmi, skb, WMI_PEER_ASSOC_CMDID);
+ if (ret) {
+ ath11k_warn(ar->ab,
+ "failed to send WMI_PEER_ASSOC_CMDID\n");
+ dev_kfree_skb(skb);
+ }
+
+ ath11k_dbg(ar->ab, ATH11K_DBG_WMI,
+ "cmd peer assoc vdev id %d assoc id %d peer mac %pM peer_flags %x rate_caps %x peer_caps %x listen_intval %d ht_caps %x max_mpdu %d nss %d phymode %d peer_mpdu_density %d vht_caps %x he cap_info %x he ops %x he cap_info_ext %x he phy %x %x %x peer_bw_rxnss_override %x\n",
+ cmd->vdev_id, cmd->peer_associd, param->peer_mac,
+ cmd->peer_flags, cmd->peer_rate_caps, cmd->peer_caps,
+ cmd->peer_listen_intval, cmd->peer_ht_caps,
+ cmd->peer_max_mpdu, cmd->peer_nss, cmd->peer_phymode,
+ cmd->peer_mpdu_density,
+ cmd->peer_vht_caps, cmd->peer_he_cap_info,
+ cmd->peer_he_ops, cmd->peer_he_cap_info_ext,
+ cmd->peer_he_cap_phy[0], cmd->peer_he_cap_phy[1],
+ cmd->peer_he_cap_phy[2],
+ cmd->peer_bw_rxnss_override);
+
+ return ret;
+}
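+
+ /* Buffer layout of the peer assoc command assembled above (sketch):
+ *
+ *	[ cmd ]
+ *	[ ARRAY_BYTE TLV | legacy rates, padded to a u32 multiple ]
+ *	[ ARRAY_BYTE TLV | HT rates, padded to a u32 multiple ]
+ *	[ wmi_vht_rate_set ]
+ *	[ ARRAY_STRUCT TLV | peer_he_mcs_count * wmi_he_rate_set ]
+ *
+ * which is exactly the 'len' computed before the skb allocation.
+ */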
+
+void ath11k_wmi_start_scan_init(struct ath11k *ar,
+ struct scan_req_params *arg)
+{
+ /* setup commonly used values */
+ arg->scan_req_id = 1;
+ if (ar->state_11d == ATH11K_11D_PREPARING)
+ arg->scan_priority = WMI_SCAN_PRIORITY_MEDIUM;
+ else
+ arg->scan_priority = WMI_SCAN_PRIORITY_LOW;
+ arg->dwell_time_active = 50;
+ arg->dwell_time_active_2g = 0;
+ arg->dwell_time_passive = 150;
+ arg->dwell_time_active_6g = 40;
+ arg->dwell_time_passive_6g = 30;
+ arg->min_rest_time = 50;
+ arg->max_rest_time = 500;
+ arg->repeat_probe_time = 0;
+ arg->probe_spacing_time = 0;
+ arg->idle_time = 0;
+ arg->max_scan_time = 20000;
+ arg->probe_delay = 5;
+ arg->notify_scan_events = WMI_SCAN_EVENT_STARTED |
+ WMI_SCAN_EVENT_COMPLETED |
+ WMI_SCAN_EVENT_BSS_CHANNEL |
+ WMI_SCAN_EVENT_FOREIGN_CHAN |
+ WMI_SCAN_EVENT_DEQUEUED;
+ arg->scan_flags |= WMI_SCAN_CHAN_STAT_EVENT;
+
+ if (test_bit(WMI_TLV_SERVICE_PASSIVE_SCAN_START_TIME_ENHANCE,
+ ar->ab->wmi_ab.svc_map))
+ arg->scan_ctrl_flags_ext |=
+ WMI_SCAN_FLAG_EXT_PASSIVE_SCAN_START_TIME_ENHANCE;
+
+ arg->num_bssid = 1;
+
+ /* fill bssid_list[0] with the broadcast address, otherwise the bssid
+ * and RA will be all-zero in the probe request
+ */
+ eth_broadcast_addr(arg->bssid_list[0].addr);
+}
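+
+ /* Minimal usage sketch (hypothetical caller): defaults are filled
+ * first and then overridden per request, e.g.
+ *
+ *	struct scan_req_params arg = {};
+ *
+ *	ath11k_wmi_start_scan_init(ar, &arg);
+ *	arg.vdev_id = vdev_id;
+ *	arg.scan_id = ATH11K_SCAN_ID;
+ *	ret = ath11k_wmi_send_scan_start_cmd(ar, &arg);
+ */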
+
+static inline void
+ath11k_wmi_copy_scan_event_cntrl_flags(struct wmi_start_scan_cmd *cmd,
+ struct scan_req_params *param)
+{
+ /* Scan events subscription */
+ if (param->scan_ev_started)
+ cmd->notify_scan_events |= WMI_SCAN_EVENT_STARTED;
+ if (param->scan_ev_completed)
+ cmd->notify_scan_events |= WMI_SCAN_EVENT_COMPLETED;
+ if (param->scan_ev_bss_chan)
+ cmd->notify_scan_events |= WMI_SCAN_EVENT_BSS_CHANNEL;
+ if (param->scan_ev_foreign_chan)
+ cmd->notify_scan_events |= WMI_SCAN_EVENT_FOREIGN_CHAN;
+ if (param->scan_ev_dequeued)
+ cmd->notify_scan_events |= WMI_SCAN_EVENT_DEQUEUED;
+ if (param->scan_ev_preempted)
+ cmd->notify_scan_events |= WMI_SCAN_EVENT_PREEMPTED;
+ if (param->scan_ev_start_failed)
+ cmd->notify_scan_events |= WMI_SCAN_EVENT_START_FAILED;
+ if (param->scan_ev_restarted)
+ cmd->notify_scan_events |= WMI_SCAN_EVENT_RESTARTED;
+ if (param->scan_ev_foreign_chn_exit)
+ cmd->notify_scan_events |= WMI_SCAN_EVENT_FOREIGN_CHAN_EXIT;
+ if (param->scan_ev_suspended)
+ cmd->notify_scan_events |= WMI_SCAN_EVENT_SUSPENDED;
+ if (param->scan_ev_resumed)
+ cmd->notify_scan_events |= WMI_SCAN_EVENT_RESUMED;
+
+ /* Set scan control flags */
+ cmd->scan_ctrl_flags = 0;
+ if (param->scan_f_passive)
+ cmd->scan_ctrl_flags |= WMI_SCAN_FLAG_PASSIVE;
+ if (param->scan_f_strict_passive_pch)
+ cmd->scan_ctrl_flags |= WMI_SCAN_FLAG_STRICT_PASSIVE_ON_PCHN;
+ if (param->scan_f_promisc_mode)
+ cmd->scan_ctrl_flags |= WMI_SCAN_FILTER_PROMISCUOS;
+ if (param->scan_f_capture_phy_err)
+ cmd->scan_ctrl_flags |= WMI_SCAN_CAPTURE_PHY_ERROR;
+ if (param->scan_f_half_rate)
+ cmd->scan_ctrl_flags |= WMI_SCAN_FLAG_HALF_RATE_SUPPORT;
+ if (param->scan_f_quarter_rate)
+ cmd->scan_ctrl_flags |= WMI_SCAN_FLAG_QUARTER_RATE_SUPPORT;
+ if (param->scan_f_cck_rates)
+ cmd->scan_ctrl_flags |= WMI_SCAN_ADD_CCK_RATES;
+ if (param->scan_f_ofdm_rates)
+ cmd->scan_ctrl_flags |= WMI_SCAN_ADD_OFDM_RATES;
+ if (param->scan_f_chan_stat_evnt)
+ cmd->scan_ctrl_flags |= WMI_SCAN_CHAN_STAT_EVENT;
+ if (param->scan_f_filter_prb_req)
+ cmd->scan_ctrl_flags |= WMI_SCAN_FILTER_PROBE_REQ;
+ if (param->scan_f_bcast_probe)
+ cmd->scan_ctrl_flags |= WMI_SCAN_ADD_BCAST_PROBE_REQ;
+ if (param->scan_f_offchan_mgmt_tx)
+ cmd->scan_ctrl_flags |= WMI_SCAN_OFFCHAN_MGMT_TX;
+ if (param->scan_f_offchan_data_tx)
+ cmd->scan_ctrl_flags |= WMI_SCAN_OFFCHAN_DATA_TX;
+ if (param->scan_f_force_active_dfs_chn)
+ cmd->scan_ctrl_flags |= WMI_SCAN_FLAG_FORCE_ACTIVE_ON_DFS;
+ if (param->scan_f_add_tpc_ie_in_probe)
+ cmd->scan_ctrl_flags |= WMI_SCAN_ADD_TPC_IE_IN_PROBE_REQ;
+ if (param->scan_f_add_ds_ie_in_probe)
+ cmd->scan_ctrl_flags |= WMI_SCAN_ADD_DS_IE_IN_PROBE_REQ;
+ if (param->scan_f_add_spoofed_mac_in_probe)
+ cmd->scan_ctrl_flags |= WMI_SCAN_ADD_SPOOF_MAC_IN_PROBE_REQ;
+ if (param->scan_f_add_rand_seq_in_probe)
+ cmd->scan_ctrl_flags |= WMI_SCAN_RANDOM_SEQ_NO_IN_PROBE_REQ;
+ if (param->scan_f_en_ie_whitelist_in_probe)
+ cmd->scan_ctrl_flags |=
+ WMI_SCAN_ENABLE_IE_WHTELIST_IN_PROBE_REQ;
+
+ /* adaptive dwell mode is encoded in 3 bits (bits 21-23) of scan_ctrl_flags */
+ WMI_SCAN_SET_DWELL_MODE(cmd->scan_ctrl_flags,
+ param->adaptive_dwell_time_mode);
+
+ cmd->scan_ctrl_flags_ext = param->scan_ctrl_flags_ext;
+}
+
+int ath11k_wmi_send_scan_start_cmd(struct ath11k *ar,
+ struct scan_req_params *params)
+{
+ struct ath11k_pdev_wmi *wmi = ar->wmi;
+ struct wmi_start_scan_cmd *cmd;
+ struct wmi_ssid *ssid = NULL;
+ struct wmi_mac_addr *bssid;
+ struct sk_buff *skb;
+ struct wmi_tlv *tlv;
+ void *ptr;
+ int i, ret, len;
+ u32 *tmp_ptr;
+ u16 extraie_len_with_pad = 0;
+ struct hint_short_ssid *s_ssid = NULL;
+ struct hint_bssid *hint_bssid = NULL;
+
+ len = sizeof(*cmd);
+
+ len += TLV_HDR_SIZE;
+ if (params->num_chan)
+ len += params->num_chan * sizeof(u32);
+
+ len += TLV_HDR_SIZE;
+ if (params->num_ssids)
+ len += params->num_ssids * sizeof(*ssid);
+
+ len += TLV_HDR_SIZE;
+ if (params->num_bssid)
+ len += sizeof(*bssid) * params->num_bssid;
+
+ len += TLV_HDR_SIZE;
+ if (params->extraie.len && params->extraie.len <= 0xFFFF)
+ extraie_len_with_pad =
+ roundup(params->extraie.len, sizeof(u32));
+ len += extraie_len_with_pad;
+
+ if (params->num_hint_bssid)
+ len += TLV_HDR_SIZE +
+ params->num_hint_bssid * sizeof(struct hint_bssid);
+
+ if (params->num_hint_s_ssid)
+ len += TLV_HDR_SIZE +
+ params->num_hint_s_ssid * sizeof(struct hint_short_ssid);
+
+ skb = ath11k_wmi_alloc_skb(wmi->wmi_ab, len);
+ if (!skb)
+ return -ENOMEM;
+
+ ptr = skb->data;
+
+ cmd = ptr;
+ cmd->tlv_header = FIELD_PREP(WMI_TLV_TAG, WMI_TAG_START_SCAN_CMD) |
+ FIELD_PREP(WMI_TLV_LEN, sizeof(*cmd) - TLV_HDR_SIZE);
+
+ cmd->scan_id = params->scan_id;
+ cmd->scan_req_id = params->scan_req_id;
+ cmd->vdev_id = params->vdev_id;
+ cmd->scan_priority = params->scan_priority;
+ cmd->notify_scan_events = params->notify_scan_events;
+
+ ath11k_wmi_copy_scan_event_cntrl_flags(cmd, params);
+
+ cmd->dwell_time_active = params->dwell_time_active;
+ cmd->dwell_time_active_2g = params->dwell_time_active_2g;
+ cmd->dwell_time_passive = params->dwell_time_passive;
+ cmd->dwell_time_active_6g = params->dwell_time_active_6g;
+ cmd->dwell_time_passive_6g = params->dwell_time_passive_6g;
+ cmd->min_rest_time = params->min_rest_time;
+ cmd->max_rest_time = params->max_rest_time;
+ cmd->repeat_probe_time = params->repeat_probe_time;
+ cmd->probe_spacing_time = params->probe_spacing_time;
+ cmd->idle_time = params->idle_time;
+ cmd->max_scan_time = params->max_scan_time;
+ cmd->probe_delay = params->probe_delay;
+ cmd->burst_duration = params->burst_duration;
+ cmd->num_chan = params->num_chan;
+ cmd->num_bssid = params->num_bssid;
+ cmd->num_ssids = params->num_ssids;
+ cmd->ie_len = params->extraie.len;
+ cmd->n_probes = params->n_probes;
+ ether_addr_copy(cmd->mac_addr.addr, params->mac_addr.addr);
+ ether_addr_copy(cmd->mac_mask.addr, params->mac_mask.addr);
+
+ ptr += sizeof(*cmd);
+
+ len = params->num_chan * sizeof(u32);
+
+ tlv = ptr;
+ tlv->header = FIELD_PREP(WMI_TLV_TAG, WMI_TAG_ARRAY_UINT32) |
+ FIELD_PREP(WMI_TLV_LEN, len);
+ ptr += TLV_HDR_SIZE;
+ tmp_ptr = (u32 *)ptr;
+
+ for (i = 0; i < params->num_chan; ++i)
+ tmp_ptr[i] = params->chan_list[i];
+
+ ptr += len;
+
+ len = params->num_ssids * sizeof(*ssid);
+ tlv = ptr;
+ tlv->header = FIELD_PREP(WMI_TLV_TAG, WMI_TAG_ARRAY_FIXED_STRUCT) |
+ FIELD_PREP(WMI_TLV_LEN, len);
+
+ ptr += TLV_HDR_SIZE;
+
+ if (params->num_ssids) {
+ ssid = ptr;
+ for (i = 0; i < params->num_ssids; ++i) {
+ ssid->ssid_len = params->ssid[i].length;
+ memcpy(ssid->ssid, params->ssid[i].ssid,
+ params->ssid[i].length);
+ ssid++;
+ }
+ }
+
+ ptr += (params->num_ssids * sizeof(*ssid));
+ len = params->num_bssid * sizeof(*bssid);
+ tlv = ptr;
+ tlv->header = FIELD_PREP(WMI_TLV_TAG, WMI_TAG_ARRAY_FIXED_STRUCT) |
+ FIELD_PREP(WMI_TLV_LEN, len);
+
+ ptr += TLV_HDR_SIZE;
+ bssid = ptr;
+
+ if (params->num_bssid) {
+ for (i = 0; i < params->num_bssid; ++i) {
+ ether_addr_copy(bssid->addr,
+ params->bssid_list[i].addr);
+ bssid++;
+ }
+ }
+
+ ptr += params->num_bssid * sizeof(*bssid);
+
+ len = extraie_len_with_pad;
+ tlv = ptr;
+ tlv->header = FIELD_PREP(WMI_TLV_TAG, WMI_TAG_ARRAY_BYTE) |
+ FIELD_PREP(WMI_TLV_LEN, len);
+ ptr += TLV_HDR_SIZE;
+
+ if (extraie_len_with_pad)
+ memcpy(ptr, params->extraie.ptr,
+ params->extraie.len);
+
+ ptr += extraie_len_with_pad;
+
+ if (params->num_hint_s_ssid) {
+ len = params->num_hint_s_ssid * sizeof(struct hint_short_ssid);
+ tlv = ptr;
+ tlv->header = FIELD_PREP(WMI_TLV_TAG, WMI_TAG_ARRAY_FIXED_STRUCT) |
+ FIELD_PREP(WMI_TLV_LEN, len);
+ ptr += TLV_HDR_SIZE;
+ s_ssid = ptr;
+ for (i = 0; i < params->num_hint_s_ssid; ++i) {
+ s_ssid->freq_flags = params->hint_s_ssid[i].freq_flags;
+ s_ssid->short_ssid = params->hint_s_ssid[i].short_ssid;
+ s_ssid++;
+ }
+ ptr += len;
+ }
+
+ if (params->num_hint_bssid) {
+ len = params->num_hint_bssid * sizeof(struct hint_bssid);
+ tlv = ptr;
+ tlv->header = FIELD_PREP(WMI_TLV_TAG, WMI_TAG_ARRAY_FIXED_STRUCT) |
+ FIELD_PREP(WMI_TLV_LEN, len);
+ ptr += TLV_HDR_SIZE;
+ hint_bssid = ptr;
+ for (i = 0; i < params->num_hint_bssid; ++i) {
+ hint_bssid->freq_flags =
+ params->hint_bssid[i].freq_flags;
+ ether_addr_copy(&hint_bssid->bssid.addr[0],
+ &params->hint_bssid[i].bssid.addr[0]);
+ hint_bssid++;
+ }
+ }
+
+ ret = ath11k_wmi_cmd_send(wmi, skb,
+ WMI_START_SCAN_CMDID);
+ if (ret) {
+ ath11k_warn(ar->ab, "failed to send WMI_START_SCAN_CMDID\n");
+ dev_kfree_skb(skb);
+ }
+
+ ath11k_dbg(ar->ab, ATH11K_DBG_WMI, "cmd start scan");
+
+ return ret;
+}
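+
+ /* Length bookkeeping example (illustrative): for a request with two
+ * channels, one SSID, one BSSID, no extra IEs and no hints, the len
+ * computed above works out to
+ *
+ *	sizeof(*cmd)
+ *	+ TLV_HDR_SIZE + 2 * sizeof(u32)	(channel list)
+ *	+ TLV_HDR_SIZE + 1 * sizeof(*ssid)	(SSID list)
+ *	+ TLV_HDR_SIZE + 1 * sizeof(*bssid)	(BSSID list)
+ *	+ TLV_HDR_SIZE				(empty IE array)
+ *
+ * note that an array TLV header is emitted even when its payload is
+ * empty.
+ */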
+
+int ath11k_wmi_send_scan_stop_cmd(struct ath11k *ar,
+ struct scan_cancel_param *param)
+{
+ struct ath11k_pdev_wmi *wmi = ar->wmi;
+ struct wmi_stop_scan_cmd *cmd;
+ struct sk_buff *skb;
+ int ret;
+
+ skb = ath11k_wmi_alloc_skb(wmi->wmi_ab, sizeof(*cmd));
+ if (!skb)
+ return -ENOMEM;
+
+ cmd = (struct wmi_stop_scan_cmd *)skb->data;
+
+ cmd->tlv_header = FIELD_PREP(WMI_TLV_TAG, WMI_TAG_STOP_SCAN_CMD) |
+ FIELD_PREP(WMI_TLV_LEN, sizeof(*cmd) - TLV_HDR_SIZE);
+
+ cmd->vdev_id = param->vdev_id;
+ cmd->requestor = param->requester;
+ cmd->scan_id = param->scan_id;
+ cmd->pdev_id = param->pdev_id;
+ /* stop the scan with the corresponding scan_id */
+ if (param->req_type == WLAN_SCAN_CANCEL_PDEV_ALL) {
+ /* Cancelling all scans */
+ cmd->req_type = WMI_SCAN_STOP_ALL;
+ } else if (param->req_type == WLAN_SCAN_CANCEL_VDEV_ALL) {
+ /* Cancelling VAP scans */
+ cmd->req_type = WMI_SCN_STOP_VAP_ALL;
+ } else if (param->req_type == WLAN_SCAN_CANCEL_SINGLE) {
+ /* Cancelling specific scan */
+ cmd->req_type = WMI_SCAN_STOP_ONE;
+ } else {
+ ath11k_warn(ar->ab, "invalid scan cancel param %d",
+ param->req_type);
+ dev_kfree_skb(skb);
+ return -EINVAL;
+ }
+
+ ret = ath11k_wmi_cmd_send(wmi, skb,
+ WMI_STOP_SCAN_CMDID);
+ if (ret) {
+ ath11k_warn(ar->ab, "failed to send WMI_STOP_SCAN_CMDID\n");
+ dev_kfree_skb(skb);
+ }
+
+ ath11k_dbg(ar->ab, ATH11K_DBG_WMI, "cmd stop scan");
+
+ return ret;
+}
+
+int ath11k_wmi_send_scan_chan_list_cmd(struct ath11k *ar,
+ struct scan_chan_list_params *chan_list)
+{
+ struct ath11k_pdev_wmi *wmi = ar->wmi;
+ struct wmi_scan_chan_list_cmd *cmd;
+ struct sk_buff *skb;
+ struct wmi_channel *chan_info;
+ struct channel_param *tchan_info;
+ struct wmi_tlv *tlv;
+ void *ptr;
+ int i, ret, len;
+ u16 num_send_chans, num_sends = 0, max_chan_limit = 0;
+ u32 *reg1, *reg2;
+
+ tchan_info = chan_list->ch_param;
+ while (chan_list->nallchans) {
+ len = sizeof(*cmd) + TLV_HDR_SIZE;
+ max_chan_limit = (wmi->wmi_ab->max_msg_len[ar->pdev_idx] - len) /
+ sizeof(*chan_info);
+
+ if (chan_list->nallchans > max_chan_limit)
+ num_send_chans = max_chan_limit;
+ else
+ num_send_chans = chan_list->nallchans;
+
+ chan_list->nallchans -= num_send_chans;
+ len += sizeof(*chan_info) * num_send_chans;
+
+ skb = ath11k_wmi_alloc_skb(wmi->wmi_ab, len);
+ if (!skb)
+ return -ENOMEM;
+
+ cmd = (struct wmi_scan_chan_list_cmd *)skb->data;
+ cmd->tlv_header = FIELD_PREP(WMI_TLV_TAG, WMI_TAG_SCAN_CHAN_LIST_CMD) |
+ FIELD_PREP(WMI_TLV_LEN, sizeof(*cmd) - TLV_HDR_SIZE);
+ cmd->pdev_id = chan_list->pdev_id;
+ cmd->num_scan_chans = num_send_chans;
+ if (num_sends)
+ cmd->flags |= WMI_APPEND_TO_EXISTING_CHAN_LIST_FLAG;
+
+ ath11k_dbg(ar->ab, ATH11K_DBG_WMI,
+ "no.of chan = %d len = %d pdev_id = %d num_sends = %d\n",
+ num_send_chans, len, cmd->pdev_id, num_sends);
+
+ ptr = skb->data + sizeof(*cmd);
+
+ len = sizeof(*chan_info) * num_send_chans;
+ tlv = ptr;
+ tlv->header = FIELD_PREP(WMI_TLV_TAG, WMI_TAG_ARRAY_STRUCT) |
+ FIELD_PREP(WMI_TLV_LEN, len - TLV_HDR_SIZE);
+ ptr += TLV_HDR_SIZE;
+
+ for (i = 0; i < num_send_chans; ++i) {
+ chan_info = ptr;
+ memset(chan_info, 0, sizeof(*chan_info));
+ len = sizeof(*chan_info);
+ chan_info->tlv_header = FIELD_PREP(WMI_TLV_TAG,
+ WMI_TAG_CHANNEL) |
+ FIELD_PREP(WMI_TLV_LEN,
+ len - TLV_HDR_SIZE);
+
+ reg1 = &chan_info->reg_info_1;
+ reg2 = &chan_info->reg_info_2;
+ chan_info->mhz = tchan_info->mhz;
+ chan_info->band_center_freq1 = tchan_info->cfreq1;
+ chan_info->band_center_freq2 = tchan_info->cfreq2;
+
+ if (tchan_info->is_chan_passive)
+ chan_info->info |= WMI_CHAN_INFO_PASSIVE;
+ if (tchan_info->allow_he)
+ chan_info->info |= WMI_CHAN_INFO_ALLOW_HE;
+ else if (tchan_info->allow_vht)
+ chan_info->info |= WMI_CHAN_INFO_ALLOW_VHT;
+ else if (tchan_info->allow_ht)
+ chan_info->info |= WMI_CHAN_INFO_ALLOW_HT;
+ if (tchan_info->half_rate)
+ chan_info->info |= WMI_CHAN_INFO_HALF_RATE;
+ if (tchan_info->quarter_rate)
+ chan_info->info |= WMI_CHAN_INFO_QUARTER_RATE;
+ if (tchan_info->psc_channel)
+ chan_info->info |= WMI_CHAN_INFO_PSC;
+ if (tchan_info->dfs_set)
+ chan_info->info |= WMI_CHAN_INFO_DFS;
+
+ chan_info->info |= FIELD_PREP(WMI_CHAN_INFO_MODE,
+ tchan_info->phy_mode);
+ *reg1 |= FIELD_PREP(WMI_CHAN_REG_INFO1_MIN_PWR,
+ tchan_info->minpower);
+ *reg1 |= FIELD_PREP(WMI_CHAN_REG_INFO1_MAX_PWR,
+ tchan_info->maxpower);
+ *reg1 |= FIELD_PREP(WMI_CHAN_REG_INFO1_MAX_REG_PWR,
+ tchan_info->maxregpower);
+ *reg1 |= FIELD_PREP(WMI_CHAN_REG_INFO1_REG_CLS,
+ tchan_info->reg_class_id);
+ *reg2 |= FIELD_PREP(WMI_CHAN_REG_INFO2_ANT_MAX,
+ tchan_info->antennamax);
+ *reg2 |= FIELD_PREP(WMI_CHAN_REG_INFO2_MAX_TX_PWR,
+ tchan_info->maxregpower);
+
+ ath11k_dbg(ar->ab, ATH11K_DBG_WMI,
+ "chan scan list chan[%d] = %u, chan_info->info %8x\n",
+ i, chan_info->mhz, chan_info->info);
+
+ ptr += sizeof(*chan_info);
+
+ tchan_info++;
+ }
+
+ ret = ath11k_wmi_cmd_send(wmi, skb, WMI_SCAN_CHAN_LIST_CMDID);
+ if (ret) {
+ ath11k_warn(ar->ab, "failed to send WMI_SCAN_CHAN_LIST cmd\n");
+ dev_kfree_skb(skb);
+ return ret;
+ }
+
+ ath11k_dbg(ar->ab, ATH11K_DBG_WMI, "cmd scan chan list channels %d",
+ num_send_chans);
+
+ num_sends++;
+ }
+
+ return 0;
+}
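+
+ /* Chunking arithmetic (illustrative, assuming a max WMI message size
+ * of 4096 bytes): max_chan_limit = (4096 - sizeof(*cmd) -
+ * TLV_HDR_SIZE) / sizeof(*chan_info), so a channel list longer than
+ * that is split across several WMI_SCAN_CHAN_LIST commands, each one
+ * after the first carrying WMI_APPEND_TO_EXISTING_CHAN_LIST_FLAG.
+ */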
+
+int ath11k_wmi_send_wmm_update_cmd_tlv(struct ath11k *ar, u32 vdev_id,
+ struct wmi_wmm_params_all_arg *param)
+{
+ struct ath11k_pdev_wmi *wmi = ar->wmi;
+ struct wmi_vdev_set_wmm_params_cmd *cmd;
+ struct wmi_wmm_params *wmm_param;
+ struct wmi_wmm_params_arg *wmi_wmm_arg;
+ struct sk_buff *skb;
+ int ret, ac;
+
+ skb = ath11k_wmi_alloc_skb(wmi->wmi_ab, sizeof(*cmd));
+ if (!skb)
+ return -ENOMEM;
+
+ cmd = (struct wmi_vdev_set_wmm_params_cmd *)skb->data;
+ cmd->tlv_header = FIELD_PREP(WMI_TLV_TAG,
+ WMI_TAG_VDEV_SET_WMM_PARAMS_CMD) |
+ FIELD_PREP(WMI_TLV_LEN, sizeof(*cmd) - TLV_HDR_SIZE);
+
+ cmd->vdev_id = vdev_id;
+ cmd->wmm_param_type = 0;
+
+ for (ac = 0; ac < WME_NUM_AC; ac++) {
+ switch (ac) {
+ case WME_AC_BE:
+ wmi_wmm_arg = &param->ac_be;
+ break;
+ case WME_AC_BK:
+ wmi_wmm_arg = &param->ac_bk;
+ break;
+ case WME_AC_VI:
+ wmi_wmm_arg = &param->ac_vi;
+ break;
+ case WME_AC_VO:
+ wmi_wmm_arg = &param->ac_vo;
+ break;
+ }
+
+ wmm_param = (struct wmi_wmm_params *)&cmd->wmm_params[ac];
+ wmm_param->tlv_header =
+ FIELD_PREP(WMI_TLV_TAG,
+ WMI_TAG_VDEV_SET_WMM_PARAMS_CMD) |
+ FIELD_PREP(WMI_TLV_LEN,
+ sizeof(*wmm_param) - TLV_HDR_SIZE);
+
+ wmm_param->aifs = wmi_wmm_arg->aifs;
+ wmm_param->cwmin = wmi_wmm_arg->cwmin;
+ wmm_param->cwmax = wmi_wmm_arg->cwmax;
+ wmm_param->txoplimit = wmi_wmm_arg->txop;
+ wmm_param->acm = wmi_wmm_arg->acm;
+ wmm_param->no_ack = wmi_wmm_arg->no_ack;
+
+ ath11k_dbg(ar->ab, ATH11K_DBG_WMI,
+ "wmm set ac %d aifs %d cwmin %d cwmax %d txop %d acm %d no_ack %d\n",
+ ac, wmm_param->aifs, wmm_param->cwmin,
+ wmm_param->cwmax, wmm_param->txoplimit,
+ wmm_param->acm, wmm_param->no_ack);
+ }
+ ret = ath11k_wmi_cmd_send(wmi, skb,
+ WMI_VDEV_SET_WMM_PARAMS_CMDID);
+ if (ret) {
+ ath11k_warn(ar->ab,
+ "failed to send WMI_VDEV_SET_WMM_PARAMS_CMDID");
+ dev_kfree_skb(skb);
+ }
+
+ ath11k_dbg(ar->ab, ATH11K_DBG_WMI, "cmd vdev set wmm params");
+
+ return ret;
+}
+
+int ath11k_wmi_send_dfs_phyerr_offload_enable_cmd(struct ath11k *ar,
+ u32 pdev_id)
+{
+ struct ath11k_pdev_wmi *wmi = ar->wmi;
+ struct wmi_dfs_phyerr_offload_cmd *cmd;
+ struct sk_buff *skb;
+ int ret;
+
+ skb = ath11k_wmi_alloc_skb(wmi->wmi_ab, sizeof(*cmd));
+ if (!skb)
+ return -ENOMEM;
+
+ cmd = (struct wmi_dfs_phyerr_offload_cmd *)skb->data;
+ cmd->tlv_header =
+ FIELD_PREP(WMI_TLV_TAG,
+ WMI_TAG_PDEV_DFS_PHYERR_OFFLOAD_ENABLE_CMD) |
+ FIELD_PREP(WMI_TLV_LEN, sizeof(*cmd) - TLV_HDR_SIZE);
+
+ cmd->pdev_id = pdev_id;
+
+ ret = ath11k_wmi_cmd_send(wmi, skb,
+ WMI_PDEV_DFS_PHYERR_OFFLOAD_ENABLE_CMDID);
+ if (ret) {
+ ath11k_warn(ar->ab,
+ "failed to send WMI_PDEV_DFS_PHYERR_OFFLOAD_ENABLE cmd\n");
+ dev_kfree_skb(skb);
+ }
+
+ ath11k_dbg(ar->ab, ATH11K_DBG_WMI,
+ "cmd pdev dfs phyerr offload enable pdev id %d\n", pdev_id);
+
+ return ret;
+}
+
+int ath11k_wmi_delba_send(struct ath11k *ar, u32 vdev_id, const u8 *mac,
+ u32 tid, u32 initiator, u32 reason)
+{
+ struct ath11k_pdev_wmi *wmi = ar->wmi;
+ struct wmi_delba_send_cmd *cmd;
+ struct sk_buff *skb;
+ int ret;
+
+ skb = ath11k_wmi_alloc_skb(wmi->wmi_ab, sizeof(*cmd));
+ if (!skb)
+ return -ENOMEM;
+
+ cmd = (struct wmi_delba_send_cmd *)skb->data;
+ cmd->tlv_header = FIELD_PREP(WMI_TLV_TAG, WMI_TAG_DELBA_SEND_CMD) |
+ FIELD_PREP(WMI_TLV_LEN, sizeof(*cmd) - TLV_HDR_SIZE);
+ cmd->vdev_id = vdev_id;
+ ether_addr_copy(cmd->peer_macaddr.addr, mac);
+ cmd->tid = tid;
+ cmd->initiator = initiator;
+ cmd->reasoncode = reason;
+
+ ret = ath11k_wmi_cmd_send(wmi, skb, WMI_DELBA_SEND_CMDID);
+
+ if (ret) {
+ ath11k_warn(ar->ab,
+ "failed to send WMI_DELBA_SEND_CMDID cmd\n");
+ dev_kfree_skb(skb);
+ }
+
+ ath11k_dbg(ar->ab, ATH11K_DBG_WMI,
+ "cmd delba send vdev_id 0x%X mac_addr %pM tid %u initiator %u reason %u\n",
+ vdev_id, mac, tid, initiator, reason);
+
+ return ret;
+}
+
+int ath11k_wmi_addba_set_resp(struct ath11k *ar, u32 vdev_id, const u8 *mac,
+ u32 tid, u32 status)
+{
+ struct ath11k_pdev_wmi *wmi = ar->wmi;
+ struct wmi_addba_setresponse_cmd *cmd;
+ struct sk_buff *skb;
+ int ret;
+
+ skb = ath11k_wmi_alloc_skb(wmi->wmi_ab, sizeof(*cmd));
+ if (!skb)
+ return -ENOMEM;
+
+ cmd = (struct wmi_addba_setresponse_cmd *)skb->data;
+ cmd->tlv_header =
+ FIELD_PREP(WMI_TLV_TAG, WMI_TAG_ADDBA_SETRESPONSE_CMD) |
+ FIELD_PREP(WMI_TLV_LEN, sizeof(*cmd) - TLV_HDR_SIZE);
+ cmd->vdev_id = vdev_id;
+ ether_addr_copy(cmd->peer_macaddr.addr, mac);
+ cmd->tid = tid;
+ cmd->statuscode = status;
+
+ ret = ath11k_wmi_cmd_send(wmi, skb, WMI_ADDBA_SET_RESP_CMDID);
+
+ if (ret) {
+ ath11k_warn(ar->ab,
+ "failed to send WMI_ADDBA_SET_RESP_CMDID cmd\n");
+ dev_kfree_skb(skb);
+ }
+
+ ath11k_dbg(ar->ab, ATH11K_DBG_WMI,
+ "cmd addba set resp vdev_id 0x%X mac_addr %pM tid %u status %u\n",
+ vdev_id, mac, tid, status);
+
+ return ret;
+}
+
+int ath11k_wmi_addba_send(struct ath11k *ar, u32 vdev_id, const u8 *mac,
+ u32 tid, u32 buf_size)
+{
+ struct ath11k_pdev_wmi *wmi = ar->wmi;
+ struct wmi_addba_send_cmd *cmd;
+ struct sk_buff *skb;
+ int ret;
+
+ skb = ath11k_wmi_alloc_skb(wmi->wmi_ab, sizeof(*cmd));
+ if (!skb)
+ return -ENOMEM;
+
+ cmd = (struct wmi_addba_send_cmd *)skb->data;
+ cmd->tlv_header = FIELD_PREP(WMI_TLV_TAG, WMI_TAG_ADDBA_SEND_CMD) |
+ FIELD_PREP(WMI_TLV_LEN, sizeof(*cmd) - TLV_HDR_SIZE);
+ cmd->vdev_id = vdev_id;
+ ether_addr_copy(cmd->peer_macaddr.addr, mac);
+ cmd->tid = tid;
+ cmd->buffersize = buf_size;
+
+ ret = ath11k_wmi_cmd_send(wmi, skb, WMI_ADDBA_SEND_CMDID);
+
+ if (ret) {
+ ath11k_warn(ar->ab,
+ "failed to send WMI_ADDBA_SEND_CMDID cmd\n");
+ dev_kfree_skb(skb);
+ }
+
+ ath11k_dbg(ar->ab, ATH11K_DBG_WMI,
+ "cmd addba send vdev_id 0x%X mac_addr %pM tid %u bufsize %u\n",
+ vdev_id, mac, tid, buf_size);
+
+ return ret;
+}
+
+int ath11k_wmi_addba_clear_resp(struct ath11k *ar, u32 vdev_id, const u8 *mac)
+{
+ struct ath11k_pdev_wmi *wmi = ar->wmi;
+ struct wmi_addba_clear_resp_cmd *cmd;
+ struct sk_buff *skb;
+ int ret;
+
+ skb = ath11k_wmi_alloc_skb(wmi->wmi_ab, sizeof(*cmd));
+ if (!skb)
+ return -ENOMEM;
+
+ cmd = (struct wmi_addba_clear_resp_cmd *)skb->data;
+ cmd->tlv_header =
+ FIELD_PREP(WMI_TLV_TAG, WMI_TAG_ADDBA_CLEAR_RESP_CMD) |
+ FIELD_PREP(WMI_TLV_LEN, sizeof(*cmd) - TLV_HDR_SIZE);
+ cmd->vdev_id = vdev_id;
+ ether_addr_copy(cmd->peer_macaddr.addr, mac);
+
+ ret = ath11k_wmi_cmd_send(wmi, skb, WMI_ADDBA_CLEAR_RESP_CMDID);
+
+ if (ret) {
+ ath11k_warn(ar->ab,
+ "failed to send WMI_ADDBA_CLEAR_RESP_CMDID cmd\n");
+ dev_kfree_skb(skb);
+ }
+
+ ath11k_dbg(ar->ab, ATH11K_DBG_WMI,
+ "cmd addba clear resp vdev_id 0x%X mac_addr %pM\n",
+ vdev_id, mac);
+
+ return ret;
+}
+
+int ath11k_wmi_pdev_peer_pktlog_filter(struct ath11k *ar, u8 *addr, u8 enable)
+{
+ struct ath11k_pdev_wmi *wmi = ar->wmi;
+ struct wmi_pdev_pktlog_filter_cmd *cmd;
+ struct wmi_pdev_pktlog_filter_info *info;
+ struct sk_buff *skb;
+ struct wmi_tlv *tlv;
+ void *ptr;
+ int ret, len;
+
+ len = sizeof(*cmd) + sizeof(*info) + TLV_HDR_SIZE;
+ skb = ath11k_wmi_alloc_skb(wmi->wmi_ab, len);
+ if (!skb)
+ return -ENOMEM;
+
+ cmd = (struct wmi_pdev_pktlog_filter_cmd *)skb->data;
+
+ cmd->tlv_header = FIELD_PREP(WMI_TLV_TAG, WMI_TAG_PDEV_PEER_PKTLOG_FILTER_CMD) |
+ FIELD_PREP(WMI_TLV_LEN, sizeof(*cmd) - TLV_HDR_SIZE);
+
+ cmd->pdev_id = DP_HW2SW_MACID(ar->pdev->pdev_id);
+ cmd->num_mac = 1;
+ cmd->enable = enable;
+
+ ptr = skb->data + sizeof(*cmd);
+
+ tlv = ptr;
+ tlv->header = FIELD_PREP(WMI_TLV_TAG, WMI_TAG_ARRAY_STRUCT) |
+ FIELD_PREP(WMI_TLV_LEN, sizeof(*info));
+
+ ptr += TLV_HDR_SIZE;
+ info = ptr;
+
+ ether_addr_copy(info->peer_macaddr.addr, addr);
+ info->tlv_header = FIELD_PREP(WMI_TLV_TAG, WMI_TAG_PDEV_PEER_PKTLOG_FILTER_INFO) |
+ FIELD_PREP(WMI_TLV_LEN,
+ sizeof(*info) - TLV_HDR_SIZE);
+
+ ret = ath11k_wmi_cmd_send(wmi, skb,
+ WMI_PDEV_PKTLOG_FILTER_CMDID);
+ if (ret) {
+ ath11k_warn(ar->ab, "failed to send WMI_PDEV_PKTLOG_ENABLE_CMDID\n");
+ dev_kfree_skb(skb);
+ }
+
+ ath11k_dbg(ar->ab, ATH11K_DBG_WMI, "cmd pdev pktlog filter");
+
+ return ret;
+}
+
+int
+ath11k_wmi_send_init_country_cmd(struct ath11k *ar,
+ struct wmi_init_country_params init_cc_params)
+{
+ struct ath11k_pdev_wmi *wmi = ar->wmi;
+ struct wmi_init_country_cmd *cmd;
+ struct sk_buff *skb;
+ int ret;
+
+ skb = ath11k_wmi_alloc_skb(wmi->wmi_ab, sizeof(*cmd));
+ if (!skb)
+ return -ENOMEM;
+
+ cmd = (struct wmi_init_country_cmd *)skb->data;
+ cmd->tlv_header =
+ FIELD_PREP(WMI_TLV_TAG,
+ WMI_TAG_SET_INIT_COUNTRY_CMD) |
+ FIELD_PREP(WMI_TLV_LEN, sizeof(*cmd) - TLV_HDR_SIZE);
+
+ cmd->pdev_id = ar->pdev->pdev_id;
+
+ switch (init_cc_params.flags) {
+ case ALPHA_IS_SET:
+ cmd->init_cc_type = WMI_COUNTRY_INFO_TYPE_ALPHA;
+ memcpy((u8 *)&cmd->cc_info.alpha2,
+ init_cc_params.cc_info.alpha2, 3);
+ break;
+ case CC_IS_SET:
+ cmd->init_cc_type = WMI_COUNTRY_INFO_TYPE_COUNTRY_CODE;
+ cmd->cc_info.country_code = init_cc_params.cc_info.country_code;
+ break;
+ case REGDMN_IS_SET:
+ cmd->init_cc_type = WMI_COUNTRY_INFO_TYPE_REGDOMAIN;
+ cmd->cc_info.regdom_id = init_cc_params.cc_info.regdom_id;
+ break;
+ default:
+ ath11k_warn(ar->ab, "unknown cc params flags: 0x%x",
+ init_cc_params.flags);
+ ret = -EINVAL;
+ goto err;
+ }
+
+ ret = ath11k_wmi_cmd_send(wmi, skb,
+ WMI_SET_INIT_COUNTRY_CMDID);
+ if (ret) {
+ ath11k_warn(ar->ab,
+ "failed to send WMI_SET_INIT_COUNTRY CMD :%d\n",
+ ret);
+ goto err;
+ }
+
+ ath11k_dbg(ar->ab, ATH11K_DBG_WMI, "cmd set init country");
+
+ return 0;
+
+err:
+ dev_kfree_skb(skb);
+ return ret;
+}
+
+int ath11k_wmi_send_set_current_country_cmd(struct ath11k *ar,
+ struct wmi_set_current_country_params *param)
+{
+ struct ath11k_pdev_wmi *wmi = ar->wmi;
+ struct wmi_set_current_country_cmd *cmd;
+ struct sk_buff *skb;
+ int ret;
+
+ skb = ath11k_wmi_alloc_skb(wmi->wmi_ab, sizeof(*cmd));
+ if (!skb)
+ return -ENOMEM;
+
+ cmd = (struct wmi_set_current_country_cmd *)skb->data;
+ cmd->tlv_header =
+ FIELD_PREP(WMI_TLV_TAG, WMI_TAG_SET_CURRENT_COUNTRY_CMD) |
+ FIELD_PREP(WMI_TLV_LEN, sizeof(*cmd) - TLV_HDR_SIZE);
+
+ cmd->pdev_id = ar->pdev->pdev_id;
+ memcpy(&cmd->new_alpha2, &param->alpha2, 3);
+
+ ret = ath11k_wmi_cmd_send(wmi, skb, WMI_SET_CURRENT_COUNTRY_CMDID);
+ if (ret) {
+ ath11k_warn(ar->ab,
+ "failed to send WMI_SET_CURRENT_COUNTRY_CMDID: %d\n", ret);
+ dev_kfree_skb(skb);
+ }
+
+ ath11k_dbg(ar->ab, ATH11K_DBG_WMI,
+ "cmd set current country pdev id %d alpha2 %c%c\n",
+ ar->pdev->pdev_id,
+ param->alpha2[0],
+ param->alpha2[1]);
+
+ return ret;
+}
+
+int
+ath11k_wmi_send_thermal_mitigation_param_cmd(struct ath11k *ar,
+ struct thermal_mitigation_params *param)
+{
+ struct ath11k_pdev_wmi *wmi = ar->wmi;
+ struct wmi_therm_throt_config_request_cmd *cmd;
+ struct wmi_therm_throt_level_config_info *lvl_conf;
+ struct wmi_tlv *tlv;
+ struct sk_buff *skb;
+ int i, ret, len;
+
+ len = sizeof(*cmd) + TLV_HDR_SIZE +
+ THERMAL_LEVELS * sizeof(struct wmi_therm_throt_level_config_info);
+
+ skb = ath11k_wmi_alloc_skb(wmi->wmi_ab, len);
+ if (!skb)
+ return -ENOMEM;
+
+ cmd = (struct wmi_therm_throt_config_request_cmd *)skb->data;
+
+ cmd->tlv_header = FIELD_PREP(WMI_TLV_TAG, WMI_TAG_THERM_THROT_CONFIG_REQUEST) |
+ FIELD_PREP(WMI_TLV_LEN, sizeof(*cmd) - TLV_HDR_SIZE);
+
+ cmd->pdev_id = ar->pdev->pdev_id;
+ cmd->enable = param->enable;
+ cmd->dc = param->dc;
+ cmd->dc_per_event = param->dc_per_event;
+ cmd->therm_throt_levels = THERMAL_LEVELS;
+
+ tlv = (struct wmi_tlv *)(skb->data + sizeof(*cmd));
+ tlv->header = FIELD_PREP(WMI_TLV_TAG, WMI_TAG_ARRAY_STRUCT) |
+ FIELD_PREP(WMI_TLV_LEN,
+ (THERMAL_LEVELS *
+ sizeof(struct wmi_therm_throt_level_config_info)));
+
+ lvl_conf = (struct wmi_therm_throt_level_config_info *)(skb->data +
+ sizeof(*cmd) +
+ TLV_HDR_SIZE);
+ for (i = 0; i < THERMAL_LEVELS; i++) {
+ lvl_conf->tlv_header =
+ FIELD_PREP(WMI_TLV_TAG, WMI_TAG_THERM_THROT_LEVEL_CONFIG_INFO) |
+ FIELD_PREP(WMI_TLV_LEN, sizeof(*lvl_conf) - TLV_HDR_SIZE);
+
+ lvl_conf->temp_lwm = param->levelconf[i].tmplwm;
+ lvl_conf->temp_hwm = param->levelconf[i].tmphwm;
+ lvl_conf->dc_off_percent = param->levelconf[i].dcoffpercent;
+ lvl_conf->prio = param->levelconf[i].priority;
+ lvl_conf++;
+ }
+
+ ret = ath11k_wmi_cmd_send(wmi, skb, WMI_THERM_THROT_SET_CONF_CMDID);
+ if (ret) {
+ ath11k_warn(ar->ab, "failed to send THERM_THROT_SET_CONF cmd\n");
+ dev_kfree_skb(skb);
+ }
+
+ ath11k_dbg(ar->ab, ATH11K_DBG_WMI,
+ "cmd therm throt set conf pdev_id %d enable %d dc %d dc_per_event %x levels %d\n",
+ ar->pdev->pdev_id, param->enable, param->dc,
+ param->dc_per_event, THERMAL_LEVELS);
+
+ return ret;
+}
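+
+ /* Sizing note (illustrative): the command carries one
+ * wmi_therm_throt_level_config_info per thermal level in a single
+ * ARRAY_STRUCT TLV, so with a hypothetical THERMAL_LEVELS of 4 the
+ * message is sizeof(*cmd) + TLV_HDR_SIZE + 4 * sizeof(*lvl_conf).
+ */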
+
+int ath11k_wmi_send_11d_scan_start_cmd(struct ath11k *ar,
+ struct wmi_11d_scan_start_params *param)
+{
+ struct ath11k_pdev_wmi *wmi = ar->wmi;
+ struct wmi_11d_scan_start_cmd *cmd;
+ struct sk_buff *skb;
+ int ret;
+
+ skb = ath11k_wmi_alloc_skb(wmi->wmi_ab, sizeof(*cmd));
+ if (!skb)
+ return -ENOMEM;
+
+ cmd = (struct wmi_11d_scan_start_cmd *)skb->data;
+ cmd->tlv_header =
+ FIELD_PREP(WMI_TLV_TAG, WMI_TAG_11D_SCAN_START_CMD) |
+ FIELD_PREP(WMI_TLV_LEN, sizeof(*cmd) - TLV_HDR_SIZE);
+
+ cmd->vdev_id = param->vdev_id;
+ cmd->scan_period_msec = param->scan_period_msec;
+ cmd->start_interval_msec = param->start_interval_msec;
+
+ ret = ath11k_wmi_cmd_send(wmi, skb, WMI_11D_SCAN_START_CMDID);
+ if (ret) {
+ ath11k_warn(ar->ab,
+ "failed to send WMI_11D_SCAN_START_CMDID: %d\n", ret);
+ dev_kfree_skb(skb);
+ }
+
+ ath11k_dbg(ar->ab, ATH11K_DBG_WMI,
+ "cmd 11d scan start vdev id %d period %d ms internal %d ms\n",
+ cmd->vdev_id,
+ cmd->scan_period_msec,
+ cmd->start_interval_msec);
+
+ return ret;
+}
+
+int ath11k_wmi_send_11d_scan_stop_cmd(struct ath11k *ar, u32 vdev_id)
+{
+ struct ath11k_pdev_wmi *wmi = ar->wmi;
+ struct wmi_11d_scan_stop_cmd *cmd;
+ struct sk_buff *skb;
+ int ret;
+
+ skb = ath11k_wmi_alloc_skb(wmi->wmi_ab, sizeof(*cmd));
+ if (!skb)
+ return -ENOMEM;
+
+ cmd = (struct wmi_11d_scan_stop_cmd *)skb->data;
+ cmd->tlv_header =
+ FIELD_PREP(WMI_TLV_TAG, WMI_TAG_11D_SCAN_STOP_CMD) |
+ FIELD_PREP(WMI_TLV_LEN, sizeof(*cmd) - TLV_HDR_SIZE);
+
+ cmd->vdev_id = vdev_id;
+
+ ret = ath11k_wmi_cmd_send(wmi, skb, WMI_11D_SCAN_STOP_CMDID);
+ if (ret) {
+ ath11k_warn(ar->ab,
+ "failed to send WMI_11D_SCAN_STOP_CMDID: %d\n", ret);
+ dev_kfree_skb(skb);
+ }
+
+ ath11k_dbg(ar->ab, ATH11K_DBG_WMI,
+ "cmd 11d scan stop vdev id %d\n",
+ cmd->vdev_id);
+
+ return ret;
+}
+
+int ath11k_wmi_pdev_pktlog_enable(struct ath11k *ar, u32 pktlog_filter)
+{
+ struct ath11k_pdev_wmi *wmi = ar->wmi;
+ struct wmi_pktlog_enable_cmd *cmd;
+ struct sk_buff *skb;
+ int ret;
+
+ skb = ath11k_wmi_alloc_skb(wmi->wmi_ab, sizeof(*cmd));
+ if (!skb)
+ return -ENOMEM;
+
+ cmd = (struct wmi_pktlog_enable_cmd *)skb->data;
+
+ cmd->tlv_header = FIELD_PREP(WMI_TLV_TAG, WMI_TAG_PDEV_PKTLOG_ENABLE_CMD) |
+ FIELD_PREP(WMI_TLV_LEN, sizeof(*cmd) - TLV_HDR_SIZE);
+
+ cmd->pdev_id = DP_HW2SW_MACID(ar->pdev->pdev_id);
+ cmd->evlist = pktlog_filter;
+ cmd->enable = ATH11K_WMI_PKTLOG_ENABLE_FORCE;
+
+ ret = ath11k_wmi_cmd_send(wmi, skb,
+ WMI_PDEV_PKTLOG_ENABLE_CMDID);
+ if (ret) {
+ ath11k_warn(ar->ab, "failed to send WMI_PDEV_PKTLOG_ENABLE_CMDID\n");
+ dev_kfree_skb(skb);
+ }
+
+ ath11k_dbg(ar->ab, ATH11K_DBG_WMI, "cmd pdev pktlog enable");
+
+ return ret;
+}
+
+int ath11k_wmi_pdev_pktlog_disable(struct ath11k *ar)
+{
+ struct ath11k_pdev_wmi *wmi = ar->wmi;
+ struct wmi_pktlog_disable_cmd *cmd;
+ struct sk_buff *skb;
+ int ret;
+
+ skb = ath11k_wmi_alloc_skb(wmi->wmi_ab, sizeof(*cmd));
+ if (!skb)
+ return -ENOMEM;
+
+ cmd = (struct wmi_pktlog_disable_cmd *)skb->data;
+
+ cmd->tlv_header = FIELD_PREP(WMI_TLV_TAG, WMI_TAG_PDEV_PKTLOG_DISABLE_CMD) |
+ FIELD_PREP(WMI_TLV_LEN, sizeof(*cmd) - TLV_HDR_SIZE);
+
+ cmd->pdev_id = DP_HW2SW_MACID(ar->pdev->pdev_id);
+
+ ret = ath11k_wmi_cmd_send(wmi, skb,
+ WMI_PDEV_PKTLOG_DISABLE_CMDID);
+ if (ret) {
+ ath11k_warn(ar->ab, "failed to send WMI_PDEV_PKTLOG_ENABLE_CMDID\n");
+ dev_kfree_skb(skb);
+ }
+
+ ath11k_dbg(ar->ab, ATH11K_DBG_WMI, "cmd pdev pktlog disable");
+
+ return ret;
+}
+
+void ath11k_wmi_fill_default_twt_params(struct wmi_twt_enable_params *twt_params)
+{
+ twt_params->sta_cong_timer_ms = ATH11K_TWT_DEF_STA_CONG_TIMER_MS;
+ twt_params->default_slot_size = ATH11K_TWT_DEF_DEFAULT_SLOT_SIZE;
+ twt_params->congestion_thresh_setup = ATH11K_TWT_DEF_CONGESTION_THRESH_SETUP;
+ twt_params->congestion_thresh_teardown =
+ ATH11K_TWT_DEF_CONGESTION_THRESH_TEARDOWN;
+ twt_params->congestion_thresh_critical =
+ ATH11K_TWT_DEF_CONGESTION_THRESH_CRITICAL;
+ twt_params->interference_thresh_teardown =
+ ATH11K_TWT_DEF_INTERFERENCE_THRESH_TEARDOWN;
+ twt_params->interference_thresh_setup =
+ ATH11K_TWT_DEF_INTERFERENCE_THRESH_SETUP;
+ twt_params->min_no_sta_setup = ATH11K_TWT_DEF_MIN_NO_STA_SETUP;
+ twt_params->min_no_sta_teardown = ATH11K_TWT_DEF_MIN_NO_STA_TEARDOWN;
+ twt_params->no_of_bcast_mcast_slots = ATH11K_TWT_DEF_NO_OF_BCAST_MCAST_SLOTS;
+ twt_params->min_no_twt_slots = ATH11K_TWT_DEF_MIN_NO_TWT_SLOTS;
+ twt_params->max_no_sta_twt = ATH11K_TWT_DEF_MAX_NO_STA_TWT;
+ twt_params->mode_check_interval = ATH11K_TWT_DEF_MODE_CHECK_INTERVAL;
+ twt_params->add_sta_slot_interval = ATH11K_TWT_DEF_ADD_STA_SLOT_INTERVAL;
+ twt_params->remove_sta_slot_interval =
+ ATH11K_TWT_DEF_REMOVE_STA_SLOT_INTERVAL;
+ /* TODO add MBSSID support */
+ twt_params->mbss_support = 0;
+}
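+
+ /* Hedged usage sketch: the two helpers are meant to be paired,
+ *
+ *	struct wmi_twt_enable_params params = {};
+ *
+ *	ath11k_wmi_fill_default_twt_params(&params);
+ *	ret = ath11k_wmi_send_twt_enable_cmd(ar, pdev_id, &params);
+ *
+ * so firmware receives the ATH11K_TWT_DEF_* tuning unless a caller
+ * overrides individual fields first.
+ */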
+
+int ath11k_wmi_send_twt_enable_cmd(struct ath11k *ar, u32 pdev_id,
+ struct wmi_twt_enable_params *params)
+{
+ struct ath11k_pdev_wmi *wmi = ar->wmi;
+ struct ath11k_base *ab = wmi->wmi_ab->ab;
+ struct wmi_twt_enable_params_cmd *cmd;
+ struct sk_buff *skb;
+ int ret, len;
+
+ len = sizeof(*cmd);
+
+ skb = ath11k_wmi_alloc_skb(wmi->wmi_ab, len);
+ if (!skb)
+ return -ENOMEM;
+
+ cmd = (struct wmi_twt_enable_params_cmd *)skb->data;
+ cmd->tlv_header = FIELD_PREP(WMI_TLV_TAG, WMI_TAG_TWT_ENABLE_CMD) |
+ FIELD_PREP(WMI_TLV_LEN, len - TLV_HDR_SIZE);
+ cmd->pdev_id = pdev_id;
+ cmd->sta_cong_timer_ms = params->sta_cong_timer_ms;
+ cmd->default_slot_size = params->default_slot_size;
+ cmd->congestion_thresh_setup = params->congestion_thresh_setup;
+ cmd->congestion_thresh_teardown = params->congestion_thresh_teardown;
+ cmd->congestion_thresh_critical = params->congestion_thresh_critical;
+ cmd->interference_thresh_teardown = params->interference_thresh_teardown;
+ cmd->interference_thresh_setup = params->interference_thresh_setup;
+ cmd->min_no_sta_setup = params->min_no_sta_setup;
+ cmd->min_no_sta_teardown = params->min_no_sta_teardown;
+ cmd->no_of_bcast_mcast_slots = params->no_of_bcast_mcast_slots;
+ cmd->min_no_twt_slots = params->min_no_twt_slots;
+ cmd->max_no_sta_twt = params->max_no_sta_twt;
+ cmd->mode_check_interval = params->mode_check_interval;
+ cmd->add_sta_slot_interval = params->add_sta_slot_interval;
+ cmd->remove_sta_slot_interval = params->remove_sta_slot_interval;
+ cmd->mbss_support = params->mbss_support;
+
+ ret = ath11k_wmi_cmd_send(wmi, skb, WMI_TWT_ENABLE_CMDID);
+ if (ret) {
+ ath11k_warn(ab, "Failed to send WMI_TWT_ENABLE_CMDID");
+ dev_kfree_skb(skb);
+ return ret;
+ }
+
+ ar->twt_enabled = 1;
+
+ ath11k_dbg(ab, ATH11K_DBG_WMI, "cmd twt enable");
+
+ return 0;
+}
+
+int
+ath11k_wmi_send_twt_disable_cmd(struct ath11k *ar, u32 pdev_id)
+{
+ struct ath11k_pdev_wmi *wmi = ar->wmi;
+ struct ath11k_base *ab = wmi->wmi_ab->ab;
+ struct wmi_twt_disable_params_cmd *cmd;
+ struct sk_buff *skb;
+ int ret, len;
+
+ len = sizeof(*cmd);
+
+ skb = ath11k_wmi_alloc_skb(wmi->wmi_ab, len);
+ if (!skb)
+ return -ENOMEM;
+
+ cmd = (struct wmi_twt_disable_params_cmd *)skb->data;
+ cmd->tlv_header = FIELD_PREP(WMI_TLV_TAG, WMI_TAG_TWT_DISABLE_CMD) |
+ FIELD_PREP(WMI_TLV_LEN, len - TLV_HDR_SIZE);
+ cmd->pdev_id = pdev_id;
+
+ ret = ath11k_wmi_cmd_send(wmi, skb, WMI_TWT_DISABLE_CMDID);
+ if (ret) {
+ ath11k_warn(ab, "Failed to send WMI_TWT_DISABLE_CMDID");
+ dev_kfree_skb(skb);
+ return ret;
+ }
+
+ ath11k_dbg(ab, ATH11K_DBG_WMI, "cmd twt disable");
+
+ ar->twt_enabled = 0;
+
+ return 0;
+}
+
+int ath11k_wmi_send_twt_add_dialog_cmd(struct ath11k *ar,
+ struct wmi_twt_add_dialog_params *params)
+{
+ struct ath11k_pdev_wmi *wmi = ar->wmi;
+ struct ath11k_base *ab = wmi->wmi_ab->ab;
+ struct wmi_twt_add_dialog_params_cmd *cmd;
+ struct sk_buff *skb;
+ int ret, len;
+
+ len = sizeof(*cmd);
+
+ skb = ath11k_wmi_alloc_skb(wmi->wmi_ab, len);
+ if (!skb)
+ return -ENOMEM;
+
+ cmd = (struct wmi_twt_add_dialog_params_cmd *)skb->data;
+ cmd->tlv_header = FIELD_PREP(WMI_TLV_TAG, WMI_TAG_TWT_ADD_DIALOG_CMD) |
+ FIELD_PREP(WMI_TLV_LEN, len - TLV_HDR_SIZE);
+
+ cmd->vdev_id = params->vdev_id;
+ ether_addr_copy(cmd->peer_macaddr.addr, params->peer_macaddr);
+ cmd->dialog_id = params->dialog_id;
+ cmd->wake_intvl_us = params->wake_intvl_us;
+ cmd->wake_intvl_mantis = params->wake_intvl_mantis;
+ cmd->wake_dura_us = params->wake_dura_us;
+ cmd->sp_offset_us = params->sp_offset_us;
+ cmd->flags = params->twt_cmd;
+ if (params->flag_bcast)
+ cmd->flags |= WMI_TWT_ADD_DIALOG_FLAG_BCAST;
+ if (params->flag_trigger)
+ cmd->flags |= WMI_TWT_ADD_DIALOG_FLAG_TRIGGER;
+ if (params->flag_flow_type)
+ cmd->flags |= WMI_TWT_ADD_DIALOG_FLAG_FLOW_TYPE;
+ if (params->flag_protection)
+ cmd->flags |= WMI_TWT_ADD_DIALOG_FLAG_PROTECTION;
+
+ ret = ath11k_wmi_cmd_send(wmi, skb, WMI_TWT_ADD_DIALOG_CMDID);
+ if (ret) {
+ ath11k_warn(ab,
+ "failed to send wmi command to add twt dialog: %d",
+ ret);
+ dev_kfree_skb(skb);
+ return ret;
+ }
+
+ ath11k_dbg(ar->ab, ATH11K_DBG_WMI,
+ "cmd twt add dialog vdev %u dialog id %u wake interval %u mantissa %u wake duration %u service period offset %u flags 0x%x\n",
+ cmd->vdev_id, cmd->dialog_id, cmd->wake_intvl_us,
+ cmd->wake_intvl_mantis, cmd->wake_dura_us, cmd->sp_offset_us,
+ cmd->flags);
+
+ return 0;
+}
+
+int ath11k_wmi_send_twt_del_dialog_cmd(struct ath11k *ar,
+ struct wmi_twt_del_dialog_params *params)
+{
+ struct ath11k_pdev_wmi *wmi = ar->wmi;
+ struct ath11k_base *ab = wmi->wmi_ab->ab;
+ struct wmi_twt_del_dialog_params_cmd *cmd;
+ struct sk_buff *skb;
+ int ret, len;
+
+ len = sizeof(*cmd);
+
+ skb = ath11k_wmi_alloc_skb(wmi->wmi_ab, len);
+ if (!skb)
+ return -ENOMEM;
+
+ cmd = (struct wmi_twt_del_dialog_params_cmd *)skb->data;
+ cmd->tlv_header = FIELD_PREP(WMI_TLV_TAG, WMI_TAG_TWT_DEL_DIALOG_CMD) |
+ FIELD_PREP(WMI_TLV_LEN, len - TLV_HDR_SIZE);
+
+ cmd->vdev_id = params->vdev_id;
+ ether_addr_copy(cmd->peer_macaddr.addr, params->peer_macaddr);
+ cmd->dialog_id = params->dialog_id;
+
+ ret = ath11k_wmi_cmd_send(wmi, skb, WMI_TWT_DEL_DIALOG_CMDID);
+ if (ret) {
+ ath11k_warn(ab,
+ "failed to send wmi command to delete twt dialog: %d",
+ ret);
+ dev_kfree_skb(skb);
+ return ret;
+ }
+
+ ath11k_dbg(ar->ab, ATH11K_DBG_WMI,
+ "cmd twt del dialog vdev %u dialog id %u\n",
+ cmd->vdev_id, cmd->dialog_id);
+
+ return 0;
+}
+
+int ath11k_wmi_send_twt_pause_dialog_cmd(struct ath11k *ar,
+ struct wmi_twt_pause_dialog_params *params)
+{
+ struct ath11k_pdev_wmi *wmi = ar->wmi;
+ struct ath11k_base *ab = wmi->wmi_ab->ab;
+ struct wmi_twt_pause_dialog_params_cmd *cmd;
+ struct sk_buff *skb;
+ int ret, len;
+
+ len = sizeof(*cmd);
+
+ skb = ath11k_wmi_alloc_skb(wmi->wmi_ab, len);
+ if (!skb)
+ return -ENOMEM;
+
+ cmd = (struct wmi_twt_pause_dialog_params_cmd *)skb->data;
+ cmd->tlv_header = FIELD_PREP(WMI_TLV_TAG,
+ WMI_TAG_TWT_PAUSE_DIALOG_CMD) |
+ FIELD_PREP(WMI_TLV_LEN, len - TLV_HDR_SIZE);
+
+ cmd->vdev_id = params->vdev_id;
+ ether_addr_copy(cmd->peer_macaddr.addr, params->peer_macaddr);
+ cmd->dialog_id = params->dialog_id;
+
+ ret = ath11k_wmi_cmd_send(wmi, skb, WMI_TWT_PAUSE_DIALOG_CMDID);
+ if (ret) {
+ ath11k_warn(ab,
+ "failed to send wmi command to pause twt dialog: %d",
+ ret);
+ dev_kfree_skb(skb);
+ return ret;
+ }
+
+ ath11k_dbg(ar->ab, ATH11K_DBG_WMI,
+ "cmd twt pause dialog vdev %u dialog id %u\n",
+ cmd->vdev_id, cmd->dialog_id);
+
+ return 0;
+}
+
+int ath11k_wmi_send_twt_resume_dialog_cmd(struct ath11k *ar,
+ struct wmi_twt_resume_dialog_params *params)
+{
+ struct ath11k_pdev_wmi *wmi = ar->wmi;
+ struct ath11k_base *ab = wmi->wmi_ab->ab;
+ struct wmi_twt_resume_dialog_params_cmd *cmd;
+ struct sk_buff *skb;
+ int ret, len;
+
+ len = sizeof(*cmd);
+
+ skb = ath11k_wmi_alloc_skb(wmi->wmi_ab, len);
+ if (!skb)
+ return -ENOMEM;
+
+ cmd = (struct wmi_twt_resume_dialog_params_cmd *)skb->data;
+ cmd->tlv_header = FIELD_PREP(WMI_TLV_TAG,
+ WMI_TAG_TWT_RESUME_DIALOG_CMD) |
+ FIELD_PREP(WMI_TLV_LEN, len - TLV_HDR_SIZE);
+
+ cmd->vdev_id = params->vdev_id;
+ ether_addr_copy(cmd->peer_macaddr.addr, params->peer_macaddr);
+ cmd->dialog_id = params->dialog_id;
+ cmd->sp_offset_us = params->sp_offset_us;
+ cmd->next_twt_size = params->next_twt_size;
+
+ ret = ath11k_wmi_cmd_send(wmi, skb, WMI_TWT_RESUME_DIALOG_CMDID);
+ if (ret) {
+ ath11k_warn(ab,
+ "failed to send wmi command to resume twt dialog: %d",
+ ret);
+ dev_kfree_skb(skb);
+ return ret;
+ }
+
+ ath11k_dbg(ar->ab, ATH11K_DBG_WMI,
+ "cmd twt resume dialog vdev %u dialog id %u service period offset %u next twt subfield size %u\n",
+ cmd->vdev_id, cmd->dialog_id, cmd->sp_offset_us,
+ cmd->next_twt_size);
+
+ return 0;
+}
+
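+/* Program the OBSS PD (packet detect) spatial reuse thresholds for a
+ * vdev from the HE OBSS PD element supplied by mac80211.
+ */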
+int
+ath11k_wmi_send_obss_spr_cmd(struct ath11k *ar, u32 vdev_id,
+ struct ieee80211_he_obss_pd *he_obss_pd)
+{
+ struct ath11k_pdev_wmi *wmi = ar->wmi;
+ struct ath11k_base *ab = wmi->wmi_ab->ab;
+ struct wmi_obss_spatial_reuse_params_cmd *cmd;
+ struct sk_buff *skb;
+ int ret, len;
+
+ len = sizeof(*cmd);
+
+ skb = ath11k_wmi_alloc_skb(wmi->wmi_ab, len);
+ if (!skb)
+ return -ENOMEM;
+
+ cmd = (struct wmi_obss_spatial_reuse_params_cmd *)skb->data;
+ cmd->tlv_header = FIELD_PREP(WMI_TLV_TAG,
+ WMI_TAG_OBSS_SPATIAL_REUSE_SET_CMD) |
+ FIELD_PREP(WMI_TLV_LEN, len - TLV_HDR_SIZE);
+ cmd->vdev_id = vdev_id;
+ cmd->enable = he_obss_pd->enable;
+ cmd->obss_min = he_obss_pd->min_offset;
+ cmd->obss_max = he_obss_pd->max_offset;
+
+ ret = ath11k_wmi_cmd_send(wmi, skb,
+ WMI_PDEV_OBSS_PD_SPATIAL_REUSE_CMDID);
+ if (ret) {
+ ath11k_warn(ab,
+ "Failed to send WMI_PDEV_OBSS_PD_SPATIAL_REUSE_CMDID");
+ dev_kfree_skb(skb);
+ return ret;
+ }
+
+ ath11k_dbg(ab, ATH11K_DBG_WMI, "cmd pdev obss pd spatial reuse");
+
+ return 0;
+}
+
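+/* The next six helpers program the SRG (spatial reuse group) and non-SRG
+ * OBSS PD bitmaps. Each sends a 64-bit bitmap (two u32 words) telling the
+ * target which BSS colors or partial BSSIDs are eligible for spatial
+ * reuse on this pdev.
+ */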
+int
+ath11k_wmi_pdev_set_srg_bss_color_bitmap(struct ath11k *ar, u32 *bitmap)
+{
+ struct ath11k_pdev_wmi *wmi = ar->wmi;
+ struct ath11k_base *ab = wmi->wmi_ab->ab;
+ struct wmi_pdev_obss_pd_bitmap_cmd *cmd;
+ struct sk_buff *skb;
+ int ret, len;
+
+ len = sizeof(*cmd);
+
+ skb = ath11k_wmi_alloc_skb(wmi->wmi_ab, len);
+ if (!skb)
+ return -ENOMEM;
+
+ cmd = (struct wmi_pdev_obss_pd_bitmap_cmd *)skb->data;
+ cmd->tlv_header = FIELD_PREP(WMI_TLV_TAG,
+ WMI_TAG_PDEV_SRG_BSS_COLOR_BITMAP_CMD) |
+ FIELD_PREP(WMI_TLV_LEN, len - TLV_HDR_SIZE);
+ cmd->pdev_id = ar->pdev->pdev_id;
+ memcpy(cmd->bitmap, bitmap, sizeof(cmd->bitmap));
+
+ ret = ath11k_wmi_cmd_send(wmi, skb,
+ WMI_PDEV_SET_SRG_BSS_COLOR_BITMAP_CMDID);
+ if (ret) {
+ ath11k_warn(ab,
+ "failed to send WMI_PDEV_SET_SRG_BSS_COLOR_BITMAP_CMDID");
+ dev_kfree_skb(skb);
+ return ret;
+ }
+
+ ath11k_dbg(ar->ab, ATH11K_DBG_WMI,
+ "cmd pdev set srg bss color bitmap pdev_id %d bss color bitmap %08x %08x\n",
+ cmd->pdev_id, cmd->bitmap[0], cmd->bitmap[1]);
+
+ return 0;
+}
+
+int
+ath11k_wmi_pdev_set_srg_patial_bssid_bitmap(struct ath11k *ar, u32 *bitmap)
+{
+ struct ath11k_pdev_wmi *wmi = ar->wmi;
+ struct ath11k_base *ab = wmi->wmi_ab->ab;
+ struct wmi_pdev_obss_pd_bitmap_cmd *cmd;
+ struct sk_buff *skb;
+ int ret, len;
+
+ len = sizeof(*cmd);
+
+ skb = ath11k_wmi_alloc_skb(wmi->wmi_ab, len);
+ if (!skb)
+ return -ENOMEM;
+
+ cmd = (struct wmi_pdev_obss_pd_bitmap_cmd *)skb->data;
+ cmd->tlv_header =
+ FIELD_PREP(WMI_TLV_TAG,
+ WMI_TAG_PDEV_SRG_PARTIAL_BSSID_BITMAP_CMD) |
+ FIELD_PREP(WMI_TLV_LEN, len - TLV_HDR_SIZE);
+ cmd->pdev_id = ar->pdev->pdev_id;
+ memcpy(cmd->bitmap, bitmap, sizeof(cmd->bitmap));
+
+ ret = ath11k_wmi_cmd_send(wmi, skb,
+ WMI_PDEV_SET_SRG_PARTIAL_BSSID_BITMAP_CMDID);
+ if (ret) {
+ ath11k_warn(ab,
+ "failed to send WMI_PDEV_SET_SRG_PARTIAL_BSSID_BITMAP_CMDID");
+ dev_kfree_skb(skb);
+ return ret;
+ }
+
+ ath11k_dbg(ar->ab, ATH11K_DBG_WMI,
+ "cmd pdev set srg partial bssid bitmap pdev_id %d partial bssid bitmap %08x %08x\n",
+ cmd->pdev_id, cmd->bitmap[0], cmd->bitmap[1]);
+
+ return 0;
+}
+
+int
+ath11k_wmi_pdev_srg_obss_color_enable_bitmap(struct ath11k *ar, u32 *bitmap)
+{
+ struct ath11k_pdev_wmi *wmi = ar->wmi;
+ struct ath11k_base *ab = wmi->wmi_ab->ab;
+ struct wmi_pdev_obss_pd_bitmap_cmd *cmd;
+ struct sk_buff *skb;
+ int ret, len;
+
+ len = sizeof(*cmd);
+
+ skb = ath11k_wmi_alloc_skb(wmi->wmi_ab, len);
+ if (!skb)
+ return -ENOMEM;
+
+ cmd = (struct wmi_pdev_obss_pd_bitmap_cmd *)skb->data;
+ cmd->tlv_header =
+ FIELD_PREP(WMI_TLV_TAG,
+ WMI_TAG_PDEV_SRG_OBSS_COLOR_ENABLE_BITMAP_CMD) |
+ FIELD_PREP(WMI_TLV_LEN, len - TLV_HDR_SIZE);
+ cmd->pdev_id = ar->pdev->pdev_id;
+ memcpy(cmd->bitmap, bitmap, sizeof(cmd->bitmap));
+
+ ret = ath11k_wmi_cmd_send(wmi, skb,
+ WMI_PDEV_SET_SRG_OBSS_COLOR_ENABLE_BITMAP_CMDID);
+ if (ret) {
+ ath11k_warn(ab,
+ "failed to send WMI_PDEV_SET_SRG_OBSS_COLOR_ENABLE_BITMAP_CMDID");
+ dev_kfree_skb(skb);
+ return ret;
+ }
+
+ ath11k_dbg(ar->ab, ATH11K_DBG_WMI,
+ "cmd pdev set srg obsscolor enable pdev_id %d bss color enable bitmap %08x %08x\n",
+ cmd->pdev_id, cmd->bitmap[0], cmd->bitmap[1]);
+
+ return 0;
+}
+
+int
+ath11k_wmi_pdev_srg_obss_bssid_enable_bitmap(struct ath11k *ar, u32 *bitmap)
+{
+ struct ath11k_pdev_wmi *wmi = ar->wmi;
+ struct ath11k_base *ab = wmi->wmi_ab->ab;
+ struct wmi_pdev_obss_pd_bitmap_cmd *cmd;
+ struct sk_buff *skb;
+ int ret, len;
+
+ len = sizeof(*cmd);
+
+ skb = ath11k_wmi_alloc_skb(wmi->wmi_ab, len);
+ if (!skb)
+ return -ENOMEM;
+
+ cmd = (struct wmi_pdev_obss_pd_bitmap_cmd *)skb->data;
+ cmd->tlv_header =
+ FIELD_PREP(WMI_TLV_TAG,
+ WMI_TAG_PDEV_SRG_OBSS_BSSID_ENABLE_BITMAP_CMD) |
+ FIELD_PREP(WMI_TLV_LEN, len - TLV_HDR_SIZE);
+ cmd->pdev_id = ar->pdev->pdev_id;
+ memcpy(cmd->bitmap, bitmap, sizeof(cmd->bitmap));
+
+ ret = ath11k_wmi_cmd_send(wmi, skb,
+ WMI_PDEV_SET_SRG_OBSS_BSSID_ENABLE_BITMAP_CMDID);
+ if (ret) {
+ ath11k_warn(ab,
+ "failed to send WMI_PDEV_SET_SRG_OBSS_BSSID_ENABLE_BITMAP_CMDID");
+ dev_kfree_skb(skb);
+ return ret;
+ }
+
+ ath11k_dbg(ar->ab, ATH11K_DBG_WMI,
+ "cmd pdev set srg obss bssid enable bitmap pdev_id %d bssid enable bitmap %08x %08x\n",
+ cmd->pdev_id, cmd->bitmap[0], cmd->bitmap[1]);
+
+ return 0;
+}
+
+int
+ath11k_wmi_pdev_non_srg_obss_color_enable_bitmap(struct ath11k *ar, u32 *bitmap)
+{
+ struct ath11k_pdev_wmi *wmi = ar->wmi;
+ struct ath11k_base *ab = wmi->wmi_ab->ab;
+ struct wmi_pdev_obss_pd_bitmap_cmd *cmd;
+ struct sk_buff *skb;
+ int ret, len;
+
+ len = sizeof(*cmd);
+
+ skb = ath11k_wmi_alloc_skb(wmi->wmi_ab, len);
+ if (!skb)
+ return -ENOMEM;
+
+ cmd = (struct wmi_pdev_obss_pd_bitmap_cmd *)skb->data;
+ cmd->tlv_header =
+ FIELD_PREP(WMI_TLV_TAG,
+ WMI_TAG_PDEV_NON_SRG_OBSS_COLOR_ENABLE_BITMAP_CMD) |
+ FIELD_PREP(WMI_TLV_LEN, len - TLV_HDR_SIZE);
+ cmd->pdev_id = ar->pdev->pdev_id;
+ memcpy(cmd->bitmap, bitmap, sizeof(cmd->bitmap));
+
+ ret = ath11k_wmi_cmd_send(wmi, skb,
+ WMI_PDEV_SET_NON_SRG_OBSS_COLOR_ENABLE_BITMAP_CMDID);
+ if (ret) {
+ ath11k_warn(ab,
+ "failed to send WMI_PDEV_SET_NON_SRG_OBSS_COLOR_ENABLE_BITMAP_CMDID");
+ dev_kfree_skb(skb);
+ return ret;
+ }
+
+ ath11k_dbg(ar->ab, ATH11K_DBG_WMI,
+ "cmd pdev set non srg obss color enable bitmap pdev_id %d bss color enable bitmap %08x %08x\n",
+ cmd->pdev_id, cmd->bitmap[0], cmd->bitmap[1]);
+
+ return 0;
+}
+
+int
+ath11k_wmi_pdev_non_srg_obss_bssid_enable_bitmap(struct ath11k *ar, u32 *bitmap)
+{
+ struct ath11k_pdev_wmi *wmi = ar->wmi;
+ struct ath11k_base *ab = wmi->wmi_ab->ab;
+ struct wmi_pdev_obss_pd_bitmap_cmd *cmd;
+ struct sk_buff *skb;
+ int ret, len;
+
+ len = sizeof(*cmd);
+
+ skb = ath11k_wmi_alloc_skb(wmi->wmi_ab, len);
+ if (!skb)
+ return -ENOMEM;
+
+ cmd = (struct wmi_pdev_obss_pd_bitmap_cmd *)skb->data;
+ cmd->tlv_header =
+ FIELD_PREP(WMI_TLV_TAG,
+ WMI_TAG_PDEV_NON_SRG_OBSS_BSSID_ENABLE_BITMAP_CMD) |
+ FIELD_PREP(WMI_TLV_LEN, len - TLV_HDR_SIZE);
+ cmd->pdev_id = ar->pdev->pdev_id;
+ memcpy(cmd->bitmap, bitmap, sizeof(cmd->bitmap));
+
+ ret = ath11k_wmi_cmd_send(wmi, skb,
+ WMI_PDEV_SET_NON_SRG_OBSS_BSSID_ENABLE_BITMAP_CMDID);
+ if (ret) {
+ ath11k_warn(ab,
+ "failed to send WMI_PDEV_SET_NON_SRG_OBSS_BSSID_ENABLE_BITMAP_CMDID");
+ dev_kfree_skb(skb);
+ return ret;
+ }
+
+ ath11k_dbg(ar->ab, ATH11K_DBG_WMI,
+ "cmd pdev set non srg obss bssid enable bitmap pdev_id %d bssid enable bitmap %08x %08x\n",
+ cmd->pdev_id, cmd->bitmap[0], cmd->bitmap[1]);
+
+ return 0;
+}
+
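+/* Configure OBSS BSS color collision detection on a vdev: the target
+ * monitors for other BSSs using @bss_color and reports any collision
+ * through the OBSS color collision event (handled further below).
+ */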
+int
+ath11k_wmi_send_obss_color_collision_cfg_cmd(struct ath11k *ar, u32 vdev_id,
+ u8 bss_color, u32 period,
+ bool enable)
+{
+ struct ath11k_pdev_wmi *wmi = ar->wmi;
+ struct ath11k_base *ab = wmi->wmi_ab->ab;
+ struct wmi_obss_color_collision_cfg_params_cmd *cmd;
+ struct sk_buff *skb;
+ int ret, len;
+
+ len = sizeof(*cmd);
+
+ skb = ath11k_wmi_alloc_skb(wmi->wmi_ab, len);
+ if (!skb)
+ return -ENOMEM;
+
+ cmd = (struct wmi_obss_color_collision_cfg_params_cmd *)skb->data;
+ cmd->tlv_header = FIELD_PREP(WMI_TLV_TAG,
+ WMI_TAG_OBSS_COLOR_COLLISION_DET_CONFIG) |
+ FIELD_PREP(WMI_TLV_LEN, len - TLV_HDR_SIZE);
+ cmd->vdev_id = vdev_id;
+ cmd->evt_type = enable ? ATH11K_OBSS_COLOR_COLLISION_DETECTION :
+ ATH11K_OBSS_COLOR_COLLISION_DETECTION_DISABLE;
+ cmd->current_bss_color = bss_color;
+ cmd->detection_period_ms = period;
+ cmd->scan_period_ms = ATH11K_BSS_COLOR_COLLISION_SCAN_PERIOD_MS;
+ cmd->free_slot_expiry_time_ms = 0;
+ cmd->flags = 0;
+
+ ret = ath11k_wmi_cmd_send(wmi, skb,
+ WMI_OBSS_COLOR_COLLISION_DET_CONFIG_CMDID);
+ if (ret) {
+ ath11k_warn(ab, "Failed to send WMI_OBSS_COLOR_COLLISION_DET_CONFIG_CMDID");
+ dev_kfree_skb(skb);
+ return ret;
+ }
+
+ ath11k_dbg(ar->ab, ATH11K_DBG_WMI,
+ "cmd obss color collision det config id %d type %d bss_color %d detect_period %d scan_period %d\n",
+ cmd->vdev_id, cmd->evt_type, cmd->current_bss_color,
+ cmd->detection_period_ms, cmd->scan_period_ms);
+
+ return 0;
+}
+
+int ath11k_wmi_send_bss_color_change_enable_cmd(struct ath11k *ar, u32 vdev_id,
+ bool enable)
+{
+ struct ath11k_pdev_wmi *wmi = ar->wmi;
+ struct ath11k_base *ab = wmi->wmi_ab->ab;
+ struct wmi_bss_color_change_enable_params_cmd *cmd;
+ struct sk_buff *skb;
+ int ret, len;
+
+ len = sizeof(*cmd);
+
+ skb = ath11k_wmi_alloc_skb(wmi->wmi_ab, len);
+ if (!skb)
+ return -ENOMEM;
+
+ cmd = (struct wmi_bss_color_change_enable_params_cmd *)skb->data;
+ cmd->tlv_header = FIELD_PREP(WMI_TLV_TAG, WMI_TAG_BSS_COLOR_CHANGE_ENABLE) |
+ FIELD_PREP(WMI_TLV_LEN, len - TLV_HDR_SIZE);
+ cmd->vdev_id = vdev_id;
+ cmd->enable = enable ? 1 : 0;
+
+ ret = ath11k_wmi_cmd_send(wmi, skb,
+ WMI_BSS_COLOR_CHANGE_ENABLE_CMDID);
+ if (ret) {
+ ath11k_warn(ab, "Failed to send WMI_BSS_COLOR_CHANGE_ENABLE_CMDID");
+ dev_kfree_skb(skb);
+ return ret;
+ }
+
+ ath11k_dbg(ar->ab, ATH11K_DBG_WMI,
+ "cmd bss color change enable id %d enable %d\n",
+ cmd->vdev_id, cmd->enable);
+
+ return 0;
+}
+
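+/* Install the FILS discovery frame template for a vdev. The template
+ * payload follows the fixed command struct as a byte-array TLV, padded
+ * to a 4-byte boundary.
+ */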
+int ath11k_wmi_fils_discovery_tmpl(struct ath11k *ar, u32 vdev_id,
+ struct sk_buff *tmpl)
+{
+ struct wmi_tlv *tlv;
+ struct sk_buff *skb;
+ void *ptr;
+ int ret, len;
+ size_t aligned_len;
+ struct wmi_fils_discovery_tmpl_cmd *cmd;
+
+ aligned_len = roundup(tmpl->len, 4);
+ len = sizeof(*cmd) + TLV_HDR_SIZE + aligned_len;
+
+ ath11k_dbg(ar->ab, ATH11K_DBG_WMI,
+ "vdev %i set FILS discovery template\n", vdev_id);
+
+ skb = ath11k_wmi_alloc_skb(ar->wmi->wmi_ab, len);
+ if (!skb)
+ return -ENOMEM;
+
+ cmd = (struct wmi_fils_discovery_tmpl_cmd *)skb->data;
+ cmd->tlv_header = FIELD_PREP(WMI_TLV_TAG,
+ WMI_TAG_FILS_DISCOVERY_TMPL_CMD) |
+ FIELD_PREP(WMI_TLV_LEN, sizeof(*cmd) - TLV_HDR_SIZE);
+ cmd->vdev_id = vdev_id;
+ cmd->buf_len = tmpl->len;
+ ptr = skb->data + sizeof(*cmd);
+
+ tlv = ptr;
+ tlv->header = FIELD_PREP(WMI_TLV_TAG, WMI_TAG_ARRAY_BYTE) |
+ FIELD_PREP(WMI_TLV_LEN, aligned_len);
+ memcpy(tlv->value, tmpl->data, tmpl->len);
+
+ ret = ath11k_wmi_cmd_send(ar->wmi, skb, WMI_FILS_DISCOVERY_TMPL_CMDID);
+ if (ret) {
+ ath11k_warn(ar->ab,
+ "WMI vdev %i failed to send FILS discovery template command\n",
+ vdev_id);
+ dev_kfree_skb(skb);
+ return ret;
+ }
+
+ ath11k_dbg(ar->ab, ATH11K_DBG_WMI, "cmd fils discovery tmpl");
+
+ return 0;
+}
+
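+/* Install the probe response template for a vdev. The buffer layout is
+ * the fixed command struct, a wmi_bcn_prb_info TLV (caps/erp left zero),
+ * then the frame itself as a byte-array TLV padded to 4 bytes.
+ */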
+int ath11k_wmi_probe_resp_tmpl(struct ath11k *ar, u32 vdev_id,
+ struct sk_buff *tmpl)
+{
+ struct wmi_probe_tmpl_cmd *cmd;
+ struct wmi_bcn_prb_info *probe_info;
+ struct wmi_tlv *tlv;
+ struct sk_buff *skb;
+ void *ptr;
+ int ret, len;
+ size_t aligned_len = roundup(tmpl->len, 4);
+
+ ath11k_dbg(ar->ab, ATH11K_DBG_WMI,
+ "vdev %i set probe response template\n", vdev_id);
+
+ len = sizeof(*cmd) + sizeof(*probe_info) + TLV_HDR_SIZE + aligned_len;
+
+ skb = ath11k_wmi_alloc_skb(ar->wmi->wmi_ab, len);
+ if (!skb)
+ return -ENOMEM;
+
+ cmd = (struct wmi_probe_tmpl_cmd *)skb->data;
+ cmd->tlv_header = FIELD_PREP(WMI_TLV_TAG, WMI_TAG_PRB_TMPL_CMD) |
+ FIELD_PREP(WMI_TLV_LEN, sizeof(*cmd) - TLV_HDR_SIZE);
+ cmd->vdev_id = vdev_id;
+ cmd->buf_len = tmpl->len;
+
+ ptr = skb->data + sizeof(*cmd);
+
+ probe_info = ptr;
+ len = sizeof(*probe_info);
+ probe_info->tlv_header = FIELD_PREP(WMI_TLV_TAG,
+ WMI_TAG_BCN_PRB_INFO) |
+ FIELD_PREP(WMI_TLV_LEN, len - TLV_HDR_SIZE);
+ probe_info->caps = 0;
+ probe_info->erp = 0;
+
+ ptr += sizeof(*probe_info);
+
+ tlv = ptr;
+ tlv->header = FIELD_PREP(WMI_TLV_TAG, WMI_TAG_ARRAY_BYTE) |
+ FIELD_PREP(WMI_TLV_LEN, aligned_len);
+ memcpy(tlv->value, tmpl->data, tmpl->len);
+
+ ret = ath11k_wmi_cmd_send(ar->wmi, skb, WMI_PRB_TMPL_CMDID);
+ if (ret) {
+ ath11k_warn(ar->ab,
+ "WMI vdev %i failed to send probe response template command\n",
+ vdev_id);
+ dev_kfree_skb(skb);
+ return ret;
+ }
+
+ ath11k_dbg(ar->ab, ATH11K_DBG_WMI, "cmd ");
+
+ return 0;
+}
+
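+/* Enable FILS discovery or unsolicited broadcast probe response
+ * transmission on a vdev; cmd->config selects which of the two the
+ * @interval (in TU) applies to.
+ */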
+int ath11k_wmi_fils_discovery(struct ath11k *ar, u32 vdev_id, u32 interval,
+ bool unsol_bcast_probe_resp_enabled)
+{
+ struct sk_buff *skb;
+ int ret, len;
+ struct wmi_fils_discovery_cmd *cmd;
+
+ ath11k_dbg(ar->ab, ATH11K_DBG_WMI,
+ "vdev %i set %s interval to %u TU\n",
+ vdev_id, unsol_bcast_probe_resp_enabled ?
+ "unsolicited broadcast probe response" : "FILS discovery",
+ interval);
+
+ len = sizeof(*cmd);
+ skb = ath11k_wmi_alloc_skb(ar->wmi->wmi_ab, len);
+ if (!skb)
+ return -ENOMEM;
+
+ cmd = (struct wmi_fils_discovery_cmd *)skb->data;
+ cmd->tlv_header = FIELD_PREP(WMI_TLV_TAG, WMI_TAG_ENABLE_FILS_CMD) |
+ FIELD_PREP(WMI_TLV_LEN, len - TLV_HDR_SIZE);
+ cmd->vdev_id = vdev_id;
+ cmd->interval = interval;
+ cmd->config = unsol_bcast_probe_resp_enabled;
+
+ ret = ath11k_wmi_cmd_send(ar->wmi, skb, WMI_ENABLE_FILS_CMDID);
+ if (ret) {
+ ath11k_warn(ar->ab,
+ "WMI vdev %i failed to send FILS discovery enable/disable command\n",
+ vdev_id);
+ dev_kfree_skb(skb);
+ return ret;
+ }
+
+ ath11k_dbg(ar->ab, ATH11K_DBG_WMI, "cmd enable fils");
+
+ return 0;
+}
+
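+/* Handle the OBSS color collision event: look up the vdev under RCU and
+ * forward the reported color bitmap to mac80211 so it can pick a new
+ * BSS color.
+ */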
+static void
+ath11k_wmi_obss_color_collision_event(struct ath11k_base *ab, struct sk_buff *skb)
+{
+ const void **tb;
+ const struct wmi_obss_color_collision_event *ev;
+ struct ath11k_vif *arvif;
+ int ret;
+
+ tb = ath11k_wmi_tlv_parse_alloc(ab, skb->data, skb->len, GFP_ATOMIC);
+ if (IS_ERR(tb)) {
+ ret = PTR_ERR(tb);
+ ath11k_warn(ab, "failed to parse tlv: %d\n", ret);
+ return;
+ }
+
+ ath11k_dbg(ab, ATH11K_DBG_WMI, "event obss color collision");
+
+ rcu_read_lock();
+
+ ev = tb[WMI_TAG_OBSS_COLOR_COLLISION_EVT];
+ if (!ev) {
+ ath11k_warn(ab, "failed to fetch obss color collision ev");
+ goto exit;
+ }
+
+ arvif = ath11k_mac_get_arvif_by_vdev_id(ab, ev->vdev_id);
+ if (!arvif) {
+ ath11k_warn(ab, "failed to find arvif with vedv id %d in obss_color_collision_event\n",
+ ev->vdev_id);
+ goto exit;
+ }
+
+ switch (ev->evt_type) {
+ case WMI_BSS_COLOR_COLLISION_DETECTION:
+ /* atomic allocation: we are inside an RCU read-side critical section */
+ ieee80211_obss_color_collision_notify(arvif->vif, ev->obss_color_bitmap,
+ GFP_ATOMIC);
+ ath11k_dbg(ab, ATH11K_DBG_WMI,
+ "OBSS color collision detected vdev:%d, event:%d, bitmap:%08llx\n",
+ ev->vdev_id, ev->evt_type, ev->obss_color_bitmap);
+ break;
+ case WMI_BSS_COLOR_COLLISION_DISABLE:
+ case WMI_BSS_COLOR_FREE_SLOT_TIMER_EXPIRY:
+ case WMI_BSS_COLOR_FREE_SLOT_AVAILABLE:
+ break;
+ default:
+ ath11k_warn(ab, "received unknown obss color collision detection event\n");
+ }
+
+exit:
+ kfree(tb);
+ rcu_read_unlock();
+}
+
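+/* Build the band-to-mac mapping sent in WMI_INIT_CMDID: for each radio,
+ * derive the servable frequency range from its supported bands and the
+ * HAL regulatory capabilities.
+ */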
+static void
+ath11k_fill_band_to_mac_param(struct ath11k_base *soc,
+ struct wmi_host_pdev_band_to_mac *band_to_mac)
+{
+ u8 i;
+ struct ath11k_hal_reg_capabilities_ext *hal_reg_cap;
+ struct ath11k_pdev *pdev;
+
+ for (i = 0; i < soc->num_radios; i++) {
+ pdev = &soc->pdevs[i];
+ hal_reg_cap = &soc->hal_reg_cap[i];
+ band_to_mac[i].pdev_id = pdev->pdev_id;
+
+ switch (pdev->cap.supported_bands) {
+ case WMI_HOST_WLAN_2G_5G_CAP:
+ band_to_mac[i].start_freq = hal_reg_cap->low_2ghz_chan;
+ band_to_mac[i].end_freq = hal_reg_cap->high_5ghz_chan;
+ break;
+ case WMI_HOST_WLAN_2G_CAP:
+ band_to_mac[i].start_freq = hal_reg_cap->low_2ghz_chan;
+ band_to_mac[i].end_freq = hal_reg_cap->high_2ghz_chan;
+ break;
+ case WMI_HOST_WLAN_5G_CAP:
+ band_to_mac[i].start_freq = hal_reg_cap->low_5ghz_chan;
+ band_to_mac[i].end_freq = hal_reg_cap->high_5ghz_chan;
+ break;
+ default:
+ break;
+ }
+ }
+}
+
+static void
+ath11k_wmi_copy_resource_config(struct wmi_resource_config *wmi_cfg,
+ struct target_resource_config *tg_cfg)
+{
+ wmi_cfg->num_vdevs = tg_cfg->num_vdevs;
+ wmi_cfg->num_peers = tg_cfg->num_peers;
+ wmi_cfg->num_offload_peers = tg_cfg->num_offload_peers;
+ wmi_cfg->num_offload_reorder_buffs = tg_cfg->num_offload_reorder_buffs;
+ wmi_cfg->num_peer_keys = tg_cfg->num_peer_keys;
+ wmi_cfg->num_tids = tg_cfg->num_tids;
+ wmi_cfg->ast_skid_limit = tg_cfg->ast_skid_limit;
+ wmi_cfg->tx_chain_mask = tg_cfg->tx_chain_mask;
+ wmi_cfg->rx_chain_mask = tg_cfg->rx_chain_mask;
+ wmi_cfg->rx_timeout_pri[0] = tg_cfg->rx_timeout_pri[0];
+ wmi_cfg->rx_timeout_pri[1] = tg_cfg->rx_timeout_pri[1];
+ wmi_cfg->rx_timeout_pri[2] = tg_cfg->rx_timeout_pri[2];
+ wmi_cfg->rx_timeout_pri[3] = tg_cfg->rx_timeout_pri[3];
+ wmi_cfg->rx_decap_mode = tg_cfg->rx_decap_mode;
+ wmi_cfg->scan_max_pending_req = tg_cfg->scan_max_pending_req;
+ wmi_cfg->bmiss_offload_max_vdev = tg_cfg->bmiss_offload_max_vdev;
+ wmi_cfg->roam_offload_max_vdev = tg_cfg->roam_offload_max_vdev;
+ wmi_cfg->roam_offload_max_ap_profiles =
+ tg_cfg->roam_offload_max_ap_profiles;
+ wmi_cfg->num_mcast_groups = tg_cfg->num_mcast_groups;
+ wmi_cfg->num_mcast_table_elems = tg_cfg->num_mcast_table_elems;
+ wmi_cfg->mcast2ucast_mode = tg_cfg->mcast2ucast_mode;
+ wmi_cfg->tx_dbg_log_size = tg_cfg->tx_dbg_log_size;
+ wmi_cfg->num_wds_entries = tg_cfg->num_wds_entries;
+ wmi_cfg->dma_burst_size = tg_cfg->dma_burst_size;
+ wmi_cfg->mac_aggr_delim = tg_cfg->mac_aggr_delim;
+ wmi_cfg->rx_skip_defrag_timeout_dup_detection_check =
+ tg_cfg->rx_skip_defrag_timeout_dup_detection_check;
+ wmi_cfg->vow_config = tg_cfg->vow_config;
+ wmi_cfg->gtk_offload_max_vdev = tg_cfg->gtk_offload_max_vdev;
+ wmi_cfg->num_msdu_desc = tg_cfg->num_msdu_desc;
+ wmi_cfg->max_frag_entries = tg_cfg->max_frag_entries;
+ wmi_cfg->num_tdls_vdevs = tg_cfg->num_tdls_vdevs;
+ wmi_cfg->num_tdls_conn_table_entries =
+ tg_cfg->num_tdls_conn_table_entries;
+ wmi_cfg->beacon_tx_offload_max_vdev =
+ tg_cfg->beacon_tx_offload_max_vdev;
+ wmi_cfg->num_multicast_filter_entries =
+ tg_cfg->num_multicast_filter_entries;
+ wmi_cfg->num_wow_filters = tg_cfg->num_wow_filters;
+ wmi_cfg->num_keep_alive_pattern = tg_cfg->num_keep_alive_pattern;
+ wmi_cfg->keep_alive_pattern_size = tg_cfg->keep_alive_pattern_size;
+ wmi_cfg->max_tdls_concurrent_sleep_sta =
+ tg_cfg->max_tdls_concurrent_sleep_sta;
+ wmi_cfg->max_tdls_concurrent_buffer_sta =
+ tg_cfg->max_tdls_concurrent_buffer_sta;
+ wmi_cfg->wmi_send_separate = tg_cfg->wmi_send_separate;
+ wmi_cfg->num_ocb_vdevs = tg_cfg->num_ocb_vdevs;
+ wmi_cfg->num_ocb_channels = tg_cfg->num_ocb_channels;
+ wmi_cfg->num_ocb_schedules = tg_cfg->num_ocb_schedules;
+ wmi_cfg->bpf_instruction_size = tg_cfg->bpf_instruction_size;
+ wmi_cfg->max_bssid_rx_filters = tg_cfg->max_bssid_rx_filters;
+ wmi_cfg->use_pdev_id = tg_cfg->use_pdev_id;
+ wmi_cfg->flag1 = tg_cfg->flag1;
+ wmi_cfg->peer_map_unmap_v2_support = tg_cfg->peer_map_unmap_v2_support;
+ wmi_cfg->sched_params = tg_cfg->sched_params;
+ wmi_cfg->twt_ap_pdev_count = tg_cfg->twt_ap_pdev_count;
+ wmi_cfg->twt_ap_sta_count = tg_cfg->twt_ap_sta_count;
+ wmi_cfg->host_service_flags &=
+ ~(1 << WMI_CFG_HOST_SERVICE_FLAG_REG_CC_EXT);
+ wmi_cfg->host_service_flags |= (tg_cfg->is_reg_cc_ext_event_supported <<
+ WMI_CFG_HOST_SERVICE_FLAG_REG_CC_EXT);
+ wmi_cfg->flags2 = WMI_RSRC_CFG_FLAG2_CALC_NEXT_DTIM_COUNT_SET;
+ wmi_cfg->ema_max_vap_cnt = tg_cfg->ema_max_vap_cnt;
+ wmi_cfg->ema_max_profile_period = tg_cfg->ema_max_profile_period;
+}
+
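+/* Send WMI_INIT_CMDID. The command is a fixed struct followed by the
+ * resource config TLV, an array TLV of host memory chunks (possibly
+ * empty), and, when a specific hw mode is preferred, a set-hw-mode TLV
+ * plus an array TLV of band-to-mac mappings.
+ */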
+static int ath11k_init_cmd_send(struct ath11k_pdev_wmi *wmi,
+ struct wmi_init_cmd_param *param)
+{
+ struct ath11k_base *ab = wmi->wmi_ab->ab;
+ struct sk_buff *skb;
+ struct wmi_init_cmd *cmd;
+ struct wmi_resource_config *cfg;
+ struct wmi_pdev_set_hw_mode_cmd_param *hw_mode;
+ struct wmi_pdev_band_to_mac *band_to_mac;
+ struct wlan_host_mem_chunk *host_mem_chunks;
+ struct wmi_tlv *tlv;
+ size_t len;
+ int ret;
+ void *ptr;
+ u32 hw_mode_len = 0;
+ u16 idx;
+
+ if (param->hw_mode_id != WMI_HOST_HW_MODE_MAX)
+ hw_mode_len = sizeof(*hw_mode) + TLV_HDR_SIZE +
+ (param->num_band_to_mac * sizeof(*band_to_mac));
+
+ len = sizeof(*cmd) + TLV_HDR_SIZE + sizeof(*cfg) + hw_mode_len +
+ (param->num_mem_chunks ? (sizeof(*host_mem_chunks) * WMI_MAX_MEM_REQS) : 0);
+
+ skb = ath11k_wmi_alloc_skb(wmi->wmi_ab, len);
+ if (!skb)
+ return -ENOMEM;
+
+ cmd = (struct wmi_init_cmd *)skb->data;
+
+ cmd->tlv_header = FIELD_PREP(WMI_TLV_TAG, WMI_TAG_INIT_CMD) |
+ FIELD_PREP(WMI_TLV_LEN, sizeof(*cmd) - TLV_HDR_SIZE);
+
+ ptr = skb->data + sizeof(*cmd);
+ cfg = ptr;
+
+ ath11k_wmi_copy_resource_config(cfg, param->res_cfg);
+
+ cfg->tlv_header = FIELD_PREP(WMI_TLV_TAG, WMI_TAG_RESOURCE_CONFIG) |
+ FIELD_PREP(WMI_TLV_LEN, sizeof(*cfg) - TLV_HDR_SIZE);
+
+ ptr += sizeof(*cfg);
+ host_mem_chunks = ptr + TLV_HDR_SIZE;
+ len = sizeof(struct wlan_host_mem_chunk);
+
+ for (idx = 0; idx < param->num_mem_chunks; ++idx) {
+ host_mem_chunks[idx].tlv_header =
+ FIELD_PREP(WMI_TLV_TAG,
+ WMI_TAG_WLAN_HOST_MEMORY_CHUNK) |
+ FIELD_PREP(WMI_TLV_LEN, len);
+
+ host_mem_chunks[idx].ptr = param->mem_chunks[idx].paddr;
+ host_mem_chunks[idx].size = param->mem_chunks[idx].len;
+ host_mem_chunks[idx].req_id = param->mem_chunks[idx].req_id;
+
+ ath11k_dbg(ab, ATH11K_DBG_WMI,
+ "host mem chunk req_id %d paddr 0x%llx len %d\n",
+ param->mem_chunks[idx].req_id,
+ (u64)param->mem_chunks[idx].paddr,
+ param->mem_chunks[idx].len);
+ }
+ cmd->num_host_mem_chunks = param->num_mem_chunks;
+ len = sizeof(struct wlan_host_mem_chunk) * param->num_mem_chunks;
+
+ /* array TLV header for the host memory chunks; len is zero when
+ * num_mem_chunks is zero
+ */
+ tlv = ptr;
+ tlv->header = FIELD_PREP(WMI_TLV_TAG, WMI_TAG_ARRAY_STRUCT) |
+ FIELD_PREP(WMI_TLV_LEN, len);
+ ptr += TLV_HDR_SIZE + len;
+
+ if (param->hw_mode_id != WMI_HOST_HW_MODE_MAX) {
+ hw_mode = (struct wmi_pdev_set_hw_mode_cmd_param *)ptr;
+ hw_mode->tlv_header = FIELD_PREP(WMI_TLV_TAG,
+ WMI_TAG_PDEV_SET_HW_MODE_CMD) |
+ FIELD_PREP(WMI_TLV_LEN,
+ sizeof(*hw_mode) - TLV_HDR_SIZE);
+
+ hw_mode->hw_mode_index = param->hw_mode_id;
+ hw_mode->num_band_to_mac = param->num_band_to_mac;
+
+ ptr += sizeof(*hw_mode);
+
+ len = param->num_band_to_mac * sizeof(*band_to_mac);
+ tlv = ptr;
+ tlv->header = FIELD_PREP(WMI_TLV_TAG, WMI_TAG_ARRAY_STRUCT) |
+ FIELD_PREP(WMI_TLV_LEN, len);
+
+ ptr += TLV_HDR_SIZE;
+ len = sizeof(*band_to_mac);
+
+ for (idx = 0; idx < param->num_band_to_mac; idx++) {
+ band_to_mac = (void *)ptr;
+
+ band_to_mac->tlv_header = FIELD_PREP(WMI_TLV_TAG,
+ WMI_TAG_PDEV_BAND_TO_MAC) |
+ FIELD_PREP(WMI_TLV_LEN,
+ len - TLV_HDR_SIZE);
+ band_to_mac->pdev_id = param->band_to_mac[idx].pdev_id;
+ band_to_mac->start_freq =
+ param->band_to_mac[idx].start_freq;
+ band_to_mac->end_freq =
+ param->band_to_mac[idx].end_freq;
+ ptr += sizeof(*band_to_mac);
+ }
+ }
+
+ ret = ath11k_wmi_cmd_send(wmi, skb, WMI_INIT_CMDID);
+ if (ret) {
+ ath11k_warn(ab, "failed to send WMI_INIT_CMDID\n");
+ dev_kfree_skb(skb);
+ return ret;
+ }
+
+ ath11k_dbg(ab, ATH11K_DBG_WMI, "cmd wmi init");
+
+ return 0;
+}
+
+int ath11k_wmi_pdev_lro_cfg(struct ath11k *ar,
+ int pdev_id)
+{
+ struct ath11k_wmi_pdev_lro_config_cmd *cmd;
+ struct sk_buff *skb;
+ int ret;
+
+ skb = ath11k_wmi_alloc_skb(ar->wmi->wmi_ab, sizeof(*cmd));
+ if (!skb)
+ return -ENOMEM;
+
+ cmd = (struct ath11k_wmi_pdev_lro_config_cmd *)skb->data;
+ cmd->tlv_header = FIELD_PREP(WMI_TLV_TAG, WMI_TAG_LRO_INFO_CMD) |
+ FIELD_PREP(WMI_TLV_LEN, sizeof(*cmd) - TLV_HDR_SIZE);
+
+ get_random_bytes(cmd->th_4, sizeof(u32) * ATH11K_IPV4_TH_SEED_SIZE);
+ get_random_bytes(cmd->th_6, sizeof(u32) * ATH11K_IPV6_TH_SEED_SIZE);
+
+ cmd->pdev_id = pdev_id;
+
+ ret = ath11k_wmi_cmd_send(ar->wmi, skb, WMI_LRO_CONFIG_CMDID);
+ if (ret) {
+ ath11k_warn(ar->ab,
+ "failed to send lro cfg req wmi cmd\n");
+ goto err;
+ }
+
+ ath11k_dbg(ar->ab, ATH11K_DBG_WMI,
+ "cmd lro config pdev_id 0x%x\n", pdev_id);
+ return 0;
+err:
+ dev_kfree_skb(skb);
+ return ret;
+}
+
+int ath11k_wmi_wait_for_service_ready(struct ath11k_base *ab)
+{
+ unsigned long time_left;
+
+ time_left = wait_for_completion_timeout(&ab->wmi_ab.service_ready,
+ WMI_SERVICE_READY_TIMEOUT_HZ);
+ if (!time_left)
+ return -ETIMEDOUT;
+
+ return 0;
+}
+
+int ath11k_wmi_wait_for_unified_ready(struct ath11k_base *ab)
+{
+ unsigned long time_left;
+
+ time_left = wait_for_completion_timeout(&ab->wmi_ab.unified_ready,
+ WMI_SERVICE_READY_TIMEOUT_HZ);
+ if (!time_left)
+ return -ETIMEDOUT;
+
+ return 0;
+}
+
+int ath11k_wmi_set_hw_mode(struct ath11k_base *ab,
+ enum wmi_host_hw_mode_config_type mode)
+{
+ struct wmi_pdev_set_hw_mode_cmd_param *cmd;
+ struct sk_buff *skb;
+ struct ath11k_wmi_base *wmi_ab = &ab->wmi_ab;
+ int len;
+ int ret;
+
+ len = sizeof(*cmd);
+
+ skb = ath11k_wmi_alloc_skb(wmi_ab, len);
+ if (!skb)
+ return -ENOMEM;
+
+ cmd = (struct wmi_pdev_set_hw_mode_cmd_param *)skb->data;
+
+ cmd->tlv_header = FIELD_PREP(WMI_TLV_TAG, WMI_TAG_PDEV_SET_HW_MODE_CMD) |
+ FIELD_PREP(WMI_TLV_LEN, sizeof(*cmd) - TLV_HDR_SIZE);
+
+ cmd->pdev_id = WMI_PDEV_ID_SOC;
+ cmd->hw_mode_index = mode;
+
+ ret = ath11k_wmi_cmd_send(&wmi_ab->wmi[0], skb, WMI_PDEV_SET_HW_MODE_CMDID);
+ if (ret) {
+ ath11k_warn(ab, "failed to send WMI_PDEV_SET_HW_MODE_CMDID\n");
+ dev_kfree_skb(skb);
+ return ret;
+ }
+
+ ath11k_dbg(ab, ATH11K_DBG_WMI, "cmd pdev set hw mode %d", cmd->hw_mode_index);
+
+ return 0;
+}
+
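+/* Top-level WMI init: take the resource config from hw_params, advertise
+ * extended regulatory (reg_cc_ext) support when the firmware service bit
+ * is set, and fire WMI_INIT_CMDID with the preferred hw mode and
+ * band-to-mac mapping.
+ */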
+int ath11k_wmi_cmd_init(struct ath11k_base *ab)
+{
+ struct ath11k_wmi_base *wmi_sc = &ab->wmi_ab;
+ struct wmi_init_cmd_param init_param;
+ struct target_resource_config config;
+
+ memset(&init_param, 0, sizeof(init_param));
+ memset(&config, 0, sizeof(config));
+
+ ab->hw_params.hw_ops->wmi_init_config(ab, &config);
+
+ if (test_bit(WMI_TLV_SERVICE_REG_CC_EXT_EVENT_SUPPORT,
+ ab->wmi_ab.svc_map))
+ config.is_reg_cc_ext_event_supported = 1;
+
+ memcpy(&wmi_sc->wlan_resource_config, &config, sizeof(config));
+
+ init_param.res_cfg = &wmi_sc->wlan_resource_config;
+ init_param.num_mem_chunks = wmi_sc->num_mem_chunks;
+ init_param.hw_mode_id = wmi_sc->preferred_hw_mode;
+ init_param.mem_chunks = wmi_sc->mem_chunks;
+
+ if (ab->hw_params.single_pdev_only)
+ init_param.hw_mode_id = WMI_HOST_HW_MODE_MAX;
+
+ init_param.num_band_to_mac = ab->num_radios;
+ ath11k_fill_band_to_mac_param(ab, init_param.band_to_mac);
+
+ return ath11k_init_cmd_send(&wmi_sc->wmi[0], &init_param);
+}
+
+int ath11k_wmi_vdev_spectral_conf(struct ath11k *ar,
+ struct ath11k_wmi_vdev_spectral_conf_param *param)
+{
+ struct ath11k_wmi_vdev_spectral_conf_cmd *cmd;
+ struct sk_buff *skb;
+ int ret;
+
+ skb = ath11k_wmi_alloc_skb(ar->wmi->wmi_ab, sizeof(*cmd));
+ if (!skb)
+ return -ENOMEM;
+
+ cmd = (struct ath11k_wmi_vdev_spectral_conf_cmd *)skb->data;
+ cmd->tlv_header = FIELD_PREP(WMI_TLV_TAG,
+ WMI_TAG_VDEV_SPECTRAL_CONFIGURE_CMD) |
+ FIELD_PREP(WMI_TLV_LEN, sizeof(*cmd) - TLV_HDR_SIZE);
+
+ memcpy(&cmd->param, param, sizeof(*param));
+
+ ret = ath11k_wmi_cmd_send(ar->wmi, skb,
+ WMI_VDEV_SPECTRAL_SCAN_CONFIGURE_CMDID);
+ if (ret) {
+ ath11k_warn(ar->ab,
+ "failed to send spectral scan config wmi cmd\n");
+ goto err;
+ }
+
+ ath11k_dbg(ar->ab, ATH11K_DBG_WMI,
+ "cmd vdev spectral scan configure vdev_id 0x%x\n",
+ param->vdev_id);
+
+ return 0;
+err:
+ dev_kfree_skb(skb);
+ return ret;
+}
+
+int ath11k_wmi_vdev_spectral_enable(struct ath11k *ar, u32 vdev_id,
+ u32 trigger, u32 enable)
+{
+ struct ath11k_wmi_vdev_spectral_enable_cmd *cmd;
+ struct sk_buff *skb;
+ int ret;
+
+ skb = ath11k_wmi_alloc_skb(ar->wmi->wmi_ab, sizeof(*cmd));
+ if (!skb)
+ return -ENOMEM;
+
+ cmd = (struct ath11k_wmi_vdev_spectral_enable_cmd *)skb->data;
+ cmd->tlv_header = FIELD_PREP(WMI_TLV_TAG,
+ WMI_TAG_VDEV_SPECTRAL_ENABLE_CMD) |
+ FIELD_PREP(WMI_TLV_LEN, sizeof(*cmd) - TLV_HDR_SIZE);
+
+ cmd->vdev_id = vdev_id;
+ cmd->trigger_cmd = trigger;
+ cmd->enable_cmd = enable;
+
+ ret = ath11k_wmi_cmd_send(ar->wmi, skb,
+ WMI_VDEV_SPECTRAL_SCAN_ENABLE_CMDID);
+ if (ret) {
+ ath11k_warn(ar->ab,
+ "failed to send spectral enable wmi cmd\n");
+ goto err;
+ }
+
+ ath11k_dbg(ar->ab, ATH11K_DBG_WMI,
+ "cmd vdev spectral scan enable vdev id 0x%x\n",
+ vdev_id);
+
+ return 0;
+err:
+ dev_kfree_skb(skb);
+ return ret;
+}
+
+int ath11k_wmi_pdev_dma_ring_cfg(struct ath11k *ar,
+ struct ath11k_wmi_pdev_dma_ring_cfg_req_cmd *param)
+{
+ struct ath11k_wmi_pdev_dma_ring_cfg_req_cmd *cmd;
+ struct sk_buff *skb;
+ int ret;
+
+ skb = ath11k_wmi_alloc_skb(ar->wmi->wmi_ab, sizeof(*cmd));
+ if (!skb)
+ return -ENOMEM;
+
+ cmd = (struct ath11k_wmi_pdev_dma_ring_cfg_req_cmd *)skb->data;
+ cmd->tlv_header = FIELD_PREP(WMI_TLV_TAG, WMI_TAG_DMA_RING_CFG_REQ) |
+ FIELD_PREP(WMI_TLV_LEN, sizeof(*cmd) - TLV_HDR_SIZE);
+
+ cmd->pdev_id = param->pdev_id;
+ cmd->module_id = param->module_id;
+ cmd->base_paddr_lo = param->base_paddr_lo;
+ cmd->base_paddr_hi = param->base_paddr_hi;
+ cmd->head_idx_paddr_lo = param->head_idx_paddr_lo;
+ cmd->head_idx_paddr_hi = param->head_idx_paddr_hi;
+ cmd->tail_idx_paddr_lo = param->tail_idx_paddr_lo;
+ cmd->tail_idx_paddr_hi = param->tail_idx_paddr_hi;
+ cmd->num_elems = param->num_elems;
+ cmd->buf_size = param->buf_size;
+ cmd->num_resp_per_event = param->num_resp_per_event;
+ cmd->event_timeout_ms = param->event_timeout_ms;
+
+ ret = ath11k_wmi_cmd_send(ar->wmi, skb,
+ WMI_PDEV_DMA_RING_CFG_REQ_CMDID);
+ if (ret) {
+ ath11k_warn(ar->ab,
+ "failed to send dma ring cfg req wmi cmd\n");
+ goto err;
+ }
+
+ ath11k_dbg(ar->ab, ATH11K_DBG_WMI,
+ "cmd pdev dma ring cfg req pdev_id 0x%x\n",
+ param->pdev_id);
+
+ return 0;
+err:
+ dev_kfree_skb(skb);
+ return ret;
+}
+
+static int ath11k_wmi_tlv_dma_buf_entry_parse(struct ath11k_base *soc,
+ u16 tag, u16 len,
+ const void *ptr, void *data)
+{
+ struct wmi_tlv_dma_buf_release_parse *parse = data;
+
+ if (tag != WMI_TAG_DMA_BUF_RELEASE_ENTRY)
+ return -EPROTO;
+
+ if (parse->num_buf_entry >= parse->fixed.num_buf_release_entry)
+ return -ENOBUFS;
+
+ parse->num_buf_entry++;
+ return 0;
+}
+
+static int ath11k_wmi_tlv_dma_buf_meta_parse(struct ath11k_base *soc,
+ u16 tag, u16 len,
+ const void *ptr, void *data)
+{
+ struct wmi_tlv_dma_buf_release_parse *parse = data;
+
+ if (tag != WMI_TAG_DMA_BUF_RELEASE_SPECTRAL_META_DATA)
+ return -EPROTO;
+
+ if (parse->num_meta >= parse->fixed.num_meta_data_entry)
+ return -ENOBUFS;
+
+ parse->num_meta++;
+ return 0;
+}
+
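+/* TLV walker for the DMA buffer release event: the fixed param TLV is
+ * copied first, then the two WMI_TAG_ARRAY_STRUCT TLVs are visited in
+ * order (buffer entries first, spectral metadata second), counting and
+ * recording each array.
+ */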
+static int ath11k_wmi_tlv_dma_buf_parse(struct ath11k_base *ab,
+ u16 tag, u16 len,
+ const void *ptr, void *data)
+{
+ struct wmi_tlv_dma_buf_release_parse *parse = data;
+ int ret;
+
+ switch (tag) {
+ case WMI_TAG_DMA_BUF_RELEASE:
+ memcpy(&parse->fixed, ptr,
+ sizeof(struct ath11k_wmi_dma_buf_release_fixed_param));
+ parse->fixed.pdev_id = DP_HW2SW_MACID(parse->fixed.pdev_id);
+ break;
+ case WMI_TAG_ARRAY_STRUCT:
+ if (!parse->buf_entry_done) {
+ parse->num_buf_entry = 0;
+ parse->buf_entry = (struct wmi_dma_buf_release_entry *)ptr;
+
+ ret = ath11k_wmi_tlv_iter(ab, ptr, len,
+ ath11k_wmi_tlv_dma_buf_entry_parse,
+ parse);
+ if (ret) {
+ ath11k_warn(ab, "failed to parse dma buf entry tlv %d\n",
+ ret);
+ return ret;
+ }
+
+ parse->buf_entry_done = true;
+ } else if (!parse->meta_data_done) {
+ parse->num_meta = 0;
+ parse->meta_data = (struct wmi_dma_buf_release_meta_data *)ptr;
+
+ ret = ath11k_wmi_tlv_iter(ab, ptr, len,
+ ath11k_wmi_tlv_dma_buf_meta_parse,
+ parse);
+ if (ret) {
+ ath11k_warn(ab, "failed to parse dma buf meta tlv %d\n",
+ ret);
+ return ret;
+ }
+
+ parse->meta_data_done = true;
+ }
+ break;
+ default:
+ break;
+ }
+ return 0;
+}
+
+static void ath11k_wmi_pdev_dma_ring_buf_release_event(struct ath11k_base *ab,
+ struct sk_buff *skb)
+{
+ struct wmi_tlv_dma_buf_release_parse parse = { };
+ struct ath11k_dbring_buf_release_event param;
+ int ret;
+
+ ret = ath11k_wmi_tlv_iter(ab, skb->data, skb->len,
+ ath11k_wmi_tlv_dma_buf_parse,
+ &parse);
+ if (ret) {
+ ath11k_warn(ab, "failed to parse dma buf release tlv %d\n", ret);
+ return;
+ }
+
+ ath11k_dbg(ab, ATH11K_DBG_WMI, "event pdev dma ring buf release");
+
+ param.fixed = parse.fixed;
+ param.buf_entry = parse.buf_entry;
+ param.num_buf_entry = parse.num_buf_entry;
+ param.meta_data = parse.meta_data;
+ param.num_meta = parse.num_meta;
+
+ ret = ath11k_dbring_buffer_release_event(ab, &param);
+ if (ret) {
+ ath11k_warn(ab, "failed to handle dma buf release event %d\n", ret);
+ return;
+ }
+}
+
+static int ath11k_wmi_tlv_hw_mode_caps_parse(struct ath11k_base *soc,
+ u16 tag, u16 len,
+ const void *ptr, void *data)
+{
+ struct wmi_tlv_svc_rdy_ext_parse *svc_rdy_ext = data;
+ struct wmi_hw_mode_capabilities *hw_mode_cap;
+ u32 phy_map = 0;
+
+ if (tag != WMI_TAG_HW_MODE_CAPABILITIES)
+ return -EPROTO;
+
+ if (svc_rdy_ext->n_hw_mode_caps >= svc_rdy_ext->param.num_hw_modes)
+ return -ENOBUFS;
+
+ hw_mode_cap = container_of(ptr, struct wmi_hw_mode_capabilities,
+ hw_mode_id);
+ svc_rdy_ext->n_hw_mode_caps++;
+
+ phy_map = hw_mode_cap->phy_id_map;
+ while (phy_map) {
+ svc_rdy_ext->tot_phy_id++;
+ phy_map = phy_map >> 1;
+ }
+
+ return 0;
+}
+
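+/* Parse the hw mode capability array and pick the preferred hw mode:
+ * each advertised mode is compared against the current preference via
+ * ath11k_hw_mode_pri_map, keeping the mode with the lower (higher
+ * priority) map value.
+ */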
+static int ath11k_wmi_tlv_hw_mode_caps(struct ath11k_base *soc,
+ u16 len, const void *ptr, void *data)
+{
+ struct wmi_tlv_svc_rdy_ext_parse *svc_rdy_ext = data;
+ struct wmi_hw_mode_capabilities *hw_mode_caps;
+ enum wmi_host_hw_mode_config_type mode, pref;
+ u32 i;
+ int ret;
+
+ svc_rdy_ext->n_hw_mode_caps = 0;
+ svc_rdy_ext->hw_mode_caps = (struct wmi_hw_mode_capabilities *)ptr;
+
+ ret = ath11k_wmi_tlv_iter(soc, ptr, len,
+ ath11k_wmi_tlv_hw_mode_caps_parse,
+ svc_rdy_ext);
+ if (ret) {
+ ath11k_warn(soc, "failed to parse tlv %d\n", ret);
+ return ret;
+ }
+
+ i = 0;
+ while (i < svc_rdy_ext->n_hw_mode_caps) {
+ hw_mode_caps = &svc_rdy_ext->hw_mode_caps[i];
+ mode = hw_mode_caps->hw_mode_id;
+ pref = soc->wmi_ab.preferred_hw_mode;
+
+ if (ath11k_hw_mode_pri_map[mode] < ath11k_hw_mode_pri_map[pref]) {
+ svc_rdy_ext->pref_hw_mode_caps = *hw_mode_caps;
+ soc->wmi_ab.preferred_hw_mode = mode;
+ }
+ i++;
+ }
+
+ ath11k_dbg(soc, ATH11K_DBG_WMI, "preferred_hw_mode:%d\n",
+ soc->wmi_ab.preferred_hw_mode);
+ if (soc->wmi_ab.preferred_hw_mode == WMI_HOST_HW_MODE_MAX)
+ return -EINVAL;
+
+ return 0;
+}
+
+static int ath11k_wmi_tlv_mac_phy_caps_parse(struct ath11k_base *soc,
+ u16 tag, u16 len,
+ const void *ptr, void *data)
+{
+ struct wmi_tlv_svc_rdy_ext_parse *svc_rdy_ext = data;
+
+ if (tag != WMI_TAG_MAC_PHY_CAPABILITIES)
+ return -EPROTO;
+
+ if (svc_rdy_ext->n_mac_phy_caps >= svc_rdy_ext->tot_phy_id)
+ return -ENOBUFS;
+
+ len = min_t(u16, len, sizeof(struct wmi_mac_phy_capabilities));
+ if (!svc_rdy_ext->n_mac_phy_caps) {
+ svc_rdy_ext->mac_phy_caps = kcalloc(svc_rdy_ext->tot_phy_id,
+ len, GFP_ATOMIC);
+ if (!svc_rdy_ext->mac_phy_caps)
+ return -ENOMEM;
+ }
+
+ memcpy(svc_rdy_ext->mac_phy_caps + svc_rdy_ext->n_mac_phy_caps, ptr, len);
+ svc_rdy_ext->n_mac_phy_caps++;
+ return 0;
+}
+
+static int ath11k_wmi_tlv_ext_hal_reg_caps_parse(struct ath11k_base *soc,
+ u16 tag, u16 len,
+ const void *ptr, void *data)
+{
+ struct wmi_tlv_svc_rdy_ext_parse *svc_rdy_ext = data;
+
+ if (tag != WMI_TAG_HAL_REG_CAPABILITIES_EXT)
+ return -EPROTO;
+
+ if (svc_rdy_ext->n_ext_hal_reg_caps >= svc_rdy_ext->param.num_phy)
+ return -ENOBUFS;
+
+ svc_rdy_ext->n_ext_hal_reg_caps++;
+ return 0;
+}
+
+static int ath11k_wmi_tlv_ext_hal_reg_caps(struct ath11k_base *soc,
+ u16 len, const void *ptr, void *data)
+{
+ struct ath11k_pdev_wmi *wmi_handle = &soc->wmi_ab.wmi[0];
+ struct wmi_tlv_svc_rdy_ext_parse *svc_rdy_ext = data;
+ struct ath11k_hal_reg_capabilities_ext reg_cap;
+ int ret;
+ u32 i;
+
+ svc_rdy_ext->n_ext_hal_reg_caps = 0;
+ svc_rdy_ext->ext_hal_reg_caps = (struct wmi_hal_reg_capabilities_ext *)ptr;
+ ret = ath11k_wmi_tlv_iter(soc, ptr, len,
+ ath11k_wmi_tlv_ext_hal_reg_caps_parse,
+ svc_rdy_ext);
+ if (ret) {
+ ath11k_warn(soc, "failed to parse tlv %d\n", ret);
+ return ret;
+ }
+
+ for (i = 0; i < svc_rdy_ext->param.num_phy; i++) {
+ ret = ath11k_pull_reg_cap_svc_rdy_ext(wmi_handle,
+ svc_rdy_ext->soc_hal_reg_caps,
+ svc_rdy_ext->ext_hal_reg_caps, i,
+ &reg_cap);
+ if (ret) {
+ ath11k_warn(soc, "failed to extract reg cap %d\n", i);
+ return ret;
+ }
+
+ memcpy(&soc->hal_reg_cap[reg_cap.phy_id],
+ &reg_cap, sizeof(reg_cap));
+ }
+ return 0;
+}
+
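+/* Walk the phy_id_map of the preferred hw mode and extract per-pdev
+ * MAC/PHY capabilities, one radio per set bit. Single-pdev chips
+ * (e.g. QCA6390) collapse everything into pdev 0.
+ */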
+static int ath11k_wmi_tlv_ext_soc_hal_reg_caps_parse(struct ath11k_base *soc,
+ u16 len, const void *ptr,
+ void *data)
+{
+ struct ath11k_pdev_wmi *wmi_handle = &soc->wmi_ab.wmi[0];
+ struct wmi_tlv_svc_rdy_ext_parse *svc_rdy_ext = data;
+ u8 hw_mode_id = svc_rdy_ext->pref_hw_mode_caps.hw_mode_id;
+ u32 phy_id_map;
+ int pdev_index = 0;
+ int ret;
+
+ svc_rdy_ext->soc_hal_reg_caps = (struct wmi_soc_hal_reg_capabilities *)ptr;
+ svc_rdy_ext->param.num_phy = svc_rdy_ext->soc_hal_reg_caps->num_phy;
+
+ soc->num_radios = 0;
+ soc->target_pdev_count = 0;
+ phy_id_map = svc_rdy_ext->pref_hw_mode_caps.phy_id_map;
+
+ while (phy_id_map && soc->num_radios < MAX_RADIOS) {
+ ret = ath11k_pull_mac_phy_cap_svc_ready_ext(wmi_handle,
+ svc_rdy_ext->hw_caps,
+ svc_rdy_ext->hw_mode_caps,
+ svc_rdy_ext->soc_hal_reg_caps,
+ svc_rdy_ext->mac_phy_caps,
+ hw_mode_id, soc->num_radios,
+ &soc->pdevs[pdev_index]);
+ if (ret) {
+ ath11k_warn(soc, "failed to extract mac caps, idx :%d\n",
+ soc->num_radios);
+ return ret;
+ }
+
+ soc->num_radios++;
+
+ /* For QCA6390, save mac_phy capability in the same pdev */
+ if (soc->hw_params.single_pdev_only)
+ pdev_index = 0;
+ else
+ pdev_index = soc->num_radios;
+
+ /* TODO: mac_phy_cap prints */
+ phy_id_map >>= 1;
+ }
+
+ /* For QCA6390, set num_radios to 1 because the host manages
+ * both the 2G and 5G radios in a single pdev.
+ * Set pdev_id to 0, which denotes SoC level.
+ */
+ if (soc->hw_params.single_pdev_only) {
+ soc->num_radios = 1;
+ soc->pdevs[0].pdev_id = 0;
+ }
+
+ return 0;
+}
+
+static int ath11k_wmi_tlv_dma_ring_caps_parse(struct ath11k_base *soc,
+ u16 tag, u16 len,
+ const void *ptr, void *data)
+{
+ struct wmi_tlv_dma_ring_caps_parse *parse = data;
+
+ if (tag != WMI_TAG_DMA_RING_CAPABILITIES)
+ return -EPROTO;
+
+ parse->n_dma_ring_caps++;
+ return 0;
+}
+
+static int ath11k_wmi_alloc_dbring_caps(struct ath11k_base *ab,
+ u32 num_cap)
+{
+ size_t sz;
+ void *ptr;
+
+ sz = num_cap * sizeof(struct ath11k_dbring_cap);
+ ptr = kzalloc(sz, GFP_ATOMIC);
+ if (!ptr)
+ return -ENOMEM;
+
+ ab->db_caps = ptr;
+ ab->num_db_cap = num_cap;
+
+ return 0;
+}
+
+static void ath11k_wmi_free_dbring_caps(struct ath11k_base *ab)
+{
+ kfree(ab->db_caps);
+ ab->db_caps = NULL;
+}
+
+static int ath11k_wmi_tlv_dma_ring_caps(struct ath11k_base *ab,
+ u16 len, const void *ptr, void *data)
+{
+ struct wmi_tlv_dma_ring_caps_parse *dma_caps_parse = data;
+ struct wmi_dma_ring_capabilities *dma_caps;
+ struct ath11k_dbring_cap *dir_buff_caps;
+ int ret;
+ u32 i;
+
+ dma_caps_parse->n_dma_ring_caps = 0;
+ dma_caps = (struct wmi_dma_ring_capabilities *)ptr;
+ ret = ath11k_wmi_tlv_iter(ab, ptr, len,
+ ath11k_wmi_tlv_dma_ring_caps_parse,
+ dma_caps_parse);
+ if (ret) {
+ ath11k_warn(ab, "failed to parse dma ring caps tlv %d\n", ret);
+ return ret;
+ }
+
+ if (!dma_caps_parse->n_dma_ring_caps)
+ return 0;
+
+ if (ab->num_db_cap) {
+ ath11k_warn(ab, "Already processed, so ignoring dma ring caps\n");
+ return 0;
+ }
+
+ ret = ath11k_wmi_alloc_dbring_caps(ab, dma_caps_parse->n_dma_ring_caps);
+ if (ret)
+ return ret;
+
+ dir_buff_caps = ab->db_caps;
+ for (i = 0; i < dma_caps_parse->n_dma_ring_caps; i++) {
+ if (dma_caps[i].module_id >= WMI_DIRECT_BUF_MAX) {
+ ath11k_warn(ab, "Invalid module id %d\n", dma_caps[i].module_id);
+ ret = -EINVAL;
+ goto free_dir_buff;
+ }
+
+ dir_buff_caps[i].id = dma_caps[i].module_id;
+ dir_buff_caps[i].pdev_id = DP_HW2SW_MACID(dma_caps[i].pdev_id);
+ dir_buff_caps[i].min_elem = dma_caps[i].min_elem;
+ dir_buff_caps[i].min_buf_sz = dma_caps[i].min_buf_sz;
+ dir_buff_caps[i].min_buf_align = dma_caps[i].min_buf_align;
+ }
+
+ return 0;
+
+free_dir_buff:
+ ath11k_wmi_free_dbring_caps(ab);
+ return ret;
+}
+
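+/* TLV walker for the service ready ext event. The unnamed
+ * WMI_TAG_ARRAY_STRUCT TLVs arrive in a fixed order, so a chain of
+ * *_done flags tracks which array (hw mode caps, mac/phy caps, ext HAL
+ * reg caps, chainmask combos/caps, OEM and normal DMA ring caps) is
+ * currently being parsed.
+ */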
+static int ath11k_wmi_tlv_svc_rdy_ext_parse(struct ath11k_base *ab,
+ u16 tag, u16 len,
+ const void *ptr, void *data)
+{
+ struct ath11k_pdev_wmi *wmi_handle = &ab->wmi_ab.wmi[0];
+ struct wmi_tlv_svc_rdy_ext_parse *svc_rdy_ext = data;
+ int ret;
+
+ switch (tag) {
+ case WMI_TAG_SERVICE_READY_EXT_EVENT:
+ ret = ath11k_pull_svc_ready_ext(wmi_handle, ptr,
+ &svc_rdy_ext->param);
+ if (ret) {
+ ath11k_warn(ab, "unable to extract ext params\n");
+ return ret;
+ }
+ break;
+
+ case WMI_TAG_SOC_MAC_PHY_HW_MODE_CAPS:
+ svc_rdy_ext->hw_caps = (struct wmi_soc_mac_phy_hw_mode_caps *)ptr;
+ svc_rdy_ext->param.num_hw_modes = svc_rdy_ext->hw_caps->num_hw_modes;
+ break;
+
+ case WMI_TAG_SOC_HAL_REG_CAPABILITIES:
+ ret = ath11k_wmi_tlv_ext_soc_hal_reg_caps_parse(ab, len, ptr,
+ svc_rdy_ext);
+ if (ret)
+ return ret;
+ break;
+
+ case WMI_TAG_ARRAY_STRUCT:
+ if (!svc_rdy_ext->hw_mode_done) {
+ ret = ath11k_wmi_tlv_hw_mode_caps(ab, len, ptr,
+ svc_rdy_ext);
+ if (ret)
+ return ret;
+
+ svc_rdy_ext->hw_mode_done = true;
+ } else if (!svc_rdy_ext->mac_phy_done) {
+ svc_rdy_ext->n_mac_phy_caps = 0;
+ ret = ath11k_wmi_tlv_iter(ab, ptr, len,
+ ath11k_wmi_tlv_mac_phy_caps_parse,
+ svc_rdy_ext);
+ if (ret) {
+ ath11k_warn(ab, "failed to parse tlv %d\n", ret);
+ return ret;
+ }
+
+ svc_rdy_ext->mac_phy_done = true;
+ } else if (!svc_rdy_ext->ext_hal_reg_done) {
+ ret = ath11k_wmi_tlv_ext_hal_reg_caps(ab, len, ptr,
+ svc_rdy_ext);
+ if (ret)
+ return ret;
+
+ svc_rdy_ext->ext_hal_reg_done = true;
+ } else if (!svc_rdy_ext->mac_phy_chainmask_combo_done) {
+ svc_rdy_ext->mac_phy_chainmask_combo_done = true;
+ } else if (!svc_rdy_ext->mac_phy_chainmask_cap_done) {
+ svc_rdy_ext->mac_phy_chainmask_cap_done = true;
+ } else if (!svc_rdy_ext->oem_dma_ring_cap_done) {
+ svc_rdy_ext->oem_dma_ring_cap_done = true;
+ } else if (!svc_rdy_ext->dma_ring_cap_done) {
+ ret = ath11k_wmi_tlv_dma_ring_caps(ab, len, ptr,
+ &svc_rdy_ext->dma_caps_parse);
+ if (ret)
+ return ret;
+
+ svc_rdy_ext->dma_ring_cap_done = true;
+ }
+ break;
+
+ default:
+ break;
+ }
+ return 0;
+}
+
+static int ath11k_service_ready_ext_event(struct ath11k_base *ab,
+ struct sk_buff *skb)
+{
+ struct wmi_tlv_svc_rdy_ext_parse svc_rdy_ext = { };
+ int ret;
+
+ ret = ath11k_wmi_tlv_iter(ab, skb->data, skb->len,
+ ath11k_wmi_tlv_svc_rdy_ext_parse,
+ &svc_rdy_ext);
+ if (ret) {
+ ath11k_warn(ab, "failed to parse tlv %d\n", ret);
+ goto err;
+ }
+
+ ath11k_dbg(ab, ATH11K_DBG_WMI, "event service ready ext");
+
+ if (!test_bit(WMI_TLV_SERVICE_EXT2_MSG, ab->wmi_ab.svc_map))
+ complete(&ab->wmi_ab.service_ready);
+
+ kfree(svc_rdy_ext.mac_phy_caps);
+ return 0;
+
+err:
+ ath11k_wmi_free_dbring_caps(ab);
+ return ret;
+}
+
+static int ath11k_wmi_tlv_svc_rdy_ext2_parse(struct ath11k_base *ab,
+ u16 tag, u16 len,
+ const void *ptr, void *data)
+{
+ struct wmi_tlv_svc_rdy_ext2_parse *parse = data;
+ int ret;
+
+ switch (tag) {
+ case WMI_TAG_ARRAY_STRUCT:
+ if (!parse->dma_ring_cap_done) {
+ ret = ath11k_wmi_tlv_dma_ring_caps(ab, len, ptr,
+ &parse->dma_caps_parse);
+ if (ret)
+ return ret;
+
+ parse->dma_ring_cap_done = true;
+ }
+ break;
+ default:
+ break;
+ }
+
+ return 0;
+}
+
+static int ath11k_service_ready_ext2_event(struct ath11k_base *ab,
+ struct sk_buff *skb)
+{
+ struct wmi_tlv_svc_rdy_ext2_parse svc_rdy_ext2 = { };
+ int ret;
+
+ ret = ath11k_wmi_tlv_iter(ab, skb->data, skb->len,
+ ath11k_wmi_tlv_svc_rdy_ext2_parse,
+ &svc_rdy_ext2);
+ if (ret) {
+ ath11k_warn(ab, "failed to parse ext2 event tlv %d\n", ret);
+ goto err;
+ }
+
+ ath11k_dbg(ab, ATH11K_DBG_WMI, "event service ready ext2");
+
+ complete(&ab->wmi_ab.service_ready);
+
+ return 0;
+
+err:
+ ath11k_wmi_free_dbring_caps(ab);
+ return ret;
+}
+
+static int ath11k_pull_vdev_start_resp_tlv(struct ath11k_base *ab, struct sk_buff *skb,
+ struct wmi_vdev_start_resp_event *vdev_rsp)
+{
+ const void **tb;
+ const struct wmi_vdev_start_resp_event *ev;
+ int ret;
+
+ tb = ath11k_wmi_tlv_parse_alloc(ab, skb->data, skb->len, GFP_ATOMIC);
+ if (IS_ERR(tb)) {
+ ret = PTR_ERR(tb);
+ ath11k_warn(ab, "failed to parse tlv: %d\n", ret);
+ return ret;
+ }
+
+ ev = tb[WMI_TAG_VDEV_START_RESPONSE_EVENT];
+ if (!ev) {
+ ath11k_warn(ab, "failed to fetch vdev start resp ev");
+ kfree(tb);
+ return -EPROTO;
+ }
+
+ memset(vdev_rsp, 0, sizeof(*vdev_rsp));
+
+ vdev_rsp->vdev_id = ev->vdev_id;
+ vdev_rsp->requestor_id = ev->requestor_id;
+ vdev_rsp->resp_type = ev->resp_type;
+ vdev_rsp->status = ev->status;
+ vdev_rsp->chain_mask = ev->chain_mask;
+ vdev_rsp->smps_mode = ev->smps_mode;
+ vdev_rsp->mac_id = ev->mac_id;
+ vdev_rsp->cfgd_tx_streams = ev->cfgd_tx_streams;
+ vdev_rsp->cfgd_rx_streams = ev->cfgd_rx_streams;
+
+ kfree(tb);
+ return 0;
+}
+
+static void ath11k_print_reg_rule(struct ath11k_base *ab, const char *band,
+ u32 num_reg_rules,
+ struct cur_reg_rule *reg_rule_ptr)
+{
+ struct cur_reg_rule *reg_rule = reg_rule_ptr;
+ u32 count;
+
+ ath11k_dbg(ab, ATH11K_DBG_WMI, "number of reg rules in %s band: %d\n",
+ band, num_reg_rules);
+
+ for (count = 0; count < num_reg_rules; count++) {
+ ath11k_dbg(ab, ATH11K_DBG_WMI,
+ "reg rule %d: (%d - %d @ %d) (%d, %d) (FLAGS %d)\n",
+ count + 1, reg_rule->start_freq, reg_rule->end_freq,
+ reg_rule->max_bw, reg_rule->ant_gain,
+ reg_rule->reg_power, reg_rule->flags);
+ reg_rule++;
+ }
+}
+
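+/* Convert firmware regulatory rules into host cur_reg_rule entries by
+ * unpacking the packed frequency/bandwidth/power/flag bitfields.
+ */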
+static struct cur_reg_rule
+*create_reg_rules_from_wmi(u32 num_reg_rules,
+ struct wmi_regulatory_rule_struct *wmi_reg_rule)
+{
+ struct cur_reg_rule *reg_rule_ptr;
+ u32 count;
+
+ reg_rule_ptr = kcalloc(num_reg_rules, sizeof(*reg_rule_ptr),
+ GFP_ATOMIC);
+
+ if (!reg_rule_ptr)
+ return NULL;
+
+ for (count = 0; count < num_reg_rules; count++) {
+ reg_rule_ptr[count].start_freq =
+ FIELD_GET(REG_RULE_START_FREQ,
+ wmi_reg_rule[count].freq_info);
+ reg_rule_ptr[count].end_freq =
+ FIELD_GET(REG_RULE_END_FREQ,
+ wmi_reg_rule[count].freq_info);
+ reg_rule_ptr[count].max_bw =
+ FIELD_GET(REG_RULE_MAX_BW,
+ wmi_reg_rule[count].bw_pwr_info);
+ reg_rule_ptr[count].reg_power =
+ FIELD_GET(REG_RULE_REG_PWR,
+ wmi_reg_rule[count].bw_pwr_info);
+ reg_rule_ptr[count].ant_gain =
+ FIELD_GET(REG_RULE_ANT_GAIN,
+ wmi_reg_rule[count].bw_pwr_info);
+ reg_rule_ptr[count].flags =
+ FIELD_GET(REG_RULE_FLAGS,
+ wmi_reg_rule[count].flag_info);
+ }
+
+ return reg_rule_ptr;
+}
+
+static int ath11k_pull_reg_chan_list_update_ev(struct ath11k_base *ab,
+ struct sk_buff *skb,
+ struct cur_regulatory_info *reg_info)
+{
+ const void **tb;
+ const struct wmi_reg_chan_list_cc_event *chan_list_event_hdr;
+ struct wmi_regulatory_rule_struct *wmi_reg_rule;
+ u32 num_2ghz_reg_rules, num_5ghz_reg_rules;
+ int ret;
+
+ ath11k_dbg(ab, ATH11K_DBG_WMI, "processing regulatory channel list\n");
+
+ tb = ath11k_wmi_tlv_parse_alloc(ab, skb->data, skb->len, GFP_ATOMIC);
+ if (IS_ERR(tb)) {
+ ret = PTR_ERR(tb);
+ ath11k_warn(ab, "failed to parse tlv: %d\n", ret);
+ return ret;
+ }
+
+ chan_list_event_hdr = tb[WMI_TAG_REG_CHAN_LIST_CC_EVENT];
+ if (!chan_list_event_hdr) {
+ ath11k_warn(ab, "failed to fetch reg chan list update ev\n");
+ kfree(tb);
+ return -EPROTO;
+ }
+
+ reg_info->num_2ghz_reg_rules = chan_list_event_hdr->num_2ghz_reg_rules;
+ reg_info->num_5ghz_reg_rules = chan_list_event_hdr->num_5ghz_reg_rules;
+
+ if (!(reg_info->num_2ghz_reg_rules + reg_info->num_5ghz_reg_rules)) {
+ ath11k_warn(ab, "No regulatory rules available in the event info\n");
+ kfree(tb);
+ return -EINVAL;
+ }
+
+ memcpy(reg_info->alpha2, &chan_list_event_hdr->alpha2,
+ REG_ALPHA2_LEN);
+ reg_info->dfs_region = chan_list_event_hdr->dfs_region;
+ reg_info->phybitmap = chan_list_event_hdr->phybitmap;
+ reg_info->num_phy = chan_list_event_hdr->num_phy;
+ reg_info->phy_id = chan_list_event_hdr->phy_id;
+ reg_info->ctry_code = chan_list_event_hdr->country_id;
+ reg_info->reg_dmn_pair = chan_list_event_hdr->domain_code;
+
+ reg_info->status_code =
+ ath11k_wmi_cc_setting_code_to_reg(chan_list_event_hdr->status_code);
+
+ ath11k_dbg(ab, ATH11K_DBG_WMI,
+ "status_code %s",
+ ath11k_cc_status_to_str(reg_info->status_code));
+
+ reg_info->is_ext_reg_event = false;
+
+ reg_info->min_bw_2ghz = chan_list_event_hdr->min_bw_2ghz;
+ reg_info->max_bw_2ghz = chan_list_event_hdr->max_bw_2ghz;
+ reg_info->min_bw_5ghz = chan_list_event_hdr->min_bw_5ghz;
+ reg_info->max_bw_5ghz = chan_list_event_hdr->max_bw_5ghz;
+
+ num_2ghz_reg_rules = reg_info->num_2ghz_reg_rules;
+ num_5ghz_reg_rules = reg_info->num_5ghz_reg_rules;
+
+ ath11k_dbg(ab, ATH11K_DBG_WMI,
+ "cc %s dsf %d BW: min_2ghz %d max_2ghz %d min_5ghz %d max_5ghz %d",
+ reg_info->alpha2, reg_info->dfs_region,
+ reg_info->min_bw_2ghz, reg_info->max_bw_2ghz,
+ reg_info->min_bw_5ghz, reg_info->max_bw_5ghz);
+
+ ath11k_dbg(ab, ATH11K_DBG_WMI,
+ "num_2ghz_reg_rules %d num_5ghz_reg_rules %d",
+ num_2ghz_reg_rules, num_5ghz_reg_rules);
+
+ wmi_reg_rule =
+ (struct wmi_regulatory_rule_struct *)((u8 *)chan_list_event_hdr
+ + sizeof(*chan_list_event_hdr)
+ + sizeof(struct wmi_tlv));
+
+ if (num_2ghz_reg_rules) {
+ reg_info->reg_rules_2ghz_ptr =
+ create_reg_rules_from_wmi(num_2ghz_reg_rules,
+ wmi_reg_rule);
+ if (!reg_info->reg_rules_2ghz_ptr) {
+ kfree(tb);
+ ath11k_warn(ab, "Unable to Allocate memory for 2 GHz rules\n");
+ return -ENOMEM;
+ }
+
+ ath11k_print_reg_rule(ab, "2 GHz",
+ num_2ghz_reg_rules,
+ reg_info->reg_rules_2ghz_ptr);
+ }
+
+ if (num_5ghz_reg_rules) {
+ wmi_reg_rule += num_2ghz_reg_rules;
+ reg_info->reg_rules_5ghz_ptr =
+ create_reg_rules_from_wmi(num_5ghz_reg_rules,
+ wmi_reg_rule);
+ if (!reg_info->reg_rules_5ghz_ptr) {
+ kfree(tb);
+ ath11k_warn(ab, "Unable to Allocate memory for 5 GHz rules\n");
+ return -ENOMEM;
+ }
+
+ ath11k_print_reg_rule(ab, "5 GHz",
+ num_5ghz_reg_rules,
+ reg_info->reg_rules_5ghz_ptr);
+ }
+
+ ath11k_dbg(ab, ATH11K_DBG_WMI, "processed regulatory channel list\n");
+
+ kfree(tb);
+ return 0;
+}
+
+static struct cur_reg_rule
+*create_ext_reg_rules_from_wmi(u32 num_reg_rules,
+ struct wmi_regulatory_ext_rule *wmi_reg_rule)
+{
+ struct cur_reg_rule *reg_rule_ptr;
+ u32 count;
+
+ reg_rule_ptr = kcalloc(num_reg_rules, sizeof(*reg_rule_ptr), GFP_ATOMIC);
+
+ if (!reg_rule_ptr)
+ return NULL;
+
+ for (count = 0; count < num_reg_rules; count++) {
+ reg_rule_ptr[count].start_freq =
+ u32_get_bits(wmi_reg_rule[count].freq_info,
+ REG_RULE_START_FREQ);
+ reg_rule_ptr[count].end_freq =
+ u32_get_bits(wmi_reg_rule[count].freq_info,
+ REG_RULE_END_FREQ);
+ reg_rule_ptr[count].max_bw =
+ u32_get_bits(wmi_reg_rule[count].bw_pwr_info,
+ REG_RULE_MAX_BW);
+ reg_rule_ptr[count].reg_power =
+ u32_get_bits(wmi_reg_rule[count].bw_pwr_info,
+ REG_RULE_REG_PWR);
+ reg_rule_ptr[count].ant_gain =
+ u32_get_bits(wmi_reg_rule[count].bw_pwr_info,
+ REG_RULE_ANT_GAIN);
+ reg_rule_ptr[count].flags =
+ u32_get_bits(wmi_reg_rule[count].flag_info,
+ REG_RULE_FLAGS);
+ reg_rule_ptr[count].psd_flag =
+ u32_get_bits(wmi_reg_rule[count].psd_power_info,
+ REG_RULE_PSD_INFO);
+ reg_rule_ptr[count].psd_eirp =
+ u32_get_bits(wmi_reg_rule[count].psd_power_info,
+ REG_RULE_PSD_EIRP);
+ }
+
+ return reg_rule_ptr;
+}
+
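+/* Count rules in the 5 GHz list whose start frequency actually falls in
+ * the 6 GHz range; such entries appear to be firmware artifacts and are
+ * treated as invalid 5 GHz rules.
+ */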
+static u8
+ath11k_invalid_5ghz_reg_ext_rules_from_wmi(u32 num_reg_rules,
+ const struct wmi_regulatory_ext_rule *rule)
+{
+ u8 num_invalid_5ghz_rules = 0;
+ u32 count, start_freq;
+
+ for (count = 0; count < num_reg_rules; count++) {
+ start_freq = u32_get_bits(rule[count].freq_info,
+ REG_RULE_START_FREQ);
+
+ if (start_freq >= ATH11K_MIN_6G_FREQ)
+ num_invalid_5ghz_rules++;
+ }
+
+ return num_invalid_5ghz_rules;
+}
+
+static int ath11k_pull_reg_chan_list_ext_update_ev(struct ath11k_base *ab,
+ struct sk_buff *skb,
+ struct cur_regulatory_info *reg_info)
+{
+ const void **tb;
+ const struct wmi_reg_chan_list_cc_ext_event *ev;
+ struct wmi_regulatory_ext_rule *ext_wmi_reg_rule;
+ u32 num_2ghz_reg_rules, num_5ghz_reg_rules;
+ u32 num_6ghz_reg_rules_ap[WMI_REG_CURRENT_MAX_AP_TYPE];
+ u32 num_6ghz_client[WMI_REG_CURRENT_MAX_AP_TYPE][WMI_REG_MAX_CLIENT_TYPE];
+ u32 total_reg_rules = 0;
+ int ret, i, j, num_invalid_5ghz_ext_rules = 0;
+
+ ath11k_dbg(ab, ATH11K_DBG_WMI, "processing regulatory ext channel list\n");
+
+ tb = ath11k_wmi_tlv_parse_alloc(ab, skb->data, skb->len, GFP_ATOMIC);
+ if (IS_ERR(tb)) {
+ ret = PTR_ERR(tb);
+ ath11k_warn(ab, "failed to parse tlv: %d\n", ret);
+ return ret;
+ }
+
+ ev = tb[WMI_TAG_REG_CHAN_LIST_CC_EXT_EVENT];
+ if (!ev) {
+ ath11k_warn(ab, "failed to fetch reg chan list ext update ev\n");
+ kfree(tb);
+ return -EPROTO;
+ }
+
+ reg_info->num_2ghz_reg_rules = ev->num_2ghz_reg_rules;
+ reg_info->num_5ghz_reg_rules = ev->num_5ghz_reg_rules;
+ reg_info->num_6ghz_rules_ap[WMI_REG_INDOOR_AP] =
+ ev->num_6ghz_reg_rules_ap_lpi;
+ reg_info->num_6ghz_rules_ap[WMI_REG_STANDARD_POWER_AP] =
+ ev->num_6ghz_reg_rules_ap_sp;
+ reg_info->num_6ghz_rules_ap[WMI_REG_VERY_LOW_POWER_AP] =
+ ev->num_6ghz_reg_rules_ap_vlp;
+
+ for (i = 0; i < WMI_REG_MAX_CLIENT_TYPE; i++) {
+ reg_info->num_6ghz_rules_client[WMI_REG_INDOOR_AP][i] =
+ ev->num_6ghz_reg_rules_client_lpi[i];
+ reg_info->num_6ghz_rules_client[WMI_REG_STANDARD_POWER_AP][i] =
+ ev->num_6ghz_reg_rules_client_sp[i];
+ reg_info->num_6ghz_rules_client[WMI_REG_VERY_LOW_POWER_AP][i] =
+ ev->num_6ghz_reg_rules_client_vlp[i];
+ }
+
+ num_2ghz_reg_rules = reg_info->num_2ghz_reg_rules;
+ num_5ghz_reg_rules = reg_info->num_5ghz_reg_rules;
+
+ total_reg_rules += num_2ghz_reg_rules;
+ total_reg_rules += num_5ghz_reg_rules;
+
+ if ((num_2ghz_reg_rules > MAX_REG_RULES) ||
+ (num_5ghz_reg_rules > MAX_REG_RULES)) {
+ ath11k_warn(ab, "Num reg rules for 2.4 GHz/5 GHz exceeds max limit (num_2ghz_reg_rules: %d num_5ghz_reg_rules: %d max_rules: %d)\n",
+ num_2ghz_reg_rules, num_5ghz_reg_rules, MAX_REG_RULES);
+ kfree(tb);
+ return -EINVAL;
+ }
+
+ for (i = 0; i < WMI_REG_CURRENT_MAX_AP_TYPE; i++) {
+ num_6ghz_reg_rules_ap[i] = reg_info->num_6ghz_rules_ap[i];
+
+ if (num_6ghz_reg_rules_ap[i] > MAX_6GHZ_REG_RULES) {
+ ath11k_warn(ab, "Num 6 GHz reg rules for AP mode(%d) exceeds max limit (num_6ghz_reg_rules_ap: %d, max_rules: %d)\n",
+ i, num_6ghz_reg_rules_ap[i], MAX_6GHZ_REG_RULES);
+ kfree(tb);
+ return -EINVAL;
+ }
+
+ total_reg_rules += num_6ghz_reg_rules_ap[i];
+ }
+
+ for (i = 0; i < WMI_REG_MAX_CLIENT_TYPE; i++) {
+ num_6ghz_client[WMI_REG_INDOOR_AP][i] =
+ reg_info->num_6ghz_rules_client[WMI_REG_INDOOR_AP][i];
+ total_reg_rules += num_6ghz_client[WMI_REG_INDOOR_AP][i];
+
+ num_6ghz_client[WMI_REG_STANDARD_POWER_AP][i] =
+ reg_info->num_6ghz_rules_client[WMI_REG_STANDARD_POWER_AP][i];
+ total_reg_rules += num_6ghz_client[WMI_REG_STANDARD_POWER_AP][i];
+
+ num_6ghz_client[WMI_REG_VERY_LOW_POWER_AP][i] =
+ reg_info->num_6ghz_rules_client[WMI_REG_VERY_LOW_POWER_AP][i];
+ total_reg_rules += num_6ghz_client[WMI_REG_VERY_LOW_POWER_AP][i];
+
+ if ((num_6ghz_client[WMI_REG_INDOOR_AP][i] > MAX_6GHZ_REG_RULES) ||
+ (num_6ghz_client[WMI_REG_STANDARD_POWER_AP][i] >
+ MAX_6GHZ_REG_RULES) ||
+ (num_6ghz_client[WMI_REG_VERY_LOW_POWER_AP][i] >
+ MAX_6GHZ_REG_RULES)) {
+ ath11k_warn(ab,
+ "Num 6 GHz client reg rules exceeds max limit, for client(type: %d)\n",
+ i);
+ kfree(tb);
+ return -EINVAL;
+ }
+ }
+
+ if (!total_reg_rules) {
+ ath11k_warn(ab, "No reg rules available\n");
+ kfree(tb);
+ return -EINVAL;
+ }
+
+ memcpy(reg_info->alpha2, &ev->alpha2, REG_ALPHA2_LEN);
+
+ reg_info->dfs_region = ev->dfs_region;
+ reg_info->phybitmap = ev->phybitmap;
+ reg_info->num_phy = ev->num_phy;
+ reg_info->phy_id = ev->phy_id;
+ reg_info->ctry_code = ev->country_id;
+ reg_info->reg_dmn_pair = ev->domain_code;
+
+ reg_info->status_code =
+ ath11k_wmi_cc_setting_code_to_reg(ev->status_code);
+
+ ath11k_dbg(ab, ATH11K_DBG_WMI,
+ "status_code %s",
+ ath11k_cc_status_to_str(reg_info->status_code));
+
+ reg_info->is_ext_reg_event = true;
+
+ reg_info->min_bw_2ghz = ev->min_bw_2ghz;
+ reg_info->max_bw_2ghz = ev->max_bw_2ghz;
+ reg_info->min_bw_5ghz = ev->min_bw_5ghz;
+ reg_info->max_bw_5ghz = ev->max_bw_5ghz;
+
+ reg_info->min_bw_6ghz_ap[WMI_REG_INDOOR_AP] =
+ ev->min_bw_6ghz_ap_lpi;
+ reg_info->max_bw_6ghz_ap[WMI_REG_INDOOR_AP] =
+ ev->max_bw_6ghz_ap_lpi;
+ reg_info->min_bw_6ghz_ap[WMI_REG_STANDARD_POWER_AP] =
+ ev->min_bw_6ghz_ap_sp;
+ reg_info->max_bw_6ghz_ap[WMI_REG_STANDARD_POWER_AP] =
+ ev->max_bw_6ghz_ap_sp;
+ reg_info->min_bw_6ghz_ap[WMI_REG_VERY_LOW_POWER_AP] =
+ ev->min_bw_6ghz_ap_vlp;
+ reg_info->max_bw_6ghz_ap[WMI_REG_VERY_LOW_POWER_AP] =
+ ev->max_bw_6ghz_ap_vlp;
+
+ ath11k_dbg(ab, ATH11K_DBG_WMI,
+ "6 GHz AP BW: LPI (%d - %d), SP (%d - %d), VLP (%d - %d)\n",
+ reg_info->min_bw_6ghz_ap[WMI_REG_INDOOR_AP],
+ reg_info->max_bw_6ghz_ap[WMI_REG_INDOOR_AP],
+ reg_info->min_bw_6ghz_ap[WMI_REG_STANDARD_POWER_AP],
+ reg_info->max_bw_6ghz_ap[WMI_REG_STANDARD_POWER_AP],
+ reg_info->min_bw_6ghz_ap[WMI_REG_VERY_LOW_POWER_AP],
+ reg_info->max_bw_6ghz_ap[WMI_REG_VERY_LOW_POWER_AP]);
+
+ for (i = 0; i < WMI_REG_MAX_CLIENT_TYPE; i++) {
+ reg_info->min_bw_6ghz_client[WMI_REG_INDOOR_AP][i] =
+ ev->min_bw_6ghz_client_lpi[i];
+ reg_info->max_bw_6ghz_client[WMI_REG_INDOOR_AP][i] =
+ ev->max_bw_6ghz_client_lpi[i];
+ reg_info->min_bw_6ghz_client[WMI_REG_STANDARD_POWER_AP][i] =
+ ev->min_bw_6ghz_client_sp[i];
+ reg_info->max_bw_6ghz_client[WMI_REG_STANDARD_POWER_AP][i] =
+ ev->max_bw_6ghz_client_sp[i];
+ reg_info->min_bw_6ghz_client[WMI_REG_VERY_LOW_POWER_AP][i] =
+ ev->min_bw_6ghz_client_vlp[i];
+ reg_info->max_bw_6ghz_client[WMI_REG_VERY_LOW_POWER_AP][i] =
+ ev->max_bw_6ghz_client_vlp[i];
+
+ ath11k_dbg(ab, ATH11K_DBG_WMI,
+ "6 GHz %s BW: LPI (%d - %d), SP (%d - %d), VLP (%d - %d)\n",
+ ath11k_6ghz_client_type_to_str(i),
+ reg_info->min_bw_6ghz_client[WMI_REG_INDOOR_AP][i],
+ reg_info->max_bw_6ghz_client[WMI_REG_INDOOR_AP][i],
+ reg_info->min_bw_6ghz_client[WMI_REG_STANDARD_POWER_AP][i],
+ reg_info->max_bw_6ghz_client[WMI_REG_STANDARD_POWER_AP][i],
+ reg_info->min_bw_6ghz_client[WMI_REG_VERY_LOW_POWER_AP][i],
+ reg_info->max_bw_6ghz_client[WMI_REG_VERY_LOW_POWER_AP][i]);
+ }
+
+ ath11k_dbg(ab, ATH11K_DBG_WMI,
+ "cc_ext %s dsf %d BW: min_2ghz %d max_2ghz %d min_5ghz %d max_5ghz %d",
+ reg_info->alpha2, reg_info->dfs_region,
+ reg_info->min_bw_2ghz, reg_info->max_bw_2ghz,
+ reg_info->min_bw_5ghz, reg_info->max_bw_5ghz);
+
+ ath11k_dbg(ab, ATH11K_DBG_WMI,
+ "num_2ghz_reg_rules %d num_5ghz_reg_rules %d",
+ num_2ghz_reg_rules, num_5ghz_reg_rules);
+
+ ath11k_dbg(ab, ATH11K_DBG_WMI,
+ "num_6ghz_reg_rules_ap_lpi: %d num_6ghz_reg_rules_ap_sp: %d num_6ghz_reg_rules_ap_vlp: %d",
+ num_6ghz_reg_rules_ap[WMI_REG_INDOOR_AP],
+ num_6ghz_reg_rules_ap[WMI_REG_STANDARD_POWER_AP],
+ num_6ghz_reg_rules_ap[WMI_REG_VERY_LOW_POWER_AP]);
+
+ j = WMI_REG_DEFAULT_CLIENT;
+ ath11k_dbg(ab, ATH11K_DBG_WMI,
+ "6 GHz Regular client: num_6ghz_reg_rules_lpi: %d num_6ghz_reg_rules_sp: %d num_6ghz_reg_rules_vlp: %d",
+ num_6ghz_client[WMI_REG_INDOOR_AP][j],
+ num_6ghz_client[WMI_REG_STANDARD_POWER_AP][j],
+ num_6ghz_client[WMI_REG_VERY_LOW_POWER_AP][j]);
+
+ j = WMI_REG_SUBORDINATE_CLIENT;
+ ath11k_dbg(ab, ATH11K_DBG_WMI,
+ "6 GHz Subordinate client: num_6ghz_reg_rules_lpi: %d num_6ghz_reg_rules_sp: %d num_6ghz_reg_rules_vlp: %d",
+ num_6ghz_client[WMI_REG_INDOOR_AP][j],
+ num_6ghz_client[WMI_REG_STANDARD_POWER_AP][j],
+ num_6ghz_client[WMI_REG_VERY_LOW_POWER_AP][j]);
+
+ ext_wmi_reg_rule =
+ (struct wmi_regulatory_ext_rule *)((u8 *)ev + sizeof(*ev) +
+ sizeof(struct wmi_tlv));
+ if (num_2ghz_reg_rules) {
+ reg_info->reg_rules_2ghz_ptr =
+ create_ext_reg_rules_from_wmi(num_2ghz_reg_rules,
+ ext_wmi_reg_rule);
+
+ if (!reg_info->reg_rules_2ghz_ptr) {
+ kfree(tb);
+ ath11k_warn(ab, "Unable to Allocate memory for 2 GHz rules\n");
+ return -ENOMEM;
+ }
+
+ ath11k_print_reg_rule(ab, "2 GHz",
+ num_2ghz_reg_rules,
+ reg_info->reg_rules_2ghz_ptr);
+ }
+
+ ext_wmi_reg_rule += num_2ghz_reg_rules;
+
+ /* Firmware might include a 6 GHz reg rule in the 5 GHz rule list
+ * for a few countries along with a separate 6 GHz rule.
+ * Having the same 6 GHz reg rule in both the 5 GHz and the 6 GHz
+ * rule lists causes the intersect check to be true, and the same
+ * rules will be shown multiple times in the iw command output.
+ * Hence, avoid parsing 6 GHz rules from the 5 GHz reg rule list.
+ */
+ num_invalid_5ghz_ext_rules =
+ ath11k_invalid_5ghz_reg_ext_rules_from_wmi(num_5ghz_reg_rules,
+ ext_wmi_reg_rule);
+
+ if (num_invalid_5ghz_ext_rules) {
+ ath11k_dbg(ab, ATH11K_DBG_WMI,
+ "CC: %s 5 GHz reg rules number %d from fw, %d number of invalid 5 GHz rules",
+ reg_info->alpha2, reg_info->num_5ghz_reg_rules,
+ num_invalid_5ghz_ext_rules);
+
+ num_5ghz_reg_rules = num_5ghz_reg_rules - num_invalid_5ghz_ext_rules;
+ reg_info->num_5ghz_reg_rules = num_5ghz_reg_rules;
+ }
+
+ if (num_5ghz_reg_rules) {
+ reg_info->reg_rules_5ghz_ptr =
+ create_ext_reg_rules_from_wmi(num_5ghz_reg_rules,
+ ext_wmi_reg_rule);
+
+ if (!reg_info->reg_rules_5ghz_ptr) {
+ kfree(tb);
+ ath11k_warn(ab, "Unable to Allocate memory for 5 GHz rules\n");
+ return -ENOMEM;
+ }
+
+ ath11k_print_reg_rule(ab, "5 GHz",
+ num_5ghz_reg_rules,
+ reg_info->reg_rules_5ghz_ptr);
+ }
+
+ /* We have adjusted the number of 5 GHz reg rules above, but the
+ * cursor still needs to advance past all of them in
+ * ext_wmi_reg_rule, including the invalid ones that were skipped.
+ *
+ * NOTE: num_invalid_5ghz_ext_rules is 0 in all other cases.
+ */
+ ext_wmi_reg_rule += (num_5ghz_reg_rules + num_invalid_5ghz_ext_rules);
+
+ for (i = 0; i < WMI_REG_CURRENT_MAX_AP_TYPE; i++) {
+ reg_info->reg_rules_6ghz_ap_ptr[i] =
+ create_ext_reg_rules_from_wmi(num_6ghz_reg_rules_ap[i],
+ ext_wmi_reg_rule);
+
+ if (!reg_info->reg_rules_6ghz_ap_ptr[i]) {
+ kfree(tb);
+ ath11k_warn(ab, "Unable to Allocate memory for 6 GHz AP rules\n");
+ return -ENOMEM;
+ }
+
+ ath11k_print_reg_rule(ab, ath11k_6ghz_ap_type_to_str(i),
+ num_6ghz_reg_rules_ap[i],
+ reg_info->reg_rules_6ghz_ap_ptr[i]);
+
+ ext_wmi_reg_rule += num_6ghz_reg_rules_ap[i];
+ }
+
+ for (j = 0; j < WMI_REG_CURRENT_MAX_AP_TYPE; j++) {
+ ath11k_dbg(ab, ATH11K_DBG_WMI,
+ "6 GHz AP type %s", ath11k_6ghz_ap_type_to_str(j));
+
+ for (i = 0; i < WMI_REG_MAX_CLIENT_TYPE; i++) {
+ reg_info->reg_rules_6ghz_client_ptr[j][i] =
+ create_ext_reg_rules_from_wmi(num_6ghz_client[j][i],
+ ext_wmi_reg_rule);
+
+ if (!reg_info->reg_rules_6ghz_client_ptr[j][i]) {
+ kfree(tb);
+ ath11k_warn(ab, "Unable to Allocate memory for 6 GHz client rules\n");
+ return -ENOMEM;
+ }
+
+ ath11k_print_reg_rule(ab,
+ ath11k_6ghz_client_type_to_str(i),
+ num_6ghz_client[j][i],
+ reg_info->reg_rules_6ghz_client_ptr[j][i]);
+
+ ext_wmi_reg_rule += num_6ghz_client[j][i];
+ }
+ }
+
+ reg_info->client_type = ev->client_type;
+ reg_info->rnr_tpe_usable = ev->rnr_tpe_usable;
+ reg_info->unspecified_ap_usable =
+ ev->unspecified_ap_usable;
+ reg_info->domain_code_6ghz_ap[WMI_REG_INDOOR_AP] =
+ ev->domain_code_6ghz_ap_lpi;
+ reg_info->domain_code_6ghz_ap[WMI_REG_STANDARD_POWER_AP] =
+ ev->domain_code_6ghz_ap_sp;
+ reg_info->domain_code_6ghz_ap[WMI_REG_VERY_LOW_POWER_AP] =
+ ev->domain_code_6ghz_ap_vlp;
+
+ ath11k_dbg(ab, ATH11K_DBG_WMI,
+ "6 GHz reg info client type %s rnr_tpe_usable %d unspecified_ap_usable %d AP sub domain: lpi %s, sp %s, vlp %s\n",
+ ath11k_6ghz_client_type_to_str(reg_info->client_type),
+ reg_info->rnr_tpe_usable,
+ reg_info->unspecified_ap_usable,
+ ath11k_sub_reg_6ghz_to_str(ev->domain_code_6ghz_ap_lpi),
+ ath11k_sub_reg_6ghz_to_str(ev->domain_code_6ghz_ap_sp),
+ ath11k_sub_reg_6ghz_to_str(ev->domain_code_6ghz_ap_vlp));
+
+ for (i = 0; i < WMI_REG_MAX_CLIENT_TYPE; i++) {
+ reg_info->domain_code_6ghz_client[WMI_REG_INDOOR_AP][i] =
+ ev->domain_code_6ghz_client_lpi[i];
+ reg_info->domain_code_6ghz_client[WMI_REG_STANDARD_POWER_AP][i] =
+ ev->domain_code_6ghz_client_sp[i];
+ reg_info->domain_code_6ghz_client[WMI_REG_VERY_LOW_POWER_AP][i] =
+ ev->domain_code_6ghz_client_vlp[i];
+
+ ath11k_dbg(ab, ATH11K_DBG_WMI,
+ "6 GHz client type %s client sub domain: lpi %s, sp %s, vlp %s\n",
+ ath11k_6ghz_client_type_to_str(i),
+ ath11k_sub_reg_6ghz_to_str(ev->domain_code_6ghz_client_lpi[i]),
+ ath11k_sub_reg_6ghz_to_str(ev->domain_code_6ghz_client_sp[i]),
+ ath11k_sub_reg_6ghz_to_str(ev->domain_code_6ghz_client_vlp[i])
+ );
+ }
+
+ reg_info->domain_code_6ghz_super_id = ev->domain_code_6ghz_super_id;
+
+ ath11k_dbg(ab, ATH11K_DBG_WMI,
+ "6 GHz client_type %s 6 GHz super domain %s",
+ ath11k_6ghz_client_type_to_str(reg_info->client_type),
+ ath11k_super_reg_6ghz_to_str(reg_info->domain_code_6ghz_super_id));
+
+ ath11k_dbg(ab, ATH11K_DBG_WMI, "processed regulatory ext channel list\n");
+
+ kfree(tb);
+ return 0;
+}
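+
+ /* A standalone sketch (not driver code) of the cursor walk above: the
+ * firmware packs every rule group back to back in one flat array, so
+ * after consuming a group the cursor must advance by its full on-wire
+ * count, including the invalid 5 GHz entries that were counted but
+ * never copied. The counts below are illustrative.
+ */
+ #if 0
+ #include <stdio.h>
+
+ int main(void)
+ {
+ 	int rules[10];	/* flat on-wire array; contents unused here */
+ 	int n_2ghz = 2, n_5ghz_valid = 3, n_5ghz_invalid = 1, n_6ghz = 4;
+ 	int *cur = rules;
+
+ 	cur += n_2ghz;				/* past the 2 GHz group */
+ 	/* Only the valid 5 GHz rules are copied, but the cursor still
+ 	 * moves past the whole group, mirroring the driver above.
+ 	 */
+ 	cur += n_5ghz_valid + n_5ghz_invalid;
+ 	cur += n_6ghz;				/* past the 6 GHz group */
+
+ 	printf("consumed %td of %zu rules\n",
+ 	       cur - rules, sizeof(rules) / sizeof(rules[0]));
+ 	return 0;
+ }
+ #endif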
+
+static int ath11k_pull_peer_del_resp_ev(struct ath11k_base *ab, struct sk_buff *skb,
+ struct wmi_peer_delete_resp_event *peer_del_resp)
+{
+ const void **tb;
+ const struct wmi_peer_delete_resp_event *ev;
+ int ret;
+
+ tb = ath11k_wmi_tlv_parse_alloc(ab, skb->data, skb->len, GFP_ATOMIC);
+ if (IS_ERR(tb)) {
+ ret = PTR_ERR(tb);
+ ath11k_warn(ab, "failed to parse tlv: %d\n", ret);
+ return ret;
+ }
+
+ ev = tb[WMI_TAG_PEER_DELETE_RESP_EVENT];
+ if (!ev) {
+ ath11k_warn(ab, "failed to fetch peer delete resp ev");
+ kfree(tb);
+ return -EPROTO;
+ }
+
+ memset(peer_del_resp, 0, sizeof(*peer_del_resp));
+
+ peer_del_resp->vdev_id = ev->vdev_id;
+ ether_addr_copy(peer_del_resp->peer_macaddr.addr,
+ ev->peer_macaddr.addr);
+
+ kfree(tb);
+ return 0;
+}
+
+static int ath11k_pull_vdev_del_resp_ev(struct ath11k_base *ab,
+ struct sk_buff *skb,
+ u32 *vdev_id)
+{
+ const void **tb;
+ const struct wmi_vdev_delete_resp_event *ev;
+ int ret;
+
+ tb = ath11k_wmi_tlv_parse_alloc(ab, skb->data, skb->len, GFP_ATOMIC);
+ if (IS_ERR(tb)) {
+ ret = PTR_ERR(tb);
+ ath11k_warn(ab, "failed to parse tlv: %d\n", ret);
+ return ret;
+ }
+
+ ev = tb[WMI_TAG_VDEV_DELETE_RESP_EVENT];
+ if (!ev) {
+ ath11k_warn(ab, "failed to fetch vdev delete resp ev");
+ kfree(tb);
+ return -EPROTO;
+ }
+
+ *vdev_id = ev->vdev_id;
+
+ kfree(tb);
+ return 0;
+}
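+
+ /* The pull helpers above and below all share one shape: parse the event
+ * into a tag-indexed table, look up a single fixed-param tag, copy the
+ * fields out, and free the table on every path. A condensed sketch of
+ * that shape; the event struct, tag, and field names are hypothetical.
+ */
+ #if 0
+ static int pull_simple_event(struct ath11k_base *ab, struct sk_buff *skb,
+ 			     u32 *out)
+ {
+ 	const void **tb;
+ 	const struct some_event *ev;	/* hypothetical fixed-param struct */
+ 	int ret;
+
+ 	tb = ath11k_wmi_tlv_parse_alloc(ab, skb->data, skb->len, GFP_ATOMIC);
+ 	if (IS_ERR(tb)) {
+ 		ret = PTR_ERR(tb);	/* nothing was allocated on error */
+ 		return ret;
+ 	}
+
+ 	ev = tb[SOME_EVENT_TAG];	/* hypothetical tag */
+ 	if (!ev) {
+ 		kfree(tb);		/* table must be freed on all paths */
+ 		return -EPROTO;
+ 	}
+
+ 	*out = ev->field;
+ 	kfree(tb);
+ 	return 0;
+ }
+ #endif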
+
+static int ath11k_pull_bcn_tx_status_ev(struct ath11k_base *ab, void *evt_buf,
+ u32 len, u32 *vdev_id,
+ u32 *tx_status)
+{
+ const void **tb;
+ const struct wmi_bcn_tx_status_event *ev;
+ int ret;
+
+ tb = ath11k_wmi_tlv_parse_alloc(ab, evt_buf, len, GFP_ATOMIC);
+ if (IS_ERR(tb)) {
+ ret = PTR_ERR(tb);
+ ath11k_warn(ab, "failed to parse tlv: %d\n", ret);
+ return ret;
+ }
+
+ ev = tb[WMI_TAG_OFFLOAD_BCN_TX_STATUS_EVENT];
+ if (!ev) {
+ ath11k_warn(ab, "failed to fetch bcn tx status ev");
+ kfree(tb);
+ return -EPROTO;
+ }
+
+ *vdev_id = ev->vdev_id;
+ *tx_status = ev->tx_status;
+
+ kfree(tb);
+ return 0;
+}
+
+static int ath11k_pull_vdev_stopped_param_tlv(struct ath11k_base *ab, struct sk_buff *skb,
+ u32 *vdev_id)
+{
+ const void **tb;
+ const struct wmi_vdev_stopped_event *ev;
+ int ret;
+
+ tb = ath11k_wmi_tlv_parse_alloc(ab, skb->data, skb->len, GFP_ATOMIC);
+ if (IS_ERR(tb)) {
+ ret = PTR_ERR(tb);
+ ath11k_warn(ab, "failed to parse tlv: %d\n", ret);
+ return ret;
+ }
+
+ ev = tb[WMI_TAG_VDEV_STOPPED_EVENT];
+ if (!ev) {
+ ath11k_warn(ab, "failed to fetch vdev stop ev");
+ kfree(tb);
+ return -EPROTO;
+ }
+
+ *vdev_id = ev->vdev_id;
+
+ kfree(tb);
+ return 0;
+}
+
+static int ath11k_wmi_tlv_mgmt_rx_parse(struct ath11k_base *ab,
+ u16 tag, u16 len,
+ const void *ptr, void *data)
+{
+ struct wmi_tlv_mgmt_rx_parse *parse = data;
+
+ switch (tag) {
+ case WMI_TAG_MGMT_RX_HDR:
+ parse->fixed = ptr;
+ break;
+ case WMI_TAG_ARRAY_BYTE:
+ if (!parse->frame_buf_done) {
+ parse->frame_buf = ptr;
+ parse->frame_buf_done = true;
+ }
+ break;
+ }
+ return 0;
+}
+
+static int ath11k_pull_mgmt_rx_params_tlv(struct ath11k_base *ab,
+ struct sk_buff *skb,
+ struct mgmt_rx_event_params *hdr)
+{
+ struct wmi_tlv_mgmt_rx_parse parse = { };
+ const struct wmi_mgmt_rx_hdr *ev;
+ const u8 *frame;
+ int ret;
+
+ ret = ath11k_wmi_tlv_iter(ab, skb->data, skb->len,
+ ath11k_wmi_tlv_mgmt_rx_parse,
+ &parse);
+ if (ret) {
+ ath11k_warn(ab, "failed to parse mgmt rx tlv %d\n",
+ ret);
+ return ret;
+ }
+
+ ev = parse.fixed;
+ frame = parse.frame_buf;
+
+ if (!ev || !frame) {
+ ath11k_warn(ab, "failed to fetch mgmt rx hdr");
+ return -EPROTO;
+ }
+
+ hdr->pdev_id = ev->pdev_id;
+ hdr->chan_freq = ev->chan_freq;
+ hdr->channel = ev->channel;
+ hdr->snr = ev->snr;
+ hdr->rate = ev->rate;
+ hdr->phy_mode = ev->phy_mode;
+ hdr->buf_len = ev->buf_len;
+ hdr->status = ev->status;
+ hdr->flags = ev->flags;
+ hdr->rssi = ev->rssi;
+ hdr->tsf_delta = ev->tsf_delta;
+ memcpy(hdr->rssi_ctl, ev->rssi_ctl, sizeof(hdr->rssi_ctl));
+
+ if (skb->len < (frame - skb->data) + hdr->buf_len) {
+ ath11k_warn(ab, "invalid length in mgmt rx hdr ev");
+ return -EPROTO;
+ }
+
+ /* shift the sk_buff to point to `frame` */
+ skb_trim(skb, 0);
+ skb_put(skb, frame - skb->data);
+ skb_pull(skb, frame - skb->data);
+ skb_put(skb, hdr->buf_len);
+
+ ath11k_ce_byte_swap(skb->data, hdr->buf_len);
+
+ return 0;
+}
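+
+ /* A standalone sketch (not driver code) of the skb_trim/skb_put/skb_pull
+ * dance above: the four calls net out to "data points at the inner frame,
+ * len is the frame length" without copying a byte. The skb is modelled as
+ * (data_off, len) over one flat buffer; the offsets are illustrative.
+ */
+ #if 0
+ #include <stdio.h>
+
+ int main(void)
+ {
+ 	unsigned int data_off = 0, len = 64;	   /* whole event buffer */
+ 	unsigned int frame_off = 24, buf_len = 32; /* inner mgmt frame   */
+
+ 	len = 0;			/* skb_trim(skb, 0)             */
+ 	len += frame_off;		/* skb_put(skb, frame - data)   */
+ 	data_off += frame_off;		/* skb_pull(skb, frame - data): */
+ 	len -= frame_off;		/*   advance data, shrink len   */
+ 	len += buf_len;			/* skb_put(skb, hdr->buf_len)   */
+
+ 	printf("data_off %u len %u\n", data_off, len);	/* 24 32 */
+ 	return 0;
+ }
+ #endif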
+
+static int wmi_process_mgmt_tx_comp(struct ath11k *ar,
+ struct wmi_mgmt_tx_compl_event *tx_compl_param)
+{
+ struct sk_buff *msdu;
+ struct ieee80211_tx_info *info;
+ struct ath11k_skb_cb *skb_cb;
+ int num_mgmt;
+
+ spin_lock_bh(&ar->txmgmt_idr_lock);
+ msdu = idr_find(&ar->txmgmt_idr, tx_compl_param->desc_id);
+
+ if (!msdu) {
+ ath11k_warn(ar->ab, "received mgmt tx compl for invalid msdu_id: %d\n",
+ tx_compl_param->desc_id);
+ spin_unlock_bh(&ar->txmgmt_idr_lock);
+ return -ENOENT;
+ }
+
+ idr_remove(&ar->txmgmt_idr, tx_compl_param->desc_id);
+ spin_unlock_bh(&ar->txmgmt_idr_lock);
+
+ skb_cb = ATH11K_SKB_CB(msdu);
+ dma_unmap_single(ar->ab->dev, skb_cb->paddr, msdu->len, DMA_TO_DEVICE);
+
+ info = IEEE80211_SKB_CB(msdu);
+ if ((!(info->flags & IEEE80211_TX_CTL_NO_ACK)) &&
+ !tx_compl_param->status) {
+ info->flags |= IEEE80211_TX_STAT_ACK;
+ if (test_bit(WMI_TLV_SERVICE_TX_DATA_MGMT_ACK_RSSI,
+ ar->ab->wmi_ab.svc_map))
+ info->status.ack_signal = tx_compl_param->ack_rssi;
+ }
+
+ ieee80211_tx_status_irqsafe(ar->hw, msdu);
+
+ num_mgmt = atomic_dec_if_positive(&ar->num_pending_mgmt_tx);
+
+ /* WARN when we receive this event without having done any mgmt tx */
+ WARN_ON_ONCE(num_mgmt < 0);
+
+ ath11k_dbg(ar->ab, ATH11K_DBG_WMI,
+ "mgmt tx comp pending %d desc id %d\n",
+ num_mgmt, tx_compl_param->desc_id);
+
+ if (!num_mgmt)
+ wake_up(&ar->txmgmt_empty_waitq);
+
+ return 0;
+}
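+
+ /* A standalone sketch (not driver code) of the atomic_dec_if_positive()
+ * accounting above: decrement only if the counter stays non-negative and
+ * return the would-be new value, so -1 flags a completion that arrived
+ * with nothing pending. C11 atomics stand in for the kernel's atomic_t.
+ */
+ #if 0
+ #include <stdatomic.h>
+ #include <stdio.h>
+
+ static int dec_if_positive(atomic_int *v)
+ {
+ 	int old = atomic_load(v);
+
+ 	while (old > 0) {
+ 		if (atomic_compare_exchange_weak(v, &old, old - 1))
+ 			return old - 1;	/* decremented; new value */
+ 	}
+ 	return old - 1;			/* no decrement performed */
+ }
+
+ int main(void)
+ {
+ 	atomic_int pending = 1;
+
+ 	printf("%d\n", dec_if_positive(&pending));	/* 0: last tx done  */
+ 	printf("%d\n", dec_if_positive(&pending));	/* -1: none pending */
+ 	return 0;
+ }
+ #endif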
+
+static int ath11k_pull_mgmt_tx_compl_param_tlv(struct ath11k_base *ab,
+ struct sk_buff *skb,
+ struct wmi_mgmt_tx_compl_event *param)
+{
+ const void **tb;
+ const struct wmi_mgmt_tx_compl_event *ev;
+ int ret;
+
+ tb = ath11k_wmi_tlv_parse_alloc(ab, skb->data, skb->len, GFP_ATOMIC);
+ if (IS_ERR(tb)) {
+ ret = PTR_ERR(tb);
+ ath11k_warn(ab, "failed to parse tlv: %d\n", ret);
+ return ret;
+ }
+
+ ev = tb[WMI_TAG_MGMT_TX_COMPL_EVENT];
+ if (!ev) {
+ ath11k_warn(ab, "failed to fetch mgmt tx compl ev");
+ kfree(tb);
+ return -EPROTO;
+ }
+
+ param->pdev_id = ev->pdev_id;
+ param->desc_id = ev->desc_id;
+ param->status = ev->status;
+ param->ack_rssi = ev->ack_rssi;
+
+ kfree(tb);
+ return 0;
+}
+
+static void ath11k_wmi_event_scan_started(struct ath11k *ar)
+{
+ lockdep_assert_held(&ar->data_lock);
+
+ switch (ar->scan.state) {
+ case ATH11K_SCAN_IDLE:
+ case ATH11K_SCAN_RUNNING:
+ case ATH11K_SCAN_ABORTING:
+ ath11k_warn(ar->ab, "received scan started event in an invalid scan state: %s (%d)\n",
+ ath11k_scan_state_str(ar->scan.state),
+ ar->scan.state);
+ break;
+ case ATH11K_SCAN_STARTING:
+ ar->scan.state = ATH11K_SCAN_RUNNING;
+ if (ar->scan.is_roc)
+ ieee80211_ready_on_channel(ar->hw);
+ complete(&ar->scan.started);
+ break;
+ }
+}
+
+static void ath11k_wmi_event_scan_start_failed(struct ath11k *ar)
+{
+ lockdep_assert_held(&ar->data_lock);
+
+ switch (ar->scan.state) {
+ case ATH11K_SCAN_IDLE:
+ case ATH11K_SCAN_RUNNING:
+ case ATH11K_SCAN_ABORTING:
+ ath11k_warn(ar->ab, "received scan start failed event in an invalid scan state: %s (%d)\n",
+ ath11k_scan_state_str(ar->scan.state),
+ ar->scan.state);
+ break;
+ case ATH11K_SCAN_STARTING:
+ complete(&ar->scan.started);
+ __ath11k_mac_scan_finish(ar);
+ break;
+ }
+}
+
+static void ath11k_wmi_event_scan_completed(struct ath11k *ar)
+{
+ lockdep_assert_held(&ar->data_lock);
+
+ switch (ar->scan.state) {
+ case ATH11K_SCAN_IDLE:
+ case ATH11K_SCAN_STARTING:
+ /* One suspected reason scan can be completed while starting is
+ * if firmware fails to deliver all scan events to the host,
+ * e.g. when transport pipe is full. This has been observed
+ * with spectral scan phyerr events starving wmi transport
+ * pipe. In such case the "scan completed" event should be (and
+ * is) ignored by the host as it may be just firmware's scan
+ * state machine recovering.
+ */
+ ath11k_warn(ar->ab, "received scan completed event in an invalid scan state: %s (%d)\n",
+ ath11k_scan_state_str(ar->scan.state),
+ ar->scan.state);
+ break;
+ case ATH11K_SCAN_RUNNING:
+ case ATH11K_SCAN_ABORTING:
+ __ath11k_mac_scan_finish(ar);
+ break;
+ }
+}
+
+static void ath11k_wmi_event_scan_bss_chan(struct ath11k *ar)
+{
+ lockdep_assert_held(&ar->data_lock);
+
+ switch (ar->scan.state) {
+ case ATH11K_SCAN_IDLE:
+ case ATH11K_SCAN_STARTING:
+ ath11k_warn(ar->ab, "received scan bss chan event in an invalid scan state: %s (%d)\n",
+ ath11k_scan_state_str(ar->scan.state),
+ ar->scan.state);
+ break;
+ case ATH11K_SCAN_RUNNING:
+ case ATH11K_SCAN_ABORTING:
+ ar->scan_channel = NULL;
+ break;
+ }
+}
+
+static void ath11k_wmi_event_scan_foreign_chan(struct ath11k *ar, u32 freq)
+{
+ lockdep_assert_held(&ar->data_lock);
+
+ switch (ar->scan.state) {
+ case ATH11K_SCAN_IDLE:
+ case ATH11K_SCAN_STARTING:
+ ath11k_warn(ar->ab, "received scan foreign chan event in an invalid scan state: %s (%d)\n",
+ ath11k_scan_state_str(ar->scan.state),
+ ar->scan.state);
+ break;
+ case ATH11K_SCAN_RUNNING:
+ case ATH11K_SCAN_ABORTING:
+ ar->scan_channel = ieee80211_get_channel(ar->hw->wiphy, freq);
+ if (ar->scan.is_roc && ar->scan.roc_freq == freq)
+ complete(&ar->scan.on_channel);
+ break;
+ }
+}
+
+static const char *
+ath11k_wmi_event_scan_type_str(enum wmi_scan_event_type type,
+ enum wmi_scan_completion_reason reason)
+{
+ switch (type) {
+ case WMI_SCAN_EVENT_STARTED:
+ return "started";
+ case WMI_SCAN_EVENT_COMPLETED:
+ switch (reason) {
+ case WMI_SCAN_REASON_COMPLETED:
+ return "completed";
+ case WMI_SCAN_REASON_CANCELLED:
+ return "completed [cancelled]";
+ case WMI_SCAN_REASON_PREEMPTED:
+ return "completed [preempted]";
+ case WMI_SCAN_REASON_TIMEDOUT:
+ return "completed [timedout]";
+ case WMI_SCAN_REASON_INTERNAL_FAILURE:
+ return "completed [internal err]";
+ case WMI_SCAN_REASON_MAX:
+ break;
+ }
+ return "completed [unknown]";
+ case WMI_SCAN_EVENT_BSS_CHANNEL:
+ return "bss channel";
+ case WMI_SCAN_EVENT_FOREIGN_CHAN:
+ return "foreign channel";
+ case WMI_SCAN_EVENT_DEQUEUED:
+ return "dequeued";
+ case WMI_SCAN_EVENT_PREEMPTED:
+ return "preempted";
+ case WMI_SCAN_EVENT_START_FAILED:
+ return "start failed";
+ case WMI_SCAN_EVENT_RESTARTED:
+ return "restarted";
+ case WMI_SCAN_EVENT_FOREIGN_CHAN_EXIT:
+ return "foreign channel exit";
+ default:
+ return "unknown";
+ }
+}
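+
+ /* A condensed view (not driver code) of the scan state machine handled
+ * above: only STARTING accepts started/start-failed, only RUNNING and
+ * ABORTING accept completed/bss-chan/foreign-chan, and every other
+ * pairing is logged as an invalid-state warning. The table below is an
+ * illustration of those handlers, not a structure the driver uses.
+ */
+ #if 0
+ #include <stdbool.h>
+
+ enum scan_state { IDLE, STARTING, RUNNING, ABORTING };
+
+ static const bool scan_ev_valid[4][2] = {
+ 	/*            start events   run-time events */
+ 	[IDLE]     = { false,        false },
+ 	[STARTING] = { true,         false },
+ 	[RUNNING]  = { false,        true  },
+ 	[ABORTING] = { false,        true  },
+ };
+ #endif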
+
+static int ath11k_pull_scan_ev(struct ath11k_base *ab, struct sk_buff *skb,
+ struct wmi_scan_event *scan_evt_param)
+{
+ const void **tb;
+ const struct wmi_scan_event *ev;
+ int ret;
+
+ tb = ath11k_wmi_tlv_parse_alloc(ab, skb->data, skb->len, GFP_ATOMIC);
+ if (IS_ERR(tb)) {
+ ret = PTR_ERR(tb);
+ ath11k_warn(ab, "failed to parse tlv: %d\n", ret);
+ return ret;
+ }
+
+ ev = tb[WMI_TAG_SCAN_EVENT];
+ if (!ev) {
+ ath11k_warn(ab, "failed to fetch scan ev");
+ kfree(tb);
+ return -EPROTO;
+ }
+
+ scan_evt_param->event_type = ev->event_type;
+ scan_evt_param->reason = ev->reason;
+ scan_evt_param->channel_freq = ev->channel_freq;
+ scan_evt_param->scan_req_id = ev->scan_req_id;
+ scan_evt_param->scan_id = ev->scan_id;
+ scan_evt_param->vdev_id = ev->vdev_id;
+ scan_evt_param->tsf_timestamp = ev->tsf_timestamp;
+
+ kfree(tb);
+ return 0;
+}
+
+static int ath11k_pull_peer_sta_kickout_ev(struct ath11k_base *ab, struct sk_buff *skb,
+ struct wmi_peer_sta_kickout_arg *arg)
+{
+ const void **tb;
+ const struct wmi_peer_sta_kickout_event *ev;
+ int ret;
+
+ tb = ath11k_wmi_tlv_parse_alloc(ab, skb->data, skb->len, GFP_ATOMIC);
+ if (IS_ERR(tb)) {
+ ret = PTR_ERR(tb);
+ ath11k_warn(ab, "failed to parse tlv: %d\n", ret);
+ return ret;
+ }
+
+ ev = tb[WMI_TAG_PEER_STA_KICKOUT_EVENT];
+ if (!ev) {
+ ath11k_warn(ab, "failed to fetch peer sta kickout ev");
+ kfree(tb);
+ return -EPROTO;
+ }
+
+ arg->mac_addr = ev->peer_macaddr.addr;
+
+ kfree(tb);
+ return 0;
+}
+
+static int ath11k_pull_roam_ev(struct ath11k_base *ab, struct sk_buff *skb,
+ struct wmi_roam_event *roam_ev)
+{
+ const void **tb;
+ const struct wmi_roam_event *ev;
+ int ret;
+
+ tb = ath11k_wmi_tlv_parse_alloc(ab, skb->data, skb->len, GFP_ATOMIC);
+ if (IS_ERR(tb)) {
+ ret = PTR_ERR(tb);
+ ath11k_warn(ab, "failed to parse tlv: %d\n", ret);
+ return ret;
+ }
+
+ ev = tb[WMI_TAG_ROAM_EVENT];
+ if (!ev) {
+ ath11k_warn(ab, "failed to fetch roam ev");
+ kfree(tb);
+ return -EPROTO;
+ }
+
+ roam_ev->vdev_id = ev->vdev_id;
+ roam_ev->reason = ev->reason;
+ roam_ev->rssi = ev->rssi;
+
+ kfree(tb);
+ return 0;
+}
+
+static int freq_to_idx(struct ath11k *ar, int freq)
+{
+ struct ieee80211_supported_band *sband;
+ int band, ch, idx = 0;
+
+ for (band = NL80211_BAND_2GHZ; band < NUM_NL80211_BANDS; band++) {
+ sband = ar->hw->wiphy->bands[band];
+ if (!sband)
+ continue;
+
+ for (ch = 0; ch < sband->n_channels; ch++, idx++)
+ if (sband->channels[ch].center_freq == freq)
+ goto exit;
+ }
+
+exit:
+ return idx;
+}
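+
+ /* A standalone sketch (not driver code) of freq_to_idx() above: the
+ * index is a running count across every band's channel list, and a miss
+ * falls out of the loops with idx equal to the total channel count --
+ * the same quirk as the driver helper. The channel lists below are
+ * illustrative.
+ */
+ #if 0
+ #include <stdio.h>
+
+ int main(void)
+ {
+ 	int bands[2][3] = {
+ 		{ 2412, 2417, 2422 },	/* "2 GHz" band */
+ 		{ 5180, 5200, 5220 },	/* "5 GHz" band */
+ 	};
+ 	int target = 5200, idx = 0;
+
+ 	for (int b = 0; b < 2; b++)
+ 		for (int c = 0; c < 3; c++, idx++)
+ 			if (bands[b][c] == target)
+ 				goto out;
+ out:
+ 	printf("freq %d -> idx %d\n", target, idx);	/* idx 4 */
+ 	return 0;
+ }
+ #endif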
+
+static int ath11k_pull_chan_info_ev(struct ath11k_base *ab, u8 *evt_buf,
+ u32 len, struct wmi_chan_info_event *ch_info_ev)
+{
+ const void **tb;
+ const struct wmi_chan_info_event *ev;
+ int ret;
+
+ tb = ath11k_wmi_tlv_parse_alloc(ab, evt_buf, len, GFP_ATOMIC);
+ if (IS_ERR(tb)) {
+ ret = PTR_ERR(tb);
+ ath11k_warn(ab, "failed to parse tlv: %d\n", ret);
+ return ret;
+ }
+
+ ev = tb[WMI_TAG_CHAN_INFO_EVENT];
+ if (!ev) {
+ ath11k_warn(ab, "failed to fetch chan info ev");
+ kfree(tb);
+ return -EPROTO;
+ }
+
+ ch_info_ev->err_code = ev->err_code;
+ ch_info_ev->freq = ev->freq;
+ ch_info_ev->cmd_flags = ev->cmd_flags;
+ ch_info_ev->noise_floor = ev->noise_floor;
+ ch_info_ev->rx_clear_count = ev->rx_clear_count;
+ ch_info_ev->cycle_count = ev->cycle_count;
+ ch_info_ev->chan_tx_pwr_range = ev->chan_tx_pwr_range;
+ ch_info_ev->chan_tx_pwr_tp = ev->chan_tx_pwr_tp;
+ ch_info_ev->rx_frame_count = ev->rx_frame_count;
+ ch_info_ev->tx_frame_cnt = ev->tx_frame_cnt;
+ ch_info_ev->mac_clk_mhz = ev->mac_clk_mhz;
+ ch_info_ev->vdev_id = ev->vdev_id;
+
+ kfree(tb);
+ return 0;
+}
+
+static int
+ath11k_pull_pdev_bss_chan_info_ev(struct ath11k_base *ab, struct sk_buff *skb,
+ struct wmi_pdev_bss_chan_info_event *bss_ch_info_ev)
+{
+ const void **tb;
+ const struct wmi_pdev_bss_chan_info_event *ev;
+ int ret;
+
+ tb = ath11k_wmi_tlv_parse_alloc(ab, skb->data, skb->len, GFP_ATOMIC);
+ if (IS_ERR(tb)) {
+ ret = PTR_ERR(tb);
+ ath11k_warn(ab, "failed to parse tlv: %d\n", ret);
+ return ret;
+ }
+
+ ev = tb[WMI_TAG_PDEV_BSS_CHAN_INFO_EVENT];
+ if (!ev) {
+ ath11k_warn(ab, "failed to fetch pdev bss chan info ev");
+ kfree(tb);
+ return -EPROTO;
+ }
+
+ bss_ch_info_ev->pdev_id = ev->pdev_id;
+ bss_ch_info_ev->freq = ev->freq;
+ bss_ch_info_ev->noise_floor = ev->noise_floor;
+ bss_ch_info_ev->rx_clear_count_low = ev->rx_clear_count_low;
+ bss_ch_info_ev->rx_clear_count_high = ev->rx_clear_count_high;
+ bss_ch_info_ev->cycle_count_low = ev->cycle_count_low;
+ bss_ch_info_ev->cycle_count_high = ev->cycle_count_high;
+ bss_ch_info_ev->tx_cycle_count_low = ev->tx_cycle_count_low;
+ bss_ch_info_ev->tx_cycle_count_high = ev->tx_cycle_count_high;
+ bss_ch_info_ev->rx_cycle_count_low = ev->rx_cycle_count_low;
+ bss_ch_info_ev->rx_cycle_count_high = ev->rx_cycle_count_high;
+ bss_ch_info_ev->rx_bss_cycle_count_low = ev->rx_bss_cycle_count_low;
+ bss_ch_info_ev->rx_bss_cycle_count_high = ev->rx_bss_cycle_count_high;
+
+ kfree(tb);
+ return 0;
+}
+
+static int
+ath11k_pull_vdev_install_key_compl_ev(struct ath11k_base *ab, struct sk_buff *skb,
+ struct wmi_vdev_install_key_complete_arg *arg)
+{
+ const void **tb;
+ const struct wmi_vdev_install_key_compl_event *ev;
+ int ret;
+
+ tb = ath11k_wmi_tlv_parse_alloc(ab, skb->data, skb->len, GFP_ATOMIC);
+ if (IS_ERR(tb)) {
+ ret = PTR_ERR(tb);
+ ath11k_warn(ab, "failed to parse tlv: %d\n", ret);
+ return ret;
+ }
+
+ ev = tb[WMI_TAG_VDEV_INSTALL_KEY_COMPLETE_EVENT];
+ if (!ev) {
+ ath11k_warn(ab, "failed to fetch vdev install key compl ev");
+ kfree(tb);
+ return -EPROTO;
+ }
+
+ arg->vdev_id = ev->vdev_id;
+ arg->macaddr = ev->peer_macaddr.addr;
+ arg->key_idx = ev->key_idx;
+ arg->key_flags = ev->key_flags;
+ arg->status = ev->status;
+
+ kfree(tb);
+ return 0;
+}
+
+static int ath11k_pull_peer_assoc_conf_ev(struct ath11k_base *ab, struct sk_buff *skb,
+ struct wmi_peer_assoc_conf_arg *peer_assoc_conf)
+{
+ const void **tb;
+ const struct wmi_peer_assoc_conf_event *ev;
+ int ret;
+
+ tb = ath11k_wmi_tlv_parse_alloc(ab, skb->data, skb->len, GFP_ATOMIC);
+ if (IS_ERR(tb)) {
+ ret = PTR_ERR(tb);
+ ath11k_warn(ab, "failed to parse tlv: %d\n", ret);
+ return ret;
+ }
+
+ ev = tb[WMI_TAG_PEER_ASSOC_CONF_EVENT];
+ if (!ev) {
+ ath11k_warn(ab, "failed to fetch peer assoc conf ev");
+ kfree(tb);
+ return -EPROTO;
+ }
+
+ peer_assoc_conf->vdev_id = ev->vdev_id;
+ peer_assoc_conf->macaddr = ev->peer_macaddr.addr;
+
+ kfree(tb);
+ return 0;
+}
+
+static void ath11k_wmi_pull_pdev_stats_base(const struct wmi_pdev_stats_base *src,
+ struct ath11k_fw_stats_pdev *dst)
+{
+ dst->ch_noise_floor = src->chan_nf;
+ dst->tx_frame_count = src->tx_frame_count;
+ dst->rx_frame_count = src->rx_frame_count;
+ dst->rx_clear_count = src->rx_clear_count;
+ dst->cycle_count = src->cycle_count;
+ dst->phy_err_count = src->phy_err_count;
+ dst->chan_tx_power = src->chan_tx_pwr;
+}
+
+static void
+ath11k_wmi_pull_pdev_stats_tx(const struct wmi_pdev_stats_tx *src,
+ struct ath11k_fw_stats_pdev *dst)
+{
+ dst->comp_queued = src->comp_queued;
+ dst->comp_delivered = src->comp_delivered;
+ dst->msdu_enqued = src->msdu_enqued;
+ dst->mpdu_enqued = src->mpdu_enqued;
+ dst->wmm_drop = src->wmm_drop;
+ dst->local_enqued = src->local_enqued;
+ dst->local_freed = src->local_freed;
+ dst->hw_queued = src->hw_queued;
+ dst->hw_reaped = src->hw_reaped;
+ dst->underrun = src->underrun;
+ dst->hw_paused = src->hw_paused;
+ dst->tx_abort = src->tx_abort;
+ dst->mpdus_requeued = src->mpdus_requeued;
+ dst->tx_ko = src->tx_ko;
+ dst->tx_xretry = src->tx_xretry;
+ dst->data_rc = src->data_rc;
+ dst->self_triggers = src->self_triggers;
+ dst->sw_retry_failure = src->sw_retry_failure;
+ dst->illgl_rate_phy_err = src->illgl_rate_phy_err;
+ dst->pdev_cont_xretry = src->pdev_cont_xretry;
+ dst->pdev_tx_timeout = src->pdev_tx_timeout;
+ dst->pdev_resets = src->pdev_resets;
+ dst->stateless_tid_alloc_failure = src->stateless_tid_alloc_failure;
+ dst->phy_underrun = src->phy_underrun;
+ dst->txop_ovf = src->txop_ovf;
+ dst->seq_posted = src->seq_posted;
+ dst->seq_failed_queueing = src->seq_failed_queueing;
+ dst->seq_completed = src->seq_completed;
+ dst->seq_restarted = src->seq_restarted;
+ dst->mu_seq_posted = src->mu_seq_posted;
+ dst->mpdus_sw_flush = src->mpdus_sw_flush;
+ dst->mpdus_hw_filter = src->mpdus_hw_filter;
+ dst->mpdus_truncated = src->mpdus_truncated;
+ dst->mpdus_ack_failed = src->mpdus_ack_failed;
+ dst->mpdus_expired = src->mpdus_expired;
+}
+
+static void ath11k_wmi_pull_pdev_stats_rx(const struct wmi_pdev_stats_rx *src,
+ struct ath11k_fw_stats_pdev *dst)
+{
+ dst->mid_ppdu_route_change = src->mid_ppdu_route_change;
+ dst->status_rcvd = src->status_rcvd;
+ dst->r0_frags = src->r0_frags;
+ dst->r1_frags = src->r1_frags;
+ dst->r2_frags = src->r2_frags;
+ dst->r3_frags = src->r3_frags;
+ dst->htt_msdus = src->htt_msdus;
+ dst->htt_mpdus = src->htt_mpdus;
+ dst->loc_msdus = src->loc_msdus;
+ dst->loc_mpdus = src->loc_mpdus;
+ dst->oversize_amsdu = src->oversize_amsdu;
+ dst->phy_errs = src->phy_errs;
+ dst->phy_err_drop = src->phy_err_drop;
+ dst->mpdu_errs = src->mpdu_errs;
+ dst->rx_ovfl_errs = src->rx_ovfl_errs;
+}
+
+static void
+ath11k_wmi_pull_vdev_stats(const struct wmi_vdev_stats *src,
+ struct ath11k_fw_stats_vdev *dst)
+{
+ int i;
+
+ dst->vdev_id = src->vdev_id;
+ dst->beacon_snr = src->beacon_snr;
+ dst->data_snr = src->data_snr;
+ dst->num_rx_frames = src->num_rx_frames;
+ dst->num_rts_fail = src->num_rts_fail;
+ dst->num_rts_success = src->num_rts_success;
+ dst->num_rx_err = src->num_rx_err;
+ dst->num_rx_discard = src->num_rx_discard;
+ dst->num_tx_not_acked = src->num_tx_not_acked;
+
+ for (i = 0; i < ARRAY_SIZE(src->num_tx_frames); i++)
+ dst->num_tx_frames[i] = src->num_tx_frames[i];
+
+ for (i = 0; i < ARRAY_SIZE(src->num_tx_frames_retries); i++)
+ dst->num_tx_frames_retries[i] = src->num_tx_frames_retries[i];
+
+ for (i = 0; i < ARRAY_SIZE(src->num_tx_frames_failures); i++)
+ dst->num_tx_frames_failures[i] = src->num_tx_frames_failures[i];
+
+ for (i = 0; i < ARRAY_SIZE(src->tx_rate_history); i++)
+ dst->tx_rate_history[i] = src->tx_rate_history[i];
+
+ for (i = 0; i < ARRAY_SIZE(src->beacon_rssi_history); i++)
+ dst->beacon_rssi_history[i] = src->beacon_rssi_history[i];
+}
+
+static void
+ath11k_wmi_pull_bcn_stats(const struct wmi_bcn_stats *src,
+ struct ath11k_fw_stats_bcn *dst)
+{
+ dst->vdev_id = src->vdev_id;
+ dst->tx_bcn_succ_cnt = src->tx_bcn_succ_cnt;
+ dst->tx_bcn_outage_cnt = src->tx_bcn_outage_cnt;
+}
+
+static int ath11k_wmi_tlv_rssi_chain_parse(struct ath11k_base *ab,
+ u16 tag, u16 len,
+ const void *ptr, void *data)
+{
+ struct wmi_tlv_fw_stats_parse *parse = data;
+ const struct wmi_stats_event *ev = parse->ev;
+ struct ath11k_fw_stats *stats = parse->stats;
+ struct ath11k *ar;
+ struct ath11k_vif *arvif;
+ struct ieee80211_sta *sta;
+ struct ath11k_sta *arsta;
+ const struct wmi_rssi_stats *stats_rssi = (const struct wmi_rssi_stats *)ptr;
+ int j, ret = 0;
+
+ if (tag != WMI_TAG_RSSI_STATS)
+ return -EPROTO;
+
+ rcu_read_lock();
+
+ ar = ath11k_mac_get_ar_by_pdev_id(ab, ev->pdev_id);
+ if (!ar) {
+ ath11k_warn(ab, "invalid pdev id %d in rssi stats\n", ev->pdev_id);
+ ret = -EPROTO;
+ goto exit;
+ }
+
+ stats->stats_id = WMI_REQUEST_RSSI_PER_CHAIN_STAT;
+
+ ath11k_dbg(ab, ATH11K_DBG_WMI,
+ "stats vdev id %d mac %pM\n",
+ stats_rssi->vdev_id, stats_rssi->peer_macaddr.addr);
+
+ arvif = ath11k_mac_get_arvif(ar, stats_rssi->vdev_id);
+ if (!arvif) {
+ ath11k_warn(ab, "not found vif for vdev id %d\n",
+ stats_rssi->vdev_id);
+ ret = -EPROTO;
+ goto exit;
+ }
+
+ ath11k_dbg(ab, ATH11K_DBG_WMI,
+ "stats bssid %pM vif %p\n",
+ arvif->bssid, arvif->vif);
+
+ sta = ieee80211_find_sta_by_ifaddr(ar->hw,
+ arvif->bssid,
+ NULL);
+ if (!sta) {
+ ath11k_dbg(ab, ATH11K_DBG_WMI,
+ "not found station of bssid %pM for rssi chain\n",
+ arvif->bssid);
+ goto exit;
+ }
+
+ arsta = (struct ath11k_sta *)sta->drv_priv;
+
+ BUILD_BUG_ON(ARRAY_SIZE(arsta->chain_signal) >
+ ARRAY_SIZE(stats_rssi->rssi_avg_beacon));
+
+ for (j = 0; j < ARRAY_SIZE(arsta->chain_signal); j++) {
+ arsta->chain_signal[j] = stats_rssi->rssi_avg_beacon[j];
+ ath11k_dbg(ab, ATH11K_DBG_WMI,
+ "stats beacon rssi[%d] %d data rssi[%d] %d\n",
+ j,
+ stats_rssi->rssi_avg_beacon[j],
+ j,
+ stats_rssi->rssi_avg_data[j]);
+ }
+
+exit:
+ rcu_read_unlock();
+ return ret;
+}
+
+static int ath11k_wmi_tlv_fw_stats_data_parse(struct ath11k_base *ab,
+ struct wmi_tlv_fw_stats_parse *parse,
+ const void *ptr,
+ u16 len)
+{
+ struct ath11k_fw_stats *stats = parse->stats;
+ const struct wmi_stats_event *ev = parse->ev;
+ struct ath11k *ar;
+ struct ath11k_vif *arvif;
+ struct ieee80211_sta *sta;
+ struct ath11k_sta *arsta;
+ int i, ret = 0;
+ const void *data = ptr;
+
+ if (!ev) {
+ ath11k_warn(ab, "failed to fetch update stats ev");
+ return -EPROTO;
+ }
+
+ stats->stats_id = 0;
+
+ rcu_read_lock();
+
+ ar = ath11k_mac_get_ar_by_pdev_id(ab, ev->pdev_id);
+
+ for (i = 0; i < ev->num_pdev_stats; i++) {
+ const struct wmi_pdev_stats *src;
+ struct ath11k_fw_stats_pdev *dst;
+
+ src = data;
+ if (len < sizeof(*src)) {
+ ret = -EPROTO;
+ goto exit;
+ }
+
+ stats->stats_id = WMI_REQUEST_PDEV_STAT;
+
+ data += sizeof(*src);
+ len -= sizeof(*src);
+
+ dst = kzalloc(sizeof(*dst), GFP_ATOMIC);
+ if (!dst)
+ continue;
+
+ ath11k_wmi_pull_pdev_stats_base(&src->base, dst);
+ ath11k_wmi_pull_pdev_stats_tx(&src->tx, dst);
+ ath11k_wmi_pull_pdev_stats_rx(&src->rx, dst);
+ list_add_tail(&dst->list, &stats->pdevs);
+ }
+
+ for (i = 0; i < ev->num_vdev_stats; i++) {
+ const struct wmi_vdev_stats *src;
+ struct ath11k_fw_stats_vdev *dst;
+
+ src = data;
+ if (len < sizeof(*src)) {
+ ret = -EPROTO;
+ goto exit;
+ }
+
+ stats->stats_id = WMI_REQUEST_VDEV_STAT;
+
+ /* ar may be NULL if the pdev id in the event was invalid; skip
+ * the optional per-station rssi_beacon update in that case.
+ */
+ arvif = ar ? ath11k_mac_get_arvif(ar, src->vdev_id) : NULL;
+ if (arvif) {
+ sta = ieee80211_find_sta_by_ifaddr(ar->hw,
+ arvif->bssid,
+ NULL);
+ if (sta) {
+ arsta = (struct ath11k_sta *)sta->drv_priv;
+ arsta->rssi_beacon = src->beacon_snr;
+ ath11k_dbg(ab, ATH11K_DBG_WMI,
+ "stats vdev id %d snr %d\n",
+ src->vdev_id, src->beacon_snr);
+ } else {
+ ath11k_dbg(ab, ATH11K_DBG_WMI,
+ "not found station of bssid %pM for vdev stat\n",
+ arvif->bssid);
+ }
+ }
+
+ data += sizeof(*src);
+ len -= sizeof(*src);
+
+ dst = kzalloc(sizeof(*dst), GFP_ATOMIC);
+ if (!dst)
+ continue;
+
+ ath11k_wmi_pull_vdev_stats(src, dst);
+ list_add_tail(&dst->list, &stats->vdevs);
+ }
+
+ for (i = 0; i < ev->num_bcn_stats; i++) {
+ const struct wmi_bcn_stats *src;
+ struct ath11k_fw_stats_bcn *dst;
+
+ src = data;
+ if (len < sizeof(*src)) {
+ ret = -EPROTO;
+ goto exit;
+ }
+
+ stats->stats_id = WMI_REQUEST_BCN_STAT;
+
+ data += sizeof(*src);
+ len -= sizeof(*src);
+
+ dst = kzalloc(sizeof(*dst), GFP_ATOMIC);
+ if (!dst)
+ continue;
+
+ ath11k_wmi_pull_bcn_stats(src, dst);
+ list_add_tail(&dst->list, &stats->bcn);
+ }
+
+exit:
+ rcu_read_unlock();
+ return ret;
+}
+
+static int ath11k_wmi_tlv_fw_stats_parse(struct ath11k_base *ab,
+ u16 tag, u16 len,
+ const void *ptr, void *data)
+{
+ struct wmi_tlv_fw_stats_parse *parse = data;
+ int ret = 0;
+
+ switch (tag) {
+ case WMI_TAG_STATS_EVENT:
+ parse->ev = (struct wmi_stats_event *)ptr;
+ parse->stats->pdev_id = parse->ev->pdev_id;
+ break;
+ case WMI_TAG_ARRAY_BYTE:
+ ret = ath11k_wmi_tlv_fw_stats_data_parse(ab, parse, ptr, len);
+ break;
+ case WMI_TAG_PER_CHAIN_RSSI_STATS:
+ parse->rssi = (struct wmi_per_chain_rssi_stats *)ptr;
+
+ if (parse->ev->stats_id & WMI_REQUEST_RSSI_PER_CHAIN_STAT)
+ parse->rssi_num = parse->rssi->num_per_chain_rssi_stats;
+
+ ath11k_dbg(ab, ATH11K_DBG_WMI,
+ "stats id 0x%x num chain %d\n",
+ parse->ev->stats_id,
+ parse->rssi_num);
+ break;
+ case WMI_TAG_ARRAY_STRUCT:
+ if (parse->rssi_num && !parse->chain_rssi_done) {
+ ret = ath11k_wmi_tlv_iter(ab, ptr, len,
+ ath11k_wmi_tlv_rssi_chain_parse,
+ parse);
+ if (ret) {
+ ath11k_warn(ab, "failed to parse rssi chain %d\n",
+ ret);
+ return ret;
+ }
+ parse->chain_rssi_done = true;
+ }
+ break;
+ default:
+ break;
+ }
+ return ret;
+}
+
+int ath11k_wmi_pull_fw_stats(struct ath11k_base *ab, struct sk_buff *skb,
+ struct ath11k_fw_stats *stats)
+{
+ struct wmi_tlv_fw_stats_parse parse = { };
+
+ stats->stats_id = 0;
+ parse.stats = stats;
+
+ return ath11k_wmi_tlv_iter(ab, skb->data, skb->len,
+ ath11k_wmi_tlv_fw_stats_parse,
+ &parse);
+}
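+
+ /* A standalone sketch (not driver code) of the tag-length walk that
+ * ath11k_wmi_tlv_iter() performs for the parsers above: each element is
+ * a small header (tag, len) followed by len payload bytes, with a
+ * callback invoked per element. The 2-byte little-endian header packing
+ * below is illustrative, not the WMI TLV format.
+ */
+ #if 0
+ #include <stdint.h>
+ #include <stdio.h>
+ #include <string.h>
+
+ static int walk_tlv(const uint8_t *buf, size_t len,
+ 		    int (*cb)(uint16_t tag, uint16_t plen, const uint8_t *p))
+ {
+ 	while (len >= 4) {
+ 		uint16_t tag, plen;
+
+ 		memcpy(&tag, buf, 2);
+ 		memcpy(&plen, buf + 2, 2);
+ 		if (4 + (size_t)plen > len)
+ 			return -1;	/* truncated element */
+ 		if (cb(tag, plen, buf + 4))
+ 			return -1;	/* parser asked to stop */
+ 		buf += 4 + plen;
+ 		len -= 4 + plen;
+ 	}
+ 	return 0;
+ }
+
+ static int print_cb(uint16_t tag, uint16_t plen, const uint8_t *p)
+ {
+ 	(void)p;
+ 	printf("tag %u len %u\n", tag, plen);
+ 	return 0;
+ }
+
+ int main(void)
+ {
+ 	uint8_t buf[] = { 1, 0, 2, 0, 0xaa, 0xbb,	/* tag 1, 2 bytes */
+ 			  7, 0, 0, 0 };			/* tag 7, 0 bytes */
+
+ 	return walk_tlv(buf, sizeof(buf), print_cb);
+ }
+ #endif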
+
+static void
+ath11k_wmi_fw_pdev_base_stats_fill(const struct ath11k_fw_stats_pdev *pdev,
+ char *buf, u32 *length)
+{
+ u32 len = *length;
+ u32 buf_len = ATH11K_FW_STATS_BUF_SIZE;
+
+ len += scnprintf(buf + len, buf_len - len, "\n");
+ len += scnprintf(buf + len, buf_len - len, "%30s\n",
+ "ath11k PDEV stats");
+ len += scnprintf(buf + len, buf_len - len, "%30s\n\n",
+ "=================");
+
+ len += scnprintf(buf + len, buf_len - len, "%30s %10d\n",
+ "Channel noise floor", pdev->ch_noise_floor);
+ len += scnprintf(buf + len, buf_len - len, "%30s %10u\n",
+ "Channel TX power", pdev->chan_tx_power);
+ len += scnprintf(buf + len, buf_len - len, "%30s %10u\n",
+ "TX frame count", pdev->tx_frame_count);
+ len += scnprintf(buf + len, buf_len - len, "%30s %10u\n",
+ "RX frame count", pdev->rx_frame_count);
+ len += scnprintf(buf + len, buf_len - len, "%30s %10u\n",
+ "RX clear count", pdev->rx_clear_count);
+ len += scnprintf(buf + len, buf_len - len, "%30s %10u\n",
+ "Cycle count", pdev->cycle_count);
+ len += scnprintf(buf + len, buf_len - len, "%30s %10u\n",
+ "PHY error count", pdev->phy_err_count);
+
+ *length = len;
+}
+
+static void
+ath11k_wmi_fw_pdev_tx_stats_fill(const struct ath11k_fw_stats_pdev *pdev,
+ char *buf, u32 *length)
+{
+ u32 len = *length;
+ u32 buf_len = ATH11K_FW_STATS_BUF_SIZE;
+
+ len += scnprintf(buf + len, buf_len - len, "\n%30s\n",
+ "ath11k PDEV TX stats");
+ len += scnprintf(buf + len, buf_len - len, "%30s\n\n",
+ "====================");
+
+ len += scnprintf(buf + len, buf_len - len, "%30s %10d\n",
+ "HTT cookies queued", pdev->comp_queued);
+ len += scnprintf(buf + len, buf_len - len, "%30s %10d\n",
+ "HTT cookies disp.", pdev->comp_delivered);
+ len += scnprintf(buf + len, buf_len - len, "%30s %10d\n",
+ "MSDU queued", pdev->msdu_enqued);
+ len += scnprintf(buf + len, buf_len - len, "%30s %10d\n",
+ "MPDU queued", pdev->mpdu_enqued);
+ len += scnprintf(buf + len, buf_len - len, "%30s %10d\n",
+ "MSDUs dropped", pdev->wmm_drop);
+ len += scnprintf(buf + len, buf_len - len, "%30s %10d\n",
+ "Local enqued", pdev->local_enqued);
+ len += scnprintf(buf + len, buf_len - len, "%30s %10d\n",
+ "Local freed", pdev->local_freed);
+ len += scnprintf(buf + len, buf_len - len, "%30s %10d\n",
+ "HW queued", pdev->hw_queued);
+ len += scnprintf(buf + len, buf_len - len, "%30s %10d\n",
+ "PPDUs reaped", pdev->hw_reaped);
+ len += scnprintf(buf + len, buf_len - len, "%30s %10d\n",
+ "Num underruns", pdev->underrun);
+ len += scnprintf(buf + len, buf_len - len, "%30s %10d\n",
+ "Num HW Paused", pdev->hw_paused);
+ len += scnprintf(buf + len, buf_len - len, "%30s %10d\n",
+ "PPDUs cleaned", pdev->tx_abort);
+ len += scnprintf(buf + len, buf_len - len, "%30s %10d\n",
+ "MPDUs requeued", pdev->mpdus_requeued);
+ len += scnprintf(buf + len, buf_len - len, "%30s %10u\n",
+ "PPDU OK", pdev->tx_ko);
+ len += scnprintf(buf + len, buf_len - len, "%30s %10u\n",
+ "Excessive retries", pdev->tx_xretry);
+ len += scnprintf(buf + len, buf_len - len, "%30s %10u\n",
+ "HW rate", pdev->data_rc);
+ len += scnprintf(buf + len, buf_len - len, "%30s %10u\n",
+ "Sched self triggers", pdev->self_triggers);
+ len += scnprintf(buf + len, buf_len - len, "%30s %10u\n",
+ "Dropped due to SW retries",
+ pdev->sw_retry_failure);
+ len += scnprintf(buf + len, buf_len - len, "%30s %10u\n",
+ "Illegal rate phy errors",
+ pdev->illgl_rate_phy_err);
+ len += scnprintf(buf + len, buf_len - len, "%30s %10u\n",
+ "PDEV continuous xretry", pdev->pdev_cont_xretry);
+ len += scnprintf(buf + len, buf_len - len, "%30s %10u\n",
+ "TX timeout", pdev->pdev_tx_timeout);
+ len += scnprintf(buf + len, buf_len - len, "%30s %10u\n",
+ "PDEV resets", pdev->pdev_resets);
+ len += scnprintf(buf + len, buf_len - len, "%30s %10u\n",
+ "Stateless TIDs alloc failures",
+ pdev->stateless_tid_alloc_failure);
+ len += scnprintf(buf + len, buf_len - len, "%30s %10u\n",
+ "PHY underrun", pdev->phy_underrun);
+ len += scnprintf(buf + len, buf_len - len, "%30s %10u\n",
+ "MPDU is more than txop limit", pdev->txop_ovf);
+ len += scnprintf(buf + len, buf_len - len, "%30s %10u\n",
+ "Num sequences posted", pdev->seq_posted);
+ len += scnprintf(buf + len, buf_len - len, "%30s %10u\n",
+ "Num seq failed queueing ", pdev->seq_failed_queueing);
+ len += scnprintf(buf + len, buf_len - len, "%30s %10u\n",
+ "Num sequences completed ", pdev->seq_completed);
+ len += scnprintf(buf + len, buf_len - len, "%30s %10u\n",
+ "Num sequences restarted ", pdev->seq_restarted);
+ len += scnprintf(buf + len, buf_len - len, "%30s %10u\n",
+ "Num of MU sequences posted ", pdev->mu_seq_posted);
+ len += scnprintf(buf + len, buf_len - len, "%30s %10u\n",
+ "Num of MPDUS SW flushed ", pdev->mpdus_sw_flush);
+ len += scnprintf(buf + len, buf_len - len, "%30s %10u\n",
+ "Num of MPDUS HW filtered ", pdev->mpdus_hw_filter);
+ len += scnprintf(buf + len, buf_len - len, "%30s %10u\n",
+ "Num of MPDUS truncated ", pdev->mpdus_truncated);
+ len += scnprintf(buf + len, buf_len - len, "%30s %10u\n",
+ "Num of MPDUS ACK failed ", pdev->mpdus_ack_failed);
+ len += scnprintf(buf + len, buf_len - len, "%30s %10u\n",
+ "Num of MPDUS expired ", pdev->mpdus_expired);
+ *length = len;
+}
+
+static void
+ath11k_wmi_fw_pdev_rx_stats_fill(const struct ath11k_fw_stats_pdev *pdev,
+ char *buf, u32 *length)
+{
+ u32 len = *length;
+ u32 buf_len = ATH11K_FW_STATS_BUF_SIZE;
+
+ len += scnprintf(buf + len, buf_len - len, "\n%30s\n",
+ "ath11k PDEV RX stats");
+ len += scnprintf(buf + len, buf_len - len, "%30s\n\n",
+ "====================");
+
+ len += scnprintf(buf + len, buf_len - len, "%30s %10d\n",
+ "Mid PPDU route change",
+ pdev->mid_ppdu_route_change);
+ len += scnprintf(buf + len, buf_len - len, "%30s %10d\n",
+ "Tot. number of statuses", pdev->status_rcvd);
+ len += scnprintf(buf + len, buf_len - len, "%30s %10d\n",
+ "Extra frags on rings 0", pdev->r0_frags);
+ len += scnprintf(buf + len, buf_len - len, "%30s %10d\n",
+ "Extra frags on rings 1", pdev->r1_frags);
+ len += scnprintf(buf + len, buf_len - len, "%30s %10d\n",
+ "Extra frags on rings 2", pdev->r2_frags);
+ len += scnprintf(buf + len, buf_len - len, "%30s %10d\n",
+ "Extra frags on rings 3", pdev->r3_frags);
+ len += scnprintf(buf + len, buf_len - len, "%30s %10d\n",
+ "MSDUs delivered to HTT", pdev->htt_msdus);
+ len += scnprintf(buf + len, buf_len - len, "%30s %10d\n",
+ "MPDUs delivered to HTT", pdev->htt_mpdus);
+ len += scnprintf(buf + len, buf_len - len, "%30s %10d\n",
+ "MSDUs delivered to stack", pdev->loc_msdus);
+ len += scnprintf(buf + len, buf_len - len, "%30s %10d\n",
+ "MPDUs delivered to stack", pdev->loc_mpdus);
+ len += scnprintf(buf + len, buf_len - len, "%30s %10d\n",
+ "Oversized AMSUs", pdev->oversize_amsdu);
+ len += scnprintf(buf + len, buf_len - len, "%30s %10d\n",
+ "PHY errors", pdev->phy_errs);
+ len += scnprintf(buf + len, buf_len - len, "%30s %10d\n",
+ "PHY errors drops", pdev->phy_err_drop);
+ len += scnprintf(buf + len, buf_len - len, "%30s %10d\n",
+ "MPDU errors (FCS, MIC, ENC)", pdev->mpdu_errs);
+ len += scnprintf(buf + len, buf_len - len, "%30s %10d\n",
+ "Overflow errors", pdev->rx_ovfl_errs);
+ *length = len;
+}
+
+static void
+ath11k_wmi_fw_vdev_stats_fill(struct ath11k *ar,
+ const struct ath11k_fw_stats_vdev *vdev,
+ char *buf, u32 *length)
+{
+ u32 len = *length;
+ u32 buf_len = ATH11K_FW_STATS_BUF_SIZE;
+ struct ath11k_vif *arvif = ath11k_mac_get_arvif(ar, vdev->vdev_id);
+ u8 *vif_macaddr;
+ int i;
+
+ /* VDEV stats include all the active VDEVs of the other PDEVs as
+ * well, so ignore the VDEVs that are not part of the requested
+ * PDEV.
+ */
+ if (!arvif)
+ return;
+
+ vif_macaddr = arvif->vif->addr;
+
+ len += scnprintf(buf + len, buf_len - len, "%30s %u\n",
+ "VDEV ID", vdev->vdev_id);
+ len += scnprintf(buf + len, buf_len - len, "%30s %pM\n",
+ "VDEV MAC address", vif_macaddr);
+ len += scnprintf(buf + len, buf_len - len, "%30s %u\n",
+ "beacon snr", vdev->beacon_snr);
+ len += scnprintf(buf + len, buf_len - len, "%30s %u\n",
+ "data snr", vdev->data_snr);
+ len += scnprintf(buf + len, buf_len - len, "%30s %u\n",
+ "num rx frames", vdev->num_rx_frames);
+ len += scnprintf(buf + len, buf_len - len, "%30s %u\n",
+ "num rts fail", vdev->num_rts_fail);
+ len += scnprintf(buf + len, buf_len - len, "%30s %u\n",
+ "num rts success", vdev->num_rts_success);
+ len += scnprintf(buf + len, buf_len - len, "%30s %u\n",
+ "num rx err", vdev->num_rx_err);
+ len += scnprintf(buf + len, buf_len - len, "%30s %u\n",
+ "num rx discard", vdev->num_rx_discard);
+ len += scnprintf(buf + len, buf_len - len, "%30s %u\n",
+ "num tx not acked", vdev->num_tx_not_acked);
+
+ for (i = 0 ; i < ARRAY_SIZE(vdev->num_tx_frames); i++)
+ len += scnprintf(buf + len, buf_len - len,
+ "%25s [%02d] %u\n",
+ "num tx frames", i,
+ vdev->num_tx_frames[i]);
+
+ for (i = 0 ; i < ARRAY_SIZE(vdev->num_tx_frames_retries); i++)
+ len += scnprintf(buf + len, buf_len - len,
+ "%25s [%02d] %u\n",
+ "num tx frames retries", i,
+ vdev->num_tx_frames_retries[i]);
+
+ for (i = 0 ; i < ARRAY_SIZE(vdev->num_tx_frames_failures); i++)
+ len += scnprintf(buf + len, buf_len - len,
+ "%25s [%02d] %u\n",
+ "num tx frames failures", i,
+ vdev->num_tx_frames_failures[i]);
+
+ for (i = 0 ; i < ARRAY_SIZE(vdev->tx_rate_history); i++)
+ len += scnprintf(buf + len, buf_len - len,
+ "%25s [%02d] 0x%08x\n",
+ "tx rate history", i,
+ vdev->tx_rate_history[i]);
+
+ for (i = 0 ; i < ARRAY_SIZE(vdev->beacon_rssi_history); i++)
+ len += scnprintf(buf + len, buf_len - len,
+ "%25s [%02d] %u\n",
+ "beacon rssi history", i,
+ vdev->beacon_rssi_history[i]);
+
+ len += scnprintf(buf + len, buf_len - len, "\n");
+ *length = len;
+}
+
+static void
+ath11k_wmi_fw_bcn_stats_fill(struct ath11k *ar,
+ const struct ath11k_fw_stats_bcn *bcn,
+ char *buf, u32 *length)
+{
+ u32 len = *length;
+ u32 buf_len = ATH11K_FW_STATS_BUF_SIZE;
+ struct ath11k_vif *arvif = ath11k_mac_get_arvif(ar, bcn->vdev_id);
+ u8 *vdev_macaddr;
+
+ if (!arvif) {
+ ath11k_warn(ar->ab, "invalid vdev id %d in bcn stats",
+ bcn->vdev_id);
+ return;
+ }
+
+ vdev_macaddr = arvif->vif->addr;
+
+ len += scnprintf(buf + len, buf_len - len, "%30s %u\n",
+ "VDEV ID", bcn->vdev_id);
+ len += scnprintf(buf + len, buf_len - len, "%30s %pM\n",
+ "VDEV MAC address", vdev_macaddr);
+ len += scnprintf(buf + len, buf_len - len, "%30s\n\n",
+ "================");
+ len += scnprintf(buf + len, buf_len - len, "%30s %u\n",
+ "Num of beacon tx success", bcn->tx_bcn_succ_cnt);
+ len += scnprintf(buf + len, buf_len - len, "%30s %u\n",
+ "Num of beacon tx failures", bcn->tx_bcn_outage_cnt);
+
+ len += scnprintf(buf + len, buf_len - len, "\n");
+ *length = len;
+}
+
+void ath11k_wmi_fw_stats_fill(struct ath11k *ar,
+ struct ath11k_fw_stats *fw_stats,
+ u32 stats_id, char *buf)
+{
+ u32 len = 0;
+ u32 buf_len = ATH11K_FW_STATS_BUF_SIZE;
+ const struct ath11k_fw_stats_pdev *pdev;
+ const struct ath11k_fw_stats_vdev *vdev;
+ const struct ath11k_fw_stats_bcn *bcn;
+ size_t num_bcn;
+
+ spin_lock_bh(&ar->data_lock);
+
+ if (stats_id == WMI_REQUEST_PDEV_STAT) {
+ pdev = list_first_entry_or_null(&fw_stats->pdevs,
+ struct ath11k_fw_stats_pdev, list);
+ if (!pdev) {
+ ath11k_warn(ar->ab, "failed to get pdev stats\n");
+ goto unlock;
+ }
+
+ ath11k_wmi_fw_pdev_base_stats_fill(pdev, buf, &len);
+ ath11k_wmi_fw_pdev_tx_stats_fill(pdev, buf, &len);
+ ath11k_wmi_fw_pdev_rx_stats_fill(pdev, buf, &len);
+ }
+
+ if (stats_id == WMI_REQUEST_VDEV_STAT) {
+ len += scnprintf(buf + len, buf_len - len, "\n");
+ len += scnprintf(buf + len, buf_len - len, "%30s\n",
+ "ath11k VDEV stats");
+ len += scnprintf(buf + len, buf_len - len, "%30s\n\n",
+ "=================");
+
+ list_for_each_entry(vdev, &fw_stats->vdevs, list)
+ ath11k_wmi_fw_vdev_stats_fill(ar, vdev, buf, &len);
+ }
+
+ if (stats_id == WMI_REQUEST_BCN_STAT) {
+ num_bcn = list_count_nodes(&fw_stats->bcn);
+
+ len += scnprintf(buf + len, buf_len - len, "\n");
+ len += scnprintf(buf + len, buf_len - len, "%30s (%zu)\n",
+ "ath11k Beacon stats", num_bcn);
+ len += scnprintf(buf + len, buf_len - len, "%30s\n\n",
+ "===================");
+
+ list_for_each_entry(bcn, &fw_stats->bcn, list)
+ ath11k_wmi_fw_bcn_stats_fill(ar, bcn, buf, &len);
+ }
+
+unlock:
+ spin_unlock_bh(&ar->data_lock);
+
+ if (len >= buf_len)
+ buf[len - 1] = 0;
+ else
+ buf[len] = 0;
+}
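+
+ /* A standalone sketch (not driver code) of the length-accumulating
+ * scnprintf() pattern used by the fill helpers above: every call appends
+ * at buf + len with the space left, and because scnprintf() returns what
+ * was actually written (unlike snprintf(), which returns what it wanted
+ * to write), len can never run past the buffer. A userspace stand-in for
+ * scnprintf() is included.
+ */
+ #if 0
+ #include <stdarg.h>
+ #include <stdio.h>
+
+ static int my_scnprintf(char *buf, size_t size, const char *fmt, ...)
+ {
+ 	va_list ap;
+ 	int ret;
+
+ 	if (!size)
+ 		return 0;
+ 	va_start(ap, fmt);
+ 	ret = vsnprintf(buf, size, fmt, ap);
+ 	va_end(ap);
+ 	/* clamp to what fit, excluding the NUL terminator */
+ 	return ret >= (int)size ? (int)size - 1 : ret;
+ }
+
+ int main(void)
+ {
+ 	char buf[32];
+ 	unsigned int len = 0;
+
+ 	len += my_scnprintf(buf + len, sizeof(buf) - len,
+ 			    "%10s %5d\n", "count", 7);
+ 	len += my_scnprintf(buf + len, sizeof(buf) - len,
+ 			    "%10s %5d\n", "errors", 0);
+ 	printf("%u bytes:\n%s", len, buf);
+ 	return 0;
+ }
+ #endif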
+
+static void ath11k_wmi_op_ep_tx_credits(struct ath11k_base *ab)
+{
+ /* try to send pending beacons first. they take priority */
+ wake_up(&ab->wmi_ab.tx_credits_wq);
+}
+
+static int ath11k_reg_11d_new_cc_event(struct ath11k_base *ab, struct sk_buff *skb)
+{
+ const struct wmi_11d_new_cc_ev *ev;
+ struct ath11k *ar;
+ struct ath11k_pdev *pdev;
+ const void **tb;
+ int ret, i;
+
+ tb = ath11k_wmi_tlv_parse_alloc(ab, skb->data, skb->len, GFP_ATOMIC);
+ if (IS_ERR(tb)) {
+ ret = PTR_ERR(tb);
+ ath11k_warn(ab, "failed to parse tlv: %d\n", ret);
+ return ret;
+ }
+
+ ev = tb[WMI_TAG_11D_NEW_COUNTRY_EVENT];
+ if (!ev) {
+ kfree(tb);
+ ath11k_warn(ab, "failed to fetch 11d new cc ev");
+ return -EPROTO;
+ }
+
+ spin_lock_bh(&ab->base_lock);
+ memcpy(&ab->new_alpha2, &ev->new_alpha2, 2);
+ spin_unlock_bh(&ab->base_lock);
+
+ ath11k_dbg(ab, ATH11K_DBG_WMI, "event 11d new cc %c%c\n",
+ ab->new_alpha2[0],
+ ab->new_alpha2[1]);
+
+ kfree(tb);
+
+ for (i = 0; i < ab->num_radios; i++) {
+ pdev = &ab->pdevs[i];
+ ar = pdev->ar;
+ ar->state_11d = ATH11K_11D_IDLE;
+ complete(&ar->completed_11d_scan);
+ }
+
+ queue_work(ab->workqueue, &ab->update_11d_work);
+
+ return 0;
+}
+
+static void ath11k_wmi_htc_tx_complete(struct ath11k_base *ab,
+ struct sk_buff *skb)
+{
+ struct ath11k_pdev_wmi *wmi = NULL;
+ u32 i;
+ u8 wmi_ep_count;
+ u8 eid;
+
+ eid = ATH11K_SKB_CB(skb)->eid;
+ dev_kfree_skb(skb);
+
+ if (eid >= ATH11K_HTC_EP_COUNT)
+ return;
+
+ wmi_ep_count = ab->htc.wmi_ep_count;
+ if (wmi_ep_count > ab->hw_params.max_radios)
+ return;
+
+ for (i = 0; i < ab->htc.wmi_ep_count; i++) {
+ if (ab->wmi_ab.wmi[i].eid == eid) {
+ wmi = &ab->wmi_ab.wmi[i];
+ break;
+ }
+ }
+
+ if (wmi)
+ wake_up(&wmi->tx_ce_desc_wq);
+}
+
+static bool ath11k_reg_is_world_alpha(char *alpha)
+{
+ if (alpha[0] == '0' && alpha[1] == '0')
+ return true;
+
+ if (alpha[0] == 'n' && alpha[1] == 'a')
+ return true;
+
+ return false;
+}
+
+static int ath11k_reg_chan_list_event(struct ath11k_base *ab,
+ struct sk_buff *skb,
+ enum wmi_reg_chan_list_cmd_type id)
+{
+ struct cur_regulatory_info *reg_info = NULL;
+ struct ieee80211_regdomain *regd = NULL;
+ bool intersect = false;
+ int ret = 0, pdev_idx, i, j;
+ struct ath11k *ar;
+
+ reg_info = kzalloc(sizeof(*reg_info), GFP_ATOMIC);
+ if (!reg_info) {
+ ret = -ENOMEM;
+ goto fallback;
+ }
+
+ if (id == WMI_REG_CHAN_LIST_CC_ID)
+ ret = ath11k_pull_reg_chan_list_update_ev(ab, skb, reg_info);
+ else
+ ret = ath11k_pull_reg_chan_list_ext_update_ev(ab, skb, reg_info);
+
+ if (ret) {
+ ath11k_warn(ab, "failed to extract regulatory info from received event\n");
+ goto fallback;
+ }
+
+ ath11k_dbg(ab, ATH11K_DBG_WMI, "event reg chan list id %d", id);
+
+ if (reg_info->status_code != REG_SET_CC_STATUS_PASS) {
+ /* In case of failure to set the requested country,
+ * firmware retains the current regd. Log a warning
+ * and return from here.
+ */
+ ath11k_warn(ab, "failed to set the requested country regulatory setting\n");
+ goto mem_free;
+ }
+
+ pdev_idx = reg_info->phy_id;
+
+ /* Avoid default reg rule updates sent during FW recovery if
+ * it is already available
+ */
+ spin_lock(&ab->base_lock);
+ if (test_bit(ATH11K_FLAG_RECOVERY, &ab->dev_flags) &&
+ ab->default_regd[pdev_idx]) {
+ spin_unlock(&ab->base_lock);
+ goto mem_free;
+ }
+ spin_unlock(&ab->base_lock);
+
+ if (pdev_idx >= ab->num_radios) {
+ /* Process the event for phy0 only if single_pdev_only
+ * is true. If pdev_idx is valid but not 0, discard the
+ * event. Otherwise, it goes to fallback.
+ */
+ if (ab->hw_params.single_pdev_only &&
+ pdev_idx < ab->hw_params.num_rxmda_per_pdev)
+ goto mem_free;
+ else
+ goto fallback;
+ }
+
+ /* Avoid multiple overwrites to default regd, during core
+ * stop-start after mac registration.
+ */
+ if (ab->default_regd[pdev_idx] && !ab->new_regd[pdev_idx] &&
+ !memcmp((char *)ab->default_regd[pdev_idx]->alpha2,
+ (char *)reg_info->alpha2, 2))
+ goto mem_free;
+
+ /* Intersect new rules with default regd if a new country setting was
+ * requested, i.e a default regd was already set during initialization
+ * and the regd coming from this event has a valid country info.
+ */
+ if (ab->default_regd[pdev_idx] &&
+ !ath11k_reg_is_world_alpha((char *)
+ ab->default_regd[pdev_idx]->alpha2) &&
+ !ath11k_reg_is_world_alpha((char *)reg_info->alpha2))
+ intersect = true;
+
+ regd = ath11k_reg_build_regd(ab, reg_info, intersect);
+ if (!regd) {
+ ath11k_warn(ab, "failed to build regd from reg_info\n");
+ goto fallback;
+ }
+
+ spin_lock(&ab->base_lock);
+ if (ab->default_regd[pdev_idx]) {
+ /* The initial rules from FW after WMI Init is to build
+ * the default regd. From then on, any rules updated for
+ * the pdev could be due to user reg changes.
+ * Free previously built regd before assigning the newly
+ * generated regd to ar. NULL pointer handling will be
+ * taken care by kfree itself.
+ */
+ ar = ab->pdevs[pdev_idx].ar;
+ kfree(ab->new_regd[pdev_idx]);
+ ab->new_regd[pdev_idx] = regd;
+ queue_work(ab->workqueue, &ar->regd_update_work);
+ } else {
+ /* This regd would be applied during mac registration and is
+ * held constant throughout for regd intersection purpose
+ */
+ ab->default_regd[pdev_idx] = regd;
+ }
+ ab->dfs_region = reg_info->dfs_region;
+ spin_unlock(&ab->base_lock);
+
+ goto mem_free;
+
+fallback:
+ /* Fall back to the older regd (by sending the previous country
+ * setting again) if fw has succeeded and we failed to process here.
+ * The regdomain should be uniform across driver and fw. Since the
+ * FW has processed the command and sent a success status, we expect
+ * this function to succeed as well. If it doesn't, CTRY needs to be
+ * reverted at the fw and the old SCAN_CHAN_LIST cmd needs to be sent.
+ */
+ /* TODO: This is rare, but still should also be handled */
+ WARN_ON(1);
+mem_free:
+ if (reg_info) {
+ kfree(reg_info->reg_rules_2ghz_ptr);
+ kfree(reg_info->reg_rules_5ghz_ptr);
+ if (reg_info->is_ext_reg_event) {
+ for (i = 0; i < WMI_REG_CURRENT_MAX_AP_TYPE; i++)
+ kfree(reg_info->reg_rules_6ghz_ap_ptr[i]);
+
+ for (j = 0; j < WMI_REG_CURRENT_MAX_AP_TYPE; j++)
+ for (i = 0; i < WMI_REG_MAX_CLIENT_TYPE; i++)
+ kfree(reg_info->reg_rules_6ghz_client_ptr[j][i]);
+ }
+ kfree(reg_info);
+ }
+ return ret;
+}
+
+static int ath11k_wmi_tlv_rdy_parse(struct ath11k_base *ab, u16 tag, u16 len,
+ const void *ptr, void *data)
+{
+ struct wmi_tlv_rdy_parse *rdy_parse = data;
+ struct wmi_ready_event fixed_param;
+ struct wmi_mac_addr *addr_list;
+ struct ath11k_pdev *pdev;
+ u32 num_mac_addr;
+ int i;
+
+ switch (tag) {
+ case WMI_TAG_READY_EVENT:
+ memset(&fixed_param, 0, sizeof(fixed_param));
+ memcpy(&fixed_param, (struct wmi_ready_event *)ptr,
+ min_t(u16, sizeof(fixed_param), len));
+ ab->wlan_init_status = fixed_param.ready_event_min.status;
+ rdy_parse->num_extra_mac_addr =
+ fixed_param.ready_event_min.num_extra_mac_addr;
+
+ ether_addr_copy(ab->mac_addr,
+ fixed_param.ready_event_min.mac_addr.addr);
+ ab->pktlog_defs_checksum = fixed_param.pktlog_defs_checksum;
+ ab->wmi_ready = true;
+ break;
+ case WMI_TAG_ARRAY_FIXED_STRUCT:
+ addr_list = (struct wmi_mac_addr *)ptr;
+ num_mac_addr = rdy_parse->num_extra_mac_addr;
+
+ if (!(ab->num_radios > 1 && num_mac_addr >= ab->num_radios))
+ break;
+
+ for (i = 0; i < ab->num_radios; i++) {
+ pdev = &ab->pdevs[i];
+ ether_addr_copy(pdev->mac_addr, addr_list[i].addr);
+ }
+ ab->pdevs_macaddr_valid = true;
+ break;
+ default:
+ break;
+ }
+
+ return 0;
+}
+
+static int ath11k_ready_event(struct ath11k_base *ab, struct sk_buff *skb)
+{
+ struct wmi_tlv_rdy_parse rdy_parse = { };
+ int ret;
+
+ ret = ath11k_wmi_tlv_iter(ab, skb->data, skb->len,
+ ath11k_wmi_tlv_rdy_parse, &rdy_parse);
+ if (ret) {
+ ath11k_warn(ab, "failed to parse tlv %d\n", ret);
+ return ret;
+ }
+
+ ath11k_dbg(ab, ATH11K_DBG_WMI, "event ready");
+
+ complete(&ab->wmi_ab.unified_ready);
+ return 0;
+}
+
+static void ath11k_peer_delete_resp_event(struct ath11k_base *ab, struct sk_buff *skb)
+{
+ struct wmi_peer_delete_resp_event peer_del_resp;
+ struct ath11k *ar;
+
+ if (ath11k_pull_peer_del_resp_ev(ab, skb, &peer_del_resp) != 0) {
+ ath11k_warn(ab, "failed to extract peer delete resp");
+ return;
+ }
+
+ ath11k_dbg(ab, ATH11K_DBG_WMI, "event peer delete resp");
+
+ rcu_read_lock();
+ ar = ath11k_mac_get_ar_by_vdev_id(ab, peer_del_resp.vdev_id);
+ if (!ar) {
+ ath11k_warn(ab, "invalid vdev id in peer delete resp ev %d",
+ peer_del_resp.vdev_id);
+ rcu_read_unlock();
+ return;
+ }
+
+ complete(&ar->peer_delete_done);
+ rcu_read_unlock();
+ ath11k_dbg(ab, ATH11K_DBG_WMI, "peer delete resp for vdev id %d addr %pM\n",
+ peer_del_resp.vdev_id, peer_del_resp.peer_macaddr.addr);
+}
+
+static void ath11k_vdev_delete_resp_event(struct ath11k_base *ab,
+ struct sk_buff *skb)
+{
+ struct ath11k *ar;
+ u32 vdev_id = 0;
+
+ if (ath11k_pull_vdev_del_resp_ev(ab, skb, &vdev_id) != 0) {
+ ath11k_warn(ab, "failed to extract vdev delete resp");
+ return;
+ }
+
+ rcu_read_lock();
+ ar = ath11k_mac_get_ar_by_vdev_id(ab, vdev_id);
+ if (!ar) {
+ ath11k_warn(ab, "invalid vdev id in vdev delete resp ev %d",
+ vdev_id);
+ rcu_read_unlock();
+ return;
+ }
+
+ complete(&ar->vdev_delete_done);
+
+ rcu_read_unlock();
+
+ ath11k_dbg(ab, ATH11K_DBG_WMI, "event vdev delete resp for vdev id %d\n",
+ vdev_id);
+}
+
+static inline const char *ath11k_wmi_vdev_resp_print(u32 vdev_resp_status)
+{
+ switch (vdev_resp_status) {
+ case WMI_VDEV_START_RESPONSE_INVALID_VDEVID:
+ return "invalid vdev id";
+ case WMI_VDEV_START_RESPONSE_NOT_SUPPORTED:
+ return "not supported";
+ case WMI_VDEV_START_RESPONSE_DFS_VIOLATION:
+ return "dfs violation";
+ case WMI_VDEV_START_RESPONSE_INVALID_REGDOMAIN:
+ return "invalid regdomain";
+ default:
+ return "unknown";
+ }
+}
+
+static void ath11k_vdev_start_resp_event(struct ath11k_base *ab, struct sk_buff *skb)
+{
+ struct wmi_vdev_start_resp_event vdev_start_resp;
+ struct ath11k *ar;
+ u32 status;
+
+ if (ath11k_pull_vdev_start_resp_tlv(ab, skb, &vdev_start_resp) != 0) {
+ ath11k_warn(ab, "failed to extract vdev start resp");
+ return;
+ }
+
+ ath11k_dbg(ab, ATH11K_DBG_WMI, "event start resp event");
+
+ rcu_read_lock();
+ ar = ath11k_mac_get_ar_by_vdev_id(ab, vdev_start_resp.vdev_id);
+ if (!ar) {
+ ath11k_warn(ab, "invalid vdev id in vdev start resp ev %d",
+ vdev_start_resp.vdev_id);
+ rcu_read_unlock();
+ return;
+ }
+
+ ar->last_wmi_vdev_start_status = 0;
+
+ status = vdev_start_resp.status;
+
+ if (WARN_ON_ONCE(status)) {
+ ath11k_warn(ab, "vdev start resp error status %d (%s)\n",
+ status, ath11k_wmi_vdev_resp_print(status));
+ ar->last_wmi_vdev_start_status = status;
+ }
+
+ complete(&ar->vdev_setup_done);
+
+ rcu_read_unlock();
+
+ ath11k_dbg(ab, ATH11K_DBG_WMI, "vdev start resp for vdev id %d",
+ vdev_start_resp.vdev_id);
+}
+
+static void ath11k_bcn_tx_status_event(struct ath11k_base *ab, struct sk_buff *skb)
+{
+ struct ath11k_vif *arvif;
+ u32 vdev_id, tx_status;
+
+ if (ath11k_pull_bcn_tx_status_ev(ab, skb->data, skb->len,
+ &vdev_id, &tx_status) != 0) {
+ ath11k_warn(ab, "failed to extract bcn tx status");
+ return;
+ }
+
+ ath11k_dbg(ab, ATH11K_DBG_WMI, "event offload bcn tx status");
+
+ rcu_read_lock();
+ arvif = ath11k_mac_get_arvif_by_vdev_id(ab, vdev_id);
+ if (!arvif) {
+ ath11k_warn(ab, "invalid vdev id %d in bcn_tx_status",
+ vdev_id);
+ rcu_read_unlock();
+ return;
+ }
+ ath11k_mac_bcn_tx_event(arvif);
+ rcu_read_unlock();
+}
+
+static void ath11k_wmi_event_peer_sta_ps_state_chg(struct ath11k_base *ab,
+ struct sk_buff *skb)
+{
+ const struct wmi_peer_sta_ps_state_chg_event *ev;
+ struct ieee80211_sta *sta;
+ struct ath11k_peer *peer;
+ struct ath11k *ar;
+ struct ath11k_sta *arsta;
+ const void **tb;
+ enum ath11k_wmi_peer_ps_state peer_previous_ps_state;
+ int ret;
+
+ tb = ath11k_wmi_tlv_parse_alloc(ab, skb->data, skb->len, GFP_ATOMIC);
+ if (IS_ERR(tb)) {
+ ret = PTR_ERR(tb);
+ ath11k_warn(ab, "failed to parse tlv: %d\n", ret);
+ return;
+ }
+
+ ev = tb[WMI_TAG_PEER_STA_PS_STATECHANGE_EVENT];
+ if (!ev) {
+ ath11k_warn(ab, "failed to fetch sta ps change ev");
+ kfree(tb);
+ return;
+ }
+
+ ath11k_dbg(ab, ATH11K_DBG_WMI,
+ "event peer sta ps change ev addr %pM state %u sup_bitmap %x ps_valid %u ts %u\n",
+ ev->peer_macaddr.addr, ev->peer_ps_state,
+ ev->ps_supported_bitmap, ev->peer_ps_valid,
+ ev->peer_ps_timestamp);
+
+ rcu_read_lock();
+
+ spin_lock_bh(&ab->base_lock);
+
+ peer = ath11k_peer_find_by_addr(ab, ev->peer_macaddr.addr);
+
+ if (!peer) {
+ spin_unlock_bh(&ab->base_lock);
+ ath11k_warn(ab, "peer not found %pM\n", ev->peer_macaddr.addr);
+ goto exit;
+ }
+
+ ar = ath11k_mac_get_ar_by_vdev_id(ab, peer->vdev_id);
+
+ if (!ar) {
+ spin_unlock_bh(&ab->base_lock);
+ ath11k_warn(ab, "invalid vdev id in peer sta ps state change ev %d",
+ peer->vdev_id);
+
+ goto exit;
+ }
+
+ sta = peer->sta;
+
+ spin_unlock_bh(&ab->base_lock);
+
+ if (!sta) {
+ ath11k_warn(ab, "failed to find station entry %pM\n",
+ ev->peer_macaddr.addr);
+ goto exit;
+ }
+
+ arsta = (struct ath11k_sta *)sta->drv_priv;
+
+ spin_lock_bh(&ar->data_lock);
+
+ peer_previous_ps_state = arsta->peer_ps_state;
+ arsta->peer_ps_state = ev->peer_ps_state;
+ arsta->peer_current_ps_valid = !!ev->peer_ps_valid;
+
+ if (test_bit(WMI_TLV_SERVICE_PEER_POWER_SAVE_DURATION_SUPPORT,
+ ar->ab->wmi_ab.svc_map)) {
+ if (!(ev->ps_supported_bitmap & WMI_PEER_PS_VALID) ||
+ !(ev->ps_supported_bitmap & WMI_PEER_PS_STATE_TIMESTAMP) ||
+ !ev->peer_ps_valid)
+ goto out;
+
+ if (arsta->peer_ps_state == WMI_PEER_PS_STATE_ON) {
+ arsta->ps_start_time = ev->peer_ps_timestamp;
+ arsta->ps_start_jiffies = jiffies;
+ } else if (arsta->peer_ps_state == WMI_PEER_PS_STATE_OFF &&
+ peer_previous_ps_state == WMI_PEER_PS_STATE_ON) {
+ arsta->ps_total_duration = arsta->ps_total_duration +
+ (ev->peer_ps_timestamp - arsta->ps_start_time);
+ }
+
+ if (ar->ps_timekeeper_enable)
+ trace_ath11k_ps_timekeeper(ar, ev->peer_macaddr.addr,
+ ev->peer_ps_timestamp,
+ arsta->peer_ps_state);
+ }
+
+out:
+ spin_unlock_bh(&ar->data_lock);
+exit:
+ rcu_read_unlock();
+ kfree(tb);
+}
+
+static void ath11k_vdev_stopped_event(struct ath11k_base *ab, struct sk_buff *skb)
+{
+ struct ath11k *ar;
+ u32 vdev_id = 0;
+
+ if (ath11k_pull_vdev_stopped_param_tlv(ab, skb, &vdev_id) != 0) {
+ ath11k_warn(ab, "failed to extract vdev stopped event");
+ return;
+ }
+
+ ath11k_dbg(ab, ATH11K_DBG_WMI, "event vdev stopped");
+
+ rcu_read_lock();
+ ar = ath11k_mac_get_ar_by_vdev_id(ab, vdev_id);
+ if (!ar) {
+ ath11k_warn(ab, "invalid vdev id in vdev stopped ev %d",
+ vdev_id);
+ rcu_read_unlock();
+ return;
+ }
+
+ complete(&ar->vdev_setup_done);
+
+ rcu_read_unlock();
+
+ ath11k_dbg(ab, ATH11K_DBG_WMI, "vdev stopped for vdev id %d", vdev_id);
+}
+
+static void ath11k_mgmt_rx_event(struct ath11k_base *ab, struct sk_buff *skb)
+{
+ struct mgmt_rx_event_params rx_ev = {0};
+ struct ath11k *ar;
+ struct ieee80211_rx_status *status = IEEE80211_SKB_RXCB(skb);
+ struct ieee80211_hdr *hdr;
+ u16 fc;
+ struct ieee80211_supported_band *sband;
+
+ if (ath11k_pull_mgmt_rx_params_tlv(ab, skb, &rx_ev) != 0) {
+ ath11k_warn(ab, "failed to extract mgmt rx event");
+ dev_kfree_skb(skb);
+ return;
+ }
+
+ memset(status, 0, sizeof(*status));
+
+ ath11k_dbg(ab, ATH11K_DBG_MGMT, "event mgmt rx status %08x\n",
+ rx_ev.status);
+
+ rcu_read_lock();
+ ar = ath11k_mac_get_ar_by_pdev_id(ab, rx_ev.pdev_id);
+
+ if (!ar) {
+ ath11k_warn(ab, "invalid pdev_id %d in mgmt_rx_event\n",
+ rx_ev.pdev_id);
+ dev_kfree_skb(skb);
+ goto exit;
+ }
+
+ if ((test_bit(ATH11K_CAC_RUNNING, &ar->dev_flags)) ||
+ (rx_ev.status & (WMI_RX_STATUS_ERR_DECRYPT |
+ WMI_RX_STATUS_ERR_KEY_CACHE_MISS | WMI_RX_STATUS_ERR_CRC))) {
+ dev_kfree_skb(skb);
+ goto exit;
+ }
+
+ if (rx_ev.status & WMI_RX_STATUS_ERR_MIC)
+ status->flag |= RX_FLAG_MMIC_ERROR;
+
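+ /* Infer the band: 6 GHz frames are identified by the reported
+ * frequency range, otherwise the band is derived from the
+ * hardware channel number.
+ */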
+ if (rx_ev.chan_freq >= ATH11K_MIN_6G_FREQ &&
+ rx_ev.chan_freq <= ATH11K_MAX_6G_FREQ) {
+ status->band = NL80211_BAND_6GHZ;
+ status->freq = rx_ev.chan_freq;
+ } else if (rx_ev.channel >= 1 && rx_ev.channel <= 14) {
+ status->band = NL80211_BAND_2GHZ;
+ } else if (rx_ev.channel >= 36 && rx_ev.channel <= ATH11K_MAX_5G_CHAN) {
+ status->band = NL80211_BAND_5GHZ;
+ } else {
+ /* Shouldn't happen unless list of advertised channels to
+ * mac80211 has been changed.
+ */
+ WARN_ON_ONCE(1);
+ dev_kfree_skb(skb);
+ goto exit;
+ }
+
+ if (rx_ev.phy_mode == MODE_11B &&
+ (status->band == NL80211_BAND_5GHZ || status->band == NL80211_BAND_6GHZ))
+ ath11k_dbg(ab, ATH11K_DBG_WMI,
+ "mgmt rx 11b (CCK) on 5/6GHz, band = %d\n", status->band);
+
+ sband = &ar->mac.sbands[status->band];
+
+ if (status->band != NL80211_BAND_6GHZ)
+ status->freq = ieee80211_channel_to_frequency(rx_ev.channel,
+ status->band);
+
+ status->signal = rx_ev.snr + ATH11K_DEFAULT_NOISE_FLOOR;
+ status->rate_idx = ath11k_mac_bitrate_to_idx(sband, rx_ev.rate / 100);
+
+ hdr = (struct ieee80211_hdr *)skb->data;
+ fc = le16_to_cpu(hdr->frame_control);
+
+ /* Firmware is guaranteed to report all essential management frames via
+ * WMI while it can deliver some extra via HTT. Since there can be
+ * duplicates, split the reporting wrt monitor/sniffing.
+ */
+ status->flag |= RX_FLAG_SKIP_MONITOR;
+
+ /* In case of PMF, FW delivers decrypted frames with Protected Bit set.
+ * Don't clear that. Also, FW delivers broadcast management frames
+ * (ex: group privacy action frames in mesh) as encrypted payload.
+ */
+ if (ieee80211_has_protected(hdr->frame_control) &&
+ !is_multicast_ether_addr(ieee80211_get_DA(hdr))) {
+ status->flag |= RX_FLAG_DECRYPTED;
+
+ if (!ieee80211_is_robust_mgmt_frame(skb)) {
+ status->flag |= RX_FLAG_IV_STRIPPED |
+ RX_FLAG_MMIC_STRIPPED;
+ hdr->frame_control = __cpu_to_le16(fc &
+ ~IEEE80211_FCTL_PROTECTED);
+ }
+ }
+
+ if (ieee80211_is_beacon(hdr->frame_control))
+ ath11k_mac_handle_beacon(ar, skb);
+
+ ath11k_dbg(ab, ATH11K_DBG_MGMT,
+ "event mgmt rx skb %p len %d ftype %02x stype %02x\n",
+ skb, skb->len,
+ fc & IEEE80211_FCTL_FTYPE, fc & IEEE80211_FCTL_STYPE);
+
+ ath11k_dbg(ab, ATH11K_DBG_MGMT,
+ "event mgmt rx freq %d band %d snr %d, rate_idx %d\n",
+ status->freq, status->band, status->signal,
+ status->rate_idx);
+
+ ieee80211_rx_ni(ar->hw, skb);
+
+exit:
+ rcu_read_unlock();
+}
+
+static void ath11k_mgmt_tx_compl_event(struct ath11k_base *ab, struct sk_buff *skb)
+{
+ struct wmi_mgmt_tx_compl_event tx_compl_param = {0};
+ struct ath11k *ar;
+
+ if (ath11k_pull_mgmt_tx_compl_param_tlv(ab, skb, &tx_compl_param) != 0) {
+ ath11k_warn(ab, "failed to extract mgmt tx compl event");
+ return;
+ }
+
+ rcu_read_lock();
+ ar = ath11k_mac_get_ar_by_pdev_id(ab, tx_compl_param.pdev_id);
+ if (!ar) {
+ ath11k_warn(ab, "invalid pdev id %d in mgmt_tx_compl_event\n",
+ tx_compl_param.pdev_id);
+ goto exit;
+ }
+
+ wmi_process_mgmt_tx_comp(ar, &tx_compl_param);
+
+ ath11k_dbg(ab, ATH11K_DBG_MGMT,
+ "event mgmt tx compl ev pdev_id %d, desc_id %d, status %d ack_rssi %d",
+ tx_compl_param.pdev_id, tx_compl_param.desc_id,
+ tx_compl_param.status, tx_compl_param.ack_rssi);
+
+exit:
+ rcu_read_unlock();
+}
+
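+/* Search the active pdevs for the radio whose current scan matches the
+ * given vdev id and scan state. Needed when the vdev has already been
+ * removed from the active interface list, e.g. on a cancelled scan.
+ */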
+static struct ath11k *ath11k_get_ar_on_scan_state(struct ath11k_base *ab,
+ u32 vdev_id,
+ enum ath11k_scan_state state)
+{
+ int i;
+ struct ath11k_pdev *pdev;
+ struct ath11k *ar;
+
+ for (i = 0; i < ab->num_radios; i++) {
+ pdev = rcu_dereference(ab->pdevs_active[i]);
+ if (pdev && pdev->ar) {
+ ar = pdev->ar;
+
+ spin_lock_bh(&ar->data_lock);
+ if (ar->scan.state == state &&
+ ar->scan.vdev_id == vdev_id) {
+ spin_unlock_bh(&ar->data_lock);
+ return ar;
+ }
+ spin_unlock_bh(&ar->data_lock);
+ }
+ }
+ return NULL;
+}
+
+static void ath11k_scan_event(struct ath11k_base *ab, struct sk_buff *skb)
+{
+ struct ath11k *ar;
+ struct wmi_scan_event scan_ev = {0};
+
+ if (ath11k_pull_scan_ev(ab, skb, &scan_ev) != 0) {
+ ath11k_warn(ab, "failed to extract scan event");
+ return;
+ }
+
+ rcu_read_lock();
+
+ /* In case the scan was cancelled, e.g. during interface teardown,
+ * the interface will not be found among the active interfaces.
+ * Rather, in such scenarios, iterate over the active pdevs to
+ * search for the 'ar' whose scan is ABORTING and whose aborting
+ * scan's vdev id matches this event's info.
+ */
+ if (scan_ev.event_type == WMI_SCAN_EVENT_COMPLETED &&
+ scan_ev.reason == WMI_SCAN_REASON_CANCELLED) {
+ ar = ath11k_get_ar_on_scan_state(ab, scan_ev.vdev_id,
+ ATH11K_SCAN_ABORTING);
+ if (!ar)
+ ar = ath11k_get_ar_on_scan_state(ab, scan_ev.vdev_id,
+ ATH11K_SCAN_RUNNING);
+ } else {
+ ar = ath11k_mac_get_ar_by_vdev_id(ab, scan_ev.vdev_id);
+ }
+
+ if (!ar) {
+ ath11k_warn(ab, "Received scan event for unknown vdev");
+ rcu_read_unlock();
+ return;
+ }
+
+ spin_lock_bh(&ar->data_lock);
+
+ ath11k_dbg(ab, ATH11K_DBG_WMI,
+ "event scan %s type %d reason %d freq %d req_id %d scan_id %d vdev_id %d state %s (%d)\n",
+ ath11k_wmi_event_scan_type_str(scan_ev.event_type, scan_ev.reason),
+ scan_ev.event_type, scan_ev.reason, scan_ev.channel_freq,
+ scan_ev.scan_req_id, scan_ev.scan_id, scan_ev.vdev_id,
+ ath11k_scan_state_str(ar->scan.state), ar->scan.state);
+
+ switch (scan_ev.event_type) {
+ case WMI_SCAN_EVENT_STARTED:
+ ath11k_wmi_event_scan_started(ar);
+ break;
+ case WMI_SCAN_EVENT_COMPLETED:
+ ath11k_wmi_event_scan_completed(ar);
+ break;
+ case WMI_SCAN_EVENT_BSS_CHANNEL:
+ ath11k_wmi_event_scan_bss_chan(ar);
+ break;
+ case WMI_SCAN_EVENT_FOREIGN_CHAN:
+ ath11k_wmi_event_scan_foreign_chan(ar, scan_ev.channel_freq);
+ break;
+ case WMI_SCAN_EVENT_START_FAILED:
+ ath11k_warn(ab, "received scan start failure event\n");
+ ath11k_wmi_event_scan_start_failed(ar);
+ break;
+ case WMI_SCAN_EVENT_DEQUEUED:
+ __ath11k_mac_scan_finish(ar);
+ break;
+ case WMI_SCAN_EVENT_PREEMPTED:
+ case WMI_SCAN_EVENT_RESTARTED:
+ case WMI_SCAN_EVENT_FOREIGN_CHAN_EXIT:
+ default:
+ break;
+ }
+
+ spin_unlock_bh(&ar->data_lock);
+
+ rcu_read_unlock();
+}
+
+static void ath11k_peer_sta_kickout_event(struct ath11k_base *ab, struct sk_buff *skb)
+{
+ struct wmi_peer_sta_kickout_arg arg = {};
+ struct ieee80211_sta *sta;
+ struct ath11k_peer *peer;
+ struct ath11k *ar;
+ u32 vdev_id;
+
+ if (ath11k_pull_peer_sta_kickout_ev(ab, skb, &arg) != 0) {
+ ath11k_warn(ab, "failed to extract peer sta kickout event");
+ return;
+ }
+
+ rcu_read_lock();
+
+ spin_lock_bh(&ab->base_lock);
+
+ peer = ath11k_peer_find_by_addr(ab, arg.mac_addr);
+
+ if (!peer) {
+ ath11k_warn(ab, "peer not found %pM\n",
+ arg.mac_addr);
+ spin_unlock_bh(&ab->base_lock);
+ goto exit;
+ }
+
+ vdev_id = peer->vdev_id;
+
+ spin_unlock_bh(&ab->base_lock);
+
+ ar = ath11k_mac_get_ar_by_vdev_id(ab, vdev_id);
+ if (!ar) {
+ ath11k_warn(ab, "invalid vdev id in peer sta kickout ev %d",
+ peer->vdev_id);
+ goto exit;
+ }
+
+ sta = ieee80211_find_sta_by_ifaddr(ar->hw,
+ arg.mac_addr, NULL);
+ if (!sta) {
+ ath11k_warn(ab, "Spurious quick kickout for STA %pM\n",
+ arg.mac_addr);
+ goto exit;
+ }
+
+ ath11k_dbg(ab, ATH11K_DBG_WMI, "event peer sta kickout %pM",
+ arg.mac_addr);
+
+ ieee80211_report_low_ack(sta, 10);
+
+exit:
+ rcu_read_unlock();
+}
+
+static void ath11k_roam_event(struct ath11k_base *ab, struct sk_buff *skb)
+{
+ struct wmi_roam_event roam_ev = {};
+ struct ath11k *ar;
+
+ if (ath11k_pull_roam_ev(ab, skb, &roam_ev) != 0) {
+ ath11k_warn(ab, "failed to extract roam event");
+ return;
+ }
+
+ ath11k_dbg(ab, ATH11K_DBG_WMI,
+ "event roam vdev %u reason 0x%08x rssi %d\n",
+ roam_ev.vdev_id, roam_ev.reason, roam_ev.rssi);
+
+ rcu_read_lock();
+ ar = ath11k_mac_get_ar_by_vdev_id(ab, roam_ev.vdev_id);
+ if (!ar) {
+ ath11k_warn(ab, "invalid vdev id in roam ev %d",
+ roam_ev.vdev_id);
+ rcu_read_unlock();
+ return;
+ }
+
+ if (roam_ev.reason >= WMI_ROAM_REASON_MAX)
+ ath11k_warn(ab, "ignoring unknown roam event reason %d on vdev %i\n",
+ roam_ev.reason, roam_ev.vdev_id);
+
+ switch (roam_ev.reason) {
+ case WMI_ROAM_REASON_BEACON_MISS:
+ ath11k_mac_handle_beacon_miss(ar, roam_ev.vdev_id);
+ break;
+ case WMI_ROAM_REASON_BETTER_AP:
+ case WMI_ROAM_REASON_LOW_RSSI:
+ case WMI_ROAM_REASON_SUITABLE_AP_FOUND:
+ case WMI_ROAM_REASON_HO_FAILED:
+ ath11k_warn(ab, "ignoring not implemented roam event reason %d on vdev %i\n",
+ roam_ev.reason, roam_ev.vdev_id);
+ break;
+ }
+
+ rcu_read_unlock();
+}
+
+static void ath11k_chan_info_event(struct ath11k_base *ab, struct sk_buff *skb)
+{
+ struct wmi_chan_info_event ch_info_ev = {0};
+ struct ath11k *ar;
+ struct survey_info *survey;
+ int idx;
+ /* HW channel counters frequency value in hertz */
+ u32 cc_freq_hz = ab->cc_freq_hz;
+
+ if (ath11k_pull_chan_info_ev(ab, skb->data, skb->len, &ch_info_ev) != 0) {
+ ath11k_warn(ab, "failed to extract chan info event");
+ return;
+ }
+
+ ath11k_dbg(ab, ATH11K_DBG_WMI,
+ "event chan info vdev_id %d err_code %d freq %d cmd_flags %d noise_floor %d rx_clear_count %d cycle_count %d mac_clk_mhz %d\n",
+ ch_info_ev.vdev_id, ch_info_ev.err_code, ch_info_ev.freq,
+ ch_info_ev.cmd_flags, ch_info_ev.noise_floor,
+ ch_info_ev.rx_clear_count, ch_info_ev.cycle_count,
+ ch_info_ev.mac_clk_mhz);
+
+ if (ch_info_ev.cmd_flags == WMI_CHAN_INFO_END_RESP) {
+ ath11k_dbg(ab, ATH11K_DBG_WMI, "chan info report completed\n");
+ return;
+ }
+
+ rcu_read_lock();
+ ar = ath11k_mac_get_ar_by_vdev_id(ab, ch_info_ev.vdev_id);
+ if (!ar) {
+ ath11k_warn(ab, "invalid vdev id in chan info ev %d",
+ ch_info_ev.vdev_id);
+ rcu_read_unlock();
+ return;
+ }
+ spin_lock_bh(&ar->data_lock);
+
+ switch (ar->scan.state) {
+ case ATH11K_SCAN_IDLE:
+ case ATH11K_SCAN_STARTING:
+ ath11k_warn(ab, "received chan info event without a scan request, ignoring\n");
+ goto exit;
+ case ATH11K_SCAN_RUNNING:
+ case ATH11K_SCAN_ABORTING:
+ break;
+ }
+
+ idx = freq_to_idx(ar, ch_info_ev.freq);
+ if (idx >= ARRAY_SIZE(ar->survey)) {
+ ath11k_warn(ab, "chan info: invalid frequency %d (idx %d out of bounds)\n",
+ ch_info_ev.freq, idx);
+ goto exit;
+ }
+
+ /* If FW provides the MAC clock frequency in MHz, override the
+ * initialized HW channel counter frequency value
+ */
+ if (ch_info_ev.mac_clk_mhz)
+ cc_freq_hz = (ch_info_ev.mac_clk_mhz * 1000);
+
+ if (ch_info_ev.cmd_flags == WMI_CHAN_INFO_START_RESP) {
+ survey = &ar->survey[idx];
+ memset(survey, 0, sizeof(*survey));
+ survey->noise = ch_info_ev.noise_floor;
+ survey->filled = SURVEY_INFO_NOISE_DBM | SURVEY_INFO_TIME |
+ SURVEY_INFO_TIME_BUSY;
+ survey->time = div_u64(ch_info_ev.cycle_count, cc_freq_hz);
+ survey->time_busy = div_u64(ch_info_ev.rx_clear_count, cc_freq_hz);
+ }
+exit:
+ spin_unlock_bh(&ar->data_lock);
+ rcu_read_unlock();
+}
+
+static void
+ath11k_pdev_bss_chan_info_event(struct ath11k_base *ab, struct sk_buff *skb)
+{
+ struct wmi_pdev_bss_chan_info_event bss_ch_info_ev = {};
+ struct survey_info *survey;
+ struct ath11k *ar;
+ u32 cc_freq_hz = ab->cc_freq_hz;
+ u64 busy, total, tx, rx, rx_bss;
+ int idx;
+
+ if (ath11k_pull_pdev_bss_chan_info_ev(ab, skb, &bss_ch_info_ev) != 0) {
+ ath11k_warn(ab, "failed to extract pdev bss chan info event");
+ return;
+ }
+
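+ /* The firmware reports each cycle counter as two 32-bit words;
+ * reassemble them into 64-bit values (high word in the upper 32 bits).
+ */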
+ busy = (u64)(bss_ch_info_ev.rx_clear_count_high) << 32 |
+ bss_ch_info_ev.rx_clear_count_low;
+
+ total = (u64)(bss_ch_info_ev.cycle_count_high) << 32 |
+ bss_ch_info_ev.cycle_count_low;
+
+ tx = (u64)(bss_ch_info_ev.tx_cycle_count_high) << 32 |
+ bss_ch_info_ev.tx_cycle_count_low;
+
+ rx = (u64)(bss_ch_info_ev.rx_cycle_count_high) << 32 |
+ bss_ch_info_ev.rx_cycle_count_low;
+
+ rx_bss = (u64)(bss_ch_info_ev.rx_bss_cycle_count_high) << 32 |
+ bss_ch_info_ev.rx_bss_cycle_count_low;
+
+ ath11k_dbg(ab, ATH11K_DBG_WMI,
+ "event pdev bss chan info:\n pdev_id: %d freq: %d noise: %d cycle: busy %llu total %llu tx %llu rx %llu rx_bss %llu\n",
+ bss_ch_info_ev.pdev_id, bss_ch_info_ev.freq,
+ bss_ch_info_ev.noise_floor, busy, total,
+ tx, rx, rx_bss);
+
+ rcu_read_lock();
+ ar = ath11k_mac_get_ar_by_pdev_id(ab, bss_ch_info_ev.pdev_id);
+
+ if (!ar) {
+ ath11k_warn(ab, "invalid pdev id %d in bss_chan_info event\n",
+ bss_ch_info_ev.pdev_id);
+ rcu_read_unlock();
+ return;
+ }
+
+ spin_lock_bh(&ar->data_lock);
+ idx = freq_to_idx(ar, bss_ch_info_ev.freq);
+ if (idx >= ARRAY_SIZE(ar->survey)) {
+ ath11k_warn(ab, "bss chan info: invalid frequency %d (idx %d out of bounds)\n",
+ bss_ch_info_ev.freq, idx);
+ goto exit;
+ }
+
+ survey = &ar->survey[idx];
+
+ survey->noise = bss_ch_info_ev.noise_floor;
+ survey->time = div_u64(total, cc_freq_hz);
+ survey->time_busy = div_u64(busy, cc_freq_hz);
+ survey->time_rx = div_u64(rx_bss, cc_freq_hz);
+ survey->time_tx = div_u64(tx, cc_freq_hz);
+ survey->filled |= (SURVEY_INFO_NOISE_DBM |
+ SURVEY_INFO_TIME |
+ SURVEY_INFO_TIME_BUSY |
+ SURVEY_INFO_TIME_RX |
+ SURVEY_INFO_TIME_TX);
+exit:
+ spin_unlock_bh(&ar->data_lock);
+ complete(&ar->bss_survey_done);
+
+ rcu_read_unlock();
+}
+
+static void ath11k_vdev_install_key_compl_event(struct ath11k_base *ab,
+ struct sk_buff *skb)
+{
+ struct wmi_vdev_install_key_complete_arg install_key_compl = {0};
+ struct ath11k *ar;
+
+ if (ath11k_pull_vdev_install_key_compl_ev(ab, skb, &install_key_compl) != 0) {
+ ath11k_warn(ab, "failed to extract install key compl event");
+ return;
+ }
+
+ ath11k_dbg(ab, ATH11K_DBG_WMI,
+ "event vdev install key ev idx %d flags %08x macaddr %pM status %d\n",
+ install_key_compl.key_idx, install_key_compl.key_flags,
+ install_key_compl.macaddr, install_key_compl.status);
+
+ rcu_read_lock();
+ ar = ath11k_mac_get_ar_by_vdev_id(ab, install_key_compl.vdev_id);
+ if (!ar) {
+ ath11k_warn(ab, "invalid vdev id in install key compl ev %d",
+ install_key_compl.vdev_id);
+ rcu_read_unlock();
+ return;
+ }
+
+ ar->install_key_status = 0;
+
+ if (install_key_compl.status != WMI_VDEV_INSTALL_KEY_COMPL_STATUS_SUCCESS) {
+ ath11k_warn(ab, "install key failed for %pM status %d\n",
+ install_key_compl.macaddr, install_key_compl.status);
+ ar->install_key_status = install_key_compl.status;
+ }
+
+ complete(&ar->install_key_done);
+ rcu_read_unlock();
+}
+
+static int ath11k_wmi_tlv_services_parser(struct ath11k_base *ab,
+ u16 tag, u16 len,
+ const void *ptr, void *data)
+{
+ const struct wmi_service_available_event *ev;
+ u32 *wmi_ext2_service_bitmap;
+ int i, j;
+
+ switch (tag) {
+ case WMI_TAG_SERVICE_AVAILABLE_EVENT:
+ ev = (struct wmi_service_available_event *)ptr;
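+ /* Walk the service bitmap: each 32-bit segment word carries 32
+ * service bits, with j tracking the absolute service id starting
+ * at WMI_MAX_SERVICE for this extended range.
+ */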
+ for (i = 0, j = WMI_MAX_SERVICE;
+ i < WMI_SERVICE_SEGMENT_BM_SIZE32 && j < WMI_MAX_EXT_SERVICE;
+ i++) {
+ do {
+ if (ev->wmi_service_segment_bitmap[i] &
+ BIT(j % WMI_AVAIL_SERVICE_BITS_IN_SIZE32))
+ set_bit(j, ab->wmi_ab.svc_map);
+ } while (++j % WMI_AVAIL_SERVICE_BITS_IN_SIZE32);
+ }
+
+ ath11k_dbg(ab, ATH11K_DBG_WMI,
+ "wmi_ext_service_bitmap 0:0x%04x, 1:0x%04x, 2:0x%04x, 3:0x%04x",
+ ev->wmi_service_segment_bitmap[0],
+ ev->wmi_service_segment_bitmap[1],
+ ev->wmi_service_segment_bitmap[2],
+ ev->wmi_service_segment_bitmap[3]);
+ break;
+ case WMI_TAG_ARRAY_UINT32:
+ wmi_ext2_service_bitmap = (u32 *)ptr;
+ for (i = 0, j = WMI_MAX_EXT_SERVICE;
+ i < WMI_SERVICE_SEGMENT_BM_SIZE32 && j < WMI_MAX_EXT2_SERVICE;
+ i++) {
+ do {
+ if (wmi_ext2_service_bitmap[i] &
+ BIT(j % WMI_AVAIL_SERVICE_BITS_IN_SIZE32))
+ set_bit(j, ab->wmi_ab.svc_map);
+ } while (++j % WMI_AVAIL_SERVICE_BITS_IN_SIZE32);
+ }
+
+ ath11k_dbg(ab, ATH11K_DBG_WMI,
+ "wmi_ext2_service__bitmap 0:0x%04x, 1:0x%04x, 2:0x%04x, 3:0x%04x",
+ wmi_ext2_service_bitmap[0], wmi_ext2_service_bitmap[1],
+ wmi_ext2_service_bitmap[2], wmi_ext2_service_bitmap[3]);
+ break;
+ }
+ return 0;
+}
+
+static void ath11k_service_available_event(struct ath11k_base *ab, struct sk_buff *skb)
+{
+ int ret;
+
+ ret = ath11k_wmi_tlv_iter(ab, skb->data, skb->len,
+ ath11k_wmi_tlv_services_parser,
+ NULL);
+ if (ret)
+ ath11k_warn(ab, "failed to parse services available tlv %d\n", ret);
+
+ ath11k_dbg(ab, ATH11K_DBG_WMI, "event service available");
+}
+
+static void ath11k_peer_assoc_conf_event(struct ath11k_base *ab, struct sk_buff *skb)
+{
+ struct wmi_peer_assoc_conf_arg peer_assoc_conf = {0};
+ struct ath11k *ar;
+
+ if (ath11k_pull_peer_assoc_conf_ev(ab, skb, &peer_assoc_conf) != 0) {
+ ath11k_warn(ab, "failed to extract peer assoc conf event");
+ return;
+ }
+
+ ath11k_dbg(ab, ATH11K_DBG_WMI,
+ "event peer assoc conf ev vdev id %d macaddr %pM\n",
+ peer_assoc_conf.vdev_id, peer_assoc_conf.macaddr);
+
+ rcu_read_lock();
+ ar = ath11k_mac_get_ar_by_vdev_id(ab, peer_assoc_conf.vdev_id);
+
+ if (!ar) {
+ ath11k_warn(ab, "invalid vdev id in peer assoc conf ev %d",
+ peer_assoc_conf.vdev_id);
+ rcu_read_unlock();
+ return;
+ }
+
+ complete(&ar->peer_assoc_done);
+ rcu_read_unlock();
+}
+
+static void ath11k_update_stats_event(struct ath11k_base *ab, struct sk_buff *skb)
+{
+ struct ath11k_fw_stats stats = {};
+ struct ath11k *ar;
+ int ret;
+
+ INIT_LIST_HEAD(&stats.pdevs);
+ INIT_LIST_HEAD(&stats.vdevs);
+ INIT_LIST_HEAD(&stats.bcn);
+
+ ret = ath11k_wmi_pull_fw_stats(ab, skb, &stats);
+ if (ret) {
+ ath11k_warn(ab, "failed to pull fw stats: %d\n", ret);
+ goto free;
+ }
+
+ ath11k_dbg(ab, ATH11K_DBG_WMI, "event update stats");
+
+ rcu_read_lock();
+ ar = ath11k_mac_get_ar_by_pdev_id(ab, stats.pdev_id);
+ if (!ar) {
+ rcu_read_unlock();
+ ath11k_warn(ab, "failed to get ar for pdev_id %d: %d\n",
+ stats.pdev_id, ret);
+ goto free;
+ }
+
+ spin_lock_bh(&ar->data_lock);
+
+ /* WMI_REQUEST_PDEV_STAT can be requested via the .get_txpower mac ops
+ * or via debugfs fw stats. Therefore, process it separately.
+ */
+ if (stats.stats_id == WMI_REQUEST_PDEV_STAT) {
+ list_splice_tail_init(&stats.pdevs, &ar->fw_stats.pdevs);
+ ar->fw_stats_done = true;
+ goto complete;
+ }
+
+ /* WMI_REQUEST_VDEV_STAT, WMI_REQUEST_BCN_STAT and WMI_REQUEST_RSSI_PER_CHAIN_STAT
+ * are currently requested only via debugfs fw stats. Hence, process these
+ * in the debugfs context.
+ */
+ ath11k_debugfs_fw_stats_process(ar, &stats);
+
+complete:
+ complete(&ar->fw_stats_complete);
+ rcu_read_unlock();
+ spin_unlock_bh(&ar->data_lock);
+
+ /* Since the stats' pdev, vdev and beacon lists are spliced and reinitialised
+ * at this point, there is no need to free the individual lists.
+ */
+ return;
+
+free:
+ ath11k_fw_stats_free(&stats);
+}
+
+/* PDEV_CTL_FAILSAFE_CHECK_EVENT is received from FW when the frequency scanned
+ * is not part of BDF CTL(Conformance test limits) table entries.
+ */
+static void ath11k_pdev_ctl_failsafe_check_event(struct ath11k_base *ab,
+ struct sk_buff *skb)
+{
+ const void **tb;
+ const struct wmi_pdev_ctl_failsafe_chk_event *ev;
+ int ret;
+
+ tb = ath11k_wmi_tlv_parse_alloc(ab, skb->data, skb->len, GFP_ATOMIC);
+ if (IS_ERR(tb)) {
+ ret = PTR_ERR(tb);
+ ath11k_warn(ab, "failed to parse tlv: %d\n", ret);
+ return;
+ }
+
+ ev = tb[WMI_TAG_PDEV_CTL_FAILSAFE_CHECK_EVENT];
+ if (!ev) {
+ ath11k_warn(ab, "failed to fetch pdev ctl failsafe check ev");
+ kfree(tb);
+ return;
+ }
+
+ ath11k_dbg(ab, ATH11K_DBG_WMI,
+ "event pdev ctl failsafe check status %d\n",
+ ev->ctl_failsafe_status);
+
+ /* If ctl_failsafe_status is set to 1, FW will limit the transmit power
+ * to 10 dBm; otherwise the CTL power entry in the BDF is picked up.
+ */
+ if (ev->ctl_failsafe_status != 0)
+ ath11k_warn(ab, "pdev ctl failsafe failure status %d",
+ ev->ctl_failsafe_status);
+
+ kfree(tb);
+}
+
+static void
+ath11k_wmi_process_csa_switch_count_event(struct ath11k_base *ab,
+ const struct wmi_pdev_csa_switch_ev *ev,
+ const u32 *vdev_ids)
+{
+ int i;
+ struct ath11k_vif *arvif;
+
+ /* Finish CSA once the switch count reaches zero */
+ if (ev->current_switch_count)
+ return;
+
+ rcu_read_lock();
+ for (i = 0; i < ev->num_vdevs; i++) {
+ arvif = ath11k_mac_get_arvif_by_vdev_id(ab, vdev_ids[i]);
+
+ if (!arvif) {
+ ath11k_warn(ab, "Recvd csa status for unknown vdev %d",
+ vdev_ids[i]);
+ continue;
+ }
+
+ if (arvif->is_up && arvif->vif->bss_conf.csa_active)
+ ieee80211_csa_finish(arvif->vif);
+ }
+ rcu_read_unlock();
+}
+
+static void
+ath11k_wmi_pdev_csa_switch_count_status_event(struct ath11k_base *ab,
+ struct sk_buff *skb)
+{
+ const void **tb;
+ const struct wmi_pdev_csa_switch_ev *ev;
+ const u32 *vdev_ids;
+ int ret;
+
+ tb = ath11k_wmi_tlv_parse_alloc(ab, skb->data, skb->len, GFP_ATOMIC);
+ if (IS_ERR(tb)) {
+ ret = PTR_ERR(tb);
+ ath11k_warn(ab, "failed to parse tlv: %d\n", ret);
+ return;
+ }
+
+ ev = tb[WMI_TAG_PDEV_CSA_SWITCH_COUNT_STATUS_EVENT];
+ vdev_ids = tb[WMI_TAG_ARRAY_UINT32];
+
+ if (!ev || !vdev_ids) {
+ ath11k_warn(ab, "failed to fetch pdev csa switch count ev");
+ kfree(tb);
+ return;
+ }
+
+ ath11k_dbg(ab, ATH11K_DBG_WMI,
+ "event pdev csa switch count %d for pdev %d, num_vdevs %d",
+ ev->current_switch_count, ev->pdev_id,
+ ev->num_vdevs);
+
+ ath11k_wmi_process_csa_switch_count_event(ab, ev, vdev_ids);
+
+ kfree(tb);
+}
+
+static void
+ath11k_wmi_pdev_dfs_radar_detected_event(struct ath11k_base *ab, struct sk_buff *skb)
+{
+ const void **tb;
+ const struct wmi_pdev_radar_ev *ev;
+ struct ath11k *ar;
+ int ret;
+
+ tb = ath11k_wmi_tlv_parse_alloc(ab, skb->data, skb->len, GFP_ATOMIC);
+ if (IS_ERR(tb)) {
+ ret = PTR_ERR(tb);
+ ath11k_warn(ab, "failed to parse tlv: %d\n", ret);
+ return;
+ }
+
+ ev = tb[WMI_TAG_PDEV_DFS_RADAR_DETECTION_EVENT];
+
+ if (!ev) {
+ ath11k_warn(ab, "failed to fetch pdev dfs radar detected ev");
+ kfree(tb);
+ return;
+ }
+
+ ath11k_dbg(ab, ATH11K_DBG_WMI,
+ "event pdev dfs radar detected on pdev %d, detection mode %d, chan freq %d, chan_width %d, detector id %d, seg id %d, timestamp %d, chirp %d, freq offset %d, sidx %d",
+ ev->pdev_id, ev->detection_mode, ev->chan_freq, ev->chan_width,
+ ev->detector_id, ev->segment_id, ev->timestamp, ev->is_chirp,
+ ev->freq_offset, ev->sidx);
+
+ rcu_read_lock();
+
+ ar = ath11k_mac_get_ar_by_pdev_id(ab, ev->pdev_id);
+
+ if (!ar) {
+ ath11k_warn(ab, "radar detected in invalid pdev %d\n",
+ ev->pdev_id);
+ goto exit;
+ }
+
+ ath11k_dbg(ar->ab, ATH11K_DBG_REG, "DFS Radar Detected in pdev %d\n",
+ ev->pdev_id);
+
+ if (ar->dfs_block_radar_events)
+ ath11k_info(ab, "DFS Radar detected, but ignored as requested\n");
+ else
+ ieee80211_radar_detected(ar->hw);
+
+exit:
+ rcu_read_unlock();
+
+ kfree(tb);
+}
+
+static void
+ath11k_wmi_pdev_temperature_event(struct ath11k_base *ab,
+ struct sk_buff *skb)
+{
+ struct ath11k *ar;
+ const void **tb;
+ const struct wmi_pdev_temperature_event *ev;
+ int ret;
+
+ tb = ath11k_wmi_tlv_parse_alloc(ab, skb->data, skb->len, GFP_ATOMIC);
+ if (IS_ERR(tb)) {
+ ret = PTR_ERR(tb);
+ ath11k_warn(ab, "failed to parse tlv: %d\n", ret);
+ return;
+ }
+
+ ev = tb[WMI_TAG_PDEV_TEMPERATURE_EVENT];
+ if (!ev) {
+ ath11k_warn(ab, "failed to fetch pdev temp ev");
+ kfree(tb);
+ return;
+ }
+
+ ath11k_dbg(ab, ATH11K_DBG_WMI, "event pdev temperature ev temp %d pdev_id %d\n",
+ ev->temp, ev->pdev_id);
+
+ rcu_read_lock();
+
+ ar = ath11k_mac_get_ar_by_pdev_id(ab, ev->pdev_id);
+ if (!ar) {
+ ath11k_warn(ab, "invalid pdev id in pdev temperature ev %d", ev->pdev_id);
+ goto exit;
+ }
+
+ ath11k_thermal_event_temperature(ar, ev->temp);
+
+exit:
+ rcu_read_unlock();
+
+ kfree(tb);
+}
+
+static void ath11k_fils_discovery_event(struct ath11k_base *ab,
+ struct sk_buff *skb)
+{
+ const void **tb;
+ const struct wmi_fils_discovery_event *ev;
+ int ret;
+
+ tb = ath11k_wmi_tlv_parse_alloc(ab, skb->data, skb->len, GFP_ATOMIC);
+ if (IS_ERR(tb)) {
+ ret = PTR_ERR(tb);
+ ath11k_warn(ab,
+ "failed to parse FILS discovery event tlv %d\n",
+ ret);
+ return;
+ }
+
+ ath11k_dbg(ab, ATH11K_DBG_WMI, "event fils discovery");
+
+ ev = tb[WMI_TAG_HOST_SWFDA_EVENT];
+ if (!ev) {
+ ath11k_warn(ab, "failed to fetch FILS discovery event\n");
+ kfree(tb);
+ return;
+ }
+
+ ath11k_warn(ab,
+ "FILS discovery frame expected from host for vdev_id: %u, transmission scheduled at %u, next TBTT: %u\n",
+ ev->vdev_id, ev->fils_tt, ev->tbtt);
+
+ kfree(tb);
+}
+
+static void ath11k_probe_resp_tx_status_event(struct ath11k_base *ab,
+ struct sk_buff *skb)
+{
+ const void **tb;
+ const struct wmi_probe_resp_tx_status_event *ev;
+ int ret;
+
+ tb = ath11k_wmi_tlv_parse_alloc(ab, skb->data, skb->len, GFP_ATOMIC);
+ if (IS_ERR(tb)) {
+ ret = PTR_ERR(tb);
+ ath11k_warn(ab,
+ "failed to parse probe response transmission status event tlv: %d\n",
+ ret);
+ return;
+ }
+
+ ath11k_dbg(ab, ATH11K_DBG_WMI, "event probe resp tx status");
+
+ ev = tb[WMI_TAG_OFFLOAD_PRB_RSP_TX_STATUS_EVENT];
+ if (!ev) {
+ ath11k_warn(ab,
+ "failed to fetch probe response transmission status event");
+ kfree(tb);
+ return;
+ }
+
+ if (ev->tx_status)
+ ath11k_warn(ab,
+ "Probe response transmission failed for vdev_id %u, status %u\n",
+ ev->vdev_id, ev->tx_status);
+
+ kfree(tb);
+}
+
+static int ath11k_wmi_tlv_wow_wakeup_host_parse(struct ath11k_base *ab,
+ u16 tag, u16 len,
+ const void *ptr, void *data)
+{
+ struct wmi_wow_ev_arg *ev = data;
+ const char *wow_pg_fault;
+ int wow_pg_len;
+
+ switch (tag) {
+ case WMI_TAG_WOW_EVENT_INFO:
+ memcpy(ev, ptr, sizeof(*ev));
+ ath11k_dbg(ab, ATH11K_DBG_WMI, "wow wakeup host reason %d %s\n",
+ ev->wake_reason, wow_reason(ev->wake_reason));
+ break;
+
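+ /* Track power save residency: record the firmware timestamp
+ * when the peer enters PS and accumulate the delta into
+ * ps_total_duration on the ON -> OFF transition.
+ */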
+ case WMI_TAG_ARRAY_BYTE:
+ if (ev && ev->wake_reason == WOW_REASON_PAGE_FAULT) {
+ wow_pg_fault = ptr;
+ /* the first 4 bytes are length */
+ wow_pg_len = *(int *)wow_pg_fault;
+ wow_pg_fault += sizeof(int);
+ ath11k_dbg(ab, ATH11K_DBG_WMI, "wow data_len = %d\n",
+ wow_pg_len);
+ ath11k_dbg_dump(ab, ATH11K_DBG_WMI,
+ "wow_event_info_type packet present",
+ "wow_pg_fault ",
+ wow_pg_fault,
+ wow_pg_len);
+ }
+ break;
+ default:
+ break;
+ }
+
+ return 0;
+}
+
+static void ath11k_wmi_event_wow_wakeup_host(struct ath11k_base *ab, struct sk_buff *skb)
+{
+ struct wmi_wow_ev_arg ev = { };
+ int ret;
+
+ ret = ath11k_wmi_tlv_iter(ab, skb->data, skb->len,
+ ath11k_wmi_tlv_wow_wakeup_host_parse,
+ &ev);
+ if (ret) {
+ ath11k_warn(ab, "failed to parse wmi wow tlv: %d\n", ret);
+ return;
+ }
+
+ ath11k_dbg(ab, ATH11K_DBG_WMI, "event wow wakeup host");
+
+ complete(&ab->wow.wakeup_completed);
+}
+
+static void
+ath11k_wmi_diag_event(struct ath11k_base *ab,
+ struct sk_buff *skb)
+{
+ ath11k_dbg(ab, ATH11K_DBG_WMI, "event diag");
+
+ trace_ath11k_wmi_diag(ab, skb->data, skb->len);
+}
+
+static const char *ath11k_wmi_twt_add_dialog_event_status(u32 status)
+{
+ switch (status) {
+ case WMI_ADD_TWT_STATUS_OK:
+ return "ok";
+ case WMI_ADD_TWT_STATUS_TWT_NOT_ENABLED:
+ return "twt disabled";
+ case WMI_ADD_TWT_STATUS_USED_DIALOG_ID:
+ return "dialog id in use";
+ case WMI_ADD_TWT_STATUS_INVALID_PARAM:
+ return "invalid parameters";
+ case WMI_ADD_TWT_STATUS_NOT_READY:
+ return "not ready";
+ case WMI_ADD_TWT_STATUS_NO_RESOURCE:
+ return "resource unavailable";
+ case WMI_ADD_TWT_STATUS_NO_ACK:
+ return "no ack";
+ case WMI_ADD_TWT_STATUS_NO_RESPONSE:
+ return "no response";
+ case WMI_ADD_TWT_STATUS_DENIED:
+ return "denied";
+ case WMI_ADD_TWT_STATUS_UNKNOWN_ERROR:
+ fallthrough;
+ default:
+ return "unknown error";
+ }
+}
+
+static void ath11k_wmi_twt_add_dialog_event(struct ath11k_base *ab,
+ struct sk_buff *skb)
+{
+ const void **tb;
+ const struct wmi_twt_add_dialog_event *ev;
+ int ret;
+
+ tb = ath11k_wmi_tlv_parse_alloc(ab, skb->data, skb->len, GFP_ATOMIC);
+ if (IS_ERR(tb)) {
+ ret = PTR_ERR(tb);
+ ath11k_warn(ab,
+ "failed to parse wmi twt add dialog status event tlv: %d\n",
+ ret);
+ return;
+ }
+
+ ath11k_dbg(ab, ATH11K_DBG_WMI, "event twt add dialog");
+
+ ev = tb[WMI_TAG_TWT_ADD_DIALOG_COMPLETE_EVENT];
+ if (!ev) {
+ ath11k_warn(ab, "failed to fetch twt add dialog wmi event\n");
+ goto exit;
+ }
+
+ if (ev->status)
+ ath11k_warn(ab,
+ "wmi add twt dialog event vdev %d dialog id %d status %s\n",
+ ev->vdev_id, ev->dialog_id,
+ ath11k_wmi_twt_add_dialog_event_status(ev->status));
+
+exit:
+ kfree(tb);
+}
+
+static void ath11k_wmi_gtk_offload_status_event(struct ath11k_base *ab,
+ struct sk_buff *skb)
+{
+ const void **tb;
+ const struct wmi_gtk_offload_status_event *ev;
+ struct ath11k_vif *arvif;
+ __be64 replay_ctr_be;
+ u64 replay_ctr;
+ int ret;
+
+ tb = ath11k_wmi_tlv_parse_alloc(ab, skb->data, skb->len, GFP_ATOMIC);
+ if (IS_ERR(tb)) {
+ ret = PTR_ERR(tb);
+ ath11k_warn(ab, "failed to parse tlv: %d\n", ret);
+ return;
+ }
+
+ ev = tb[WMI_TAG_GTK_OFFLOAD_STATUS_EVENT];
+ if (!ev) {
+ ath11k_warn(ab, "failed to fetch gtk offload status ev");
+ kfree(tb);
+ return;
+ }
+
+ rcu_read_lock();
+
+ arvif = ath11k_mac_get_arvif_by_vdev_id(ab, ev->vdev_id);
+ if (!arvif) {
+ ath11k_warn(ab, "failed to get arvif for vdev_id:%d\n",
+ ev->vdev_id);
+ goto exit;
+ }
+
+ ath11k_dbg(ab, ATH11K_DBG_WMI, "event gtk offload refresh_cnt %d\n",
+ ev->refresh_cnt);
+ ath11k_dbg_dump(ab, ATH11K_DBG_WMI, "replay_cnt",
+ NULL, ev->replay_ctr.counter, GTK_REPLAY_COUNTER_BYTES);
+
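+ /* Reassemble the 64-bit replay counter from the two 32-bit words;
+ * word1 carries the upper 32 bits.
+ */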
+ replay_ctr = ev->replay_ctr.word1;
+ replay_ctr = (replay_ctr << 32) | ev->replay_ctr.word0;
+ arvif->rekey_data.replay_ctr = replay_ctr;
+
+ /* supplicant expects big-endian replay counter */
+ replay_ctr_be = cpu_to_be64(replay_ctr);
+
+ ieee80211_gtk_rekey_notify(arvif->vif, arvif->bssid,
+ (void *)&replay_ctr_be, GFP_ATOMIC);
+exit:
+ rcu_read_unlock();
+
+ kfree(tb);
+}
+
+static void ath11k_wmi_tlv_op_rx(struct ath11k_base *ab, struct sk_buff *skb)
+{
+ struct wmi_cmd_hdr *cmd_hdr;
+ enum wmi_tlv_event_id id;
+
+ cmd_hdr = (struct wmi_cmd_hdr *)skb->data;
+ id = FIELD_GET(WMI_CMD_HDR_CMD_ID, (cmd_hdr->cmd_id));
+
+ trace_ath11k_wmi_event(ab, id, skb->data, skb->len);
+
+ if (skb_pull(skb, sizeof(struct wmi_cmd_hdr)) == NULL)
+ goto out;
+
+ switch (id) {
+ /* Process all the WMI events here */
+ case WMI_SERVICE_READY_EVENTID:
+ ath11k_service_ready_event(ab, skb);
+ break;
+ case WMI_SERVICE_READY_EXT_EVENTID:
+ ath11k_service_ready_ext_event(ab, skb);
+ break;
+ case WMI_SERVICE_READY_EXT2_EVENTID:
+ ath11k_service_ready_ext2_event(ab, skb);
+ break;
+ case WMI_REG_CHAN_LIST_CC_EVENTID:
+ ath11k_reg_chan_list_event(ab, skb, WMI_REG_CHAN_LIST_CC_ID);
+ break;
+ case WMI_REG_CHAN_LIST_CC_EXT_EVENTID:
+ ath11k_reg_chan_list_event(ab, skb, WMI_REG_CHAN_LIST_CC_EXT_ID);
+ break;
+ case WMI_READY_EVENTID:
+ ath11k_ready_event(ab, skb);
+ break;
+ case WMI_PEER_DELETE_RESP_EVENTID:
+ ath11k_peer_delete_resp_event(ab, skb);
+ break;
+ case WMI_VDEV_START_RESP_EVENTID:
+ ath11k_vdev_start_resp_event(ab, skb);
+ break;
+ case WMI_OFFLOAD_BCN_TX_STATUS_EVENTID:
+ ath11k_bcn_tx_status_event(ab, skb);
+ break;
+ case WMI_VDEV_STOPPED_EVENTID:
+ ath11k_vdev_stopped_event(ab, skb);
+ break;
+ case WMI_MGMT_RX_EVENTID:
+ ath11k_mgmt_rx_event(ab, skb);
+ /* mgmt_rx_event() owns the skb now! */
+ return;
+ case WMI_MGMT_TX_COMPLETION_EVENTID:
+ ath11k_mgmt_tx_compl_event(ab, skb);
+ break;
+ case WMI_SCAN_EVENTID:
+ ath11k_scan_event(ab, skb);
+ break;
+ case WMI_PEER_STA_KICKOUT_EVENTID:
+ ath11k_peer_sta_kickout_event(ab, skb);
+ break;
+ case WMI_ROAM_EVENTID:
+ ath11k_roam_event(ab, skb);
+ break;
+ case WMI_CHAN_INFO_EVENTID:
+ ath11k_chan_info_event(ab, skb);
+ break;
+ case WMI_PDEV_BSS_CHAN_INFO_EVENTID:
+ ath11k_pdev_bss_chan_info_event(ab, skb);
+ break;
+ case WMI_VDEV_INSTALL_KEY_COMPLETE_EVENTID:
+ ath11k_vdev_install_key_compl_event(ab, skb);
+ break;
+ case WMI_SERVICE_AVAILABLE_EVENTID:
+ ath11k_service_available_event(ab, skb);
+ break;
+ case WMI_PEER_ASSOC_CONF_EVENTID:
+ ath11k_peer_assoc_conf_event(ab, skb);
+ break;
+ case WMI_UPDATE_STATS_EVENTID:
+ ath11k_update_stats_event(ab, skb);
+ break;
+ case WMI_PDEV_CTL_FAILSAFE_CHECK_EVENTID:
+ ath11k_pdev_ctl_failsafe_check_event(ab, skb);
+ break;
+ case WMI_PDEV_CSA_SWITCH_COUNT_STATUS_EVENTID:
+ ath11k_wmi_pdev_csa_switch_count_status_event(ab, skb);
+ break;
+ case WMI_PDEV_UTF_EVENTID:
+ ath11k_tm_wmi_event(ab, id, skb);
+ break;
+ case WMI_PDEV_TEMPERATURE_EVENTID:
+ ath11k_wmi_pdev_temperature_event(ab, skb);
+ break;
+ case WMI_PDEV_DMA_RING_BUF_RELEASE_EVENTID:
+ ath11k_wmi_pdev_dma_ring_buf_release_event(ab, skb);
+ break;
+ case WMI_HOST_FILS_DISCOVERY_EVENTID:
+ ath11k_fils_discovery_event(ab, skb);
+ break;
+ case WMI_OFFLOAD_PROB_RESP_TX_STATUS_EVENTID:
+ ath11k_probe_resp_tx_status_event(ab, skb);
+ break;
+ case WMI_OBSS_COLOR_COLLISION_DETECTION_EVENTID:
+ ath11k_wmi_obss_color_collision_event(ab, skb);
+ break;
+ case WMI_TWT_ADD_DIALOG_EVENTID:
+ ath11k_wmi_twt_add_dialog_event(ab, skb);
+ break;
+ case WMI_PDEV_DFS_RADAR_DETECTION_EVENTID:
+ ath11k_wmi_pdev_dfs_radar_detected_event(ab, skb);
+ break;
+ case WMI_VDEV_DELETE_RESP_EVENTID:
+ ath11k_vdev_delete_resp_event(ab, skb);
+ break;
+ case WMI_WOW_WAKEUP_HOST_EVENTID:
+ ath11k_wmi_event_wow_wakeup_host(ab, skb);
+ break;
+ case WMI_11D_NEW_COUNTRY_EVENTID:
+ ath11k_reg_11d_new_cc_event(ab, skb);
+ break;
+ case WMI_DIAG_EVENTID:
+ ath11k_wmi_diag_event(ab, skb);
+ break;
+ case WMI_PEER_STA_PS_STATECHG_EVENTID:
+ ath11k_wmi_event_peer_sta_ps_state_chg(ab, skb);
+ break;
+ case WMI_GTK_OFFLOAD_STATUS_EVENTID:
+ ath11k_wmi_gtk_offload_status_event(ab, skb);
+ break;
+ default:
+ ath11k_dbg(ab, ATH11K_DBG_WMI, "unsupported event id 0x%x\n", id);
+ break;
+ }
+
+out:
+ dev_kfree_skb(skb);
+}
+
+static int ath11k_connect_pdev_htc_service(struct ath11k_base *ab,
+ u32 pdev_idx)
+{
+ int status;
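+ /* Each pdev connects to its own WMI control service: pdev 0 uses the
+ * primary control service, additional MACs use the MAC1/MAC2 services.
+ */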
+ u32 svc_id[] = { ATH11K_HTC_SVC_ID_WMI_CONTROL,
+ ATH11K_HTC_SVC_ID_WMI_CONTROL_MAC1,
+ ATH11K_HTC_SVC_ID_WMI_CONTROL_MAC2 };
+
+ struct ath11k_htc_svc_conn_req conn_req;
+ struct ath11k_htc_svc_conn_resp conn_resp;
+
+ memset(&conn_req, 0, sizeof(conn_req));
+ memset(&conn_resp, 0, sizeof(conn_resp));
+
+ /* these fields are the same for all service endpoints */
+ conn_req.ep_ops.ep_tx_complete = ath11k_wmi_htc_tx_complete;
+ conn_req.ep_ops.ep_rx_complete = ath11k_wmi_tlv_op_rx;
+ conn_req.ep_ops.ep_tx_credits = ath11k_wmi_op_ep_tx_credits;
+
+ /* connect to control service */
+ conn_req.service_id = svc_id[pdev_idx];
+
+ status = ath11k_htc_connect_service(&ab->htc, &conn_req, &conn_resp);
+ if (status) {
+ ath11k_warn(ab, "failed to connect to WMI CONTROL service status: %d\n",
+ status);
+ return status;
+ }
+
+ ab->wmi_ab.wmi_endpoint_id[pdev_idx] = conn_resp.eid;
+ ab->wmi_ab.wmi[pdev_idx].eid = conn_resp.eid;
+ ab->wmi_ab.max_msg_len[pdev_idx] = conn_resp.max_msg_len;
+ init_waitqueue_head(&ab->wmi_ab.wmi[pdev_idx].tx_ce_desc_wq);
+
+ return 0;
+}
+
+static int
+ath11k_wmi_send_unit_test_cmd(struct ath11k *ar,
+ struct wmi_unit_test_cmd ut_cmd,
+ u32 *test_args)
+{
+ struct ath11k_pdev_wmi *wmi = ar->wmi;
+ struct wmi_unit_test_cmd *cmd;
+ struct sk_buff *skb;
+ struct wmi_tlv *tlv;
+ void *ptr;
+ u32 *ut_cmd_args;
+ int buf_len, arg_len;
+ int ret;
+ int i;
+
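+ /* Buffer layout: the fixed unit test command, followed by a TLV
+ * header and the u32 argument array.
+ */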
+ arg_len = sizeof(u32) * ut_cmd.num_args;
+ buf_len = sizeof(ut_cmd) + arg_len + TLV_HDR_SIZE;
+
+ skb = ath11k_wmi_alloc_skb(wmi->wmi_ab, buf_len);
+ if (!skb)
+ return -ENOMEM;
+
+ cmd = (struct wmi_unit_test_cmd *)skb->data;
+ cmd->tlv_header = FIELD_PREP(WMI_TLV_TAG, WMI_TAG_UNIT_TEST_CMD) |
+ FIELD_PREP(WMI_TLV_LEN, sizeof(ut_cmd) - TLV_HDR_SIZE);
+
+ cmd->vdev_id = ut_cmd.vdev_id;
+ cmd->module_id = ut_cmd.module_id;
+ cmd->num_args = ut_cmd.num_args;
+ cmd->diag_token = ut_cmd.diag_token;
+
+ ptr = skb->data + sizeof(ut_cmd);
+
+ tlv = ptr;
+ tlv->header = FIELD_PREP(WMI_TLV_TAG, WMI_TAG_ARRAY_UINT32) |
+ FIELD_PREP(WMI_TLV_LEN, arg_len);
+
+ ptr += TLV_HDR_SIZE;
+
+ ut_cmd_args = ptr;
+ for (i = 0; i < ut_cmd.num_args; i++)
+ ut_cmd_args[i] = test_args[i];
+
+ ret = ath11k_wmi_cmd_send(wmi, skb, WMI_UNIT_TEST_CMDID);
+
+ if (ret) {
+ ath11k_warn(ar->ab, "failed to send WMI_UNIT_TEST CMD :%d\n",
+ ret);
+ dev_kfree_skb(skb);
+ }
+
+ ath11k_dbg(ar->ab, ATH11K_DBG_WMI,
+ "cmd unit test module %d vdev %d n_args %d token %d\n",
+ cmd->module_id, cmd->vdev_id, cmd->num_args,
+ cmd->diag_token);
+
+ return ret;
+}
+
+int ath11k_wmi_simulate_radar(struct ath11k *ar)
+{
+ struct ath11k_vif *arvif;
+ u32 dfs_args[DFS_MAX_TEST_ARGS];
+ struct wmi_unit_test_cmd wmi_ut;
+ bool arvif_found = false;
+
+ list_for_each_entry(arvif, &ar->arvifs, list) {
+ if (arvif->is_started && arvif->vdev_type == WMI_VDEV_TYPE_AP) {
+ arvif_found = true;
+ break;
+ }
+ }
+
+ if (!arvif_found)
+ return -EINVAL;
+
+ dfs_args[DFS_TEST_CMDID] = 0;
+ dfs_args[DFS_TEST_PDEV_ID] = ar->pdev->pdev_id;
+ /* Currently we could pass segment_id (b0 - b1), chirp (b2) and
+ * freq offset (b3 - b10) to the unit test. For simulation
+ * purposes this can be set to 0, which is valid.
+ */
+ dfs_args[DFS_TEST_RADAR_PARAM] = 0;
+
+ wmi_ut.vdev_id = arvif->vdev_id;
+ wmi_ut.module_id = DFS_UNIT_TEST_MODULE;
+ wmi_ut.num_args = DFS_MAX_TEST_ARGS;
+ wmi_ut.diag_token = DFS_UNIT_TEST_TOKEN;
+
+ ath11k_dbg(ar->ab, ATH11K_DBG_REG, "Triggering Radar Simulation\n");
+
+ return ath11k_wmi_send_unit_test_cmd(ar, wmi_ut, dfs_args);
+}
+
+int ath11k_wmi_fw_dbglog_cfg(struct ath11k *ar, u32 *module_id_bitmap,
+ struct ath11k_fw_dbglog *dbglog)
+{
+ struct ath11k_pdev_wmi *wmi = ar->wmi;
+ struct wmi_debug_log_config_cmd_fixed_param *cmd;
+ struct sk_buff *skb;
+ struct wmi_tlv *tlv;
+ int ret, len;
+
+ len = sizeof(*cmd) + TLV_HDR_SIZE + (MAX_MODULE_ID_BITMAP_WORDS * sizeof(u32));
+ skb = ath11k_wmi_alloc_skb(wmi->wmi_ab, len);
+ if (!skb)
+ return -ENOMEM;
+
+ cmd = (struct wmi_debug_log_config_cmd_fixed_param *)skb->data;
+ cmd->tlv_header = FIELD_PREP(WMI_TLV_TAG, WMI_TAG_DEBUG_LOG_CONFIG_CMD) |
+ FIELD_PREP(WMI_TLV_LEN, sizeof(*cmd) - TLV_HDR_SIZE);
+ cmd->dbg_log_param = dbglog->param;
+
+ tlv = (struct wmi_tlv *)((u8 *)cmd + sizeof(*cmd));
+ tlv->header = FIELD_PREP(WMI_TLV_TAG, WMI_TAG_ARRAY_UINT32) |
+ FIELD_PREP(WMI_TLV_LEN, MAX_MODULE_ID_BITMAP_WORDS * sizeof(u32));
+
+ switch (dbglog->param) {
+ case WMI_DEBUG_LOG_PARAM_LOG_LEVEL:
+ case WMI_DEBUG_LOG_PARAM_VDEV_ENABLE:
+ case WMI_DEBUG_LOG_PARAM_VDEV_DISABLE:
+ case WMI_DEBUG_LOG_PARAM_VDEV_ENABLE_BITMAP:
+ cmd->value = dbglog->value;
+ break;
+ case WMI_DEBUG_LOG_PARAM_MOD_ENABLE_BITMAP:
+ case WMI_DEBUG_LOG_PARAM_WOW_MOD_ENABLE_BITMAP:
+ cmd->value = dbglog->value;
+ memcpy(tlv->value, module_id_bitmap,
+ MAX_MODULE_ID_BITMAP_WORDS * sizeof(u32));
+ /* clear current config to be used for next user config */
+ memset(module_id_bitmap, 0,
+ MAX_MODULE_ID_BITMAP_WORDS * sizeof(u32));
+ break;
+ default:
+ dev_kfree_skb(skb);
+ return -EINVAL;
+ }
+
+ ret = ath11k_wmi_cmd_send(wmi, skb, WMI_DBGLOG_CFG_CMDID);
+ if (ret) {
+ ath11k_warn(ar->ab,
+ "failed to send WMI_DBGLOG_CFG_CMDID\n");
+ dev_kfree_skb(skb);
+ }
+
+ ath11k_dbg(ar->ab, ATH11K_DBG_WMI, "cmd dbglog cfg");
+
+ return ret;
+}
+
+int ath11k_wmi_connect(struct ath11k_base *ab)
+{
+ u32 i;
+ u8 wmi_ep_count;
+
+ wmi_ep_count = ab->htc.wmi_ep_count;
+ if (wmi_ep_count > ab->hw_params.max_radios)
+ return -1;
+
+ for (i = 0; i < wmi_ep_count; i++)
+ ath11k_connect_pdev_htc_service(ab, i);
+
+ return 0;
+}
+
+static void ath11k_wmi_pdev_detach(struct ath11k_base *ab, u8 pdev_id)
+{
+ if (WARN_ON(pdev_id >= MAX_RADIOS))
+ return;
+
+ /* TODO: Deinit any pdev specific wmi resource */
+}
+
+int ath11k_wmi_pdev_attach(struct ath11k_base *ab,
+ u8 pdev_id)
+{
+ struct ath11k_pdev_wmi *wmi_handle;
+
+ if (pdev_id >= ab->hw_params.max_radios)
+ return -EINVAL;
+
+ wmi_handle = &ab->wmi_ab.wmi[pdev_id];
+
+ wmi_handle->wmi_ab = &ab->wmi_ab;
+
+ ab->wmi_ab.ab = ab;
+ /* TODO: Init remaining resource specific to pdev */
+
+ return 0;
+}
+
+int ath11k_wmi_attach(struct ath11k_base *ab)
+{
+ int ret;
+
+ ret = ath11k_wmi_pdev_attach(ab, 0);
+ if (ret)
+ return ret;
+
+ ab->wmi_ab.ab = ab;
+ ab->wmi_ab.preferred_hw_mode = WMI_HOST_HW_MODE_MAX;
+
+ /* It's overwritten when service_ext_ready is handled */
+ if (ab->hw_params.single_pdev_only && ab->hw_params.num_rxmda_per_pdev > 1)
+ ab->wmi_ab.preferred_hw_mode = WMI_HOST_HW_MODE_SINGLE;
+
+ /* TODO: Init remaining wmi soc resources required */
+ init_completion(&ab->wmi_ab.service_ready);
+ init_completion(&ab->wmi_ab.unified_ready);
+
+ return 0;
+}
+
+void ath11k_wmi_detach(struct ath11k_base *ab)
+{
+ int i;
+
+ /* TODO: Deinit wmi resource specific to SOC as required */
+
+ for (i = 0; i < ab->htc.wmi_ep_count; i++)
+ ath11k_wmi_pdev_detach(ab, i);
+
+ ath11k_wmi_free_dbring_caps(ab);
+}
+
+int ath11k_wmi_hw_data_filter_cmd(struct ath11k *ar, u32 vdev_id,
+ u32 filter_bitmap, bool enable)
+{
+ struct wmi_hw_data_filter_cmd *cmd;
+ struct sk_buff *skb;
+ int len;
+
+ len = sizeof(*cmd);
+ skb = ath11k_wmi_alloc_skb(ar->wmi->wmi_ab, len);
+
+ if (!skb)
+ return -ENOMEM;
+
+ cmd = (struct wmi_hw_data_filter_cmd *)skb->data;
+ cmd->tlv_header = FIELD_PREP(WMI_TLV_TAG, WMI_TAG_HW_DATA_FILTER_CMD) |
+ FIELD_PREP(WMI_TLV_LEN, sizeof(*cmd) - TLV_HDR_SIZE);
+
+ cmd->vdev_id = vdev_id;
+ cmd->enable = enable;
+
+ /* Set all modes in case of disable */
+ if (cmd->enable)
+ cmd->hw_filter_bitmap = filter_bitmap;
+ else
+ cmd->hw_filter_bitmap = ((u32)~0U);
+
+ ath11k_dbg(ar->ab, ATH11K_DBG_WMI,
+ "hw data filter enable %d filter_bitmap 0x%x\n",
+ enable, filter_bitmap);
+
+ return ath11k_wmi_cmd_send(ar->wmi, skb, WMI_HW_DATA_FILTER_CMDID);
+}
+
+int ath11k_wmi_wow_host_wakeup_ind(struct ath11k *ar)
+{
+ struct wmi_wow_host_wakeup_ind *cmd;
+ struct sk_buff *skb;
+ size_t len;
+
+ len = sizeof(*cmd);
+ skb = ath11k_wmi_alloc_skb(ar->wmi->wmi_ab, len);
+ if (!skb)
+ return -ENOMEM;
+
+ cmd = (struct wmi_wow_host_wakeup_ind *)skb->data;
+ cmd->tlv_header = FIELD_PREP(WMI_TLV_TAG,
+ WMI_TAG_WOW_HOSTWAKEUP_FROM_SLEEP_CMD) |
+ FIELD_PREP(WMI_TLV_LEN, sizeof(*cmd) - TLV_HDR_SIZE);
+
+ ath11k_dbg(ar->ab, ATH11K_DBG_WMI, "tlv wow host wakeup ind\n");
+
+ return ath11k_wmi_cmd_send(ar->wmi, skb, WMI_WOW_HOSTWAKEUP_FROM_SLEEP_CMDID);
+}
+
+int ath11k_wmi_wow_enable(struct ath11k *ar)
+{
+ struct wmi_wow_enable_cmd *cmd;
+ struct sk_buff *skb;
+ int len;
+
+ len = sizeof(*cmd);
+ skb = ath11k_wmi_alloc_skb(ar->wmi->wmi_ab, len);
+ if (!skb)
+ return -ENOMEM;
+
+ cmd = (struct wmi_wow_enable_cmd *)skb->data;
+ cmd->tlv_header = FIELD_PREP(WMI_TLV_TAG, WMI_TAG_WOW_ENABLE_CMD) |
+ FIELD_PREP(WMI_TLV_LEN, sizeof(*cmd) - TLV_HDR_SIZE);
+
+ cmd->enable = 1;
+ cmd->pause_iface_config = WOW_IFACE_PAUSE_ENABLED;
+ ath11k_dbg(ar->ab, ATH11K_DBG_WMI, "tlv wow enable\n");
+
+ return ath11k_wmi_cmd_send(ar->wmi, skb, WMI_WOW_ENABLE_CMDID);
+}
+
+int ath11k_wmi_scan_prob_req_oui(struct ath11k *ar,
+ const u8 mac_addr[ETH_ALEN])
+{
+ struct sk_buff *skb;
+ struct wmi_scan_prob_req_oui_cmd *cmd;
+ u32 prob_req_oui;
+ int len;
+
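+ /* The scan probe request OUI is the first three octets of the MAC
+ * address packed into the low 24 bits.
+ */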
+ prob_req_oui = (((u32)mac_addr[0]) << 16) |
+ (((u32)mac_addr[1]) << 8) | mac_addr[2];
+
+ len = sizeof(*cmd);
+ skb = ath11k_wmi_alloc_skb(ar->wmi->wmi_ab, len);
+ if (!skb)
+ return -ENOMEM;
+
+ cmd = (struct wmi_scan_prob_req_oui_cmd *)skb->data;
+ cmd->tlv_header = FIELD_PREP(WMI_TLV_TAG,
+ WMI_TAG_SCAN_PROB_REQ_OUI_CMD) |
+ FIELD_PREP(WMI_TLV_LEN, sizeof(*cmd) - TLV_HDR_SIZE);
+ cmd->prob_req_oui = prob_req_oui;
+
+ ath11k_dbg(ar->ab, ATH11K_DBG_WMI, "scan prob req oui %d\n",
+ prob_req_oui);
+
+ return ath11k_wmi_cmd_send(ar->wmi, skb, WMI_SCAN_PROB_REQ_OUI_CMDID);
+}
+
+int ath11k_wmi_wow_add_wakeup_event(struct ath11k *ar, u32 vdev_id,
+ enum wmi_wow_wakeup_event event,
+ u32 enable)
+{
+ struct wmi_wow_add_del_event_cmd *cmd;
+ struct sk_buff *skb;
+ size_t len;
+
+ len = sizeof(*cmd);
+ skb = ath11k_wmi_alloc_skb(ar->wmi->wmi_ab, len);
+ if (!skb)
+ return -ENOMEM;
+
+ cmd = (struct wmi_wow_add_del_event_cmd *)skb->data;
+ cmd->tlv_header = FIELD_PREP(WMI_TLV_TAG, WMI_TAG_WOW_ADD_DEL_EVT_CMD) |
+ FIELD_PREP(WMI_TLV_LEN, sizeof(*cmd) - TLV_HDR_SIZE);
+
+ cmd->vdev_id = vdev_id;
+ cmd->is_add = enable;
+ cmd->event_bitmap = (1 << event);
+
+ ath11k_dbg(ar->ab, ATH11K_DBG_WMI, "tlv wow add wakeup event %s enable %d vdev_id %d\n",
+ wow_wakeup_event(event), enable, vdev_id);
+
+ return ath11k_wmi_cmd_send(ar->wmi, skb, WMI_WOW_ENABLE_DISABLE_WAKE_EVENT_CMDID);
+}
+
+int ath11k_wmi_wow_add_pattern(struct ath11k *ar, u32 vdev_id, u32 pattern_id,
+ const u8 *pattern, const u8 *mask,
+ int pattern_len, int pattern_offset)
+{
+ struct wmi_wow_add_pattern_cmd *cmd;
+ struct wmi_wow_bitmap_pattern *bitmap;
+ struct wmi_tlv *tlv;
+ struct sk_buff *skb;
+ u8 *ptr;
+ size_t len;
+
+ len = sizeof(*cmd) +
+ sizeof(*tlv) + /* array struct */
+ sizeof(*bitmap) + /* bitmap */
+ sizeof(*tlv) + /* empty ipv4 sync */
+ sizeof(*tlv) + /* empty ipv6 sync */
+ sizeof(*tlv) + /* empty magic */
+ sizeof(*tlv) + /* empty info timeout */
+ sizeof(*tlv) + sizeof(u32); /* ratelimit interval */
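+ /* The remaining pattern types (ipv4/ipv6 sync, magic, timeout) are
+ * sent as empty placeholder TLVs to keep the expected TLV sequence
+ * intact.
+ */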
+
+ skb = ath11k_wmi_alloc_skb(ar->wmi->wmi_ab, len);
+ if (!skb)
+ return -ENOMEM;
+
+ /* cmd */
+ ptr = (u8 *)skb->data;
+ cmd = (struct wmi_wow_add_pattern_cmd *)ptr;
+ cmd->tlv_header = FIELD_PREP(WMI_TLV_TAG,
+ WMI_TAG_WOW_ADD_PATTERN_CMD) |
+ FIELD_PREP(WMI_TLV_LEN, sizeof(*cmd) - TLV_HDR_SIZE);
+
+ cmd->vdev_id = vdev_id;
+ cmd->pattern_id = pattern_id;
+ cmd->pattern_type = WOW_BITMAP_PATTERN;
+
+ ptr += sizeof(*cmd);
+
+ /* bitmap */
+ tlv = (struct wmi_tlv *)ptr;
+ tlv->header = FIELD_PREP(WMI_TLV_TAG,
+ WMI_TAG_ARRAY_STRUCT) |
+ FIELD_PREP(WMI_TLV_LEN, sizeof(*bitmap));
+
+ ptr += sizeof(*tlv);
+
+ bitmap = (struct wmi_wow_bitmap_pattern *)ptr;
+ bitmap->tlv_header = FIELD_PREP(WMI_TLV_TAG,
+ WMI_TAG_WOW_BITMAP_PATTERN_T) |
+ FIELD_PREP(WMI_TLV_LEN, sizeof(*bitmap) - TLV_HDR_SIZE);
+
+ memcpy(bitmap->patternbuf, pattern, pattern_len);
+ ath11k_ce_byte_swap(bitmap->patternbuf, roundup(pattern_len, 4));
+ memcpy(bitmap->bitmaskbuf, mask, pattern_len);
+ ath11k_ce_byte_swap(bitmap->bitmaskbuf, roundup(pattern_len, 4));
+ bitmap->pattern_offset = pattern_offset;
+ bitmap->pattern_len = pattern_len;
+ bitmap->bitmask_len = pattern_len;
+ bitmap->pattern_id = pattern_id;
+
+ ptr += sizeof(*bitmap);
+
+ /* ipv4 sync */
+ tlv = (struct wmi_tlv *)ptr;
+ tlv->header = FIELD_PREP(WMI_TLV_TAG,
+ WMI_TAG_ARRAY_STRUCT) |
+ FIELD_PREP(WMI_TLV_LEN, 0);
+
+ ptr += sizeof(*tlv);
+
+ /* ipv6 sync */
+ tlv = (struct wmi_tlv *)ptr;
+ tlv->header = FIELD_PREP(WMI_TLV_TAG,
+ WMI_TAG_ARRAY_STRUCT) |
+ FIELD_PREP(WMI_TLV_LEN, 0);
+
+ ptr += sizeof(*tlv);
+
+ /* magic */
+ tlv = (struct wmi_tlv *)ptr;
+ tlv->header = FIELD_PREP(WMI_TLV_TAG,
+ WMI_TAG_ARRAY_STRUCT) |
+ FIELD_PREP(WMI_TLV_LEN, 0);
+
+ ptr += sizeof(*tlv);
+
+ /* pattern info timeout */
+ tlv = (struct wmi_tlv *)ptr;
+ tlv->header = FIELD_PREP(WMI_TLV_TAG,
+ WMI_TAG_ARRAY_UINT32) |
+ FIELD_PREP(WMI_TLV_LEN, 0);
+
+ ptr += sizeof(*tlv);
+
+ /* ratelimit interval */
+ tlv = (struct wmi_tlv *)ptr;
+ tlv->header = FIELD_PREP(WMI_TLV_TAG,
+ WMI_TAG_ARRAY_UINT32) |
+ FIELD_PREP(WMI_TLV_LEN, sizeof(u32));
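+ /* The u32 interval value itself is left at its zeroed default;
+ * ath11k_wmi_alloc_skb() returns a zeroed buffer.
+ */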
+
+ ath11k_dbg(ar->ab, ATH11K_DBG_WMI, "tlv wow add pattern vdev_id %d pattern_id %d pattern_offset %d\n",
+ vdev_id, pattern_id, pattern_offset);
+
+ return ath11k_wmi_cmd_send(ar->wmi, skb, WMI_WOW_ADD_WAKE_PATTERN_CMDID);
+}
+
+int ath11k_wmi_wow_del_pattern(struct ath11k *ar, u32 vdev_id, u32 pattern_id)
+{
+ struct wmi_wow_del_pattern_cmd *cmd;
+ struct sk_buff *skb;
+ size_t len;
+
+ len = sizeof(*cmd);
+ skb = ath11k_wmi_alloc_skb(ar->wmi->wmi_ab, len);
+ if (!skb)
+ return -ENOMEM;
+
+ cmd = (struct wmi_wow_del_pattern_cmd *)skb->data;
+ cmd->tlv_header = FIELD_PREP(WMI_TLV_TAG,
+ WMI_TAG_WOW_DEL_PATTERN_CMD) |
+ FIELD_PREP(WMI_TLV_LEN, sizeof(*cmd) - TLV_HDR_SIZE);
+
+ cmd->vdev_id = vdev_id;
+ cmd->pattern_id = pattern_id;
+ cmd->pattern_type = WOW_BITMAP_PATTERN;
+
+ ath11k_dbg(ar->ab, ATH11K_DBG_WMI, "tlv wow del pattern vdev_id %d pattern_id %d\n",
+ vdev_id, pattern_id);
+
+ return ath11k_wmi_cmd_send(ar->wmi, skb, WMI_WOW_DEL_WAKE_PATTERN_CMDID);
+}
+
+static struct sk_buff *
+ath11k_wmi_op_gen_config_pno_start(struct ath11k *ar,
+ u32 vdev_id,
+ struct wmi_pno_scan_req *pno)
+{
+ struct nlo_configured_parameters *nlo_list;
+ struct wmi_wow_nlo_config_cmd *cmd;
+ struct wmi_tlv *tlv;
+ struct sk_buff *skb;
+ u32 *channel_list;
+ size_t len, nlo_list_len, channel_list_len;
+ u8 *ptr;
+ u32 i;
+
+ len = sizeof(*cmd) +
+ sizeof(*tlv) +
+ /* TLV placeholder for array of structures
+ * nlo_configured_parameters(nlo_list)
+ */
+ sizeof(*tlv);
+ /* TLV placeholder for array of uint32 channel_list */
+
+ channel_list_len = sizeof(u32) * pno->a_networks[0].channel_count;
+ len += channel_list_len;
+
+ nlo_list_len = sizeof(*nlo_list) * pno->uc_networks_count;
+ len += nlo_list_len;
+
+ skb = ath11k_wmi_alloc_skb(ar->wmi->wmi_ab, len);
+ if (!skb)
+ return ERR_PTR(-ENOMEM);
+
+ ptr = (u8 *)skb->data;
+ cmd = (struct wmi_wow_nlo_config_cmd *)ptr;
+ cmd->tlv_header = FIELD_PREP(WMI_TLV_TAG, WMI_TAG_NLO_CONFIG_CMD) |
+ FIELD_PREP(WMI_TLV_LEN, sizeof(*cmd) - TLV_HDR_SIZE);
+
+ cmd->vdev_id = pno->vdev_id;
+ cmd->flags = WMI_NLO_CONFIG_START | WMI_NLO_CONFIG_SSID_HIDE_EN;
+
+ /* current FW does not support min-max range for dwell time */
+ cmd->active_dwell_time = pno->active_max_time;
+ cmd->passive_dwell_time = pno->passive_max_time;
+
+ if (pno->do_passive_scan)
+ cmd->flags |= WMI_NLO_CONFIG_SCAN_PASSIVE;
+
+ cmd->fast_scan_period = pno->fast_scan_period;
+ cmd->slow_scan_period = pno->slow_scan_period;
+ cmd->fast_scan_max_cycles = pno->fast_scan_max_cycles;
+ cmd->delay_start_time = pno->delay_start_time;
+
+ if (pno->enable_pno_scan_randomization) {
+ cmd->flags |= WMI_NLO_CONFIG_SPOOFED_MAC_IN_PROBE_REQ |
+ WMI_NLO_CONFIG_RANDOM_SEQ_NO_IN_PROBE_REQ;
+ ether_addr_copy(cmd->mac_addr.addr, pno->mac_addr);
+ ether_addr_copy(cmd->mac_mask.addr, pno->mac_addr_mask);
+ ath11k_ce_byte_swap(cmd->mac_addr.addr, 8);
+ ath11k_ce_byte_swap(cmd->mac_mask.addr, 8);
+ }
+
+ ptr += sizeof(*cmd);
+
+ /* nlo_configured_parameters(nlo_list) */
+ cmd->no_of_ssids = pno->uc_networks_count;
+ tlv = (struct wmi_tlv *)ptr;
+ tlv->header = FIELD_PREP(WMI_TLV_TAG,
+ WMI_TAG_ARRAY_STRUCT) |
+ FIELD_PREP(WMI_TLV_LEN, nlo_list_len);
+
+ ptr += sizeof(*tlv);
+ nlo_list = (struct nlo_configured_parameters *)ptr;
+ for (i = 0; i < cmd->no_of_ssids; i++) {
+ tlv = (struct wmi_tlv *)(&nlo_list[i].tlv_header);
+ tlv->header = FIELD_PREP(WMI_TLV_TAG, WMI_TAG_ARRAY_BYTE) |
+ FIELD_PREP(WMI_TLV_LEN, sizeof(*nlo_list) - sizeof(*tlv));
+
+ nlo_list[i].ssid.valid = true;
+ nlo_list[i].ssid.ssid.ssid_len = pno->a_networks[i].ssid.ssid_len;
+ memcpy(nlo_list[i].ssid.ssid.ssid,
+ pno->a_networks[i].ssid.ssid,
+ nlo_list[i].ssid.ssid.ssid_len);
+ ath11k_ce_byte_swap(nlo_list[i].ssid.ssid.ssid,
+ roundup(nlo_list[i].ssid.ssid.ssid_len, 4));
+
+ if (pno->a_networks[i].rssi_threshold &&
+ pno->a_networks[i].rssi_threshold > -300) {
+ nlo_list[i].rssi_cond.valid = true;
+ nlo_list[i].rssi_cond.rssi =
+ pno->a_networks[i].rssi_threshold;
+ }
+
+ nlo_list[i].bcast_nw_type.valid = true;
+ nlo_list[i].bcast_nw_type.bcast_nw_type =
+ pno->a_networks[i].bcast_nw_type;
+ }
+
+ ptr += nlo_list_len;
+ cmd->num_of_channels = pno->a_networks[0].channel_count;
+ tlv = (struct wmi_tlv *)ptr;
+ tlv->header = FIELD_PREP(WMI_TLV_TAG, WMI_TAG_ARRAY_UINT32) |
+ FIELD_PREP(WMI_TLV_LEN, channel_list_len);
+ ptr += sizeof(*tlv);
+ channel_list = (u32 *)ptr;
+ for (i = 0; i < cmd->num_of_channels; i++)
+ channel_list[i] = pno->a_networks[0].channels[i];
+
+ ath11k_dbg(ar->ab, ATH11K_DBG_WMI, "tlv start pno config vdev_id %d\n",
+ vdev_id);
+
+ return skb;
+}
+
+static struct sk_buff *ath11k_wmi_op_gen_config_pno_stop(struct ath11k *ar,
+ u32 vdev_id)
+{
+ struct wmi_wow_nlo_config_cmd *cmd;
+ struct sk_buff *skb;
+ size_t len;
+
+ len = sizeof(*cmd);
+ skb = ath11k_wmi_alloc_skb(ar->wmi->wmi_ab, len);
+ if (!skb)
+ return ERR_PTR(-ENOMEM);
+
+ cmd = (struct wmi_wow_nlo_config_cmd *)skb->data;
+ cmd->tlv_header = FIELD_PREP(WMI_TLV_TAG, WMI_TAG_NLO_CONFIG_CMD) |
+ FIELD_PREP(WMI_TLV_LEN, len - TLV_HDR_SIZE);
+
+ cmd->vdev_id = vdev_id;
+ cmd->flags = WMI_NLO_CONFIG_STOP;
+
+ ath11k_dbg(ar->ab, ATH11K_DBG_WMI,
+ "tlv stop pno config vdev_id %d\n", vdev_id);
+ return skb;
+}
+
+int ath11k_wmi_wow_config_pno(struct ath11k *ar, u32 vdev_id,
+ struct wmi_pno_scan_req *pno_scan)
+{
+ struct sk_buff *skb;
+
+ if (pno_scan->enable)
+ skb = ath11k_wmi_op_gen_config_pno_start(ar, vdev_id, pno_scan);
+ else
+ skb = ath11k_wmi_op_gen_config_pno_stop(ar, vdev_id);
+
+ if (IS_ERR_OR_NULL(skb))
+ return -ENOMEM;
+
+ return ath11k_wmi_cmd_send(ar->wmi, skb, WMI_NETWORK_LIST_OFFLOAD_CONFIG_CMDID);
+}
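+
+/* A minimal usage sketch (hypothetical caller, for illustration only):
+ *
+ *   struct wmi_pno_scan_req pno = {};
+ *
+ *   pno.enable = 1;
+ *   pno.vdev_id = arvif->vdev_id;
+ *   pno.uc_networks_count = 1;
+ *   pno.a_networks[0].ssid.ssid_len = strlen("example");
+ *   memcpy(pno.a_networks[0].ssid.ssid, "example", 7);
+ *   pno.a_networks[0].channel_count = 1;
+ *   pno.a_networks[0].channels[0] = 2412;
+ *   ret = ath11k_wmi_wow_config_pno(ar, arvif->vdev_id, &pno);
+ *
+ * With pno.enable cleared, the same call emits the stop variant instead.
+ */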
+
+static void ath11k_wmi_fill_ns_offload(struct ath11k *ar,
+ struct ath11k_arp_ns_offload *offload,
+ u8 **ptr,
+ bool enable,
+ bool ext)
+{
+ struct wmi_ns_offload_tuple *ns;
+ struct wmi_tlv *tlv;
+ u8 *buf_ptr = *ptr;
+ u32 ns_cnt, ns_ext_tuples;
+ int i, max_offloads;
+
+ ns_cnt = offload->ipv6_count;
+
+ tlv = (struct wmi_tlv *)buf_ptr;
+
+ if (ext) {
+ ns_ext_tuples = offload->ipv6_count - WMI_MAX_NS_OFFLOADS;
+ tlv->header = FIELD_PREP(WMI_TLV_TAG, WMI_TAG_ARRAY_STRUCT) |
+ FIELD_PREP(WMI_TLV_LEN, ns_ext_tuples * sizeof(*ns));
+ i = WMI_MAX_NS_OFFLOADS;
+ max_offloads = offload->ipv6_count;
+ } else {
+ tlv->header = FIELD_PREP(WMI_TLV_TAG, WMI_TAG_ARRAY_STRUCT) |
+ FIELD_PREP(WMI_TLV_LEN, WMI_MAX_NS_OFFLOADS * sizeof(*ns));
+ i = 0;
+ max_offloads = WMI_MAX_NS_OFFLOADS;
+ }
+
+ buf_ptr += sizeof(*tlv);
+
+ for (; i < max_offloads; i++) {
+ ns = (struct wmi_ns_offload_tuple *)buf_ptr;
+ ns->tlv_header = FIELD_PREP(WMI_TLV_TAG, WMI_TAG_NS_OFFLOAD_TUPLE) |
+ FIELD_PREP(WMI_TLV_LEN, sizeof(*ns) - TLV_HDR_SIZE);
+
+ if (enable) {
+ if (i < ns_cnt)
+ ns->flags |= WMI_NSOL_FLAGS_VALID;
+
+ memcpy(ns->target_ipaddr[0], offload->ipv6_addr[i], 16);
+ memcpy(ns->solicitation_ipaddr, offload->self_ipv6_addr[i], 16);
+ ath11k_ce_byte_swap(ns->target_ipaddr[0], 16);
+ ath11k_ce_byte_swap(ns->solicitation_ipaddr, 16);
+
+ if (offload->ipv6_type[i])
+ ns->flags |= WMI_NSOL_FLAGS_IS_IPV6_ANYCAST;
+
+ memcpy(ns->target_mac.addr, offload->mac_addr, ETH_ALEN);
+ ath11k_ce_byte_swap(ns->target_mac.addr, 8);
+
+ if (ns->target_mac.word0 != 0 ||
+ ns->target_mac.word1 != 0) {
+ ns->flags |= WMI_NSOL_FLAGS_MAC_VALID;
+ }
+
+ ath11k_dbg(ar->ab, ATH11K_DBG_WMI,
+ "index %d ns_solicited %pI6 target %pI6",
+ i, ns->solicitation_ipaddr,
+ ns->target_ipaddr[0]);
+ }
+
+ buf_ptr += sizeof(*ns);
+ }
+
+ *ptr = buf_ptr;
+}
+
+static void ath11k_wmi_fill_arp_offload(struct ath11k *ar,
+ struct ath11k_arp_ns_offload *offload,
+ u8 **ptr,
+ bool enable)
+{
+ struct wmi_arp_offload_tuple *arp;
+ struct wmi_tlv *tlv;
+ u8 *buf_ptr = *ptr;
+ int i;
+
+ /* fill arp tuple */
+ tlv = (struct wmi_tlv *)buf_ptr;
+ tlv->header = FIELD_PREP(WMI_TLV_TAG, WMI_TAG_ARRAY_STRUCT) |
+ FIELD_PREP(WMI_TLV_LEN, WMI_MAX_ARP_OFFLOADS * sizeof(*arp));
+ buf_ptr += sizeof(*tlv);
+
+ for (i = 0; i < WMI_MAX_ARP_OFFLOADS; i++) {
+ arp = (struct wmi_arp_offload_tuple *)buf_ptr;
+ arp->tlv_header = FIELD_PREP(WMI_TLV_TAG, WMI_TAG_ARP_OFFLOAD_TUPLE) |
+ FIELD_PREP(WMI_TLV_LEN, sizeof(*arp) - TLV_HDR_SIZE);
+
+ if (enable && i < offload->ipv4_count) {
+ /* Copy the target ip addr and flags */
+ arp->flags = WMI_ARPOL_FLAGS_VALID;
+ memcpy(arp->target_ipaddr, offload->ipv4_addr[i], 4);
+ ath11k_ce_byte_swap(arp->target_ipaddr, 4);
+
+ ath11k_dbg(ar->ab, ATH11K_DBG_WMI, "arp offload address %pI4",
+ arp->target_ipaddr);
+ }
+
+ buf_ptr += sizeof(*arp);
+ }
+
+ *ptr = buf_ptr;
+}
+
+int ath11k_wmi_arp_ns_offload(struct ath11k *ar,
+ struct ath11k_vif *arvif, bool enable)
+{
+ struct ath11k_arp_ns_offload *offload;
+ struct wmi_set_arp_ns_offload_cmd *cmd;
+ struct wmi_tlv *tlv;
+ struct sk_buff *skb;
+ u8 *buf_ptr;
+ size_t len;
+ u8 ns_cnt, ns_ext_tuples = 0;
+
+ offload = &arvif->arp_ns_offload;
+ ns_cnt = offload->ipv6_count;
+
+ len = sizeof(*cmd) +
+ sizeof(*tlv) +
+ WMI_MAX_NS_OFFLOADS * sizeof(struct wmi_ns_offload_tuple) +
+ sizeof(*tlv) +
+ WMI_MAX_ARP_OFFLOADS * sizeof(struct wmi_arp_offload_tuple);
+
+ if (ns_cnt > WMI_MAX_NS_OFFLOADS) {
+ ns_ext_tuples = ns_cnt - WMI_MAX_NS_OFFLOADS;
+ len += sizeof(*tlv) +
+ ns_ext_tuples * sizeof(struct wmi_ns_offload_tuple);
+ }
+
+ skb = ath11k_wmi_alloc_skb(ar->wmi->wmi_ab, len);
+ if (!skb)
+ return -ENOMEM;
+
+ buf_ptr = skb->data;
+ cmd = (struct wmi_set_arp_ns_offload_cmd *)buf_ptr;
+ cmd->tlv_header = FIELD_PREP(WMI_TLV_TAG,
+ WMI_TAG_SET_ARP_NS_OFFLOAD_CMD) |
+ FIELD_PREP(WMI_TLV_LEN, sizeof(*cmd) - TLV_HDR_SIZE);
+
+ cmd->flags = 0;
+ cmd->vdev_id = arvif->vdev_id;
+ cmd->num_ns_ext_tuples = ns_ext_tuples;
+
+ buf_ptr += sizeof(*cmd);
+
+ ath11k_wmi_fill_ns_offload(ar, offload, &buf_ptr, enable, false);
+ ath11k_wmi_fill_arp_offload(ar, offload, &buf_ptr, enable);
+
+ if (ns_ext_tuples)
+ ath11k_wmi_fill_ns_offload(ar, offload, &buf_ptr, enable, true);
+
+ return ath11k_wmi_cmd_send(ar->wmi, skb, WMI_SET_ARP_NS_OFFLOAD_CMDID);
+}
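+
+/* For reference, the buffer built above always carries the two fixed-size
+ * tuple arrays, plus a third array only when more than WMI_MAX_NS_OFFLOADS
+ * IPv6 addresses are configured:
+ *
+ *   wmi_set_arp_ns_offload_cmd
+ *   TLV (ARRAY_STRUCT) : WMI_MAX_NS_OFFLOADS x wmi_ns_offload_tuple
+ *   TLV (ARRAY_STRUCT) : WMI_MAX_ARP_OFFLOADS x wmi_arp_offload_tuple
+ *   TLV (ARRAY_STRUCT) : ns_ext_tuples x wmi_ns_offload_tuple (optional)
+ */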
+
+int ath11k_wmi_gtk_rekey_offload(struct ath11k *ar,
+ struct ath11k_vif *arvif, bool enable)
+{
+ struct wmi_gtk_rekey_offload_cmd *cmd;
+ struct ath11k_rekey_data *rekey_data = &arvif->rekey_data;
+ int len;
+ struct sk_buff *skb;
+ __le64 replay_ctr;
+
+ len = sizeof(*cmd);
+ skb = ath11k_wmi_alloc_skb(ar->wmi->wmi_ab, len);
+ if (!skb)
+ return -ENOMEM;
+
+ cmd = (struct wmi_gtk_rekey_offload_cmd *)skb->data;
+ cmd->tlv_header = FIELD_PREP(WMI_TLV_TAG, WMI_TAG_GTK_OFFLOAD_CMD) |
+ FIELD_PREP(WMI_TLV_LEN, sizeof(*cmd) - TLV_HDR_SIZE);
+
+ cmd->vdev_id = arvif->vdev_id;
+
+ if (enable) {
+ cmd->flags = GTK_OFFLOAD_ENABLE_OPCODE;
+
+ /* the lengths in rekey_data and cmd are equal */
+ memcpy(cmd->kck, rekey_data->kck, sizeof(cmd->kck));
+ ath11k_ce_byte_swap(cmd->kck, GTK_OFFLOAD_KCK_BYTES);
+ memcpy(cmd->kek, rekey_data->kek, sizeof(cmd->kek));
+ ath11k_ce_byte_swap(cmd->kek, GTK_OFFLOAD_KEK_BYTES);
+
+ replay_ctr = cpu_to_le64(rekey_data->replay_ctr);
+ memcpy(cmd->replay_ctr, &replay_ctr,
+ sizeof(replay_ctr));
+ ath11k_ce_byte_swap(cmd->replay_ctr, GTK_REPLAY_COUNTER_BYTES);
+ } else {
+ cmd->flags = GTK_OFFLOAD_DISABLE_OPCODE;
+ }
+
+ ath11k_dbg(ar->ab, ATH11K_DBG_WMI, "offload gtk rekey vdev: %d %d\n",
+ arvif->vdev_id, enable);
+ return ath11k_wmi_cmd_send(ar->wmi, skb, WMI_GTK_OFFLOAD_CMDID);
+}
+
+int ath11k_wmi_gtk_rekey_getinfo(struct ath11k *ar,
+ struct ath11k_vif *arvif)
+{
+ struct wmi_gtk_rekey_offload_cmd *cmd;
+ int len;
+ struct sk_buff *skb;
+
+ len = sizeof(*cmd);
+ skb = ath11k_wmi_alloc_skb(ar->wmi->wmi_ab, len);
+ if (!skb)
+ return -ENOMEM;
+
+ cmd = (struct wmi_gtk_rekey_offload_cmd *)skb->data;
+ cmd->tlv_header = FIELD_PREP(WMI_TLV_TAG, WMI_TAG_GTK_OFFLOAD_CMD) |
+ FIELD_PREP(WMI_TLV_LEN, sizeof(*cmd) - TLV_HDR_SIZE);
+
+ cmd->vdev_id = arvif->vdev_id;
+ cmd->flags = GTK_OFFLOAD_REQUEST_STATUS_OPCODE;
+
+ ath11k_dbg(ar->ab, ATH11K_DBG_WMI, "get gtk rekey vdev_id: %d\n",
+ arvif->vdev_id);
+ return ath11k_wmi_cmd_send(ar->wmi, skb, WMI_GTK_OFFLOAD_CMDID);
+}
+
+int ath11k_wmi_pdev_set_bios_sar_table_param(struct ath11k *ar, const u8 *sar_val)
+{
+ struct ath11k_pdev_wmi *wmi = ar->wmi;
+ struct wmi_pdev_set_sar_table_cmd *cmd;
+ struct wmi_tlv *tlv;
+ struct sk_buff *skb;
+ u8 *buf_ptr;
+ u32 len, sar_len_aligned, rsvd_len_aligned;
+
+ sar_len_aligned = roundup(BIOS_SAR_TABLE_LEN, sizeof(u32));
+ rsvd_len_aligned = roundup(BIOS_SAR_RSVD1_LEN, sizeof(u32));
+ len = sizeof(*cmd) +
+ TLV_HDR_SIZE + sar_len_aligned +
+ TLV_HDR_SIZE + rsvd_len_aligned;
+
+ skb = ath11k_wmi_alloc_skb(wmi->wmi_ab, len);
+ if (!skb)
+ return -ENOMEM;
+
+ cmd = (struct wmi_pdev_set_sar_table_cmd *)skb->data;
+ cmd->tlv_header = FIELD_PREP(WMI_TLV_TAG, WMI_TAG_PDEV_SET_BIOS_SAR_TABLE_CMD) |
+ FIELD_PREP(WMI_TLV_LEN, sizeof(*cmd) - TLV_HDR_SIZE);
+ cmd->pdev_id = ar->pdev->pdev_id;
+ cmd->sar_len = BIOS_SAR_TABLE_LEN;
+ cmd->rsvd_len = BIOS_SAR_RSVD1_LEN;
+
+ buf_ptr = skb->data + sizeof(*cmd);
+ tlv = (struct wmi_tlv *)buf_ptr;
+ tlv->header = FIELD_PREP(WMI_TLV_TAG, WMI_TAG_ARRAY_BYTE) |
+ FIELD_PREP(WMI_TLV_LEN, sar_len_aligned);
+ buf_ptr += TLV_HDR_SIZE;
+ memcpy(buf_ptr, sar_val, BIOS_SAR_TABLE_LEN);
+
+ buf_ptr += sar_len_aligned;
+ tlv = (struct wmi_tlv *)buf_ptr;
+ tlv->header = FIELD_PREP(WMI_TLV_TAG, WMI_TAG_ARRAY_BYTE) |
+ FIELD_PREP(WMI_TLV_LEN, rsvd_len_aligned);
+
+ return ath11k_wmi_cmd_send(wmi, skb, WMI_PDEV_SET_BIOS_SAR_TABLE_CMDID);
+}
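+
+/* Note on the roundup() above: the byte-array payloads are padded up to u32
+ * granularity so each TLV stays 4-byte aligned. For example, a 22-byte SAR
+ * table would occupy a 24-byte ARRAY_BYTE TLV, with the 2 pad bytes left
+ * zeroed by the skb allocation.
+ */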
+
+int ath11k_wmi_pdev_set_bios_geo_table_param(struct ath11k *ar)
+{
+ struct ath11k_pdev_wmi *wmi = ar->wmi;
+ struct wmi_pdev_set_geo_table_cmd *cmd;
+ struct wmi_tlv *tlv;
+ struct sk_buff *skb;
+ u8 *buf_ptr;
+ u32 len, rsvd_len_aligned;
+
+ rsvd_len_aligned = roundup(BIOS_SAR_RSVD2_LEN, sizeof(u32));
+ len = sizeof(*cmd) + TLV_HDR_SIZE + rsvd_len_aligned;
+
+ skb = ath11k_wmi_alloc_skb(wmi->wmi_ab, len);
+ if (!skb)
+ return -ENOMEM;
+
+ cmd = (struct wmi_pdev_set_geo_table_cmd *)skb->data;
+ cmd->tlv_header = FIELD_PREP(WMI_TLV_TAG, WMI_TAG_PDEV_SET_BIOS_GEO_TABLE_CMD) |
+ FIELD_PREP(WMI_TLV_LEN, sizeof(*cmd) - TLV_HDR_SIZE);
+ cmd->pdev_id = ar->pdev->pdev_id;
+ cmd->rsvd_len = BIOS_SAR_RSVD2_LEN;
+
+ buf_ptr = skb->data + sizeof(*cmd);
+ tlv = (struct wmi_tlv *)buf_ptr;
+ tlv->header = FIELD_PREP(WMI_TLV_TAG, WMI_TAG_ARRAY_BYTE) |
+ FIELD_PREP(WMI_TLV_LEN, rsvd_len_aligned);
+
+ return ath11k_wmi_cmd_send(wmi, skb, WMI_PDEV_SET_BIOS_GEO_TABLE_CMDID);
+}
+
+int ath11k_wmi_sta_keepalive(struct ath11k *ar,
+ const struct wmi_sta_keepalive_arg *arg)
+{
+ struct ath11k_pdev_wmi *wmi = ar->wmi;
+ struct wmi_sta_keepalive_cmd *cmd;
+ struct wmi_sta_keepalive_arp_resp *arp;
+ struct sk_buff *skb;
+ size_t len;
+
+ len = sizeof(*cmd) + sizeof(*arp);
+ skb = ath11k_wmi_alloc_skb(wmi->wmi_ab, len);
+ if (!skb)
+ return -ENOMEM;
+
+ cmd = (struct wmi_sta_keepalive_cmd *)skb->data;
+ cmd->tlv_header = FIELD_PREP(WMI_TLV_TAG,
+ WMI_TAG_STA_KEEPALIVE_CMD) |
+ FIELD_PREP(WMI_TLV_LEN, sizeof(*cmd) - TLV_HDR_SIZE);
+ cmd->vdev_id = arg->vdev_id;
+ cmd->enabled = arg->enabled;
+ cmd->interval = arg->interval;
+ cmd->method = arg->method;
+
+ arp = (struct wmi_sta_keepalive_arp_resp *)(cmd + 1);
+ arp->tlv_header = FIELD_PREP(WMI_TLV_TAG,
+ WMI_TAG_STA_KEEPALIVE_ARP_RESPONSE) |
+ FIELD_PREP(WMI_TLV_LEN, sizeof(*arp) - TLV_HDR_SIZE);
+
+ if (arg->method == WMI_STA_KEEPALIVE_METHOD_UNSOLICITED_ARP_RESPONSE ||
+ arg->method == WMI_STA_KEEPALIVE_METHOD_GRATUITOUS_ARP_REQUEST) {
+ arp->src_ip4_addr = arg->src_ip4_addr;
+ arp->dest_ip4_addr = arg->dest_ip4_addr;
+ ether_addr_copy(arp->dest_mac_addr.addr, arg->dest_mac_addr);
+ }
+
+ ath11k_dbg(ar->ab, ATH11K_DBG_WMI,
+ "sta keepalive vdev %d enabled %d method %d interval %d\n",
+ arg->vdev_id, arg->enabled, arg->method, arg->interval);
+
+ return ath11k_wmi_cmd_send(wmi, skb, WMI_STA_KEEPALIVE_CMDID);
+}
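+
+/* A minimal usage sketch (hypothetical caller, for illustration only),
+ * enabling a 30 second NULL-frame keepalive on a vdev:
+ *
+ *   struct wmi_sta_keepalive_arg arg = {
+ *           .vdev_id = arvif->vdev_id,
+ *           .enabled = 1,
+ *           .method = WMI_STA_KEEPALIVE_METHOD_NULL_FRAME,
+ *           .interval = 30,
+ *   };
+ *
+ *   ret = ath11k_wmi_sta_keepalive(ar, &arg);
+ *
+ * The ARP address fields are only needed for the two ARP-based methods
+ * handled above.
+ */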
diff --git a/drivers/net/wireless/ath/ath11k/wmi.h b/drivers/net/wireless/ath/ath11k/wmi.h
new file mode 100644
index 0000000000..100bb816b5
--- /dev/null
+++ b/drivers/net/wireless/ath/ath11k/wmi.h
@@ -0,0 +1,6508 @@
+/* SPDX-License-Identifier: BSD-3-Clause-Clear */
+/*
+ * Copyright (c) 2018-2019 The Linux Foundation. All rights reserved.
+ * Copyright (c) 2023 Qualcomm Innovation Center, Inc. All rights reserved.
+ */
+
+#ifndef ATH11K_WMI_H
+#define ATH11K_WMI_H
+
+#include <net/mac80211.h>
+#include "htc.h"
+
+struct ath11k_base;
+struct ath11k;
+struct ath11k_fw_stats;
+struct ath11k_fw_dbglog;
+struct ath11k_vif;
+
+#define PSOC_HOST_MAX_NUM_SS (8)
+
+/* defines to set packet extension (PET) values, which can be 0, 8 or 16 usec */
+#define MAX_HE_NSS 8
+#define MAX_HE_MODULATION 8
+#define MAX_HE_RU 4
+#define HE_MODULATION_NONE 7
+#define HE_PET_0_USEC 0
+#define HE_PET_8_USEC 1
+#define HE_PET_16_USEC 2
+
+#define WMI_MAX_CHAINS 8
+
+#define WMI_MAX_NUM_SS MAX_HE_NSS
+#define WMI_MAX_NUM_RU MAX_HE_RU
+
+#define WMI_TLV_CMD(grp_id) (((grp_id) << 12) | 0x1)
+#define WMI_TLV_EV(grp_id) (((grp_id) << 12) | 0x1)
+#define WMI_TLV_CMD_UNSUPPORTED 0
+#define WMI_TLV_PDEV_PARAM_UNSUPPORTED 0
+#define WMI_TLV_VDEV_PARAM_UNSUPPORTED 0
+
+struct wmi_cmd_hdr {
+ u32 cmd_id;
+} __packed;
+
+struct wmi_tlv {
+ u32 header;
+ u8 value[];
+} __packed;
+
+#define WMI_TLV_LEN GENMASK(15, 0)
+#define WMI_TLV_TAG GENMASK(31, 16)
+#define TLV_HDR_SIZE sizeof_field(struct wmi_tlv, header)
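+
+/* Example: a TLV header for a 16-byte WMI_TAG_ARRAY_BYTE payload is packed
+ * and unpacked with the bitfield helpers from <linux/bitfield.h>:
+ *
+ *   tlv->header = FIELD_PREP(WMI_TLV_TAG, WMI_TAG_ARRAY_BYTE) |
+ *                 FIELD_PREP(WMI_TLV_LEN, 16);
+ *
+ *   tag = FIELD_GET(WMI_TLV_TAG, tlv->header);
+ *   len = FIELD_GET(WMI_TLV_LEN, tlv->header);
+ */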
+
+#define WMI_CMD_HDR_CMD_ID GENMASK(23, 0)
+#define WMI_MAX_MEM_REQS 32
+#define ATH11K_MAX_HW_LISTEN_INTERVAL 5
+
+#define WLAN_SCAN_MAX_HINT_S_SSID 10
+#define WLAN_SCAN_MAX_HINT_BSSID 10
+#define MAX_RNR_BSS 5
+
+#define WLAN_SCAN_PARAMS_MAX_SSID 16
+#define WLAN_SCAN_PARAMS_MAX_BSSID 4
+#define WLAN_SCAN_PARAMS_MAX_IE_LEN 256
+
+#define WMI_APPEND_TO_EXISTING_CHAN_LIST_FLAG 1
+
+#define MAX_WMI_UTF_LEN 252
+#define WMI_BA_MODE_BUFFER_SIZE_256 3
+/*
+ * HW mode config type replicated from FW header
+ * @WMI_HOST_HW_MODE_SINGLE: Only one PHY is active.
+ * @WMI_HOST_HW_MODE_DBS: Both PHYs are active in different bands,
+ * one in 2G and another in 5G.
+ * @WMI_HOST_HW_MODE_SBS_PASSIVE: Both PHYs are in passive mode (only rx) in
+ * same band; no tx allowed.
+ * @WMI_HOST_HW_MODE_SBS: Both PHYs are active in the same band.
+ * Support for both PHYs within one band is planned
+ * for 5G only (as indicated in WMI_MAC_PHY_CAPABILITIES),
+ * but could be extended to other bands in the future.
+ * The separation of the band between the two PHYs needs
+ * to be communicated separately.
+ * @WMI_HOST_HW_MODE_DBS_SBS: 3 PHYs, with 2 on the same band doing SBS
+ * as in WMI_HW_MODE_SBS, and the 3rd on the other band.
+ * @WMI_HOST_HW_MODE_DBS_OR_SBS: Two PHYs, with one PHY capable of both 2G
+ * and 5G. It can support SBS (5G + 5G) or DBS (5G + 2G).
+ * @WMI_HOST_HW_MODE_MAX: Max hw_mode_id. Used to indicate invalid mode.
+ */
+enum wmi_host_hw_mode_config_type {
+ WMI_HOST_HW_MODE_SINGLE = 0,
+ WMI_HOST_HW_MODE_DBS = 1,
+ WMI_HOST_HW_MODE_SBS_PASSIVE = 2,
+ WMI_HOST_HW_MODE_SBS = 3,
+ WMI_HOST_HW_MODE_DBS_SBS = 4,
+ WMI_HOST_HW_MODE_DBS_OR_SBS = 5,
+
+ /* keep last */
+ WMI_HOST_HW_MODE_MAX
+};
+
+/* HW mode priority values used to detect the preferred HW mode
+ * from the available modes.
+ */
+enum wmi_host_hw_mode_priority {
+ WMI_HOST_HW_MODE_DBS_SBS_PRI,
+ WMI_HOST_HW_MODE_DBS_PRI,
+ WMI_HOST_HW_MODE_DBS_OR_SBS_PRI,
+ WMI_HOST_HW_MODE_SBS_PRI,
+ WMI_HOST_HW_MODE_SBS_PASSIVE_PRI,
+ WMI_HOST_HW_MODE_SINGLE_PRI,
+
+ /* keep last the lowest priority */
+ WMI_HOST_HW_MODE_MAX_PRI
+};
+
+enum WMI_HOST_WLAN_BAND {
+ WMI_HOST_WLAN_2G_CAP = 0x1,
+ WMI_HOST_WLAN_5G_CAP = 0x2,
+ WMI_HOST_WLAN_2G_5G_CAP = WMI_HOST_WLAN_2G_CAP | WMI_HOST_WLAN_5G_CAP,
+};
+
+/* Parameters used for WMI_VDEV_PARAM_AUTORATE_MISC_CFG command.
+ * Used only for HE auto rate mode.
+ */
+enum {
+ /* HE LTF related configuration */
+ WMI_HE_AUTORATE_LTF_1X = BIT(0),
+ WMI_HE_AUTORATE_LTF_2X = BIT(1),
+ WMI_HE_AUTORATE_LTF_4X = BIT(2),
+
+ /* HE GI related configuration */
+ WMI_AUTORATE_400NS_GI = BIT(8),
+ WMI_AUTORATE_800NS_GI = BIT(9),
+ WMI_AUTORATE_1600NS_GI = BIT(10),
+ WMI_AUTORATE_3200NS_GI = BIT(11),
+};
+
+enum {
+ WMI_HOST_VDEV_FLAGS_NON_MBSSID_AP = 0x00000001,
+ WMI_HOST_VDEV_FLAGS_TRANSMIT_AP = 0x00000002,
+ WMI_HOST_VDEV_FLAGS_NON_TRANSMIT_AP = 0x00000004,
+ WMI_HOST_VDEV_FLAGS_EMA_MODE = 0x00000008,
+ WMI_HOST_VDEV_FLAGS_SCAN_MODE_VAP = 0x00000010,
+};
+
+/*
+ * wmi command groups.
+ */
+enum wmi_cmd_group {
+ /* 0 to 2 are reserved */
+ WMI_GRP_START = 0x3,
+ WMI_GRP_SCAN = WMI_GRP_START,
+ WMI_GRP_PDEV = 0x4,
+ WMI_GRP_VDEV = 0x5,
+ WMI_GRP_PEER = 0x6,
+ WMI_GRP_MGMT = 0x7,
+ WMI_GRP_BA_NEG = 0x8,
+ WMI_GRP_STA_PS = 0x9,
+ WMI_GRP_DFS = 0xa,
+ WMI_GRP_ROAM = 0xb,
+ WMI_GRP_OFL_SCAN = 0xc,
+ WMI_GRP_P2P = 0xd,
+ WMI_GRP_AP_PS = 0xe,
+ WMI_GRP_RATE_CTRL = 0xf,
+ WMI_GRP_PROFILE = 0x10,
+ WMI_GRP_SUSPEND = 0x11,
+ WMI_GRP_BCN_FILTER = 0x12,
+ WMI_GRP_WOW = 0x13,
+ WMI_GRP_RTT = 0x14,
+ WMI_GRP_SPECTRAL = 0x15,
+ WMI_GRP_STATS = 0x16,
+ WMI_GRP_ARP_NS_OFL = 0x17,
+ WMI_GRP_NLO_OFL = 0x18,
+ WMI_GRP_GTK_OFL = 0x19,
+ WMI_GRP_CSA_OFL = 0x1a,
+ WMI_GRP_CHATTER = 0x1b,
+ WMI_GRP_TID_ADDBA = 0x1c,
+ WMI_GRP_MISC = 0x1d,
+ WMI_GRP_GPIO = 0x1e,
+ WMI_GRP_FWTEST = 0x1f,
+ WMI_GRP_TDLS = 0x20,
+ WMI_GRP_RESMGR = 0x21,
+ WMI_GRP_STA_SMPS = 0x22,
+ WMI_GRP_WLAN_HB = 0x23,
+ WMI_GRP_RMC = 0x24,
+ WMI_GRP_MHF_OFL = 0x25,
+ WMI_GRP_LOCATION_SCAN = 0x26,
+ WMI_GRP_OEM = 0x27,
+ WMI_GRP_NAN = 0x28,
+ WMI_GRP_COEX = 0x29,
+ WMI_GRP_OBSS_OFL = 0x2a,
+ WMI_GRP_LPI = 0x2b,
+ WMI_GRP_EXTSCAN = 0x2c,
+ WMI_GRP_DHCP_OFL = 0x2d,
+ WMI_GRP_IPA = 0x2e,
+ WMI_GRP_MDNS_OFL = 0x2f,
+ WMI_GRP_SAP_OFL = 0x30,
+ WMI_GRP_OCB = 0x31,
+ WMI_GRP_SOC = 0x32,
+ WMI_GRP_PKT_FILTER = 0x33,
+ WMI_GRP_MAWC = 0x34,
+ WMI_GRP_PMF_OFFLOAD = 0x35,
+ WMI_GRP_BPF_OFFLOAD = 0x36,
+ WMI_GRP_NAN_DATA = 0x37,
+ WMI_GRP_PROTOTYPE = 0x38,
+ WMI_GRP_MONITOR = 0x39,
+ WMI_GRP_REGULATORY = 0x3a,
+ WMI_GRP_HW_DATA_FILTER = 0x3b,
+ WMI_GRP_WLM = 0x3c,
+ WMI_GRP_11K_OFFLOAD = 0x3d,
+ WMI_GRP_TWT = 0x3e,
+ WMI_GRP_MOTION_DET = 0x3f,
+ WMI_GRP_SPATIAL_REUSE = 0x40,
+};
+
+#define WMI_CMD_GRP(grp_id) (((grp_id) << 12) | 0x1)
+#define WMI_EVT_GRP_START_ID(grp_id) (((grp_id) << 12) | 0x1)
+
+#define WMI_CMD_UNSUPPORTED 0
+
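+/* The first command in each group takes the ID ((grp_id << 12) | 0x1) and
+ * subsequent entries follow by enum auto-increment; e.g. with
+ * WMI_GRP_SCAN = 0x3:
+ *
+ *   WMI_START_SCAN_CMDID     = 0x3001
+ *   WMI_STOP_SCAN_CMDID      = 0x3002
+ *   WMI_SCAN_CHAN_LIST_CMDID = 0x3003
+ */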
+enum wmi_tlv_cmd_id {
+ WMI_INIT_CMDID = 0x1,
+ WMI_START_SCAN_CMDID = WMI_TLV_CMD(WMI_GRP_SCAN),
+ WMI_STOP_SCAN_CMDID,
+ WMI_SCAN_CHAN_LIST_CMDID,
+ WMI_SCAN_SCH_PRIO_TBL_CMDID,
+ WMI_SCAN_UPDATE_REQUEST_CMDID,
+ WMI_SCAN_PROB_REQ_OUI_CMDID,
+ WMI_SCAN_ADAPTIVE_DWELL_CONFIG_CMDID,
+ WMI_PDEV_SET_REGDOMAIN_CMDID = WMI_TLV_CMD(WMI_GRP_PDEV),
+ WMI_PDEV_SET_CHANNEL_CMDID,
+ WMI_PDEV_SET_PARAM_CMDID,
+ WMI_PDEV_PKTLOG_ENABLE_CMDID,
+ WMI_PDEV_PKTLOG_DISABLE_CMDID,
+ WMI_PDEV_SET_WMM_PARAMS_CMDID,
+ WMI_PDEV_SET_HT_CAP_IE_CMDID,
+ WMI_PDEV_SET_VHT_CAP_IE_CMDID,
+ WMI_PDEV_SET_DSCP_TID_MAP_CMDID,
+ WMI_PDEV_SET_QUIET_MODE_CMDID,
+ WMI_PDEV_GREEN_AP_PS_ENABLE_CMDID,
+ WMI_PDEV_GET_TPC_CONFIG_CMDID,
+ WMI_PDEV_SET_BASE_MACADDR_CMDID,
+ WMI_PDEV_DUMP_CMDID,
+ WMI_PDEV_SET_LED_CONFIG_CMDID,
+ WMI_PDEV_GET_TEMPERATURE_CMDID,
+ WMI_PDEV_SET_LED_FLASHING_CMDID,
+ WMI_PDEV_SMART_ANT_ENABLE_CMDID,
+ WMI_PDEV_SMART_ANT_SET_RX_ANTENNA_CMDID,
+ WMI_PDEV_SET_ANTENNA_SWITCH_TABLE_CMDID,
+ WMI_PDEV_SET_CTL_TABLE_CMDID,
+ WMI_PDEV_SET_MIMOGAIN_TABLE_CMDID,
+ WMI_PDEV_FIPS_CMDID,
+ WMI_PDEV_GET_ANI_CCK_CONFIG_CMDID,
+ WMI_PDEV_GET_ANI_OFDM_CONFIG_CMDID,
+ WMI_PDEV_GET_NFCAL_POWER_CMDID,
+ WMI_PDEV_GET_TPC_CMDID,
+ WMI_MIB_STATS_ENABLE_CMDID,
+ WMI_PDEV_SET_PCL_CMDID,
+ WMI_PDEV_SET_HW_MODE_CMDID,
+ WMI_PDEV_SET_MAC_CONFIG_CMDID,
+ WMI_PDEV_SET_ANTENNA_MODE_CMDID,
+ WMI_SET_PERIODIC_CHANNEL_STATS_CONFIG_CMDID,
+ WMI_PDEV_WAL_POWER_DEBUG_CMDID,
+ WMI_PDEV_SET_REORDER_TIMEOUT_VAL_CMDID,
+ WMI_PDEV_SET_WAKEUP_CONFIG_CMDID,
+ WMI_PDEV_GET_ANTDIV_STATUS_CMDID,
+ WMI_PDEV_GET_CHIP_POWER_STATS_CMDID,
+ WMI_PDEV_SET_STATS_THRESHOLD_CMDID,
+ WMI_PDEV_MULTIPLE_VDEV_RESTART_REQUEST_CMDID,
+ WMI_PDEV_UPDATE_PKT_ROUTING_CMDID,
+ WMI_PDEV_CHECK_CAL_VERSION_CMDID,
+ WMI_PDEV_SET_DIVERSITY_GAIN_CMDID,
+ WMI_PDEV_DIV_GET_RSSI_ANTID_CMDID,
+ WMI_PDEV_BSS_CHAN_INFO_REQUEST_CMDID,
+ WMI_PDEV_UPDATE_PMK_CACHE_CMDID,
+ WMI_PDEV_UPDATE_FILS_HLP_PKT_CMDID,
+ WMI_PDEV_UPDATE_CTLTABLE_REQUEST_CMDID,
+ WMI_PDEV_CONFIG_VENDOR_OUI_ACTION_CMDID,
+ WMI_PDEV_SET_AC_TX_QUEUE_OPTIMIZED_CMDID,
+ WMI_PDEV_SET_RX_FILTER_PROMISCUOUS_CMDID,
+ WMI_PDEV_DMA_RING_CFG_REQ_CMDID,
+ WMI_PDEV_HE_TB_ACTION_FRM_CMDID,
+ WMI_PDEV_PKTLOG_FILTER_CMDID,
+ WMI_PDEV_SET_RAP_CONFIG_CMDID,
+ WMI_PDEV_DSM_FILTER_CMDID,
+ WMI_PDEV_FRAME_INJECT_CMDID,
+ WMI_PDEV_TBTT_OFFSET_SYNC_CMDID,
+ WMI_PDEV_SET_SRG_BSS_COLOR_BITMAP_CMDID,
+ WMI_PDEV_SET_SRG_PARTIAL_BSSID_BITMAP_CMDID,
+ WMI_PDEV_SET_SRG_OBSS_COLOR_ENABLE_BITMAP_CMDID,
+ WMI_PDEV_SET_SRG_OBSS_BSSID_ENABLE_BITMAP_CMDID,
+ WMI_PDEV_SET_NON_SRG_OBSS_COLOR_ENABLE_BITMAP_CMDID,
+ WMI_PDEV_SET_NON_SRG_OBSS_BSSID_ENABLE_BITMAP_CMDID,
+ WMI_PDEV_GET_TPC_STATS_CMDID,
+ WMI_PDEV_ENABLE_DURATION_BASED_TX_MODE_SELECTION_CMDID,
+ WMI_PDEV_GET_DPD_STATUS_CMDID,
+ WMI_PDEV_SET_BIOS_SAR_TABLE_CMDID,
+ WMI_PDEV_SET_BIOS_GEO_TABLE_CMDID,
+ WMI_VDEV_CREATE_CMDID = WMI_TLV_CMD(WMI_GRP_VDEV),
+ WMI_VDEV_DELETE_CMDID,
+ WMI_VDEV_START_REQUEST_CMDID,
+ WMI_VDEV_RESTART_REQUEST_CMDID,
+ WMI_VDEV_UP_CMDID,
+ WMI_VDEV_STOP_CMDID,
+ WMI_VDEV_DOWN_CMDID,
+ WMI_VDEV_SET_PARAM_CMDID,
+ WMI_VDEV_INSTALL_KEY_CMDID,
+ WMI_VDEV_WNM_SLEEPMODE_CMDID,
+ WMI_VDEV_WMM_ADDTS_CMDID,
+ WMI_VDEV_WMM_DELTS_CMDID,
+ WMI_VDEV_SET_WMM_PARAMS_CMDID,
+ WMI_VDEV_SET_GTX_PARAMS_CMDID,
+ WMI_VDEV_IPSEC_NATKEEPALIVE_FILTER_CMDID,
+ WMI_VDEV_PLMREQ_START_CMDID,
+ WMI_VDEV_PLMREQ_STOP_CMDID,
+ WMI_VDEV_TSF_TSTAMP_ACTION_CMDID,
+ WMI_VDEV_SET_IE_CMDID,
+ WMI_VDEV_RATEMASK_CMDID,
+ WMI_VDEV_ATF_REQUEST_CMDID,
+ WMI_VDEV_SET_DSCP_TID_MAP_CMDID,
+ WMI_VDEV_FILTER_NEIGHBOR_RX_PACKETS_CMDID,
+ WMI_VDEV_SET_QUIET_MODE_CMDID,
+ WMI_VDEV_SET_CUSTOM_AGGR_SIZE_CMDID,
+ WMI_VDEV_ENCRYPT_DECRYPT_DATA_REQ_CMDID,
+ WMI_VDEV_ADD_MAC_ADDR_TO_RX_FILTER_CMDID,
+ WMI_PEER_CREATE_CMDID = WMI_TLV_CMD(WMI_GRP_PEER),
+ WMI_PEER_DELETE_CMDID,
+ WMI_PEER_FLUSH_TIDS_CMDID,
+ WMI_PEER_SET_PARAM_CMDID,
+ WMI_PEER_ASSOC_CMDID,
+ WMI_PEER_ADD_WDS_ENTRY_CMDID,
+ WMI_PEER_REMOVE_WDS_ENTRY_CMDID,
+ WMI_PEER_MCAST_GROUP_CMDID,
+ WMI_PEER_INFO_REQ_CMDID,
+ WMI_PEER_GET_ESTIMATED_LINKSPEED_CMDID,
+ WMI_PEER_SET_RATE_REPORT_CONDITION_CMDID,
+ WMI_PEER_UPDATE_WDS_ENTRY_CMDID,
+ WMI_PEER_ADD_PROXY_STA_ENTRY_CMDID,
+ WMI_PEER_SMART_ANT_SET_TX_ANTENNA_CMDID,
+ WMI_PEER_SMART_ANT_SET_TRAIN_INFO_CMDID,
+ WMI_PEER_SMART_ANT_SET_NODE_CONFIG_OPS_CMDID,
+ WMI_PEER_ATF_REQUEST_CMDID,
+ WMI_PEER_BWF_REQUEST_CMDID,
+ WMI_PEER_REORDER_QUEUE_SETUP_CMDID,
+ WMI_PEER_REORDER_QUEUE_REMOVE_CMDID,
+ WMI_PEER_SET_RX_BLOCKSIZE_CMDID,
+ WMI_PEER_ANTDIV_INFO_REQ_CMDID,
+ WMI_BCN_TX_CMDID = WMI_TLV_CMD(WMI_GRP_MGMT),
+ WMI_PDEV_SEND_BCN_CMDID,
+ WMI_BCN_TMPL_CMDID,
+ WMI_BCN_FILTER_RX_CMDID,
+ WMI_PRB_REQ_FILTER_RX_CMDID,
+ WMI_MGMT_TX_CMDID,
+ WMI_PRB_TMPL_CMDID,
+ WMI_MGMT_TX_SEND_CMDID,
+ WMI_OFFCHAN_DATA_TX_SEND_CMDID,
+ WMI_PDEV_SEND_FD_CMDID,
+ WMI_BCN_OFFLOAD_CTRL_CMDID,
+ WMI_BSS_COLOR_CHANGE_ENABLE_CMDID,
+ WMI_VDEV_BCN_OFFLOAD_QUIET_CONFIG_CMDID,
+ WMI_FILS_DISCOVERY_TMPL_CMDID,
+ WMI_ADDBA_CLEAR_RESP_CMDID = WMI_TLV_CMD(WMI_GRP_BA_NEG),
+ WMI_ADDBA_SEND_CMDID,
+ WMI_ADDBA_STATUS_CMDID,
+ WMI_DELBA_SEND_CMDID,
+ WMI_ADDBA_SET_RESP_CMDID,
+ WMI_SEND_SINGLEAMSDU_CMDID,
+ WMI_STA_POWERSAVE_MODE_CMDID = WMI_TLV_CMD(WMI_GRP_STA_PS),
+ WMI_STA_POWERSAVE_PARAM_CMDID,
+ WMI_STA_MIMO_PS_MODE_CMDID,
+ WMI_PDEV_DFS_ENABLE_CMDID = WMI_TLV_CMD(WMI_GRP_DFS),
+ WMI_PDEV_DFS_DISABLE_CMDID,
+ WMI_DFS_PHYERR_FILTER_ENA_CMDID,
+ WMI_DFS_PHYERR_FILTER_DIS_CMDID,
+ WMI_PDEV_DFS_PHYERR_OFFLOAD_ENABLE_CMDID,
+ WMI_PDEV_DFS_PHYERR_OFFLOAD_DISABLE_CMDID,
+ WMI_VDEV_ADFS_CH_CFG_CMDID,
+ WMI_VDEV_ADFS_OCAC_ABORT_CMDID,
+ WMI_ROAM_SCAN_MODE = WMI_TLV_CMD(WMI_GRP_ROAM),
+ WMI_ROAM_SCAN_RSSI_THRESHOLD,
+ WMI_ROAM_SCAN_PERIOD,
+ WMI_ROAM_SCAN_RSSI_CHANGE_THRESHOLD,
+ WMI_ROAM_AP_PROFILE,
+ WMI_ROAM_CHAN_LIST,
+ WMI_ROAM_SCAN_CMD,
+ WMI_ROAM_SYNCH_COMPLETE,
+ WMI_ROAM_SET_RIC_REQUEST_CMDID,
+ WMI_ROAM_INVOKE_CMDID,
+ WMI_ROAM_FILTER_CMDID,
+ WMI_ROAM_SUBNET_CHANGE_CONFIG_CMDID,
+ WMI_ROAM_CONFIGURE_MAWC_CMDID,
+ WMI_ROAM_SET_MBO_PARAM_CMDID,
+ WMI_ROAM_PER_CONFIG_CMDID,
+ WMI_ROAM_BTM_CONFIG_CMDID,
+ WMI_ENABLE_FILS_CMDID,
+ WMI_OFL_SCAN_ADD_AP_PROFILE = WMI_TLV_CMD(WMI_GRP_OFL_SCAN),
+ WMI_OFL_SCAN_REMOVE_AP_PROFILE,
+ WMI_OFL_SCAN_PERIOD,
+ WMI_P2P_DEV_SET_DEVICE_INFO = WMI_TLV_CMD(WMI_GRP_P2P),
+ WMI_P2P_DEV_SET_DISCOVERABILITY,
+ WMI_P2P_GO_SET_BEACON_IE,
+ WMI_P2P_GO_SET_PROBE_RESP_IE,
+ WMI_P2P_SET_VENDOR_IE_DATA_CMDID,
+ WMI_P2P_DISC_OFFLOAD_CONFIG_CMDID,
+ WMI_P2P_DISC_OFFLOAD_APPIE_CMDID,
+ WMI_P2P_DISC_OFFLOAD_PATTERN_CMDID,
+ WMI_P2P_SET_OPPPS_PARAM_CMDID,
+ WMI_P2P_LISTEN_OFFLOAD_START_CMDID,
+ WMI_P2P_LISTEN_OFFLOAD_STOP_CMDID,
+ WMI_AP_PS_PEER_PARAM_CMDID = WMI_TLV_CMD(WMI_GRP_AP_PS),
+ WMI_AP_PS_PEER_UAPSD_COEX_CMDID,
+ WMI_AP_PS_EGAP_PARAM_CMDID,
+ WMI_PEER_RATE_RETRY_SCHED_CMDID = WMI_TLV_CMD(WMI_GRP_RATE_CTRL),
+ WMI_WLAN_PROFILE_TRIGGER_CMDID = WMI_TLV_CMD(WMI_GRP_PROFILE),
+ WMI_WLAN_PROFILE_SET_HIST_INTVL_CMDID,
+ WMI_WLAN_PROFILE_GET_PROFILE_DATA_CMDID,
+ WMI_WLAN_PROFILE_ENABLE_PROFILE_ID_CMDID,
+ WMI_WLAN_PROFILE_LIST_PROFILE_ID_CMDID,
+ WMI_PDEV_SUSPEND_CMDID = WMI_TLV_CMD(WMI_GRP_SUSPEND),
+ WMI_PDEV_RESUME_CMDID,
+ WMI_ADD_BCN_FILTER_CMDID = WMI_TLV_CMD(WMI_GRP_BCN_FILTER),
+ WMI_RMV_BCN_FILTER_CMDID,
+ WMI_WOW_ADD_WAKE_PATTERN_CMDID = WMI_TLV_CMD(WMI_GRP_WOW),
+ WMI_WOW_DEL_WAKE_PATTERN_CMDID,
+ WMI_WOW_ENABLE_DISABLE_WAKE_EVENT_CMDID,
+ WMI_WOW_ENABLE_CMDID,
+ WMI_WOW_HOSTWAKEUP_FROM_SLEEP_CMDID,
+ WMI_WOW_IOAC_ADD_KEEPALIVE_CMDID,
+ WMI_WOW_IOAC_DEL_KEEPALIVE_CMDID,
+ WMI_WOW_IOAC_ADD_WAKE_PATTERN_CMDID,
+ WMI_WOW_IOAC_DEL_WAKE_PATTERN_CMDID,
+ WMI_D0_WOW_ENABLE_DISABLE_CMDID,
+ WMI_EXTWOW_ENABLE_CMDID,
+ WMI_EXTWOW_SET_APP_TYPE1_PARAMS_CMDID,
+ WMI_EXTWOW_SET_APP_TYPE2_PARAMS_CMDID,
+ WMI_WOW_ENABLE_ICMPV6_NA_FLT_CMDID,
+ WMI_WOW_UDP_SVC_OFLD_CMDID,
+ WMI_WOW_HOSTWAKEUP_GPIO_PIN_PATTERN_CONFIG_CMDID,
+ WMI_WOW_SET_ACTION_WAKE_UP_CMDID,
+ WMI_RTT_MEASREQ_CMDID = WMI_TLV_CMD(WMI_GRP_RTT),
+ WMI_RTT_TSF_CMDID,
+ WMI_VDEV_SPECTRAL_SCAN_CONFIGURE_CMDID = WMI_TLV_CMD(WMI_GRP_SPECTRAL),
+ WMI_VDEV_SPECTRAL_SCAN_ENABLE_CMDID,
+ WMI_REQUEST_STATS_CMDID = WMI_TLV_CMD(WMI_GRP_STATS),
+ WMI_MCC_SCHED_TRAFFIC_STATS_CMDID,
+ WMI_REQUEST_STATS_EXT_CMDID,
+ WMI_REQUEST_LINK_STATS_CMDID,
+ WMI_START_LINK_STATS_CMDID,
+ WMI_CLEAR_LINK_STATS_CMDID,
+ WMI_GET_FW_MEM_DUMP_CMDID,
+ WMI_DEBUG_MESG_FLUSH_CMDID,
+ WMI_DIAG_EVENT_LOG_CONFIG_CMDID,
+ WMI_REQUEST_WLAN_STATS_CMDID,
+ WMI_REQUEST_RCPI_CMDID,
+ WMI_REQUEST_PEER_STATS_INFO_CMDID,
+ WMI_REQUEST_RADIO_CHAN_STATS_CMDID,
+ WMI_SET_ARP_NS_OFFLOAD_CMDID = WMI_TLV_CMD(WMI_GRP_ARP_NS_OFL),
+ WMI_ADD_PROACTIVE_ARP_RSP_PATTERN_CMDID,
+ WMI_DEL_PROACTIVE_ARP_RSP_PATTERN_CMDID,
+ WMI_NETWORK_LIST_OFFLOAD_CONFIG_CMDID = WMI_TLV_CMD(WMI_GRP_NLO_OFL),
+ WMI_APFIND_CMDID,
+ WMI_PASSPOINT_LIST_CONFIG_CMDID,
+ WMI_NLO_CONFIGURE_MAWC_CMDID,
+ WMI_GTK_OFFLOAD_CMDID = WMI_TLV_CMD(WMI_GRP_GTK_OFL),
+ WMI_CSA_OFFLOAD_ENABLE_CMDID = WMI_TLV_CMD(WMI_GRP_CSA_OFL),
+ WMI_CSA_OFFLOAD_CHANSWITCH_CMDID,
+ WMI_CHATTER_SET_MODE_CMDID = WMI_TLV_CMD(WMI_GRP_CHATTER),
+ WMI_CHATTER_ADD_COALESCING_FILTER_CMDID,
+ WMI_CHATTER_DELETE_COALESCING_FILTER_CMDID,
+ WMI_CHATTER_COALESCING_QUERY_CMDID,
+ WMI_PEER_TID_ADDBA_CMDID = WMI_TLV_CMD(WMI_GRP_TID_ADDBA),
+ WMI_PEER_TID_DELBA_CMDID,
+ WMI_STA_DTIM_PS_METHOD_CMDID,
+ WMI_STA_UAPSD_AUTO_TRIG_CMDID,
+ WMI_STA_KEEPALIVE_CMDID,
+ WMI_BA_REQ_SSN_CMDID,
+ WMI_ECHO_CMDID = WMI_TLV_CMD(WMI_GRP_MISC),
+ WMI_PDEV_UTF_CMDID,
+ WMI_DBGLOG_CFG_CMDID,
+ WMI_PDEV_QVIT_CMDID,
+ WMI_PDEV_FTM_INTG_CMDID,
+ WMI_VDEV_SET_KEEPALIVE_CMDID,
+ WMI_VDEV_GET_KEEPALIVE_CMDID,
+ WMI_FORCE_FW_HANG_CMDID,
+ WMI_SET_MCASTBCAST_FILTER_CMDID,
+ WMI_THERMAL_MGMT_CMDID,
+ WMI_HOST_AUTO_SHUTDOWN_CFG_CMDID,
+ WMI_TPC_CHAINMASK_CONFIG_CMDID,
+ WMI_SET_ANTENNA_DIVERSITY_CMDID,
+ WMI_OCB_SET_SCHED_CMDID,
+ WMI_RSSI_BREACH_MONITOR_CONFIG_CMDID,
+ WMI_LRO_CONFIG_CMDID,
+ WMI_TRANSFER_DATA_TO_FLASH_CMDID,
+ WMI_CONFIG_ENHANCED_MCAST_FILTER_CMDID,
+ WMI_VDEV_WISA_CMDID,
+ WMI_DBGLOG_TIME_STAMP_SYNC_CMDID,
+ WMI_SET_MULTIPLE_MCAST_FILTER_CMDID,
+ WMI_READ_DATA_FROM_FLASH_CMDID,
+ WMI_THERM_THROT_SET_CONF_CMDID,
+ WMI_RUNTIME_DPD_RECAL_CMDID,
+ WMI_GET_TPC_POWER_CMDID,
+ WMI_IDLE_TRIGGER_MONITOR_CMDID,
+ WMI_GPIO_CONFIG_CMDID = WMI_TLV_CMD(WMI_GRP_GPIO),
+ WMI_GPIO_OUTPUT_CMDID,
+ WMI_TXBF_CMDID,
+ WMI_FWTEST_VDEV_MCC_SET_TBTT_MODE_CMDID = WMI_TLV_CMD(WMI_GRP_FWTEST),
+ WMI_FWTEST_P2P_SET_NOA_PARAM_CMDID,
+ WMI_UNIT_TEST_CMDID,
+ WMI_FWTEST_CMDID,
+ WMI_QBOOST_CFG_CMDID,
+ WMI_TDLS_SET_STATE_CMDID = WMI_TLV_CMD(WMI_GRP_TDLS),
+ WMI_TDLS_PEER_UPDATE_CMDID,
+ WMI_TDLS_SET_OFFCHAN_MODE_CMDID,
+ WMI_RESMGR_ADAPTIVE_OCS_EN_DIS_CMDID = WMI_TLV_CMD(WMI_GRP_RESMGR),
+ WMI_RESMGR_SET_CHAN_TIME_QUOTA_CMDID,
+ WMI_RESMGR_SET_CHAN_LATENCY_CMDID,
+ WMI_STA_SMPS_FORCE_MODE_CMDID = WMI_TLV_CMD(WMI_GRP_STA_SMPS),
+ WMI_STA_SMPS_PARAM_CMDID,
+ WMI_HB_SET_ENABLE_CMDID = WMI_TLV_CMD(WMI_GRP_WLAN_HB),
+ WMI_HB_SET_TCP_PARAMS_CMDID,
+ WMI_HB_SET_TCP_PKT_FILTER_CMDID,
+ WMI_HB_SET_UDP_PARAMS_CMDID,
+ WMI_HB_SET_UDP_PKT_FILTER_CMDID,
+ WMI_RMC_SET_MODE_CMDID = WMI_TLV_CMD(WMI_GRP_RMC),
+ WMI_RMC_SET_ACTION_PERIOD_CMDID,
+ WMI_RMC_CONFIG_CMDID,
+ WMI_RMC_SET_MANUAL_LEADER_CMDID,
+ WMI_MHF_OFFLOAD_SET_MODE_CMDID = WMI_TLV_CMD(WMI_GRP_MHF_OFL),
+ WMI_MHF_OFFLOAD_PLUMB_ROUTING_TBL_CMDID,
+ WMI_BATCH_SCAN_ENABLE_CMDID = WMI_TLV_CMD(WMI_GRP_LOCATION_SCAN),
+ WMI_BATCH_SCAN_DISABLE_CMDID,
+ WMI_BATCH_SCAN_TRIGGER_RESULT_CMDID,
+ WMI_OEM_REQ_CMDID = WMI_TLV_CMD(WMI_GRP_OEM),
+ WMI_OEM_REQUEST_CMDID,
+ WMI_LPI_OEM_REQ_CMDID,
+ WMI_NAN_CMDID = WMI_TLV_CMD(WMI_GRP_NAN),
+ WMI_MODEM_POWER_STATE_CMDID = WMI_TLV_CMD(WMI_GRP_COEX),
+ WMI_CHAN_AVOID_UPDATE_CMDID,
+ WMI_COEX_CONFIG_CMDID,
+ WMI_CHAN_AVOID_RPT_ALLOW_CMDID,
+ WMI_COEX_GET_ANTENNA_ISOLATION_CMDID,
+ WMI_SAR_LIMITS_CMDID,
+ WMI_OBSS_SCAN_ENABLE_CMDID = WMI_TLV_CMD(WMI_GRP_OBSS_OFL),
+ WMI_OBSS_SCAN_DISABLE_CMDID,
+ WMI_OBSS_COLOR_COLLISION_DET_CONFIG_CMDID,
+ WMI_LPI_MGMT_SNOOPING_CONFIG_CMDID = WMI_TLV_CMD(WMI_GRP_LPI),
+ WMI_LPI_START_SCAN_CMDID,
+ WMI_LPI_STOP_SCAN_CMDID,
+ WMI_EXTSCAN_START_CMDID = WMI_TLV_CMD(WMI_GRP_EXTSCAN),
+ WMI_EXTSCAN_STOP_CMDID,
+ WMI_EXTSCAN_CONFIGURE_WLAN_CHANGE_MONITOR_CMDID,
+ WMI_EXTSCAN_CONFIGURE_HOTLIST_MONITOR_CMDID,
+ WMI_EXTSCAN_GET_CACHED_RESULTS_CMDID,
+ WMI_EXTSCAN_GET_WLAN_CHANGE_RESULTS_CMDID,
+ WMI_EXTSCAN_SET_CAPABILITIES_CMDID,
+ WMI_EXTSCAN_GET_CAPABILITIES_CMDID,
+ WMI_EXTSCAN_CONFIGURE_HOTLIST_SSID_MONITOR_CMDID,
+ WMI_EXTSCAN_CONFIGURE_MAWC_CMDID,
+ WMI_SET_DHCP_SERVER_OFFLOAD_CMDID = WMI_TLV_CMD(WMI_GRP_DHCP_OFL),
+ WMI_IPA_OFFLOAD_ENABLE_DISABLE_CMDID = WMI_TLV_CMD(WMI_GRP_IPA),
+ WMI_MDNS_OFFLOAD_ENABLE_CMDID = WMI_TLV_CMD(WMI_GRP_MDNS_OFL),
+ WMI_MDNS_SET_FQDN_CMDID,
+ WMI_MDNS_SET_RESPONSE_CMDID,
+ WMI_MDNS_GET_STATS_CMDID,
+ WMI_SAP_OFL_ENABLE_CMDID = WMI_TLV_CMD(WMI_GRP_SAP_OFL),
+ WMI_SAP_SET_BLACKLIST_PARAM_CMDID,
+ WMI_OCB_SET_CONFIG_CMDID = WMI_TLV_CMD(WMI_GRP_OCB),
+ WMI_OCB_SET_UTC_TIME_CMDID,
+ WMI_OCB_START_TIMING_ADVERT_CMDID,
+ WMI_OCB_STOP_TIMING_ADVERT_CMDID,
+ WMI_OCB_GET_TSF_TIMER_CMDID,
+ WMI_DCC_GET_STATS_CMDID,
+ WMI_DCC_CLEAR_STATS_CMDID,
+ WMI_DCC_UPDATE_NDL_CMDID,
+ WMI_SOC_SET_PCL_CMDID = WMI_TLV_CMD(WMI_GRP_SOC),
+ WMI_SOC_SET_HW_MODE_CMDID,
+ WMI_SOC_SET_DUAL_MAC_CONFIG_CMDID,
+ WMI_SOC_SET_ANTENNA_MODE_CMDID,
+ WMI_PACKET_FILTER_CONFIG_CMDID = WMI_TLV_CMD(WMI_GRP_PKT_FILTER),
+ WMI_PACKET_FILTER_ENABLE_CMDID,
+ WMI_MAWC_SENSOR_REPORT_IND_CMDID = WMI_TLV_CMD(WMI_GRP_MAWC),
+ WMI_PMF_OFFLOAD_SET_SA_QUERY_CMDID = WMI_TLV_CMD(WMI_GRP_PMF_OFFLOAD),
+ WMI_BPF_GET_CAPABILITY_CMDID = WMI_TLV_CMD(WMI_GRP_BPF_OFFLOAD),
+ WMI_BPF_GET_VDEV_STATS_CMDID,
+ WMI_BPF_SET_VDEV_INSTRUCTIONS_CMDID,
+ WMI_BPF_DEL_VDEV_INSTRUCTIONS_CMDID,
+ WMI_BPF_SET_VDEV_ACTIVE_MODE_CMDID,
+ WMI_MNT_FILTER_CMDID = WMI_TLV_CMD(WMI_GRP_MONITOR),
+ WMI_SET_CURRENT_COUNTRY_CMDID = WMI_TLV_CMD(WMI_GRP_REGULATORY),
+ WMI_11D_SCAN_START_CMDID,
+ WMI_11D_SCAN_STOP_CMDID,
+ WMI_SET_INIT_COUNTRY_CMDID,
+ WMI_NDI_GET_CAP_REQ_CMDID = WMI_TLV_CMD(WMI_GRP_PROTOTYPE),
+ WMI_NDP_INITIATOR_REQ_CMDID,
+ WMI_NDP_RESPONDER_REQ_CMDID,
+ WMI_NDP_END_REQ_CMDID,
+ WMI_HW_DATA_FILTER_CMDID = WMI_TLV_CMD(WMI_GRP_HW_DATA_FILTER),
+ WMI_TWT_ENABLE_CMDID = WMI_TLV_CMD(WMI_GRP_TWT),
+ WMI_TWT_DISABLE_CMDID,
+ WMI_TWT_ADD_DIALOG_CMDID,
+ WMI_TWT_DEL_DIALOG_CMDID,
+ WMI_TWT_PAUSE_DIALOG_CMDID,
+ WMI_TWT_RESUME_DIALOG_CMDID,
+ WMI_PDEV_OBSS_PD_SPATIAL_REUSE_CMDID =
+ WMI_TLV_CMD(WMI_GRP_SPATIAL_REUSE),
+ WMI_PDEV_OBSS_PD_SPATIAL_REUSE_SET_DEF_OBSS_THRESH_CMDID,
+};
+
+enum wmi_tlv_event_id {
+ WMI_SERVICE_READY_EVENTID = 0x1,
+ WMI_READY_EVENTID,
+ WMI_SERVICE_AVAILABLE_EVENTID,
+ WMI_SCAN_EVENTID = WMI_EVT_GRP_START_ID(WMI_GRP_SCAN),
+ WMI_PDEV_TPC_CONFIG_EVENTID = WMI_TLV_CMD(WMI_GRP_PDEV),
+ WMI_CHAN_INFO_EVENTID,
+ WMI_PHYERR_EVENTID,
+ WMI_PDEV_DUMP_EVENTID,
+ WMI_TX_PAUSE_EVENTID,
+ WMI_DFS_RADAR_EVENTID,
+ WMI_PDEV_L1SS_TRACK_EVENTID,
+ WMI_PDEV_TEMPERATURE_EVENTID,
+ WMI_SERVICE_READY_EXT_EVENTID,
+ WMI_PDEV_FIPS_EVENTID,
+ WMI_PDEV_CHANNEL_HOPPING_EVENTID,
+ WMI_PDEV_ANI_CCK_LEVEL_EVENTID,
+ WMI_PDEV_ANI_OFDM_LEVEL_EVENTID,
+ WMI_PDEV_TPC_EVENTID,
+ WMI_PDEV_NFCAL_POWER_ALL_CHANNELS_EVENTID,
+ WMI_PDEV_SET_HW_MODE_RESP_EVENTID,
+ WMI_PDEV_HW_MODE_TRANSITION_EVENTID,
+ WMI_PDEV_SET_MAC_CONFIG_RESP_EVENTID,
+ WMI_PDEV_ANTDIV_STATUS_EVENTID,
+ WMI_PDEV_CHIP_POWER_STATS_EVENTID,
+ WMI_PDEV_CHIP_POWER_SAVE_FAILURE_DETECTED_EVENTID,
+ WMI_PDEV_CSA_SWITCH_COUNT_STATUS_EVENTID,
+ WMI_PDEV_CHECK_CAL_VERSION_EVENTID,
+ WMI_PDEV_DIV_RSSI_ANTID_EVENTID,
+ WMI_PDEV_BSS_CHAN_INFO_EVENTID,
+ WMI_PDEV_UPDATE_CTLTABLE_EVENTID,
+ WMI_PDEV_DMA_RING_CFG_RSP_EVENTID,
+ WMI_PDEV_DMA_RING_BUF_RELEASE_EVENTID,
+ WMI_PDEV_CTL_FAILSAFE_CHECK_EVENTID,
+ WMI_PDEV_CSC_SWITCH_COUNT_STATUS_EVENTID,
+ WMI_PDEV_COLD_BOOT_CAL_DATA_EVENTID,
+ WMI_PDEV_RAP_INFO_EVENTID,
+ WMI_CHAN_RF_CHARACTERIZATION_INFO_EVENTID,
+ WMI_SERVICE_READY_EXT2_EVENTID,
+ WMI_VDEV_START_RESP_EVENTID = WMI_TLV_CMD(WMI_GRP_VDEV),
+ WMI_VDEV_STOPPED_EVENTID,
+ WMI_VDEV_INSTALL_KEY_COMPLETE_EVENTID,
+ WMI_VDEV_MCC_BCN_INTERVAL_CHANGE_REQ_EVENTID,
+ WMI_VDEV_TSF_REPORT_EVENTID,
+ WMI_VDEV_DELETE_RESP_EVENTID,
+ WMI_VDEV_ENCRYPT_DECRYPT_DATA_RESP_EVENTID,
+ WMI_VDEV_ADD_MAC_ADDR_TO_RX_FILTER_STATUS_EVENTID,
+ WMI_PEER_STA_KICKOUT_EVENTID = WMI_TLV_CMD(WMI_GRP_PEER),
+ WMI_PEER_INFO_EVENTID,
+ WMI_PEER_TX_FAIL_CNT_THR_EVENTID,
+ WMI_PEER_ESTIMATED_LINKSPEED_EVENTID,
+ WMI_PEER_STATE_EVENTID,
+ WMI_PEER_ASSOC_CONF_EVENTID,
+ WMI_PEER_DELETE_RESP_EVENTID,
+ WMI_PEER_RATECODE_LIST_EVENTID,
+ WMI_WDS_PEER_EVENTID,
+ WMI_PEER_STA_PS_STATECHG_EVENTID,
+ WMI_PEER_ANTDIV_INFO_EVENTID,
+ WMI_PEER_RESERVED0_EVENTID,
+ WMI_PEER_RESERVED1_EVENTID,
+ WMI_PEER_RESERVED2_EVENTID,
+ WMI_PEER_RESERVED3_EVENTID,
+ WMI_PEER_RESERVED4_EVENTID,
+ WMI_PEER_RESERVED5_EVENTID,
+ WMI_PEER_RESERVED6_EVENTID,
+ WMI_PEER_RESERVED7_EVENTID,
+ WMI_PEER_RESERVED8_EVENTID,
+ WMI_PEER_RESERVED9_EVENTID,
+ WMI_PEER_RESERVED10_EVENTID,
+ WMI_PEER_OPER_MODE_CHANGE_EVENTID,
+ WMI_PEER_TX_PN_RESPONSE_EVENTID,
+ WMI_PEER_CFR_CAPTURE_EVENTID,
+ WMI_PEER_CREATE_CONF_EVENTID,
+ WMI_MGMT_RX_EVENTID = WMI_TLV_CMD(WMI_GRP_MGMT),
+ WMI_HOST_SWBA_EVENTID,
+ WMI_TBTTOFFSET_UPDATE_EVENTID,
+ WMI_OFFLOAD_BCN_TX_STATUS_EVENTID,
+ WMI_OFFLOAD_PROB_RESP_TX_STATUS_EVENTID,
+ WMI_MGMT_TX_COMPLETION_EVENTID,
+ WMI_MGMT_TX_BUNDLE_COMPLETION_EVENTID,
+ WMI_TBTTOFFSET_EXT_UPDATE_EVENTID,
+ WMI_OFFCHAN_DATA_TX_COMPLETION_EVENTID,
+ WMI_HOST_FILS_DISCOVERY_EVENTID,
+ WMI_TX_DELBA_COMPLETE_EVENTID = WMI_TLV_CMD(WMI_GRP_BA_NEG),
+ WMI_TX_ADDBA_COMPLETE_EVENTID,
+ WMI_BA_RSP_SSN_EVENTID,
+ WMI_AGGR_STATE_TRIG_EVENTID,
+ WMI_ROAM_EVENTID = WMI_TLV_CMD(WMI_GRP_ROAM),
+ WMI_PROFILE_MATCH,
+ WMI_ROAM_SYNCH_EVENTID,
+ WMI_P2P_DISC_EVENTID = WMI_TLV_CMD(WMI_GRP_P2P),
+ WMI_P2P_NOA_EVENTID,
+ WMI_P2P_LISTEN_OFFLOAD_STOPPED_EVENTID,
+ WMI_AP_PS_EGAP_INFO_EVENTID = WMI_TLV_CMD(WMI_GRP_AP_PS),
+ WMI_PDEV_RESUME_EVENTID = WMI_TLV_CMD(WMI_GRP_SUSPEND),
+ WMI_WOW_WAKEUP_HOST_EVENTID = WMI_TLV_CMD(WMI_GRP_WOW),
+ WMI_D0_WOW_DISABLE_ACK_EVENTID,
+ WMI_WOW_INITIAL_WAKEUP_EVENTID,
+ WMI_RTT_MEASUREMENT_REPORT_EVENTID = WMI_TLV_CMD(WMI_GRP_RTT),
+ WMI_TSF_MEASUREMENT_REPORT_EVENTID,
+ WMI_RTT_ERROR_REPORT_EVENTID,
+ WMI_STATS_EXT_EVENTID = WMI_TLV_CMD(WMI_GRP_STATS),
+ WMI_IFACE_LINK_STATS_EVENTID,
+ WMI_PEER_LINK_STATS_EVENTID,
+ WMI_RADIO_LINK_STATS_EVENTID,
+ WMI_UPDATE_FW_MEM_DUMP_EVENTID,
+ WMI_DIAG_EVENT_LOG_SUPPORTED_EVENTID,
+ WMI_INST_RSSI_STATS_EVENTID,
+ WMI_RADIO_TX_POWER_LEVEL_STATS_EVENTID,
+ WMI_REPORT_STATS_EVENTID,
+ WMI_UPDATE_RCPI_EVENTID,
+ WMI_PEER_STATS_INFO_EVENTID,
+ WMI_RADIO_CHAN_STATS_EVENTID,
+ WMI_NLO_MATCH_EVENTID = WMI_TLV_CMD(WMI_GRP_NLO_OFL),
+ WMI_NLO_SCAN_COMPLETE_EVENTID,
+ WMI_APFIND_EVENTID,
+ WMI_PASSPOINT_MATCH_EVENTID,
+ WMI_GTK_OFFLOAD_STATUS_EVENTID = WMI_TLV_CMD(WMI_GRP_GTK_OFL),
+ WMI_GTK_REKEY_FAIL_EVENTID,
+ WMI_CSA_HANDLING_EVENTID = WMI_TLV_CMD(WMI_GRP_CSA_OFL),
+ WMI_CHATTER_PC_QUERY_EVENTID = WMI_TLV_CMD(WMI_GRP_CHATTER),
+ WMI_PDEV_DFS_RADAR_DETECTION_EVENTID = WMI_TLV_CMD(WMI_GRP_DFS),
+ WMI_VDEV_DFS_CAC_COMPLETE_EVENTID,
+ WMI_VDEV_ADFS_OCAC_COMPLETE_EVENTID,
+ WMI_ECHO_EVENTID = WMI_TLV_CMD(WMI_GRP_MISC),
+ WMI_PDEV_UTF_EVENTID,
+ WMI_DEBUG_MESG_EVENTID,
+ WMI_UPDATE_STATS_EVENTID,
+ WMI_DEBUG_PRINT_EVENTID,
+ WMI_DCS_INTERFERENCE_EVENTID,
+ WMI_PDEV_QVIT_EVENTID,
+ WMI_WLAN_PROFILE_DATA_EVENTID,
+ WMI_PDEV_FTM_INTG_EVENTID,
+ WMI_WLAN_FREQ_AVOID_EVENTID,
+ WMI_VDEV_GET_KEEPALIVE_EVENTID,
+ WMI_THERMAL_MGMT_EVENTID,
+ WMI_DIAG_DATA_CONTAINER_EVENTID,
+ WMI_HOST_AUTO_SHUTDOWN_EVENTID,
+ WMI_UPDATE_WHAL_MIB_STATS_EVENTID,
+ WMI_UPDATE_VDEV_RATE_STATS_EVENTID,
+ WMI_DIAG_EVENTID,
+ WMI_OCB_SET_SCHED_EVENTID,
+ WMI_DEBUG_MESG_FLUSH_COMPLETE_EVENTID,
+ WMI_RSSI_BREACH_EVENTID,
+ WMI_TRANSFER_DATA_TO_FLASH_COMPLETE_EVENTID,
+ WMI_PDEV_UTF_SCPC_EVENTID,
+ WMI_READ_DATA_FROM_FLASH_EVENTID,
+ WMI_REPORT_RX_AGGR_FAILURE_EVENTID,
+ WMI_PKGID_EVENTID,
+ WMI_GPIO_INPUT_EVENTID = WMI_TLV_CMD(WMI_GRP_GPIO),
+ WMI_UPLOADH_EVENTID,
+ WMI_CAPTUREH_EVENTID,
+ WMI_RFKILL_STATE_CHANGE_EVENTID,
+ WMI_TDLS_PEER_EVENTID = WMI_TLV_CMD(WMI_GRP_TDLS),
+ WMI_STA_SMPS_FORCE_MODE_COMPL_EVENTID = WMI_TLV_CMD(WMI_GRP_STA_SMPS),
+ WMI_BATCH_SCAN_ENABLED_EVENTID = WMI_TLV_CMD(WMI_GRP_LOCATION_SCAN),
+ WMI_BATCH_SCAN_RESULT_EVENTID,
+ WMI_OEM_CAPABILITY_EVENTID = WMI_TLV_CMD(WMI_GRP_OEM),
+ WMI_OEM_MEASUREMENT_REPORT_EVENTID,
+ WMI_OEM_ERROR_REPORT_EVENTID,
+ WMI_OEM_RESPONSE_EVENTID,
+ WMI_NAN_EVENTID = WMI_TLV_CMD(WMI_GRP_NAN),
+ WMI_NAN_DISC_IFACE_CREATED_EVENTID,
+ WMI_NAN_DISC_IFACE_DELETED_EVENTID,
+ WMI_NAN_STARTED_CLUSTER_EVENTID,
+ WMI_NAN_JOINED_CLUSTER_EVENTID,
+ WMI_COEX_REPORT_ANTENNA_ISOLATION_EVENTID = WMI_TLV_CMD(WMI_GRP_COEX),
+ WMI_LPI_RESULT_EVENTID = WMI_TLV_CMD(WMI_GRP_LPI),
+ WMI_LPI_STATUS_EVENTID,
+ WMI_LPI_HANDOFF_EVENTID,
+ WMI_EXTSCAN_START_STOP_EVENTID = WMI_TLV_CMD(WMI_GRP_EXTSCAN),
+ WMI_EXTSCAN_OPERATION_EVENTID,
+ WMI_EXTSCAN_TABLE_USAGE_EVENTID,
+ WMI_EXTSCAN_CACHED_RESULTS_EVENTID,
+ WMI_EXTSCAN_WLAN_CHANGE_RESULTS_EVENTID,
+ WMI_EXTSCAN_HOTLIST_MATCH_EVENTID,
+ WMI_EXTSCAN_CAPABILITIES_EVENTID,
+ WMI_EXTSCAN_HOTLIST_SSID_MATCH_EVENTID,
+ WMI_MDNS_STATS_EVENTID = WMI_TLV_CMD(WMI_GRP_MDNS_OFL),
+ WMI_SAP_OFL_ADD_STA_EVENTID = WMI_TLV_CMD(WMI_GRP_SAP_OFL),
+ WMI_SAP_OFL_DEL_STA_EVENTID,
+ WMI_OBSS_COLOR_COLLISION_DETECTION_EVENTID =
+ WMI_EVT_GRP_START_ID(WMI_GRP_OBSS_OFL),
+ WMI_OCB_SET_CONFIG_RESP_EVENTID = WMI_TLV_CMD(WMI_GRP_OCB),
+ WMI_OCB_GET_TSF_TIMER_RESP_EVENTID,
+ WMI_DCC_GET_STATS_RESP_EVENTID,
+ WMI_DCC_UPDATE_NDL_RESP_EVENTID,
+ WMI_DCC_STATS_EVENTID,
+ WMI_SOC_SET_HW_MODE_RESP_EVENTID = WMI_TLV_CMD(WMI_GRP_SOC),
+ WMI_SOC_HW_MODE_TRANSITION_EVENTID,
+ WMI_SOC_SET_DUAL_MAC_CONFIG_RESP_EVENTID,
+ WMI_MAWC_ENABLE_SENSOR_EVENTID = WMI_TLV_CMD(WMI_GRP_MAWC),
+ WMI_BPF_CAPABILIY_INFO_EVENTID = WMI_TLV_CMD(WMI_GRP_BPF_OFFLOAD),
+ WMI_BPF_VDEV_STATS_INFO_EVENTID,
+ WMI_RMC_NEW_LEADER_EVENTID = WMI_TLV_CMD(WMI_GRP_RMC),
+ WMI_REG_CHAN_LIST_CC_EVENTID = WMI_TLV_CMD(WMI_GRP_REGULATORY),
+ WMI_11D_NEW_COUNTRY_EVENTID,
+ WMI_REG_CHAN_LIST_CC_EXT_EVENTID,
+ WMI_NDI_CAP_RSP_EVENTID = WMI_TLV_CMD(WMI_GRP_PROTOTYPE),
+ WMI_NDP_INITIATOR_RSP_EVENTID,
+ WMI_NDP_RESPONDER_RSP_EVENTID,
+ WMI_NDP_END_RSP_EVENTID,
+ WMI_NDP_INDICATION_EVENTID,
+ WMI_NDP_CONFIRM_EVENTID,
+ WMI_NDP_END_INDICATION_EVENTID,
+
+ WMI_TWT_ENABLE_EVENTID = WMI_TLV_CMD(WMI_GRP_TWT),
+ WMI_TWT_DISABLE_EVENTID,
+ WMI_TWT_ADD_DIALOG_EVENTID,
+ WMI_TWT_DEL_DIALOG_EVENTID,
+ WMI_TWT_PAUSE_DIALOG_EVENTID,
+ WMI_TWT_RESUME_DIALOG_EVENTID,
+};
+
+enum wmi_tlv_pdev_param {
+ WMI_PDEV_PARAM_TX_CHAIN_MASK = 0x1,
+ WMI_PDEV_PARAM_RX_CHAIN_MASK,
+ WMI_PDEV_PARAM_TXPOWER_LIMIT2G,
+ WMI_PDEV_PARAM_TXPOWER_LIMIT5G,
+ WMI_PDEV_PARAM_TXPOWER_SCALE,
+ WMI_PDEV_PARAM_BEACON_GEN_MODE,
+ WMI_PDEV_PARAM_BEACON_TX_MODE,
+ WMI_PDEV_PARAM_RESMGR_OFFCHAN_MODE,
+ WMI_PDEV_PARAM_PROTECTION_MODE,
+ WMI_PDEV_PARAM_DYNAMIC_BW,
+ WMI_PDEV_PARAM_NON_AGG_SW_RETRY_TH,
+ WMI_PDEV_PARAM_AGG_SW_RETRY_TH,
+ WMI_PDEV_PARAM_STA_KICKOUT_TH,
+ WMI_PDEV_PARAM_AC_AGGRSIZE_SCALING,
+ WMI_PDEV_PARAM_LTR_ENABLE,
+ WMI_PDEV_PARAM_LTR_AC_LATENCY_BE,
+ WMI_PDEV_PARAM_LTR_AC_LATENCY_BK,
+ WMI_PDEV_PARAM_LTR_AC_LATENCY_VI,
+ WMI_PDEV_PARAM_LTR_AC_LATENCY_VO,
+ WMI_PDEV_PARAM_LTR_AC_LATENCY_TIMEOUT,
+ WMI_PDEV_PARAM_LTR_SLEEP_OVERRIDE,
+ WMI_PDEV_PARAM_LTR_RX_OVERRIDE,
+ WMI_PDEV_PARAM_LTR_TX_ACTIVITY_TIMEOUT,
+ WMI_PDEV_PARAM_L1SS_ENABLE,
+ WMI_PDEV_PARAM_DSLEEP_ENABLE,
+ WMI_PDEV_PARAM_PCIELP_TXBUF_FLUSH,
+ WMI_PDEV_PARAM_PCIELP_TXBUF_WATERMARK,
+ WMI_PDEV_PARAM_PCIELP_TXBUF_TMO_EN,
+ WMI_PDEV_PARAM_PCIELP_TXBUF_TMO_VALUE,
+ WMI_PDEV_PARAM_PDEV_STATS_UPDATE_PERIOD,
+ WMI_PDEV_PARAM_VDEV_STATS_UPDATE_PERIOD,
+ WMI_PDEV_PARAM_PEER_STATS_UPDATE_PERIOD,
+ WMI_PDEV_PARAM_BCNFLT_STATS_UPDATE_PERIOD,
+ WMI_PDEV_PARAM_PMF_QOS,
+ WMI_PDEV_PARAM_ARP_AC_OVERRIDE,
+ WMI_PDEV_PARAM_DCS,
+ WMI_PDEV_PARAM_ANI_ENABLE,
+ WMI_PDEV_PARAM_ANI_POLL_PERIOD,
+ WMI_PDEV_PARAM_ANI_LISTEN_PERIOD,
+ WMI_PDEV_PARAM_ANI_OFDM_LEVEL,
+ WMI_PDEV_PARAM_ANI_CCK_LEVEL,
+ WMI_PDEV_PARAM_DYNTXCHAIN,
+ WMI_PDEV_PARAM_PROXY_STA,
+ WMI_PDEV_PARAM_IDLE_PS_CONFIG,
+ WMI_PDEV_PARAM_POWER_GATING_SLEEP,
+ WMI_PDEV_PARAM_RFKILL_ENABLE,
+ WMI_PDEV_PARAM_BURST_DUR,
+ WMI_PDEV_PARAM_BURST_ENABLE,
+ WMI_PDEV_PARAM_HW_RFKILL_CONFIG,
+ WMI_PDEV_PARAM_LOW_POWER_RF_ENABLE,
+ WMI_PDEV_PARAM_L1SS_TRACK,
+ WMI_PDEV_PARAM_HYST_EN,
+ WMI_PDEV_PARAM_POWER_COLLAPSE_ENABLE,
+ WMI_PDEV_PARAM_LED_SYS_STATE,
+ WMI_PDEV_PARAM_LED_ENABLE,
+ WMI_PDEV_PARAM_AUDIO_OVER_WLAN_LATENCY,
+ WMI_PDEV_PARAM_AUDIO_OVER_WLAN_ENABLE,
+ WMI_PDEV_PARAM_WHAL_MIB_STATS_UPDATE_ENABLE,
+ WMI_PDEV_PARAM_VDEV_RATE_STATS_UPDATE_PERIOD,
+ WMI_PDEV_PARAM_CTS_CBW,
+ WMI_PDEV_PARAM_WNTS_CONFIG,
+ WMI_PDEV_PARAM_ADAPTIVE_EARLY_RX_ENABLE,
+ WMI_PDEV_PARAM_ADAPTIVE_EARLY_RX_MIN_SLEEP_SLOP,
+ WMI_PDEV_PARAM_ADAPTIVE_EARLY_RX_INC_DEC_STEP,
+ WMI_PDEV_PARAM_EARLY_RX_FIX_SLEEP_SLOP,
+ WMI_PDEV_PARAM_BMISS_BASED_ADAPTIVE_BTO_ENABLE,
+ WMI_PDEV_PARAM_BMISS_BTO_MIN_BCN_TIMEOUT,
+ WMI_PDEV_PARAM_BMISS_BTO_INC_DEC_STEP,
+ WMI_PDEV_PARAM_BTO_FIX_BCN_TIMEOUT,
+ WMI_PDEV_PARAM_CE_BASED_ADAPTIVE_BTO_ENABLE,
+ WMI_PDEV_PARAM_CE_BTO_COMBO_CE_VALUE,
+ WMI_PDEV_PARAM_TX_CHAIN_MASK_2G,
+ WMI_PDEV_PARAM_RX_CHAIN_MASK_2G,
+ WMI_PDEV_PARAM_TX_CHAIN_MASK_5G,
+ WMI_PDEV_PARAM_RX_CHAIN_MASK_5G,
+ WMI_PDEV_PARAM_TX_CHAIN_MASK_CCK,
+ WMI_PDEV_PARAM_TX_CHAIN_MASK_1SS,
+ WMI_PDEV_PARAM_CTS2SELF_FOR_P2P_GO_CONFIG,
+ WMI_PDEV_PARAM_TXPOWER_DECR_DB,
+ WMI_PDEV_PARAM_AGGR_BURST,
+ WMI_PDEV_PARAM_RX_DECAP_MODE,
+ WMI_PDEV_PARAM_FAST_CHANNEL_RESET,
+ WMI_PDEV_PARAM_SMART_ANTENNA_DEFAULT_ANTENNA,
+ WMI_PDEV_PARAM_ANTENNA_GAIN,
+ WMI_PDEV_PARAM_RX_FILTER,
+ WMI_PDEV_SET_MCAST_TO_UCAST_TID,
+ WMI_PDEV_PARAM_PROXY_STA_MODE,
+ WMI_PDEV_PARAM_SET_MCAST2UCAST_MODE,
+ WMI_PDEV_PARAM_SET_MCAST2UCAST_BUFFER,
+ WMI_PDEV_PARAM_REMOVE_MCAST2UCAST_BUFFER,
+ WMI_PDEV_PEER_STA_PS_STATECHG_ENABLE,
+ WMI_PDEV_PARAM_IGMPMLD_AC_OVERRIDE,
+ WMI_PDEV_PARAM_BLOCK_INTERBSS,
+ WMI_PDEV_PARAM_SET_DISABLE_RESET_CMDID,
+ WMI_PDEV_PARAM_SET_MSDU_TTL_CMDID,
+ WMI_PDEV_PARAM_SET_PPDU_DURATION_CMDID,
+ WMI_PDEV_PARAM_TXBF_SOUND_PERIOD_CMDID,
+ WMI_PDEV_PARAM_SET_PROMISC_MODE_CMDID,
+ WMI_PDEV_PARAM_SET_BURST_MODE_CMDID,
+ WMI_PDEV_PARAM_EN_STATS,
+ WMI_PDEV_PARAM_MU_GROUP_POLICY,
+ WMI_PDEV_PARAM_NOISE_DETECTION,
+ WMI_PDEV_PARAM_NOISE_THRESHOLD,
+ WMI_PDEV_PARAM_DPD_ENABLE,
+ WMI_PDEV_PARAM_SET_MCAST_BCAST_ECHO,
+ WMI_PDEV_PARAM_ATF_STRICT_SCH,
+ WMI_PDEV_PARAM_ATF_SCHED_DURATION,
+ WMI_PDEV_PARAM_ANT_PLZN,
+ WMI_PDEV_PARAM_MGMT_RETRY_LIMIT,
+ WMI_PDEV_PARAM_SENSITIVITY_LEVEL,
+ WMI_PDEV_PARAM_SIGNED_TXPOWER_2G,
+ WMI_PDEV_PARAM_SIGNED_TXPOWER_5G,
+ WMI_PDEV_PARAM_ENABLE_PER_TID_AMSDU,
+ WMI_PDEV_PARAM_ENABLE_PER_TID_AMPDU,
+ WMI_PDEV_PARAM_CCA_THRESHOLD,
+ WMI_PDEV_PARAM_RTS_FIXED_RATE,
+ WMI_PDEV_PARAM_PDEV_RESET,
+ WMI_PDEV_PARAM_WAPI_MBSSID_OFFSET,
+ WMI_PDEV_PARAM_ARP_DBG_SRCADDR,
+ WMI_PDEV_PARAM_ARP_DBG_DSTADDR,
+ WMI_PDEV_PARAM_ATF_OBSS_NOISE_SCH,
+ WMI_PDEV_PARAM_ATF_OBSS_NOISE_SCALING_FACTOR,
+ WMI_PDEV_PARAM_CUST_TXPOWER_SCALE,
+ WMI_PDEV_PARAM_ATF_DYNAMIC_ENABLE,
+ WMI_PDEV_PARAM_CTRL_RETRY_LIMIT,
+ WMI_PDEV_PARAM_PROPAGATION_DELAY,
+ WMI_PDEV_PARAM_ENA_ANT_DIV,
+ WMI_PDEV_PARAM_FORCE_CHAIN_ANT,
+ WMI_PDEV_PARAM_ANT_DIV_SELFTEST,
+ WMI_PDEV_PARAM_ANT_DIV_SELFTEST_INTVL,
+ WMI_PDEV_PARAM_STATS_OBSERVATION_PERIOD,
+ WMI_PDEV_PARAM_TX_PPDU_DELAY_BIN_SIZE_MS,
+ WMI_PDEV_PARAM_TX_PPDU_DELAY_ARRAY_LEN,
+ WMI_PDEV_PARAM_TX_MPDU_AGGR_ARRAY_LEN,
+ WMI_PDEV_PARAM_RX_MPDU_AGGR_ARRAY_LEN,
+ WMI_PDEV_PARAM_TX_SCH_DELAY,
+ WMI_PDEV_PARAM_ENABLE_RTS_SIFS_BURSTING,
+ WMI_PDEV_PARAM_MAX_MPDUS_IN_AMPDU,
+ WMI_PDEV_PARAM_PEER_STATS_INFO_ENABLE,
+ WMI_PDEV_PARAM_FAST_PWR_TRANSITION,
+ WMI_PDEV_PARAM_RADIO_CHAN_STATS_ENABLE,
+ WMI_PDEV_PARAM_RADIO_DIAGNOSIS_ENABLE,
+ WMI_PDEV_PARAM_MESH_MCAST_ENABLE,
+ WMI_PDEV_PARAM_SET_CMD_OBSS_PD_THRESHOLD = 0xbc,
+ WMI_PDEV_PARAM_SET_CMD_OBSS_PD_PER_AC = 0xbe,
+ WMI_PDEV_PARAM_ENABLE_SR_PROHIBIT = 0xc6,
+};
+
+enum wmi_tlv_vdev_param {
+ WMI_VDEV_PARAM_RTS_THRESHOLD = 0x1,
+ WMI_VDEV_PARAM_FRAGMENTATION_THRESHOLD,
+ WMI_VDEV_PARAM_BEACON_INTERVAL,
+ WMI_VDEV_PARAM_LISTEN_INTERVAL,
+ WMI_VDEV_PARAM_MULTICAST_RATE,
+ WMI_VDEV_PARAM_MGMT_TX_RATE,
+ WMI_VDEV_PARAM_SLOT_TIME,
+ WMI_VDEV_PARAM_PREAMBLE,
+ WMI_VDEV_PARAM_SWBA_TIME,
+ WMI_VDEV_STATS_UPDATE_PERIOD,
+ WMI_VDEV_PWRSAVE_AGEOUT_TIME,
+ WMI_VDEV_HOST_SWBA_INTERVAL,
+ WMI_VDEV_PARAM_DTIM_PERIOD,
+ WMI_VDEV_OC_SCHEDULER_AIR_TIME_LIMIT,
+ WMI_VDEV_PARAM_WDS,
+ WMI_VDEV_PARAM_ATIM_WINDOW,
+ WMI_VDEV_PARAM_BMISS_COUNT_MAX,
+ WMI_VDEV_PARAM_BMISS_FIRST_BCNT,
+ WMI_VDEV_PARAM_BMISS_FINAL_BCNT,
+ WMI_VDEV_PARAM_FEATURE_WMM,
+ WMI_VDEV_PARAM_CHWIDTH,
+ WMI_VDEV_PARAM_CHEXTOFFSET,
+ WMI_VDEV_PARAM_DISABLE_HTPROTECTION,
+ WMI_VDEV_PARAM_STA_QUICKKICKOUT,
+ WMI_VDEV_PARAM_MGMT_RATE,
+ WMI_VDEV_PARAM_PROTECTION_MODE,
+ WMI_VDEV_PARAM_FIXED_RATE,
+ WMI_VDEV_PARAM_SGI,
+ WMI_VDEV_PARAM_LDPC,
+ WMI_VDEV_PARAM_TX_STBC,
+ WMI_VDEV_PARAM_RX_STBC,
+ WMI_VDEV_PARAM_INTRA_BSS_FWD,
+ WMI_VDEV_PARAM_DEF_KEYID,
+ WMI_VDEV_PARAM_NSS,
+ WMI_VDEV_PARAM_BCAST_DATA_RATE,
+ WMI_VDEV_PARAM_MCAST_DATA_RATE,
+ WMI_VDEV_PARAM_MCAST_INDICATE,
+ WMI_VDEV_PARAM_DHCP_INDICATE,
+ WMI_VDEV_PARAM_UNKNOWN_DEST_INDICATE,
+ WMI_VDEV_PARAM_AP_KEEPALIVE_MIN_IDLE_INACTIVE_TIME_SECS,
+ WMI_VDEV_PARAM_AP_KEEPALIVE_MAX_IDLE_INACTIVE_TIME_SECS,
+ WMI_VDEV_PARAM_AP_KEEPALIVE_MAX_UNRESPONSIVE_TIME_SECS,
+ WMI_VDEV_PARAM_AP_ENABLE_NAWDS,
+ WMI_VDEV_PARAM_ENABLE_RTSCTS,
+ WMI_VDEV_PARAM_TXBF,
+ WMI_VDEV_PARAM_PACKET_POWERSAVE,
+ WMI_VDEV_PARAM_DROP_UNENCRY,
+ WMI_VDEV_PARAM_TX_ENCAP_TYPE,
+ WMI_VDEV_PARAM_AP_DETECT_OUT_OF_SYNC_SLEEPING_STA_TIME_SECS,
+ WMI_VDEV_PARAM_EARLY_RX_ADJUST_ENABLE,
+ WMI_VDEV_PARAM_EARLY_RX_TGT_BMISS_NUM,
+ WMI_VDEV_PARAM_EARLY_RX_BMISS_SAMPLE_CYCLE,
+ WMI_VDEV_PARAM_EARLY_RX_SLOP_STEP,
+ WMI_VDEV_PARAM_EARLY_RX_INIT_SLOP,
+ WMI_VDEV_PARAM_EARLY_RX_ADJUST_PAUSE,
+ WMI_VDEV_PARAM_TX_PWRLIMIT,
+ WMI_VDEV_PARAM_SNR_NUM_FOR_CAL,
+ WMI_VDEV_PARAM_ROAM_FW_OFFLOAD,
+ WMI_VDEV_PARAM_ENABLE_RMC,
+ WMI_VDEV_PARAM_IBSS_MAX_BCN_LOST_MS,
+ WMI_VDEV_PARAM_MAX_RATE,
+ WMI_VDEV_PARAM_EARLY_RX_DRIFT_SAMPLE,
+ WMI_VDEV_PARAM_SET_IBSS_TX_FAIL_CNT_THR,
+ WMI_VDEV_PARAM_EBT_RESYNC_TIMEOUT,
+ WMI_VDEV_PARAM_AGGR_TRIG_EVENT_ENABLE,
+ WMI_VDEV_PARAM_IS_IBSS_POWER_SAVE_ALLOWED,
+ WMI_VDEV_PARAM_IS_POWER_COLLAPSE_ALLOWED,
+ WMI_VDEV_PARAM_IS_AWAKE_ON_TXRX_ENABLED,
+ WMI_VDEV_PARAM_INACTIVITY_CNT,
+ WMI_VDEV_PARAM_TXSP_END_INACTIVITY_TIME_MS,
+ WMI_VDEV_PARAM_DTIM_POLICY,
+ WMI_VDEV_PARAM_IBSS_PS_WARMUP_TIME_SECS,
+ WMI_VDEV_PARAM_IBSS_PS_1RX_CHAIN_IN_ATIM_WINDOW_ENABLE,
+ WMI_VDEV_PARAM_RX_LEAK_WINDOW,
+ WMI_VDEV_PARAM_STATS_AVG_FACTOR,
+ WMI_VDEV_PARAM_DISCONNECT_TH,
+ WMI_VDEV_PARAM_RTSCTS_RATE,
+ WMI_VDEV_PARAM_MCC_RTSCTS_PROTECTION_ENABLE,
+ WMI_VDEV_PARAM_MCC_BROADCAST_PROBE_ENABLE,
+ WMI_VDEV_PARAM_TXPOWER_SCALE,
+ WMI_VDEV_PARAM_TXPOWER_SCALE_DECR_DB,
+ WMI_VDEV_PARAM_MCAST2UCAST_SET,
+ WMI_VDEV_PARAM_RC_NUM_RETRIES,
+ WMI_VDEV_PARAM_CABQ_MAXDUR,
+ WMI_VDEV_PARAM_MFPTEST_SET,
+ WMI_VDEV_PARAM_RTS_FIXED_RATE,
+ WMI_VDEV_PARAM_VHT_SGIMASK,
+ WMI_VDEV_PARAM_VHT80_RATEMASK,
+ WMI_VDEV_PARAM_PROXY_STA,
+ WMI_VDEV_PARAM_VIRTUAL_CELL_MODE,
+ WMI_VDEV_PARAM_RX_DECAP_TYPE,
+ WMI_VDEV_PARAM_BW_NSS_RATEMASK,
+ WMI_VDEV_PARAM_SENSOR_AP,
+ WMI_VDEV_PARAM_BEACON_RATE,
+ WMI_VDEV_PARAM_DTIM_ENABLE_CTS,
+ WMI_VDEV_PARAM_STA_KICKOUT,
+ WMI_VDEV_PARAM_CAPABILITIES,
+ WMI_VDEV_PARAM_TSF_INCREMENT,
+ WMI_VDEV_PARAM_AMPDU_PER_AC,
+ WMI_VDEV_PARAM_RX_FILTER,
+ WMI_VDEV_PARAM_MGMT_TX_POWER,
+ WMI_VDEV_PARAM_NON_AGG_SW_RETRY_TH,
+ WMI_VDEV_PARAM_AGG_SW_RETRY_TH,
+ WMI_VDEV_PARAM_DISABLE_DYN_BW_RTS,
+ WMI_VDEV_PARAM_ATF_SSID_SCHED_POLICY,
+ WMI_VDEV_PARAM_HE_DCM,
+ WMI_VDEV_PARAM_HE_RANGE_EXT,
+ WMI_VDEV_PARAM_ENABLE_BCAST_PROBE_RESPONSE,
+ WMI_VDEV_PARAM_FILS_MAX_CHANNEL_GUARD_TIME,
+ WMI_VDEV_PARAM_HE_LTF = 0x74,
+ WMI_VDEV_PARAM_ENABLE_DISABLE_RTT_RESPONDER_ROLE = 0x7d,
+ WMI_VDEV_PARAM_BA_MODE = 0x7e,
+ WMI_VDEV_PARAM_AUTORATE_MISC_CFG = 0x80,
+ WMI_VDEV_PARAM_SET_HE_SOUNDING_MODE = 0x87,
+ WMI_VDEV_PARAM_6GHZ_PARAMS = 0x99,
+ WMI_VDEV_PARAM_PROTOTYPE = 0x8000,
+ WMI_VDEV_PARAM_BSS_COLOR,
+ WMI_VDEV_PARAM_SET_HEMU_MODE,
+ WMI_VDEV_PARAM_HEOPS_0_31 = 0x8003,
+};
+
+enum wmi_tlv_peer_flags {
+ WMI_TLV_PEER_AUTH = 0x00000001,
+ WMI_TLV_PEER_QOS = 0x00000002,
+ WMI_TLV_PEER_NEED_PTK_4_WAY = 0x00000004,
+ WMI_TLV_PEER_NEED_GTK_2_WAY = 0x00000010,
+ WMI_TLV_PEER_APSD = 0x00000800,
+ WMI_TLV_PEER_HT = 0x00001000,
+ WMI_TLV_PEER_40MHZ = 0x00002000,
+ WMI_TLV_PEER_STBC = 0x00008000,
+ WMI_TLV_PEER_LDPC = 0x00010000,
+ WMI_TLV_PEER_DYN_MIMOPS = 0x00020000,
+ WMI_TLV_PEER_STATIC_MIMOPS = 0x00040000,
+ WMI_TLV_PEER_SPATIAL_MUX = 0x00200000,
+ WMI_TLV_PEER_VHT = 0x02000000,
+ WMI_TLV_PEER_80MHZ = 0x04000000,
+ WMI_TLV_PEER_PMF = 0x08000000,
+ WMI_PEER_IS_P2P_CAPABLE = 0x20000000,
+ WMI_PEER_160MHZ = 0x40000000,
+ WMI_PEER_SAFEMODE_EN = 0x80000000,
+};
+
+/* Enum list of TLV tags for each parameter structure type. */
+enum wmi_tlv_tag {
+ WMI_TAG_LAST_RESERVED = 15,
+ WMI_TAG_FIRST_ARRAY_ENUM,
+ WMI_TAG_ARRAY_UINT32 = WMI_TAG_FIRST_ARRAY_ENUM,
+ WMI_TAG_ARRAY_BYTE,
+ WMI_TAG_ARRAY_STRUCT,
+ WMI_TAG_ARRAY_FIXED_STRUCT,
+ WMI_TAG_LAST_ARRAY_ENUM = 31,
+ WMI_TAG_SERVICE_READY_EVENT,
+ WMI_TAG_HAL_REG_CAPABILITIES,
+ WMI_TAG_WLAN_HOST_MEM_REQ,
+ WMI_TAG_READY_EVENT,
+ WMI_TAG_SCAN_EVENT,
+ WMI_TAG_PDEV_TPC_CONFIG_EVENT,
+ WMI_TAG_CHAN_INFO_EVENT,
+ WMI_TAG_COMB_PHYERR_RX_HDR,
+ WMI_TAG_VDEV_START_RESPONSE_EVENT,
+ WMI_TAG_VDEV_STOPPED_EVENT,
+ WMI_TAG_VDEV_INSTALL_KEY_COMPLETE_EVENT,
+ WMI_TAG_PEER_STA_KICKOUT_EVENT,
+ WMI_TAG_MGMT_RX_HDR,
+ WMI_TAG_TBTT_OFFSET_EVENT,
+ WMI_TAG_TX_DELBA_COMPLETE_EVENT,
+ WMI_TAG_TX_ADDBA_COMPLETE_EVENT,
+ WMI_TAG_ROAM_EVENT,
+ WMI_TAG_WOW_EVENT_INFO,
+ WMI_TAG_WOW_EVENT_INFO_SECTION_BITMAP,
+ WMI_TAG_RTT_EVENT_HEADER,
+ WMI_TAG_RTT_ERROR_REPORT_EVENT,
+ WMI_TAG_RTT_MEAS_EVENT,
+ WMI_TAG_ECHO_EVENT,
+ WMI_TAG_FTM_INTG_EVENT,
+ WMI_TAG_VDEV_GET_KEEPALIVE_EVENT,
+ WMI_TAG_GPIO_INPUT_EVENT,
+ WMI_TAG_CSA_EVENT,
+ WMI_TAG_GTK_OFFLOAD_STATUS_EVENT,
+ WMI_TAG_IGTK_INFO,
+ WMI_TAG_DCS_INTERFERENCE_EVENT,
+ WMI_TAG_ATH_DCS_CW_INT,
+ WMI_TAG_WLAN_DCS_CW_INT = /* ALIAS */
+ WMI_TAG_ATH_DCS_CW_INT,
+ WMI_TAG_ATH_DCS_WLAN_INT_STAT,
+ WMI_TAG_WLAN_DCS_IM_TGT_STATS_T = /* ALIAS */
+ WMI_TAG_ATH_DCS_WLAN_INT_STAT,
+ WMI_TAG_WLAN_PROFILE_CTX_T,
+ WMI_TAG_WLAN_PROFILE_T,
+ WMI_TAG_PDEV_QVIT_EVENT,
+ WMI_TAG_HOST_SWBA_EVENT,
+ WMI_TAG_TIM_INFO,
+ WMI_TAG_P2P_NOA_INFO,
+ WMI_TAG_STATS_EVENT,
+ WMI_TAG_AVOID_FREQ_RANGES_EVENT,
+ WMI_TAG_AVOID_FREQ_RANGE_DESC,
+ WMI_TAG_GTK_REKEY_FAIL_EVENT,
+ WMI_TAG_INIT_CMD,
+ WMI_TAG_RESOURCE_CONFIG,
+ WMI_TAG_WLAN_HOST_MEMORY_CHUNK,
+ WMI_TAG_START_SCAN_CMD,
+ WMI_TAG_STOP_SCAN_CMD,
+ WMI_TAG_SCAN_CHAN_LIST_CMD,
+ WMI_TAG_CHANNEL,
+ WMI_TAG_PDEV_SET_REGDOMAIN_CMD,
+ WMI_TAG_PDEV_SET_PARAM_CMD,
+ WMI_TAG_PDEV_SET_WMM_PARAMS_CMD,
+ WMI_TAG_WMM_PARAMS,
+ WMI_TAG_PDEV_SET_QUIET_CMD,
+ WMI_TAG_VDEV_CREATE_CMD,
+ WMI_TAG_VDEV_DELETE_CMD,
+ WMI_TAG_VDEV_START_REQUEST_CMD,
+ WMI_TAG_P2P_NOA_DESCRIPTOR,
+ WMI_TAG_P2P_GO_SET_BEACON_IE,
+ WMI_TAG_GTK_OFFLOAD_CMD,
+ WMI_TAG_VDEV_UP_CMD,
+ WMI_TAG_VDEV_STOP_CMD,
+ WMI_TAG_VDEV_DOWN_CMD,
+ WMI_TAG_VDEV_SET_PARAM_CMD,
+ WMI_TAG_VDEV_INSTALL_KEY_CMD,
+ WMI_TAG_PEER_CREATE_CMD,
+ WMI_TAG_PEER_DELETE_CMD,
+ WMI_TAG_PEER_FLUSH_TIDS_CMD,
+ WMI_TAG_PEER_SET_PARAM_CMD,
+ WMI_TAG_PEER_ASSOC_COMPLETE_CMD,
+ WMI_TAG_VHT_RATE_SET,
+ WMI_TAG_BCN_TMPL_CMD,
+ WMI_TAG_PRB_TMPL_CMD,
+ WMI_TAG_BCN_PRB_INFO,
+ WMI_TAG_PEER_TID_ADDBA_CMD,
+ WMI_TAG_PEER_TID_DELBA_CMD,
+ WMI_TAG_STA_POWERSAVE_MODE_CMD,
+ WMI_TAG_STA_POWERSAVE_PARAM_CMD,
+ WMI_TAG_STA_DTIM_PS_METHOD_CMD,
+ WMI_TAG_ROAM_SCAN_MODE,
+ WMI_TAG_ROAM_SCAN_RSSI_THRESHOLD,
+ WMI_TAG_ROAM_SCAN_PERIOD,
+ WMI_TAG_ROAM_SCAN_RSSI_CHANGE_THRESHOLD,
+ WMI_TAG_PDEV_SUSPEND_CMD,
+ WMI_TAG_PDEV_RESUME_CMD,
+ WMI_TAG_ADD_BCN_FILTER_CMD,
+ WMI_TAG_RMV_BCN_FILTER_CMD,
+ WMI_TAG_WOW_ENABLE_CMD,
+ WMI_TAG_WOW_HOSTWAKEUP_FROM_SLEEP_CMD,
+ WMI_TAG_STA_UAPSD_AUTO_TRIG_CMD,
+ WMI_TAG_STA_UAPSD_AUTO_TRIG_PARAM,
+ WMI_TAG_SET_ARP_NS_OFFLOAD_CMD,
+ WMI_TAG_ARP_OFFLOAD_TUPLE,
+ WMI_TAG_NS_OFFLOAD_TUPLE,
+ WMI_TAG_FTM_INTG_CMD,
+ WMI_TAG_STA_KEEPALIVE_CMD,
+ WMI_TAG_STA_KEEPALIVE_ARP_RESPONSE,
+ WMI_TAG_P2P_SET_VENDOR_IE_DATA_CMD,
+ WMI_TAG_AP_PS_PEER_CMD,
+ WMI_TAG_PEER_RATE_RETRY_SCHED_CMD,
+ WMI_TAG_WLAN_PROFILE_TRIGGER_CMD,
+ WMI_TAG_WLAN_PROFILE_SET_HIST_INTVL_CMD,
+ WMI_TAG_WLAN_PROFILE_GET_PROF_DATA_CMD,
+ WMI_TAG_WLAN_PROFILE_ENABLE_PROFILE_ID_CMD,
+ WMI_TAG_WOW_DEL_PATTERN_CMD,
+ WMI_TAG_WOW_ADD_DEL_EVT_CMD,
+ WMI_TAG_RTT_MEASREQ_HEAD,
+ WMI_TAG_RTT_MEASREQ_BODY,
+ WMI_TAG_RTT_TSF_CMD,
+ WMI_TAG_VDEV_SPECTRAL_CONFIGURE_CMD,
+ WMI_TAG_VDEV_SPECTRAL_ENABLE_CMD,
+ WMI_TAG_REQUEST_STATS_CMD,
+ WMI_TAG_NLO_CONFIG_CMD,
+ WMI_TAG_NLO_CONFIGURED_PARAMETERS,
+ WMI_TAG_CSA_OFFLOAD_ENABLE_CMD,
+ WMI_TAG_CSA_OFFLOAD_CHANSWITCH_CMD,
+ WMI_TAG_CHATTER_SET_MODE_CMD,
+ WMI_TAG_ECHO_CMD,
+ WMI_TAG_VDEV_SET_KEEPALIVE_CMD,
+ WMI_TAG_VDEV_GET_KEEPALIVE_CMD,
+ WMI_TAG_FORCE_FW_HANG_CMD,
+ WMI_TAG_GPIO_CONFIG_CMD,
+ WMI_TAG_GPIO_OUTPUT_CMD,
+ WMI_TAG_PEER_ADD_WDS_ENTRY_CMD,
+ WMI_TAG_PEER_REMOVE_WDS_ENTRY_CMD,
+ WMI_TAG_BCN_TX_HDR,
+ WMI_TAG_BCN_SEND_FROM_HOST_CMD,
+ WMI_TAG_MGMT_TX_HDR,
+ WMI_TAG_ADDBA_CLEAR_RESP_CMD,
+ WMI_TAG_ADDBA_SEND_CMD,
+ WMI_TAG_DELBA_SEND_CMD,
+ WMI_TAG_ADDBA_SETRESPONSE_CMD,
+ WMI_TAG_SEND_SINGLEAMSDU_CMD,
+ WMI_TAG_PDEV_PKTLOG_ENABLE_CMD,
+ WMI_TAG_PDEV_PKTLOG_DISABLE_CMD,
+ WMI_TAG_PDEV_SET_HT_IE_CMD,
+ WMI_TAG_PDEV_SET_VHT_IE_CMD,
+ WMI_TAG_PDEV_SET_DSCP_TID_MAP_CMD,
+ WMI_TAG_PDEV_GREEN_AP_PS_ENABLE_CMD,
+ WMI_TAG_PDEV_GET_TPC_CONFIG_CMD,
+ WMI_TAG_PDEV_SET_BASE_MACADDR_CMD,
+ WMI_TAG_PEER_MCAST_GROUP_CMD,
+ WMI_TAG_ROAM_AP_PROFILE,
+ WMI_TAG_AP_PROFILE,
+ WMI_TAG_SCAN_SCH_PRIORITY_TABLE_CMD,
+ WMI_TAG_PDEV_DFS_ENABLE_CMD,
+ WMI_TAG_PDEV_DFS_DISABLE_CMD,
+ WMI_TAG_WOW_ADD_PATTERN_CMD,
+ WMI_TAG_WOW_BITMAP_PATTERN_T,
+ WMI_TAG_WOW_IPV4_SYNC_PATTERN_T,
+ WMI_TAG_WOW_IPV6_SYNC_PATTERN_T,
+ WMI_TAG_WOW_MAGIC_PATTERN_CMD,
+ WMI_TAG_SCAN_UPDATE_REQUEST_CMD,
+ WMI_TAG_CHATTER_PKT_COALESCING_FILTER,
+ WMI_TAG_CHATTER_COALESCING_ADD_FILTER_CMD,
+ WMI_TAG_CHATTER_COALESCING_DELETE_FILTER_CMD,
+ WMI_TAG_CHATTER_COALESCING_QUERY_CMD,
+ WMI_TAG_TXBF_CMD,
+ WMI_TAG_DEBUG_LOG_CONFIG_CMD,
+ WMI_TAG_NLO_EVENT,
+ WMI_TAG_CHATTER_QUERY_REPLY_EVENT,
+ WMI_TAG_UPLOAD_H_HDR,
+ WMI_TAG_CAPTURE_H_EVENT_HDR,
+ WMI_TAG_VDEV_WNM_SLEEPMODE_CMD,
+ WMI_TAG_VDEV_IPSEC_NATKEEPALIVE_FILTER_CMD,
+ WMI_TAG_VDEV_WMM_ADDTS_CMD,
+ WMI_TAG_VDEV_WMM_DELTS_CMD,
+ WMI_TAG_VDEV_SET_WMM_PARAMS_CMD,
+ WMI_TAG_TDLS_SET_STATE_CMD,
+ WMI_TAG_TDLS_PEER_UPDATE_CMD,
+ WMI_TAG_TDLS_PEER_EVENT,
+ WMI_TAG_TDLS_PEER_CAPABILITIES,
+ WMI_TAG_VDEV_MCC_SET_TBTT_MODE_CMD,
+ WMI_TAG_ROAM_CHAN_LIST,
+ WMI_TAG_VDEV_MCC_BCN_INTVL_CHANGE_EVENT,
+ WMI_TAG_RESMGR_ADAPTIVE_OCS_ENABLE_DISABLE_CMD,
+ WMI_TAG_RESMGR_SET_CHAN_TIME_QUOTA_CMD,
+ WMI_TAG_RESMGR_SET_CHAN_LATENCY_CMD,
+ WMI_TAG_BA_REQ_SSN_CMD,
+ WMI_TAG_BA_RSP_SSN_EVENT,
+ WMI_TAG_STA_SMPS_FORCE_MODE_CMD,
+ WMI_TAG_SET_MCASTBCAST_FILTER_CMD,
+ WMI_TAG_P2P_SET_OPPPS_CMD,
+ WMI_TAG_P2P_SET_NOA_CMD,
+ WMI_TAG_BA_REQ_SSN_CMD_SUB_STRUCT_PARAM,
+ WMI_TAG_BA_REQ_SSN_EVENT_SUB_STRUCT_PARAM,
+ WMI_TAG_STA_SMPS_PARAM_CMD,
+ WMI_TAG_VDEV_SET_GTX_PARAMS_CMD,
+ WMI_TAG_MCC_SCHED_TRAFFIC_STATS_CMD,
+ WMI_TAG_MCC_SCHED_STA_TRAFFIC_STATS,
+ WMI_TAG_OFFLOAD_BCN_TX_STATUS_EVENT,
+ WMI_TAG_P2P_NOA_EVENT,
+ WMI_TAG_HB_SET_ENABLE_CMD,
+ WMI_TAG_HB_SET_TCP_PARAMS_CMD,
+ WMI_TAG_HB_SET_TCP_PKT_FILTER_CMD,
+ WMI_TAG_HB_SET_UDP_PARAMS_CMD,
+ WMI_TAG_HB_SET_UDP_PKT_FILTER_CMD,
+ WMI_TAG_HB_IND_EVENT,
+ WMI_TAG_TX_PAUSE_EVENT,
+ WMI_TAG_RFKILL_EVENT,
+ WMI_TAG_DFS_RADAR_EVENT,
+ WMI_TAG_DFS_PHYERR_FILTER_ENA_CMD,
+ WMI_TAG_DFS_PHYERR_FILTER_DIS_CMD,
+ WMI_TAG_BATCH_SCAN_RESULT_SCAN_LIST,
+ WMI_TAG_BATCH_SCAN_RESULT_NETWORK_INFO,
+ WMI_TAG_BATCH_SCAN_ENABLE_CMD,
+ WMI_TAG_BATCH_SCAN_DISABLE_CMD,
+ WMI_TAG_BATCH_SCAN_TRIGGER_RESULT_CMD,
+ WMI_TAG_BATCH_SCAN_ENABLED_EVENT,
+ WMI_TAG_BATCH_SCAN_RESULT_EVENT,
+ WMI_TAG_VDEV_PLMREQ_START_CMD,
+ WMI_TAG_VDEV_PLMREQ_STOP_CMD,
+ WMI_TAG_THERMAL_MGMT_CMD,
+ WMI_TAG_THERMAL_MGMT_EVENT,
+ WMI_TAG_PEER_INFO_REQ_CMD,
+ WMI_TAG_PEER_INFO_EVENT,
+ WMI_TAG_PEER_INFO,
+ WMI_TAG_PEER_TX_FAIL_CNT_THR_EVENT,
+ WMI_TAG_RMC_SET_MODE_CMD,
+ WMI_TAG_RMC_SET_ACTION_PERIOD_CMD,
+ WMI_TAG_RMC_CONFIG_CMD,
+ WMI_TAG_MHF_OFFLOAD_SET_MODE_CMD,
+ WMI_TAG_MHF_OFFLOAD_PLUMB_ROUTING_TABLE_CMD,
+ WMI_TAG_ADD_PROACTIVE_ARP_RSP_PATTERN_CMD,
+ WMI_TAG_DEL_PROACTIVE_ARP_RSP_PATTERN_CMD,
+ WMI_TAG_NAN_CMD_PARAM,
+ WMI_TAG_NAN_EVENT_HDR,
+ WMI_TAG_PDEV_L1SS_TRACK_EVENT,
+ WMI_TAG_DIAG_DATA_CONTAINER_EVENT,
+ WMI_TAG_MODEM_POWER_STATE_CMD_PARAM,
+ WMI_TAG_PEER_GET_ESTIMATED_LINKSPEED_CMD,
+ WMI_TAG_PEER_ESTIMATED_LINKSPEED_EVENT,
+ WMI_TAG_AGGR_STATE_TRIG_EVENT,
+ WMI_TAG_MHF_OFFLOAD_ROUTING_TABLE_ENTRY,
+ WMI_TAG_ROAM_SCAN_CMD,
+ WMI_TAG_REQ_STATS_EXT_CMD,
+ WMI_TAG_STATS_EXT_EVENT,
+ WMI_TAG_OBSS_SCAN_ENABLE_CMD,
+ WMI_TAG_OBSS_SCAN_DISABLE_CMD,
+ WMI_TAG_OFFLOAD_PRB_RSP_TX_STATUS_EVENT,
+ WMI_TAG_PDEV_SET_LED_CONFIG_CMD,
+ WMI_TAG_HOST_AUTO_SHUTDOWN_CFG_CMD,
+ WMI_TAG_HOST_AUTO_SHUTDOWN_EVENT,
+ WMI_TAG_UPDATE_WHAL_MIB_STATS_EVENT,
+ WMI_TAG_CHAN_AVOID_UPDATE_CMD_PARAM,
+ WMI_TAG_WOW_IOAC_PKT_PATTERN_T,
+ WMI_TAG_WOW_IOAC_TMR_PATTERN_T,
+ WMI_TAG_WOW_IOAC_ADD_KEEPALIVE_CMD,
+ WMI_TAG_WOW_IOAC_DEL_KEEPALIVE_CMD,
+ WMI_TAG_WOW_IOAC_KEEPALIVE_T,
+ WMI_TAG_WOW_IOAC_ADD_PATTERN_CMD,
+ WMI_TAG_WOW_IOAC_DEL_PATTERN_CMD,
+ WMI_TAG_START_LINK_STATS_CMD,
+ WMI_TAG_CLEAR_LINK_STATS_CMD,
+ WMI_TAG_REQUEST_LINK_STATS_CMD,
+ WMI_TAG_IFACE_LINK_STATS_EVENT,
+ WMI_TAG_RADIO_LINK_STATS_EVENT,
+ WMI_TAG_PEER_STATS_EVENT,
+ WMI_TAG_CHANNEL_STATS,
+ WMI_TAG_RADIO_LINK_STATS,
+ WMI_TAG_RATE_STATS,
+ WMI_TAG_PEER_LINK_STATS,
+ WMI_TAG_WMM_AC_STATS,
+ WMI_TAG_IFACE_LINK_STATS,
+ WMI_TAG_LPI_MGMT_SNOOPING_CONFIG_CMD,
+ WMI_TAG_LPI_START_SCAN_CMD,
+ WMI_TAG_LPI_STOP_SCAN_CMD,
+ WMI_TAG_LPI_RESULT_EVENT,
+ WMI_TAG_PEER_STATE_EVENT,
+ WMI_TAG_EXTSCAN_BUCKET_CMD,
+ WMI_TAG_EXTSCAN_BUCKET_CHANNEL_EVENT,
+ WMI_TAG_EXTSCAN_START_CMD,
+ WMI_TAG_EXTSCAN_STOP_CMD,
+ WMI_TAG_EXTSCAN_CONFIGURE_WLAN_CHANGE_MONITOR_CMD,
+ WMI_TAG_EXTSCAN_WLAN_CHANGE_BSSID_PARAM_CMD,
+ WMI_TAG_EXTSCAN_CONFIGURE_HOTLIST_MONITOR_CMD,
+ WMI_TAG_EXTSCAN_GET_CACHED_RESULTS_CMD,
+ WMI_TAG_EXTSCAN_GET_WLAN_CHANGE_RESULTS_CMD,
+ WMI_TAG_EXTSCAN_SET_CAPABILITIES_CMD,
+ WMI_TAG_EXTSCAN_GET_CAPABILITIES_CMD,
+ WMI_TAG_EXTSCAN_OPERATION_EVENT,
+ WMI_TAG_EXTSCAN_START_STOP_EVENT,
+ WMI_TAG_EXTSCAN_TABLE_USAGE_EVENT,
+ WMI_TAG_EXTSCAN_WLAN_DESCRIPTOR_EVENT,
+ WMI_TAG_EXTSCAN_RSSI_INFO_EVENT,
+ WMI_TAG_EXTSCAN_CACHED_RESULTS_EVENT,
+ WMI_TAG_EXTSCAN_WLAN_CHANGE_RESULTS_EVENT,
+ WMI_TAG_EXTSCAN_WLAN_CHANGE_RESULT_BSSID_EVENT,
+ WMI_TAG_EXTSCAN_HOTLIST_MATCH_EVENT,
+ WMI_TAG_EXTSCAN_CAPABILITIES_EVENT,
+ WMI_TAG_EXTSCAN_CACHE_CAPABILITIES_EVENT,
+ WMI_TAG_EXTSCAN_WLAN_CHANGE_MONITOR_CAPABILITIES_EVENT,
+ WMI_TAG_EXTSCAN_HOTLIST_MONITOR_CAPABILITIES_EVENT,
+ WMI_TAG_D0_WOW_ENABLE_DISABLE_CMD,
+ WMI_TAG_D0_WOW_DISABLE_ACK_EVENT,
+ WMI_TAG_UNIT_TEST_CMD,
+ WMI_TAG_ROAM_OFFLOAD_TLV_PARAM,
+ WMI_TAG_ROAM_11I_OFFLOAD_TLV_PARAM,
+ WMI_TAG_ROAM_11R_OFFLOAD_TLV_PARAM,
+ WMI_TAG_ROAM_ESE_OFFLOAD_TLV_PARAM,
+ WMI_TAG_ROAM_SYNCH_EVENT,
+ WMI_TAG_ROAM_SYNCH_COMPLETE,
+ WMI_TAG_EXTWOW_ENABLE_CMD,
+ WMI_TAG_EXTWOW_SET_APP_TYPE1_PARAMS_CMD,
+ WMI_TAG_EXTWOW_SET_APP_TYPE2_PARAMS_CMD,
+ WMI_TAG_LPI_STATUS_EVENT,
+ WMI_TAG_LPI_HANDOFF_EVENT,
+ WMI_TAG_VDEV_RATE_STATS_EVENT,
+ WMI_TAG_VDEV_RATE_HT_INFO,
+ WMI_TAG_RIC_REQUEST,
+ WMI_TAG_PDEV_GET_TEMPERATURE_CMD,
+ WMI_TAG_PDEV_TEMPERATURE_EVENT,
+ WMI_TAG_SET_DHCP_SERVER_OFFLOAD_CMD,
+ WMI_TAG_TPC_CHAINMASK_CONFIG_CMD,
+ WMI_TAG_RIC_TSPEC,
+ WMI_TAG_TPC_CHAINMASK_CONFIG,
+ WMI_TAG_IPA_OFFLOAD_ENABLE_DISABLE_CMD,
+ WMI_TAG_SCAN_PROB_REQ_OUI_CMD,
+ WMI_TAG_KEY_MATERIAL,
+ WMI_TAG_TDLS_SET_OFFCHAN_MODE_CMD,
+ WMI_TAG_SET_LED_FLASHING_CMD,
+ WMI_TAG_MDNS_OFFLOAD_CMD,
+ WMI_TAG_MDNS_SET_FQDN_CMD,
+ WMI_TAG_MDNS_SET_RESP_CMD,
+ WMI_TAG_MDNS_GET_STATS_CMD,
+ WMI_TAG_MDNS_STATS_EVENT,
+ WMI_TAG_ROAM_INVOKE_CMD,
+ WMI_TAG_PDEV_RESUME_EVENT,
+ WMI_TAG_PDEV_SET_ANTENNA_DIVERSITY_CMD,
+ WMI_TAG_SAP_OFL_ENABLE_CMD,
+ WMI_TAG_SAP_OFL_ADD_STA_EVENT,
+ WMI_TAG_SAP_OFL_DEL_STA_EVENT,
+ WMI_TAG_APFIND_CMD_PARAM,
+ WMI_TAG_APFIND_EVENT_HDR,
+ WMI_TAG_OCB_SET_SCHED_CMD,
+ WMI_TAG_OCB_SET_SCHED_EVENT,
+ WMI_TAG_OCB_SET_CONFIG_CMD,
+ WMI_TAG_OCB_SET_CONFIG_RESP_EVENT,
+ WMI_TAG_OCB_SET_UTC_TIME_CMD,
+ WMI_TAG_OCB_START_TIMING_ADVERT_CMD,
+ WMI_TAG_OCB_STOP_TIMING_ADVERT_CMD,
+ WMI_TAG_OCB_GET_TSF_TIMER_CMD,
+ WMI_TAG_OCB_GET_TSF_TIMER_RESP_EVENT,
+ WMI_TAG_DCC_GET_STATS_CMD,
+ WMI_TAG_DCC_CHANNEL_STATS_REQUEST,
+ WMI_TAG_DCC_GET_STATS_RESP_EVENT,
+ WMI_TAG_DCC_CLEAR_STATS_CMD,
+ WMI_TAG_DCC_UPDATE_NDL_CMD,
+ WMI_TAG_DCC_UPDATE_NDL_RESP_EVENT,
+ WMI_TAG_DCC_STATS_EVENT,
+ WMI_TAG_OCB_CHANNEL,
+ WMI_TAG_OCB_SCHEDULE_ELEMENT,
+ WMI_TAG_DCC_NDL_STATS_PER_CHANNEL,
+ WMI_TAG_DCC_NDL_CHAN,
+ WMI_TAG_QOS_PARAMETER,
+ WMI_TAG_DCC_NDL_ACTIVE_STATE_CONFIG,
+ WMI_TAG_ROAM_SCAN_EXTENDED_THRESHOLD_PARAM,
+ WMI_TAG_ROAM_FILTER,
+ WMI_TAG_PASSPOINT_CONFIG_CMD,
+ WMI_TAG_PASSPOINT_EVENT_HDR,
+ WMI_TAG_EXTSCAN_CONFIGURE_HOTLIST_SSID_MONITOR_CMD,
+ WMI_TAG_EXTSCAN_HOTLIST_SSID_MATCH_EVENT,
+ WMI_TAG_VDEV_TSF_TSTAMP_ACTION_CMD,
+ WMI_TAG_VDEV_TSF_REPORT_EVENT,
+ WMI_TAG_GET_FW_MEM_DUMP,
+ WMI_TAG_UPDATE_FW_MEM_DUMP,
+ WMI_TAG_FW_MEM_DUMP_PARAMS,
+ WMI_TAG_DEBUG_MESG_FLUSH,
+ WMI_TAG_DEBUG_MESG_FLUSH_COMPLETE,
+ WMI_TAG_PEER_SET_RATE_REPORT_CONDITION,
+ WMI_TAG_ROAM_SUBNET_CHANGE_CONFIG,
+ WMI_TAG_VDEV_SET_IE_CMD,
+ WMI_TAG_RSSI_BREACH_MONITOR_CONFIG,
+ WMI_TAG_RSSI_BREACH_EVENT,
+ WMI_TAG_WOW_EVENT_INITIAL_WAKEUP,
+ WMI_TAG_SOC_SET_PCL_CMD,
+ WMI_TAG_SOC_SET_HW_MODE_CMD,
+ WMI_TAG_SOC_SET_HW_MODE_RESPONSE_EVENT,
+ WMI_TAG_SOC_HW_MODE_TRANSITION_EVENT,
+ WMI_TAG_VDEV_TXRX_STREAMS,
+ WMI_TAG_SOC_SET_HW_MODE_RESPONSE_VDEV_MAC_ENTRY,
+ WMI_TAG_SOC_SET_DUAL_MAC_CONFIG_CMD,
+ WMI_TAG_SOC_SET_DUAL_MAC_CONFIG_RESPONSE_EVENT,
+ WMI_TAG_WOW_IOAC_SOCK_PATTERN_T,
+ WMI_TAG_WOW_ENABLE_ICMPV6_NA_FLT_CMD,
+ WMI_TAG_DIAG_EVENT_LOG_CONFIG,
+ WMI_TAG_DIAG_EVENT_LOG_SUPPORTED_EVENT_FIXED_PARAMS,
+ WMI_TAG_PACKET_FILTER_CONFIG,
+ WMI_TAG_PACKET_FILTER_ENABLE,
+ WMI_TAG_SAP_SET_BLACKLIST_PARAM_CMD,
+ WMI_TAG_MGMT_TX_SEND_CMD,
+ WMI_TAG_MGMT_TX_COMPL_EVENT,
+ WMI_TAG_SOC_SET_ANTENNA_MODE_CMD,
+ WMI_TAG_WOW_UDP_SVC_OFLD_CMD,
+ WMI_TAG_LRO_INFO_CMD,
+ WMI_TAG_ROAM_EARLYSTOP_RSSI_THRES_PARAM,
+ WMI_TAG_SERVICE_READY_EXT_EVENT,
+ WMI_TAG_MAWC_SENSOR_REPORT_IND_CMD,
+ WMI_TAG_MAWC_ENABLE_SENSOR_EVENT,
+ WMI_TAG_ROAM_CONFIGURE_MAWC_CMD,
+ WMI_TAG_NLO_CONFIGURE_MAWC_CMD,
+ WMI_TAG_EXTSCAN_CONFIGURE_MAWC_CMD,
+ WMI_TAG_PEER_ASSOC_CONF_EVENT,
+ WMI_TAG_WOW_HOSTWAKEUP_GPIO_PIN_PATTERN_CONFIG_CMD,
+ WMI_TAG_AP_PS_EGAP_PARAM_CMD,
+ WMI_TAG_AP_PS_EGAP_INFO_EVENT,
+ WMI_TAG_PMF_OFFLOAD_SET_SA_QUERY_CMD,
+ WMI_TAG_TRANSFER_DATA_TO_FLASH_CMD,
+ WMI_TAG_TRANSFER_DATA_TO_FLASH_COMPLETE_EVENT,
+ WMI_TAG_SCPC_EVENT,
+ WMI_TAG_AP_PS_EGAP_INFO_CHAINMASK_LIST,
+ WMI_TAG_STA_SMPS_FORCE_MODE_COMPLETE_EVENT,
+ WMI_TAG_BPF_GET_CAPABILITY_CMD,
+ WMI_TAG_BPF_CAPABILITY_INFO_EVT,
+ WMI_TAG_BPF_GET_VDEV_STATS_CMD,
+ WMI_TAG_BPF_VDEV_STATS_INFO_EVT,
+ WMI_TAG_BPF_SET_VDEV_INSTRUCTIONS_CMD,
+ WMI_TAG_BPF_DEL_VDEV_INSTRUCTIONS_CMD,
+ WMI_TAG_VDEV_DELETE_RESP_EVENT,
+ WMI_TAG_PEER_DELETE_RESP_EVENT,
+ WMI_TAG_ROAM_DENSE_THRES_PARAM,
+ WMI_TAG_ENLO_CANDIDATE_SCORE_PARAM,
+ WMI_TAG_PEER_UPDATE_WDS_ENTRY_CMD,
+ WMI_TAG_VDEV_CONFIG_RATEMASK,
+ WMI_TAG_PDEV_FIPS_CMD,
+ WMI_TAG_PDEV_SMART_ANT_ENABLE_CMD,
+ WMI_TAG_PDEV_SMART_ANT_SET_RX_ANTENNA_CMD,
+ WMI_TAG_PEER_SMART_ANT_SET_TX_ANTENNA_CMD,
+ WMI_TAG_PEER_SMART_ANT_SET_TRAIN_ANTENNA_CMD,
+ WMI_TAG_PEER_SMART_ANT_SET_NODE_CONFIG_OPS_CMD,
+ WMI_TAG_PDEV_SET_ANT_SWITCH_TBL_CMD,
+ WMI_TAG_PDEV_SET_CTL_TABLE_CMD,
+ WMI_TAG_PDEV_SET_MIMOGAIN_TABLE_CMD,
+ WMI_TAG_FWTEST_SET_PARAM_CMD,
+ WMI_TAG_PEER_ATF_REQUEST,
+ WMI_TAG_VDEV_ATF_REQUEST,
+ WMI_TAG_PDEV_GET_ANI_CCK_CONFIG_CMD,
+ WMI_TAG_PDEV_GET_ANI_OFDM_CONFIG_CMD,
+ WMI_TAG_INST_RSSI_STATS_RESP,
+ WMI_TAG_MED_UTIL_REPORT_EVENT,
+ WMI_TAG_PEER_STA_PS_STATECHANGE_EVENT,
+ WMI_TAG_WDS_ADDR_EVENT,
+ WMI_TAG_PEER_RATECODE_LIST_EVENT,
+ WMI_TAG_PDEV_NFCAL_POWER_ALL_CHANNELS_EVENT,
+ WMI_TAG_PDEV_TPC_EVENT,
+ WMI_TAG_ANI_OFDM_EVENT,
+ WMI_TAG_ANI_CCK_EVENT,
+ WMI_TAG_PDEV_CHANNEL_HOPPING_EVENT,
+ WMI_TAG_PDEV_FIPS_EVENT,
+ WMI_TAG_ATF_PEER_INFO,
+ WMI_TAG_PDEV_GET_TPC_CMD,
+ WMI_TAG_VDEV_FILTER_NRP_CONFIG_CMD,
+ WMI_TAG_QBOOST_CFG_CMD,
+ WMI_TAG_PDEV_SMART_ANT_GPIO_HANDLE,
+ WMI_TAG_PEER_SMART_ANT_SET_TX_ANTENNA_SERIES,
+ WMI_TAG_PEER_SMART_ANT_SET_TRAIN_ANTENNA_PARAM,
+ WMI_TAG_PDEV_SET_ANT_CTRL_CHAIN,
+ WMI_TAG_PEER_CCK_OFDM_RATE_INFO,
+ WMI_TAG_PEER_MCS_RATE_INFO,
+ WMI_TAG_PDEV_NFCAL_POWER_ALL_CHANNELS_NFDBR,
+ WMI_TAG_PDEV_NFCAL_POWER_ALL_CHANNELS_NFDBM,
+ WMI_TAG_PDEV_NFCAL_POWER_ALL_CHANNELS_FREQNUM,
+ WMI_TAG_MU_REPORT_TOTAL_MU,
+ WMI_TAG_VDEV_SET_DSCP_TID_MAP_CMD,
+ WMI_TAG_ROAM_SET_MBO,
+ WMI_TAG_MIB_STATS_ENABLE_CMD,
+ WMI_TAG_NAN_DISC_IFACE_CREATED_EVENT,
+ WMI_TAG_NAN_DISC_IFACE_DELETED_EVENT,
+ WMI_TAG_NAN_STARTED_CLUSTER_EVENT,
+ WMI_TAG_NAN_JOINED_CLUSTER_EVENT,
+ WMI_TAG_NDI_GET_CAP_REQ,
+ WMI_TAG_NDP_INITIATOR_REQ,
+ WMI_TAG_NDP_RESPONDER_REQ,
+ WMI_TAG_NDP_END_REQ,
+ WMI_TAG_NDI_CAP_RSP_EVENT,
+ WMI_TAG_NDP_INITIATOR_RSP_EVENT,
+ WMI_TAG_NDP_RESPONDER_RSP_EVENT,
+ WMI_TAG_NDP_END_RSP_EVENT,
+ WMI_TAG_NDP_INDICATION_EVENT,
+ WMI_TAG_NDP_CONFIRM_EVENT,
+ WMI_TAG_NDP_END_INDICATION_EVENT,
+ WMI_TAG_VDEV_SET_QUIET_CMD,
+ WMI_TAG_PDEV_SET_PCL_CMD,
+ WMI_TAG_PDEV_SET_HW_MODE_CMD,
+ WMI_TAG_PDEV_SET_MAC_CONFIG_CMD,
+ WMI_TAG_PDEV_SET_ANTENNA_MODE_CMD,
+ WMI_TAG_PDEV_SET_HW_MODE_RESPONSE_EVENT,
+ WMI_TAG_PDEV_HW_MODE_TRANSITION_EVENT,
+ WMI_TAG_PDEV_SET_HW_MODE_RESPONSE_VDEV_MAC_ENTRY,
+ WMI_TAG_PDEV_SET_MAC_CONFIG_RESPONSE_EVENT,
+ WMI_TAG_COEX_CONFIG_CMD,
+ WMI_TAG_CONFIG_ENHANCED_MCAST_FILTER,
+ WMI_TAG_CHAN_AVOID_RPT_ALLOW_CMD,
+ WMI_TAG_SET_PERIODIC_CHANNEL_STATS_CONFIG,
+ WMI_TAG_VDEV_SET_CUSTOM_AGGR_SIZE_CMD,
+ WMI_TAG_PDEV_WAL_POWER_DEBUG_CMD,
+ WMI_TAG_MAC_PHY_CAPABILITIES,
+ WMI_TAG_HW_MODE_CAPABILITIES,
+ WMI_TAG_SOC_MAC_PHY_HW_MODE_CAPS,
+ WMI_TAG_HAL_REG_CAPABILITIES_EXT,
+ WMI_TAG_SOC_HAL_REG_CAPABILITIES,
+ WMI_TAG_VDEV_WISA_CMD,
+ WMI_TAG_TX_POWER_LEVEL_STATS_EVT,
+ WMI_TAG_SCAN_ADAPTIVE_DWELL_PARAMETERS_TLV,
+ WMI_TAG_SCAN_ADAPTIVE_DWELL_CONFIG,
+ WMI_TAG_WOW_SET_ACTION_WAKE_UP_CMD,
+ WMI_TAG_NDP_END_RSP_PER_NDI,
+ WMI_TAG_PEER_BWF_REQUEST,
+ WMI_TAG_BWF_PEER_INFO,
+ WMI_TAG_DBGLOG_TIME_STAMP_SYNC_CMD,
+ WMI_TAG_RMC_SET_LEADER_CMD,
+ WMI_TAG_RMC_MANUAL_LEADER_EVENT,
+ WMI_TAG_PER_CHAIN_RSSI_STATS,
+ WMI_TAG_RSSI_STATS,
+ WMI_TAG_P2P_LO_START_CMD,
+ WMI_TAG_P2P_LO_STOP_CMD,
+ WMI_TAG_P2P_LO_STOPPED_EVENT,
+ WMI_TAG_REORDER_QUEUE_SETUP_CMD,
+ WMI_TAG_REORDER_QUEUE_REMOVE_CMD,
+ WMI_TAG_SET_MULTIPLE_MCAST_FILTER_CMD,
+ WMI_TAG_MGMT_TX_COMPL_BUNDLE_EVENT,
+ WMI_TAG_READ_DATA_FROM_FLASH_CMD,
+ WMI_TAG_READ_DATA_FROM_FLASH_EVENT,
+ WMI_TAG_PDEV_SET_REORDER_TIMEOUT_VAL_CMD,
+ WMI_TAG_PEER_SET_RX_BLOCKSIZE_CMD,
+ WMI_TAG_PDEV_SET_WAKEUP_CONFIG_CMDID,
+ WMI_TAG_TLV_BUF_LEN_PARAM,
+ WMI_TAG_SERVICE_AVAILABLE_EVENT,
+ WMI_TAG_PEER_ANTDIV_INFO_REQ_CMD,
+ WMI_TAG_PEER_ANTDIV_INFO_EVENT,
+ WMI_TAG_PEER_ANTDIV_INFO,
+ WMI_TAG_PDEV_GET_ANTDIV_STATUS_CMD,
+ WMI_TAG_PDEV_ANTDIV_STATUS_EVENT,
+ WMI_TAG_MNT_FILTER_CMD,
+ WMI_TAG_GET_CHIP_POWER_STATS_CMD,
+ WMI_TAG_PDEV_CHIP_POWER_STATS_EVENT,
+ WMI_TAG_COEX_GET_ANTENNA_ISOLATION_CMD,
+ WMI_TAG_COEX_REPORT_ISOLATION_EVENT,
+ WMI_TAG_CHAN_CCA_STATS,
+ WMI_TAG_PEER_SIGNAL_STATS,
+ WMI_TAG_TX_STATS,
+ WMI_TAG_PEER_AC_TX_STATS,
+ WMI_TAG_RX_STATS,
+ WMI_TAG_PEER_AC_RX_STATS,
+ WMI_TAG_REPORT_STATS_EVENT,
+ WMI_TAG_CHAN_CCA_STATS_THRESH,
+ WMI_TAG_PEER_SIGNAL_STATS_THRESH,
+ WMI_TAG_TX_STATS_THRESH,
+ WMI_TAG_RX_STATS_THRESH,
+ WMI_TAG_PDEV_SET_STATS_THRESHOLD_CMD,
+ WMI_TAG_REQUEST_WLAN_STATS_CMD,
+ WMI_TAG_RX_AGGR_FAILURE_EVENT,
+ WMI_TAG_RX_AGGR_FAILURE_INFO,
+ WMI_TAG_VDEV_ENCRYPT_DECRYPT_DATA_REQ_CMD,
+ WMI_TAG_VDEV_ENCRYPT_DECRYPT_DATA_RESP_EVENT,
+ WMI_TAG_PDEV_BAND_TO_MAC,
+ WMI_TAG_TBTT_OFFSET_INFO,
+ WMI_TAG_TBTT_OFFSET_EXT_EVENT,
+ WMI_TAG_SAR_LIMITS_CMD,
+ WMI_TAG_SAR_LIMIT_CMD_ROW,
+ WMI_TAG_PDEV_DFS_PHYERR_OFFLOAD_ENABLE_CMD,
+ WMI_TAG_PDEV_DFS_PHYERR_OFFLOAD_DISABLE_CMD,
+ WMI_TAG_VDEV_ADFS_CH_CFG_CMD,
+ WMI_TAG_VDEV_ADFS_OCAC_ABORT_CMD,
+ WMI_TAG_PDEV_DFS_RADAR_DETECTION_EVENT,
+ WMI_TAG_VDEV_ADFS_OCAC_COMPLETE_EVENT,
+ WMI_TAG_VDEV_DFS_CAC_COMPLETE_EVENT,
+ WMI_TAG_VENDOR_OUI,
+ WMI_TAG_REQUEST_RCPI_CMD,
+ WMI_TAG_UPDATE_RCPI_EVENT,
+ WMI_TAG_REQUEST_PEER_STATS_INFO_CMD,
+ WMI_TAG_PEER_STATS_INFO,
+ WMI_TAG_PEER_STATS_INFO_EVENT,
+ WMI_TAG_PKGID_EVENT,
+ WMI_TAG_CONNECTED_NLO_RSSI_PARAMS,
+ WMI_TAG_SET_CURRENT_COUNTRY_CMD,
+ WMI_TAG_REGULATORY_RULE_STRUCT,
+ WMI_TAG_REG_CHAN_LIST_CC_EVENT,
+ WMI_TAG_11D_SCAN_START_CMD,
+ WMI_TAG_11D_SCAN_STOP_CMD,
+ WMI_TAG_11D_NEW_COUNTRY_EVENT,
+ WMI_TAG_REQUEST_RADIO_CHAN_STATS_CMD,
+ WMI_TAG_RADIO_CHAN_STATS,
+ WMI_TAG_RADIO_CHAN_STATS_EVENT,
+ WMI_TAG_ROAM_PER_CONFIG,
+ WMI_TAG_VDEV_ADD_MAC_ADDR_TO_RX_FILTER_CMD,
+ WMI_TAG_VDEV_ADD_MAC_ADDR_TO_RX_FILTER_STATUS_EVENT,
+ WMI_TAG_BPF_SET_VDEV_ACTIVE_MODE_CMD,
+ WMI_TAG_HW_DATA_FILTER_CMD,
+ WMI_TAG_CONNECTED_NLO_BSS_BAND_RSSI_PREF,
+ WMI_TAG_PEER_OPER_MODE_CHANGE_EVENT,
+ WMI_TAG_CHIP_POWER_SAVE_FAILURE_DETECTED,
+ WMI_TAG_PDEV_MULTIPLE_VDEV_RESTART_REQUEST_CMD,
+ WMI_TAG_PDEV_CSA_SWITCH_COUNT_STATUS_EVENT,
+ WMI_TAG_PDEV_UPDATE_PKT_ROUTING_CMD,
+ WMI_TAG_PDEV_CHECK_CAL_VERSION_CMD,
+ WMI_TAG_PDEV_CHECK_CAL_VERSION_EVENT,
+ WMI_TAG_PDEV_SET_DIVERSITY_GAIN_CMD,
+ WMI_TAG_MAC_PHY_CHAINMASK_COMBO,
+ WMI_TAG_MAC_PHY_CHAINMASK_CAPABILITY,
+ WMI_TAG_VDEV_SET_ARP_STATS_CMD,
+ WMI_TAG_VDEV_GET_ARP_STATS_CMD,
+ WMI_TAG_VDEV_GET_ARP_STATS_EVENT,
+ WMI_TAG_IFACE_OFFLOAD_STATS,
+ WMI_TAG_REQUEST_STATS_CMD_SUB_STRUCT_PARAM,
+ WMI_TAG_RSSI_CTL_EXT,
+ WMI_TAG_SINGLE_PHYERR_EXT_RX_HDR,
+ WMI_TAG_COEX_BT_ACTIVITY_EVENT,
+ WMI_TAG_VDEV_GET_TX_POWER_CMD,
+ WMI_TAG_VDEV_TX_POWER_EVENT,
+ WMI_TAG_OFFCHAN_DATA_TX_COMPL_EVENT,
+ WMI_TAG_OFFCHAN_DATA_TX_SEND_CMD,
+ WMI_TAG_TX_SEND_PARAMS,
+ WMI_TAG_HE_RATE_SET,
+ WMI_TAG_CONGESTION_STATS,
+ WMI_TAG_SET_INIT_COUNTRY_CMD,
+ WMI_TAG_SCAN_DBS_DUTY_CYCLE,
+ WMI_TAG_SCAN_DBS_DUTY_CYCLE_PARAM_TLV,
+ WMI_TAG_PDEV_DIV_GET_RSSI_ANTID,
+ WMI_TAG_THERM_THROT_CONFIG_REQUEST,
+ WMI_TAG_THERM_THROT_LEVEL_CONFIG_INFO,
+ WMI_TAG_THERM_THROT_STATS_EVENT,
+ WMI_TAG_THERM_THROT_LEVEL_STATS_INFO,
+ WMI_TAG_PDEV_DIV_RSSI_ANTID_EVENT,
+ WMI_TAG_OEM_DMA_RING_CAPABILITIES,
+ WMI_TAG_OEM_DMA_RING_CFG_REQ,
+ WMI_TAG_OEM_DMA_RING_CFG_RSP,
+ WMI_TAG_OEM_INDIRECT_DATA,
+ WMI_TAG_OEM_DMA_BUF_RELEASE,
+ WMI_TAG_OEM_DMA_BUF_RELEASE_ENTRY,
+ WMI_TAG_PDEV_BSS_CHAN_INFO_REQUEST,
+ WMI_TAG_PDEV_BSS_CHAN_INFO_EVENT,
+ WMI_TAG_ROAM_LCA_DISALLOW_CONFIG,
+ WMI_TAG_VDEV_LIMIT_OFFCHAN_CMD,
+ WMI_TAG_ROAM_RSSI_REJECTION_OCE_CONFIG,
+ WMI_TAG_UNIT_TEST_EVENT,
+ WMI_TAG_ROAM_FILS_OFFLOAD,
+ WMI_TAG_PDEV_UPDATE_PMK_CACHE_CMD,
+ WMI_TAG_PMK_CACHE,
+ WMI_TAG_PDEV_UPDATE_FILS_HLP_PKT_CMD,
+ WMI_TAG_ROAM_FILS_SYNCH,
+ WMI_TAG_GTK_OFFLOAD_EXTENDED,
+ WMI_TAG_ROAM_BG_SCAN_ROAMING,
+ WMI_TAG_OIC_PING_OFFLOAD_PARAMS_CMD,
+ WMI_TAG_OIC_PING_OFFLOAD_SET_ENABLE_CMD,
+ WMI_TAG_OIC_PING_HANDOFF_EVENT,
+ WMI_TAG_DHCP_LEASE_RENEW_OFFLOAD_CMD,
+ WMI_TAG_DHCP_LEASE_RENEW_EVENT,
+ WMI_TAG_BTM_CONFIG,
+ WMI_TAG_DEBUG_MESG_FW_DATA_STALL,
+ WMI_TAG_WLM_CONFIG_CMD,
+ WMI_TAG_PDEV_UPDATE_CTLTABLE_REQUEST,
+ WMI_TAG_PDEV_UPDATE_CTLTABLE_EVENT,
+ WMI_TAG_ROAM_CND_SCORING_PARAM,
+ WMI_TAG_PDEV_CONFIG_VENDOR_OUI_ACTION,
+ WMI_TAG_VENDOR_OUI_EXT,
+ WMI_TAG_ROAM_SYNCH_FRAME_EVENT,
+ WMI_TAG_FD_SEND_FROM_HOST_CMD,
+ WMI_TAG_ENABLE_FILS_CMD,
+ WMI_TAG_HOST_SWFDA_EVENT,
+ WMI_TAG_BCN_OFFLOAD_CTRL_CMD,
+ WMI_TAG_PDEV_SET_AC_TX_QUEUE_OPTIMIZED_CMD,
+ WMI_TAG_STATS_PERIOD,
+ WMI_TAG_NDL_SCHEDULE_UPDATE,
+ WMI_TAG_PEER_TID_MSDUQ_QDEPTH_THRESH_UPDATE_CMD,
+ WMI_TAG_MSDUQ_QDEPTH_THRESH_UPDATE,
+ WMI_TAG_PDEV_SET_RX_FILTER_PROMISCUOUS_CMD,
+ WMI_TAG_SAR2_RESULT_EVENT,
+ WMI_TAG_SAR_CAPABILITIES,
+ WMI_TAG_SAP_OBSS_DETECTION_CFG_CMD,
+ WMI_TAG_SAP_OBSS_DETECTION_INFO_EVT,
+ WMI_TAG_DMA_RING_CAPABILITIES,
+ WMI_TAG_DMA_RING_CFG_REQ,
+ WMI_TAG_DMA_RING_CFG_RSP,
+ WMI_TAG_DMA_BUF_RELEASE,
+ WMI_TAG_DMA_BUF_RELEASE_ENTRY,
+ WMI_TAG_SAR_GET_LIMITS_CMD,
+ WMI_TAG_SAR_GET_LIMITS_EVENT,
+ WMI_TAG_SAR_GET_LIMITS_EVENT_ROW,
+ WMI_TAG_OFFLOAD_11K_REPORT,
+ WMI_TAG_INVOKE_NEIGHBOR_REPORT,
+ WMI_TAG_NEIGHBOR_REPORT_OFFLOAD,
+ WMI_TAG_VDEV_SET_CONNECTIVITY_CHECK_STATS,
+ WMI_TAG_VDEV_GET_CONNECTIVITY_CHECK_STATS,
+ WMI_TAG_BPF_SET_VDEV_ENABLE_CMD,
+ WMI_TAG_BPF_SET_VDEV_WORK_MEMORY_CMD,
+ WMI_TAG_BPF_GET_VDEV_WORK_MEMORY_CMD,
+ WMI_TAG_BPF_GET_VDEV_WORK_MEMORY_RESP_EVT,
+ WMI_TAG_PDEV_GET_NFCAL_POWER,
+ WMI_TAG_BSS_COLOR_CHANGE_ENABLE,
+ WMI_TAG_OBSS_COLOR_COLLISION_DET_CONFIG,
+ WMI_TAG_OBSS_COLOR_COLLISION_EVT,
+ WMI_TAG_RUNTIME_DPD_RECAL_CMD,
+ WMI_TAG_TWT_ENABLE_CMD,
+ WMI_TAG_TWT_DISABLE_CMD,
+ WMI_TAG_TWT_ADD_DIALOG_CMD,
+ WMI_TAG_TWT_DEL_DIALOG_CMD,
+ WMI_TAG_TWT_PAUSE_DIALOG_CMD,
+ WMI_TAG_TWT_RESUME_DIALOG_CMD,
+ WMI_TAG_TWT_ENABLE_COMPLETE_EVENT,
+ WMI_TAG_TWT_DISABLE_COMPLETE_EVENT,
+ WMI_TAG_TWT_ADD_DIALOG_COMPLETE_EVENT,
+ WMI_TAG_TWT_DEL_DIALOG_COMPLETE_EVENT,
+ WMI_TAG_TWT_PAUSE_DIALOG_COMPLETE_EVENT,
+ WMI_TAG_TWT_RESUME_DIALOG_COMPLETE_EVENT,
+ WMI_TAG_REQUEST_ROAM_SCAN_STATS_CMD,
+ WMI_TAG_ROAM_SCAN_STATS_EVENT,
+ WMI_TAG_PEER_TID_CONFIGURATIONS_CMD,
+ WMI_TAG_VDEV_SET_CUSTOM_SW_RETRY_TH_CMD,
+ WMI_TAG_GET_TPC_POWER_CMD,
+ WMI_TAG_GET_TPC_POWER_EVENT,
+ WMI_TAG_DMA_BUF_RELEASE_SPECTRAL_META_DATA,
+ WMI_TAG_MOTION_DET_CONFIG_PARAMS_CMD,
+ WMI_TAG_MOTION_DET_BASE_LINE_CONFIG_PARAMS_CMD,
+ WMI_TAG_MOTION_DET_START_STOP_CMD,
+ WMI_TAG_MOTION_DET_BASE_LINE_START_STOP_CMD,
+ WMI_TAG_MOTION_DET_EVENT,
+ WMI_TAG_MOTION_DET_BASE_LINE_EVENT,
+ WMI_TAG_NDP_TRANSPORT_IP,
+ WMI_TAG_OBSS_SPATIAL_REUSE_SET_CMD,
+ WMI_TAG_ESP_ESTIMATE_EVENT,
+ WMI_TAG_NAN_HOST_CONFIG,
+ WMI_TAG_SPECTRAL_BIN_SCALING_PARAMS,
+ WMI_TAG_PEER_CFR_CAPTURE_CMD,
+ WMI_TAG_PEER_CHAN_WIDTH_SWITCH_CMD,
+ WMI_TAG_CHAN_WIDTH_PEER_LIST,
+ WMI_TAG_OBSS_SPATIAL_REUSE_SET_DEF_OBSS_THRESH_CMD,
+ WMI_TAG_PDEV_HE_TB_ACTION_FRM_CMD,
+ WMI_TAG_PEER_EXTD2_STATS,
+ WMI_TAG_HPCS_PULSE_START_CMD,
+ WMI_TAG_PDEV_CTL_FAILSAFE_CHECK_EVENT,
+ WMI_TAG_VDEV_CHAINMASK_CONFIG_CMD,
+ WMI_TAG_VDEV_BCN_OFFLOAD_QUIET_CONFIG_CMD,
+ WMI_TAG_NAN_EVENT_INFO,
+ WMI_TAG_NDP_CHANNEL_INFO,
+ WMI_TAG_NDP_CMD,
+ WMI_TAG_NDP_EVENT,
+ WMI_TAG_PDEV_PEER_PKTLOG_FILTER_CMD = 0x301,
+ WMI_TAG_PDEV_PEER_PKTLOG_FILTER_INFO,
+ WMI_TAG_FILS_DISCOVERY_TMPL_CMD = 0x344,
+ WMI_TAG_PDEV_SRG_BSS_COLOR_BITMAP_CMD = 0x37b,
+ WMI_TAG_PDEV_SRG_PARTIAL_BSSID_BITMAP_CMD,
+ WMI_TAG_PDEV_SRG_OBSS_COLOR_ENABLE_BITMAP_CMD = 0x381,
+ WMI_TAG_PDEV_SRG_OBSS_BSSID_ENABLE_BITMAP_CMD,
+ WMI_TAG_PDEV_NON_SRG_OBSS_COLOR_ENABLE_BITMAP_CMD,
+ WMI_TAG_PDEV_NON_SRG_OBSS_BSSID_ENABLE_BITMAP_CMD,
+ WMI_TAG_REGULATORY_RULE_EXT_STRUCT = 0x3A9,
+ WMI_TAG_REG_CHAN_LIST_CC_EXT_EVENT,
+ WMI_TAG_PDEV_SET_BIOS_SAR_TABLE_CMD = 0x3D8,
+ WMI_TAG_PDEV_SET_BIOS_GEO_TABLE_CMD,
+ WMI_TAG_MAX
+};
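+
+/* Illustrative sketch only, not part of the original header: every WMI TLV
+ * starts with a 32-bit header that combines one of the tags above with the
+ * payload length. This assumes the WMI_TLV_TAG and WMI_TLV_LEN field masks
+ * defined earlier in this file.
+ */
+static inline u32 ath11k_wmi_example_tlv_hdr(enum wmi_tlv_tag tag, u32 len)
+{
+ return FIELD_PREP(WMI_TLV_TAG, tag) | FIELD_PREP(WMI_TLV_LEN, len);
+}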
+
+enum wmi_tlv_service {
+ WMI_TLV_SERVICE_BEACON_OFFLOAD = 0,
+ WMI_TLV_SERVICE_SCAN_OFFLOAD = 1,
+ WMI_TLV_SERVICE_ROAM_SCAN_OFFLOAD = 2,
+ WMI_TLV_SERVICE_BCN_MISS_OFFLOAD = 3,
+ WMI_TLV_SERVICE_STA_PWRSAVE = 4,
+ WMI_TLV_SERVICE_STA_ADVANCED_PWRSAVE = 5,
+ WMI_TLV_SERVICE_AP_UAPSD = 6,
+ WMI_TLV_SERVICE_AP_DFS = 7,
+ WMI_TLV_SERVICE_11AC = 8,
+ WMI_TLV_SERVICE_BLOCKACK = 9,
+ WMI_TLV_SERVICE_PHYERR = 10,
+ WMI_TLV_SERVICE_BCN_FILTER = 11,
+ WMI_TLV_SERVICE_RTT = 12,
+ WMI_TLV_SERVICE_WOW = 13,
+ WMI_TLV_SERVICE_RATECTRL_CACHE = 14,
+ WMI_TLV_SERVICE_IRAM_TIDS = 15,
+ WMI_TLV_SERVICE_ARPNS_OFFLOAD = 16,
+ WMI_TLV_SERVICE_NLO = 17,
+ WMI_TLV_SERVICE_GTK_OFFLOAD = 18,
+ WMI_TLV_SERVICE_SCAN_SCH = 19,
+ WMI_TLV_SERVICE_CSA_OFFLOAD = 20,
+ WMI_TLV_SERVICE_CHATTER = 21,
+ WMI_TLV_SERVICE_COEX_FREQAVOID = 22,
+ WMI_TLV_SERVICE_PACKET_POWER_SAVE = 23,
+ WMI_TLV_SERVICE_FORCE_FW_HANG = 24,
+ WMI_TLV_SERVICE_GPIO = 25,
+ WMI_TLV_SERVICE_STA_DTIM_PS_MODULATED_DTIM = 26,
+ WMI_STA_UAPSD_BASIC_AUTO_TRIG = 27,
+ WMI_STA_UAPSD_VAR_AUTO_TRIG = 28,
+ WMI_TLV_SERVICE_STA_KEEP_ALIVE = 29,
+ WMI_TLV_SERVICE_TX_ENCAP = 30,
+ WMI_TLV_SERVICE_AP_PS_DETECT_OUT_OF_SYNC = 31,
+ WMI_TLV_SERVICE_EARLY_RX = 32,
+ WMI_TLV_SERVICE_STA_SMPS = 33,
+ WMI_TLV_SERVICE_FWTEST = 34,
+ WMI_TLV_SERVICE_STA_WMMAC = 35,
+ WMI_TLV_SERVICE_TDLS = 36,
+ WMI_TLV_SERVICE_BURST = 37,
+ WMI_TLV_SERVICE_MCC_BCN_INTERVAL_CHANGE = 38,
+ WMI_TLV_SERVICE_ADAPTIVE_OCS = 39,
+ WMI_TLV_SERVICE_BA_SSN_SUPPORT = 40,
+ WMI_TLV_SERVICE_FILTER_IPSEC_NATKEEPALIVE = 41,
+ WMI_TLV_SERVICE_WLAN_HB = 42,
+ WMI_TLV_SERVICE_LTE_ANT_SHARE_SUPPORT = 43,
+ WMI_TLV_SERVICE_BATCH_SCAN = 44,
+ WMI_TLV_SERVICE_QPOWER = 45,
+ WMI_TLV_SERVICE_PLMREQ = 46,
+ WMI_TLV_SERVICE_THERMAL_MGMT = 47,
+ WMI_TLV_SERVICE_RMC = 48,
+ WMI_TLV_SERVICE_MHF_OFFLOAD = 49,
+ WMI_TLV_SERVICE_COEX_SAR = 50,
+ WMI_TLV_SERVICE_BCN_TXRATE_OVERRIDE = 51,
+ WMI_TLV_SERVICE_NAN = 52,
+ WMI_TLV_SERVICE_L1SS_STAT = 53,
+ WMI_TLV_SERVICE_ESTIMATE_LINKSPEED = 54,
+ WMI_TLV_SERVICE_OBSS_SCAN = 55,
+ WMI_TLV_SERVICE_TDLS_OFFCHAN = 56,
+ WMI_TLV_SERVICE_TDLS_UAPSD_BUFFER_STA = 57,
+ WMI_TLV_SERVICE_TDLS_UAPSD_SLEEP_STA = 58,
+ WMI_TLV_SERVICE_IBSS_PWRSAVE = 59,
+ WMI_TLV_SERVICE_LPASS = 60,
+ WMI_TLV_SERVICE_EXTSCAN = 61,
+ WMI_TLV_SERVICE_D0WOW = 62,
+ WMI_TLV_SERVICE_HSOFFLOAD = 63,
+ WMI_TLV_SERVICE_ROAM_HO_OFFLOAD = 64,
+ WMI_TLV_SERVICE_RX_FULL_REORDER = 65,
+ WMI_TLV_SERVICE_DHCP_OFFLOAD = 66,
+ WMI_TLV_SERVICE_STA_RX_IPA_OFFLOAD_SUPPORT = 67,
+ WMI_TLV_SERVICE_MDNS_OFFLOAD = 68,
+ WMI_TLV_SERVICE_SAP_AUTH_OFFLOAD = 69,
+ WMI_TLV_SERVICE_DUAL_BAND_SIMULTANEOUS_SUPPORT = 70,
+ WMI_TLV_SERVICE_OCB = 71,
+ WMI_TLV_SERVICE_AP_ARPNS_OFFLOAD = 72,
+ WMI_TLV_SERVICE_PER_BAND_CHAINMASK_SUPPORT = 73,
+ WMI_TLV_SERVICE_PACKET_FILTER_OFFLOAD = 74,
+ WMI_TLV_SERVICE_MGMT_TX_HTT = 75,
+ WMI_TLV_SERVICE_MGMT_TX_WMI = 76,
+ WMI_TLV_SERVICE_EXT_MSG = 77,
+ WMI_TLV_SERVICE_MAWC = 78,
+ WMI_TLV_SERVICE_PEER_ASSOC_CONF = 79,
+ WMI_TLV_SERVICE_EGAP = 80,
+ WMI_TLV_SERVICE_STA_PMF_OFFLOAD = 81,
+ WMI_TLV_SERVICE_UNIFIED_WOW_CAPABILITY = 82,
+ WMI_TLV_SERVICE_ENHANCED_PROXY_STA = 83,
+ WMI_TLV_SERVICE_ATF = 84,
+ WMI_TLV_SERVICE_COEX_GPIO = 85,
+ WMI_TLV_SERVICE_AUX_SPECTRAL_INTF = 86,
+ WMI_TLV_SERVICE_AUX_CHAN_LOAD_INTF = 87,
+ WMI_TLV_SERVICE_BSS_CHANNEL_INFO_64 = 88,
+ WMI_TLV_SERVICE_ENTERPRISE_MESH = 89,
+ WMI_TLV_SERVICE_RESTRT_CHNL_SUPPORT = 90,
+ WMI_TLV_SERVICE_BPF_OFFLOAD = 91,
+ WMI_TLV_SERVICE_SYNC_DELETE_CMDS = 92,
+ WMI_TLV_SERVICE_SMART_ANTENNA_SW_SUPPORT = 93,
+ WMI_TLV_SERVICE_SMART_ANTENNA_HW_SUPPORT = 94,
+ WMI_TLV_SERVICE_RATECTRL_LIMIT_MAX_MIN_RATES = 95,
+ WMI_TLV_SERVICE_NAN_DATA = 96,
+ WMI_TLV_SERVICE_NAN_RTT = 97,
+ WMI_TLV_SERVICE_11AX = 98,
+ WMI_TLV_SERVICE_DEPRECATED_REPLACE = 99,
+ WMI_TLV_SERVICE_TDLS_CONN_TRACKER_IN_HOST_MODE = 100,
+ WMI_TLV_SERVICE_ENHANCED_MCAST_FILTER = 101,
+ WMI_TLV_SERVICE_PERIODIC_CHAN_STAT_SUPPORT = 102,
+ WMI_TLV_SERVICE_MESH_11S = 103,
+ WMI_TLV_SERVICE_HALF_RATE_QUARTER_RATE_SUPPORT = 104,
+ WMI_TLV_SERVICE_VDEV_RX_FILTER = 105,
+ WMI_TLV_SERVICE_P2P_LISTEN_OFFLOAD_SUPPORT = 106,
+ WMI_TLV_SERVICE_MARK_FIRST_WAKEUP_PACKET = 107,
+ WMI_TLV_SERVICE_MULTIPLE_MCAST_FILTER_SET = 108,
+ WMI_TLV_SERVICE_HOST_MANAGED_RX_REORDER = 109,
+ WMI_TLV_SERVICE_FLASH_RDWR_SUPPORT = 110,
+ WMI_TLV_SERVICE_WLAN_STATS_REPORT = 111,
+ WMI_TLV_SERVICE_TX_MSDU_ID_NEW_PARTITION_SUPPORT = 112,
+ WMI_TLV_SERVICE_DFS_PHYERR_OFFLOAD = 113,
+ WMI_TLV_SERVICE_RCPI_SUPPORT = 114,
+ WMI_TLV_SERVICE_FW_MEM_DUMP_SUPPORT = 115,
+ WMI_TLV_SERVICE_PEER_STATS_INFO = 116,
+ WMI_TLV_SERVICE_REGULATORY_DB = 117,
+ WMI_TLV_SERVICE_11D_OFFLOAD = 118,
+ WMI_TLV_SERVICE_HW_DATA_FILTERING = 119,
+ WMI_TLV_SERVICE_MULTIPLE_VDEV_RESTART = 120,
+ WMI_TLV_SERVICE_PKT_ROUTING = 121,
+ WMI_TLV_SERVICE_CHECK_CAL_VERSION = 122,
+ WMI_TLV_SERVICE_OFFCHAN_TX_WMI = 123,
+ WMI_TLV_SERVICE_8SS_TX_BFEE = 124,
+ WMI_TLV_SERVICE_EXTENDED_NSS_SUPPORT = 125,
+ WMI_TLV_SERVICE_ACK_TIMEOUT = 126,
+ WMI_TLV_SERVICE_PDEV_BSS_CHANNEL_INFO_64 = 127,
+
+ /* The first 128 bits */
+ WMI_MAX_SERVICE = 128,
+
+ WMI_TLV_SERVICE_CHAN_LOAD_INFO = 128,
+ WMI_TLV_SERVICE_TX_PPDU_INFO_STATS_SUPPORT = 129,
+ WMI_TLV_SERVICE_VDEV_LIMIT_OFFCHAN_SUPPORT = 130,
+ WMI_TLV_SERVICE_FILS_SUPPORT = 131,
+ WMI_TLV_SERVICE_WLAN_OIC_PING_OFFLOAD = 132,
+ WMI_TLV_SERVICE_WLAN_DHCP_RENEW = 133,
+ WMI_TLV_SERVICE_MAWC_SUPPORT = 134,
+ WMI_TLV_SERVICE_VDEV_LATENCY_CONFIG = 135,
+ WMI_TLV_SERVICE_PDEV_UPDATE_CTLTABLE_SUPPORT = 136,
+ WMI_TLV_SERVICE_PKTLOG_SUPPORT_OVER_HTT = 137,
+ WMI_TLV_SERVICE_VDEV_MULTI_GROUP_KEY_SUPPORT = 138,
+ WMI_TLV_SERVICE_SCAN_PHYMODE_SUPPORT = 139,
+ WMI_TLV_SERVICE_THERM_THROT = 140,
+ WMI_TLV_SERVICE_BCN_OFFLOAD_START_STOP_SUPPORT = 141,
+ WMI_TLV_SERVICE_WOW_WAKEUP_BY_TIMER_PATTERN = 142,
+ WMI_TLV_SERVICE_PEER_MAP_UNMAP_V2_SUPPORT = 143,
+ WMI_TLV_SERVICE_OFFCHAN_DATA_TID_SUPPORT = 144,
+ WMI_TLV_SERVICE_RX_PROMISC_ENABLE_SUPPORT = 145,
+ WMI_TLV_SERVICE_SUPPORT_DIRECT_DMA = 146,
+ WMI_TLV_SERVICE_AP_OBSS_DETECTION_OFFLOAD = 147,
+ WMI_TLV_SERVICE_11K_NEIGHBOUR_REPORT_SUPPORT = 148,
+ WMI_TLV_SERVICE_LISTEN_INTERVAL_OFFLOAD_SUPPORT = 149,
+ WMI_TLV_SERVICE_BSS_COLOR_OFFLOAD = 150,
+ WMI_TLV_SERVICE_RUNTIME_DPD_RECAL = 151,
+ WMI_TLV_SERVICE_STA_TWT = 152,
+ WMI_TLV_SERVICE_AP_TWT = 153,
+ WMI_TLV_SERVICE_GMAC_OFFLOAD_SUPPORT = 154,
+ WMI_TLV_SERVICE_SPOOF_MAC_SUPPORT = 155,
+ WMI_TLV_SERVICE_PEER_TID_CONFIGS_SUPPORT = 156,
+ WMI_TLV_SERVICE_VDEV_SWRETRY_PER_AC_CONFIG_SUPPORT = 157,
+ WMI_TLV_SERVICE_DUAL_BEACON_ON_SINGLE_MAC_SCC_SUPPORT = 158,
+ WMI_TLV_SERVICE_DUAL_BEACON_ON_SINGLE_MAC_MCC_SUPPORT = 159,
+ WMI_TLV_SERVICE_MOTION_DET = 160,
+ WMI_TLV_SERVICE_INFRA_MBSSID = 161,
+ WMI_TLV_SERVICE_OBSS_SPATIAL_REUSE = 162,
+ WMI_TLV_SERVICE_VDEV_DIFFERENT_BEACON_INTERVAL_SUPPORT = 163,
+ WMI_TLV_SERVICE_NAN_DBS_SUPPORT = 164,
+ WMI_TLV_SERVICE_NDI_DBS_SUPPORT = 165,
+ WMI_TLV_SERVICE_NAN_SAP_SUPPORT = 166,
+ WMI_TLV_SERVICE_NDI_SAP_SUPPORT = 167,
+ WMI_TLV_SERVICE_CFR_CAPTURE_SUPPORT = 168,
+ WMI_TLV_SERVICE_CFR_CAPTURE_IND_MSG_TYPE_1 = 169,
+ WMI_TLV_SERVICE_ESP_SUPPORT = 170,
+ WMI_TLV_SERVICE_PEER_CHWIDTH_CHANGE = 171,
+ WMI_TLV_SERVICE_WLAN_HPCS_PULSE = 172,
+ WMI_TLV_SERVICE_PER_VDEV_CHAINMASK_CONFIG_SUPPORT = 173,
+ WMI_TLV_SERVICE_TX_DATA_MGMT_ACK_RSSI = 174,
+ WMI_TLV_SERVICE_NAN_DISABLE_SUPPORT = 175,
+ WMI_TLV_SERVICE_HTT_H2T_NO_HTC_HDR_LEN_IN_MSG_LEN = 176,
+ WMI_TLV_SERVICE_COEX_SUPPORT_UNEQUAL_ISOLATION = 177,
+ WMI_TLV_SERVICE_HW_DB2DBM_CONVERSION_SUPPORT = 178,
+ WMI_TLV_SERVICE_SUPPORT_EXTEND_ADDRESS = 179,
+ WMI_TLV_SERVICE_BEACON_RECEPTION_STATS = 180,
+ WMI_TLV_SERVICE_FETCH_TX_PN = 181,
+ WMI_TLV_SERVICE_PEER_UNMAP_RESPONSE_SUPPORT = 182,
+ WMI_TLV_SERVICE_TX_PER_PEER_AMPDU_SIZE = 183,
+ WMI_TLV_SERVICE_BSS_COLOR_SWITCH_COUNT = 184,
+ WMI_TLV_SERVICE_HTT_PEER_STATS_SUPPORT = 185,
+ WMI_TLV_SERVICE_UL_RU26_ALLOWED = 186,
+ WMI_TLV_SERVICE_GET_MWS_COEX_STATE = 187,
+ WMI_TLV_SERVICE_GET_MWS_DPWB_STATE = 188,
+ WMI_TLV_SERVICE_GET_MWS_TDM_STATE = 189,
+ WMI_TLV_SERVICE_GET_MWS_IDRX_STATE = 190,
+ WMI_TLV_SERVICE_GET_MWS_ANTENNA_SHARING_STATE = 191,
+ WMI_TLV_SERVICE_ENHANCED_TPC_CONFIG_EVENT = 192,
+ WMI_TLV_SERVICE_WLM_STATS_REQUEST = 193,
+ WMI_TLV_SERVICE_EXT_PEER_TID_CONFIGS_SUPPORT = 194,
+ WMI_TLV_SERVICE_WPA3_FT_SAE_SUPPORT = 195,
+ WMI_TLV_SERVICE_WPA3_FT_SUITE_B_SUPPORT = 196,
+ WMI_TLV_SERVICE_VOW_ENABLE = 197,
+ WMI_TLV_SERVICE_CFR_CAPTURE_IND_EVT_TYPE_1 = 198,
+ WMI_TLV_SERVICE_BROADCAST_TWT = 199,
+ WMI_TLV_SERVICE_RAP_DETECTION_SUPPORT = 200,
+ WMI_TLV_SERVICE_PS_TDCC = 201,
+ WMI_TLV_SERVICE_THREE_WAY_COEX_CONFIG_LEGACY = 202,
+ WMI_TLV_SERVICE_THREE_WAY_COEX_CONFIG_OVERRIDE = 203,
+ WMI_TLV_SERVICE_TX_PWR_PER_PEER = 204,
+ WMI_TLV_SERVICE_STA_PLUS_STA_SUPPORT = 205,
+ WMI_TLV_SERVICE_WPA3_FT_FILS = 206,
+ WMI_TLV_SERVICE_ADAPTIVE_11R_ROAM = 207,
+ WMI_TLV_SERVICE_CHAN_RF_CHARACTERIZATION_INFO = 208,
+ WMI_TLV_SERVICE_FW_IFACE_COMBINATION_SUPPORT = 209,
+ WMI_TLV_SERVICE_TX_COMPL_TSF64 = 210,
+ WMI_TLV_SERVICE_DSM_ROAM_FILTER = 211,
+ WMI_TLV_SERVICE_PACKET_CAPTURE_SUPPORT = 212,
+ WMI_TLV_SERVICE_PER_PEER_HTT_STATS_RESET = 213,
+ WMI_TLV_SERVICE_FREQINFO_IN_METADATA = 219,
+ WMI_TLV_SERVICE_EXT2_MSG = 220,
+ WMI_TLV_SERVICE_PEER_POWER_SAVE_DURATION_SUPPORT = 246,
+ WMI_TLV_SERVICE_SRG_SRP_SPATIAL_REUSE_SUPPORT = 249,
+ WMI_TLV_SERVICE_MBSS_PARAM_IN_VDEV_START_SUPPORT = 253,
+ WMI_TLV_SERVICE_PASSIVE_SCAN_START_TIME_ENHANCE = 263,
+
+ /* The second 128 bits */
+ WMI_MAX_EXT_SERVICE = 256,
+ WMI_TLV_SERVICE_SCAN_CONFIG_PER_CHANNEL = 265,
+ WMI_TLV_SERVICE_REG_CC_EXT_EVENT_SUPPORT = 281,
+ WMI_TLV_SERVICE_BIOS_SAR_SUPPORT = 326,
+ WMI_TLV_SERVICE_SUPPORT_11D_FOR_HOST_SCAN = 357,
+
+ /* The third 128 bits */
+ WMI_MAX_EXT2_SERVICE = 384
+};
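+
+/* Illustrative sketch only, not part of the original header: firmware
+ * advertises these services as a bitmap, so the typical consumer-side check
+ * is a test_bit() on the service map filled from the service ready events;
+ * the svc_map parameter name here is an assumption.
+ */
+static inline bool ath11k_wmi_example_has_service(const unsigned long *svc_map)
+{
+ return test_bit(WMI_TLV_SERVICE_STA_TWT, svc_map);
+}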
+
+enum {
+ WMI_SMPS_FORCED_MODE_NONE = 0,
+ WMI_SMPS_FORCED_MODE_DISABLED,
+ WMI_SMPS_FORCED_MODE_STATIC,
+ WMI_SMPS_FORCED_MODE_DYNAMIC
+};
+
+#define WMI_TPC_CHAINMASK_CONFIG_BAND_2G 0
+#define WMI_TPC_CHAINMASK_CONFIG_BAND_5G 1
+#define WMI_NUM_SUPPORTED_BAND_MAX 2
+
+#define WMI_PEER_MIMO_PS_STATE 0x1
+#define WMI_PEER_AMPDU 0x2
+#define WMI_PEER_AUTHORIZE 0x3
+#define WMI_PEER_CHWIDTH 0x4
+#define WMI_PEER_NSS 0x5
+#define WMI_PEER_USE_4ADDR 0x6
+#define WMI_PEER_MEMBERSHIP 0x7
+#define WMI_PEER_USERPOS 0x8
+#define WMI_PEER_CRIT_PROTO_HINT_ENABLED 0x9
+#define WMI_PEER_TX_FAIL_CNT_THR 0xA
+#define WMI_PEER_SET_HW_RETRY_CTS2S 0xB
+#define WMI_PEER_IBSS_ATIM_WINDOW_LENGTH 0xC
+#define WMI_PEER_PHYMODE 0xD
+#define WMI_PEER_USE_FIXED_PWR 0xE
+#define WMI_PEER_PARAM_FIXED_RATE 0xF
+#define WMI_PEER_SET_MU_WHITELIST 0x10
+#define WMI_PEER_SET_MAX_TX_RATE 0x11
+#define WMI_PEER_SET_MIN_TX_RATE 0x12
+#define WMI_PEER_SET_DEFAULT_ROUTING 0x13
+
+/* slot time long */
+#define WMI_VDEV_SLOT_TIME_LONG 0x1
+/* slot time short */
+#define WMI_VDEV_SLOT_TIME_SHORT 0x2
+/* preamble long */
+#define WMI_VDEV_PREAMBLE_LONG 0x1
+/* preamble short */
+#define WMI_VDEV_PREAMBLE_SHORT 0x2
+
+enum wmi_peer_smps_state {
+ WMI_PEER_SMPS_PS_NONE = 0x0,
+ WMI_PEER_SMPS_STATIC = 0x1,
+ WMI_PEER_SMPS_DYNAMIC = 0x2
+};
+
+enum wmi_peer_chwidth {
+ WMI_PEER_CHWIDTH_20MHZ = 0,
+ WMI_PEER_CHWIDTH_40MHZ = 1,
+ WMI_PEER_CHWIDTH_80MHZ = 2,
+ WMI_PEER_CHWIDTH_160MHZ = 3,
+};
+
+enum wmi_beacon_gen_mode {
+ WMI_BEACON_STAGGERED_MODE = 0,
+ WMI_BEACON_BURST_MODE = 1
+};
+
+enum wmi_direct_buffer_module {
+ WMI_DIRECT_BUF_SPECTRAL = 0,
+ WMI_DIRECT_BUF_CFR = 1,
+
+ /* keep it last */
+ WMI_DIRECT_BUF_MAX
+};
+
+/* enum wmi_nss_ratio - NSS ratio received from firmware during the service
+ * ready ext event
+ * WMI_NSS_RATIO_1BY2_NSS - max NSS at 160 MHz is half the max NSS at 80 MHz
+ * WMI_NSS_RATIO_3BY4_NSS - max NSS at 160 MHz is 3/4 of the max NSS at 80 MHz
+ * WMI_NSS_RATIO_1_NSS - max NSS at 160 MHz equals the max NSS at 80 MHz
+ * WMI_NSS_RATIO_2_NSS - max NSS at 160 MHz is twice the max NSS at 80 MHz
+ */
+enum wmi_nss_ratio {
+ WMI_NSS_RATIO_1BY2_NSS = 0x0,
+ WMI_NSS_RATIO_3BY4_NSS = 0x1,
+ WMI_NSS_RATIO_1_NSS = 0x2,
+ WMI_NSS_RATIO_2_NSS = 0x3,
+};
+
+enum wmi_dtim_policy {
+ WMI_DTIM_POLICY_IGNORE = 1,
+ WMI_DTIM_POLICY_NORMAL = 2,
+ WMI_DTIM_POLICY_STICK = 3,
+ WMI_DTIM_POLICY_AUTO = 4,
+};
+
+struct wmi_host_pdev_band_to_mac {
+ u32 pdev_id;
+ u32 start_freq;
+ u32 end_freq;
+};
+
+struct ath11k_ppe_threshold {
+ u32 numss_m1;
+ u32 ru_bit_mask;
+ u32 ppet16_ppet8_ru3_ru0[PSOC_HOST_MAX_NUM_SS];
+};
+
+struct ath11k_service_ext_param {
+ u32 default_conc_scan_config_bits;
+ u32 default_fw_config_bits;
+ struct ath11k_ppe_threshold ppet;
+ u32 he_cap_info;
+ u32 mpdu_density;
+ u32 max_bssid_rx_filters;
+ u32 num_hw_modes;
+ u32 num_phy;
+};
+
+struct ath11k_hw_mode_caps {
+ u32 hw_mode_id;
+ u32 phy_id_map;
+ u32 hw_mode_config_type;
+};
+
+#define PSOC_HOST_MAX_PHY_SIZE (3)
+#define ATH11K_11B_SUPPORT BIT(0)
+#define ATH11K_11G_SUPPORT BIT(1)
+#define ATH11K_11A_SUPPORT BIT(2)
+#define ATH11K_11N_SUPPORT BIT(3)
+#define ATH11K_11AC_SUPPORT BIT(4)
+#define ATH11K_11AX_SUPPORT BIT(5)
+
+struct ath11k_hal_reg_capabilities_ext {
+ u32 phy_id;
+ u32 eeprom_reg_domain;
+ u32 eeprom_reg_domain_ext;
+ u32 regcap1;
+ u32 regcap2;
+ u32 wireless_modes;
+ u32 low_2ghz_chan;
+ u32 high_2ghz_chan;
+ u32 low_5ghz_chan;
+ u32 high_5ghz_chan;
+};
+
+#define WMI_HOST_MAX_PDEV 3
+
+struct wlan_host_mem_chunk {
+ u32 tlv_header;
+ u32 req_id;
+ u32 ptr;
+ u32 size;
+} __packed;
+
+struct wmi_host_mem_chunk {
+ void *vaddr;
+ dma_addr_t paddr;
+ u32 len;
+ u32 req_id;
+};
+
+struct wmi_init_cmd_param {
+ u32 tlv_header;
+ struct target_resource_config *res_cfg;
+ u8 num_mem_chunks;
+ struct wmi_host_mem_chunk *mem_chunks;
+ u32 hw_mode_id;
+ u32 num_band_to_mac;
+ struct wmi_host_pdev_band_to_mac band_to_mac[WMI_HOST_MAX_PDEV];
+};
+
+struct wmi_pdev_band_to_mac {
+ u32 tlv_header;
+ u32 pdev_id;
+ u32 start_freq;
+ u32 end_freq;
+} __packed;
+
+struct wmi_pdev_set_hw_mode_cmd_param {
+ u32 tlv_header;
+ u32 pdev_id;
+ u32 hw_mode_index;
+ u32 num_band_to_mac;
+} __packed;
+
+struct wmi_ppe_threshold {
+ u32 numss_m1; /* NSS - 1 */
+ union {
+ u32 ru_count;
+ u32 ru_mask;
+ } __packed;
+ u32 ppet16_ppet8_ru3_ru0[WMI_MAX_NUM_SS];
+} __packed;
+
+#define HW_BD_INFO_SIZE 5
+
+struct wmi_abi_version {
+ u32 abi_version_0;
+ u32 abi_version_1;
+ u32 abi_version_ns_0;
+ u32 abi_version_ns_1;
+ u32 abi_version_ns_2;
+ u32 abi_version_ns_3;
+} __packed;
+
+struct wmi_init_cmd {
+ u32 tlv_header;
+ struct wmi_abi_version host_abi_vers;
+ u32 num_host_mem_chunks;
+} __packed;
+
+#define WMI_RSRC_CFG_FLAG1_BSS_CHANNEL_INFO_64 BIT(5)
+#define WMI_RSRC_CFG_FLAG2_CALC_NEXT_DTIM_COUNT_SET BIT(9)
+#define WMI_RSRC_CFG_FLAG1_ACK_RSSI BIT(18)
+
+#define WMI_CFG_HOST_SERVICE_FLAG_REG_CC_EXT 4
+
+struct wmi_resource_config {
+ u32 tlv_header;
+ u32 num_vdevs;
+ u32 num_peers;
+ u32 num_offload_peers;
+ u32 num_offload_reorder_buffs;
+ u32 num_peer_keys;
+ u32 num_tids;
+ u32 ast_skid_limit;
+ u32 tx_chain_mask;
+ u32 rx_chain_mask;
+ u32 rx_timeout_pri[4];
+ u32 rx_decap_mode;
+ u32 scan_max_pending_req;
+ u32 bmiss_offload_max_vdev;
+ u32 roam_offload_max_vdev;
+ u32 roam_offload_max_ap_profiles;
+ u32 num_mcast_groups;
+ u32 num_mcast_table_elems;
+ u32 mcast2ucast_mode;
+ u32 tx_dbg_log_size;
+ u32 num_wds_entries;
+ u32 dma_burst_size;
+ u32 mac_aggr_delim;
+ u32 rx_skip_defrag_timeout_dup_detection_check;
+ u32 vow_config;
+ u32 gtk_offload_max_vdev;
+ u32 num_msdu_desc;
+ u32 max_frag_entries;
+ u32 num_tdls_vdevs;
+ u32 num_tdls_conn_table_entries;
+ u32 beacon_tx_offload_max_vdev;
+ u32 num_multicast_filter_entries;
+ u32 num_wow_filters;
+ u32 num_keep_alive_pattern;
+ u32 keep_alive_pattern_size;
+ u32 max_tdls_concurrent_sleep_sta;
+ u32 max_tdls_concurrent_buffer_sta;
+ u32 wmi_send_separate;
+ u32 num_ocb_vdevs;
+ u32 num_ocb_channels;
+ u32 num_ocb_schedules;
+ u32 flag1;
+ u32 smart_ant_cap;
+ u32 bk_minfree;
+ u32 be_minfree;
+ u32 vi_minfree;
+ u32 vo_minfree;
+ u32 alloc_frag_desc_for_data_pkt;
+ u32 num_ns_ext_tuples_cfg;
+ u32 bpf_instruction_size;
+ u32 max_bssid_rx_filters;
+ u32 use_pdev_id;
+ u32 max_num_dbs_scan_duty_cycle;
+ u32 max_num_group_keys;
+ u32 peer_map_unmap_v2_support;
+ u32 sched_params;
+ u32 twt_ap_pdev_count;
+ u32 twt_ap_sta_count;
+ u32 max_nlo_ssids;
+ u32 num_pkt_filters;
+ u32 num_max_sta_vdevs;
+ u32 max_bssid_indicator;
+ u32 ul_resp_config;
+ u32 msdu_flow_override_config0;
+ u32 msdu_flow_override_config1;
+ u32 flags2;
+ u32 host_service_flags;
+ u32 max_rnr_neighbours;
+ u32 ema_max_vap_cnt;
+ u32 ema_max_profile_period;
+} __packed;
+
+struct wmi_service_ready_event {
+ u32 fw_build_vers;
+ struct wmi_abi_version fw_abi_vers;
+ u32 phy_capability;
+ u32 max_frag_entry;
+ u32 num_rf_chains;
+ u32 ht_cap_info;
+ u32 vht_cap_info;
+ u32 vht_supp_mcs;
+ u32 hw_min_tx_power;
+ u32 hw_max_tx_power;
+ u32 sys_cap_info;
+ u32 min_pkt_size_enable;
+ u32 max_bcn_ie_size;
+ u32 num_mem_reqs;
+ u32 max_num_scan_channels;
+ u32 hw_bd_id;
+ u32 hw_bd_info[HW_BD_INFO_SIZE];
+ u32 max_supported_macs;
+ u32 wmi_fw_sub_feat_caps;
+ u32 num_dbs_hw_modes;
+ /* txrx_chainmask
+ * [7:0] - 2G band tx chain mask
+ * [15:8] - 2G band rx chain mask
+ * [23:16] - 5G band tx chain mask
+ * [31:24] - 5G band rx chain mask
+ */
+ u32 txrx_chainmask;
+ u32 default_dbs_hw_mode_index;
+ u32 num_msdu_desc;
+} __packed;
+
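+/* With WMI_MAX_SERVICE = 128 and sizeof(u32) = 4, WMI_SERVICE_BM_SIZE below
+ * evaluates to (128 + 4 - 1) / 4 = 32 u32 words, and WMI_SERVICE_EXT_BM_SIZE
+ * to 4 * sizeof(u32) = 16 bytes, i.e. one 128-bit service segment.
+ */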
+#define WMI_SERVICE_BM_SIZE ((WMI_MAX_SERVICE + sizeof(u32) - 1) / sizeof(u32))
+
+#define WMI_SERVICE_SEGMENT_BM_SIZE32 4 /* 4x u32 = 128 bits */
+#define WMI_SERVICE_EXT_BM_SIZE (WMI_SERVICE_SEGMENT_BM_SIZE32 * sizeof(u32))
+#define WMI_AVAIL_SERVICE_BITS_IN_SIZE32 32
+#define WMI_SERVICE_BITS_IN_SIZE32 4
+
+struct wmi_service_ready_ext_event {
+ u32 default_conc_scan_config_bits;
+ u32 default_fw_config_bits;
+ struct wmi_ppe_threshold ppet;
+ u32 he_cap_info;
+ u32 mpdu_density;
+ u32 max_bssid_rx_filters;
+ u32 fw_build_vers_ext;
+ u32 max_nlo_ssids;
+ u32 max_bssid_indicator;
+ u32 he_cap_info_ext;
+} __packed;
+
+struct wmi_soc_mac_phy_hw_mode_caps {
+ u32 num_hw_modes;
+ u32 num_chainmask_tables;
+} __packed;
+
+struct wmi_hw_mode_capabilities {
+ u32 tlv_header;
+ u32 hw_mode_id;
+ u32 phy_id_map;
+ u32 hw_mode_config_type;
+} __packed;
+
+#define WMI_MAX_HECAP_PHY_SIZE (3)
+#define WMI_NSS_RATIO_ENABLE_DISABLE_BITPOS BIT(0)
+#define WMI_NSS_RATIO_ENABLE_DISABLE_GET(_val) \
+ FIELD_GET(WMI_NSS_RATIO_ENABLE_DISABLE_BITPOS, _val)
+#define WMI_NSS_RATIO_INFO_BITPOS GENMASK(4, 1)
+#define WMI_NSS_RATIO_INFO_GET(_val) \
+ FIELD_GET(WMI_NSS_RATIO_INFO_BITPOS, _val)
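+
+/* Illustrative sketch only, not part of the original header: decoding the
+ * nss_ratio field of struct wmi_mac_phy_capabilities below with the helpers
+ * above; the fallback used when the ratio info is flagged invalid is an
+ * assumption.
+ */
+static inline enum wmi_nss_ratio ath11k_wmi_example_nss_ratio(u32 nss_ratio)
+{
+ if (!WMI_NSS_RATIO_ENABLE_DISABLE_GET(nss_ratio))
+ return WMI_NSS_RATIO_1_NSS;
+
+ return (enum wmi_nss_ratio)WMI_NSS_RATIO_INFO_GET(nss_ratio);
+}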
+
+struct wmi_mac_phy_capabilities {
+ u32 hw_mode_id;
+ u32 pdev_id;
+ u32 phy_id;
+ u32 supported_flags;
+ u32 supported_bands;
+ u32 ampdu_density;
+ u32 max_bw_supported_2g;
+ u32 ht_cap_info_2g;
+ u32 vht_cap_info_2g;
+ u32 vht_supp_mcs_2g;
+ u32 he_cap_info_2g;
+ u32 he_supp_mcs_2g;
+ u32 tx_chain_mask_2g;
+ u32 rx_chain_mask_2g;
+ u32 max_bw_supported_5g;
+ u32 ht_cap_info_5g;
+ u32 vht_cap_info_5g;
+ u32 vht_supp_mcs_5g;
+ u32 he_cap_info_5g;
+ u32 he_supp_mcs_5g;
+ u32 tx_chain_mask_5g;
+ u32 rx_chain_mask_5g;
+ u32 he_cap_phy_info_2g[WMI_MAX_HECAP_PHY_SIZE];
+ u32 he_cap_phy_info_5g[WMI_MAX_HECAP_PHY_SIZE];
+ struct wmi_ppe_threshold he_ppet2g;
+ struct wmi_ppe_threshold he_ppet5g;
+ u32 chainmask_table_id;
+ u32 lmac_id;
+ u32 he_cap_info_2g_ext;
+ u32 he_cap_info_5g_ext;
+ u32 he_cap_info_internal;
+ u32 wireless_modes;
+ u32 low_2ghz_chan_freq;
+ u32 high_2ghz_chan_freq;
+ u32 low_5ghz_chan_freq;
+ u32 high_5ghz_chan_freq;
+ u32 nss_ratio;
+} __packed;
+
+struct wmi_hal_reg_capabilities_ext {
+ u32 tlv_header;
+ u32 phy_id;
+ u32 eeprom_reg_domain;
+ u32 eeprom_reg_domain_ext;
+ u32 regcap1;
+ u32 regcap2;
+ u32 wireless_modes;
+ u32 low_2ghz_chan;
+ u32 high_2ghz_chan;
+ u32 low_5ghz_chan;
+ u32 high_5ghz_chan;
+} __packed;
+
+struct wmi_soc_hal_reg_capabilities {
+ u32 num_phy;
+} __packed;
+
+/* Two-word representation of a MAC address */
+struct wmi_mac_addr {
+ union {
+ u8 addr[6];
+ struct {
+ u32 word0;
+ u32 word1;
+ } __packed;
+ } __packed;
+} __packed;
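+
+/* Illustrative sketch only, not part of the original header: the union
+ * above lets the host fill the address bytewise while firmware reads two
+ * 32-bit words; ether_addr_copy() from <linux/etherdevice.h> is the usual
+ * way to populate it.
+ */
+static inline void ath11k_wmi_example_set_mac(struct wmi_mac_addr *dst,
+ const u8 *addr)
+{
+ ether_addr_copy(dst->addr, addr);
+}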
+
+struct wmi_dma_ring_capabilities {
+ u32 tlv_header;
+ u32 pdev_id;
+ u32 module_id;
+ u32 min_elem;
+ u32 min_buf_sz;
+ u32 min_buf_align;
+} __packed;
+
+struct wmi_ready_event_min {
+ struct wmi_abi_version fw_abi_vers;
+ struct wmi_mac_addr mac_addr;
+ u32 status;
+ u32 num_dscp_table;
+ u32 num_extra_mac_addr;
+ u32 num_total_peers;
+ u32 num_extra_peers;
+} __packed;
+
+struct wmi_ready_event {
+ struct wmi_ready_event_min ready_event_min;
+ u32 max_ast_index;
+ u32 pktlog_defs_checksum;
+} __packed;
+
+struct wmi_service_available_event {
+ u32 wmi_service_segment_offset;
+ u32 wmi_service_segment_bitmap[WMI_SERVICE_SEGMENT_BM_SIZE32];
+} __packed;
+
+struct ath11k_pdev_wmi {
+ struct ath11k_wmi_base *wmi_ab;
+ enum ath11k_htc_ep_id eid;
+ const struct wmi_peer_flags_map *peer_flags;
+ u32 rx_decap_mode;
+ wait_queue_head_t tx_ce_desc_wq;
+};
+
+struct vdev_create_params {
+ u8 if_id;
+ u32 type;
+ u32 subtype;
+ struct {
+ u8 tx;
+ u8 rx;
+ } chains[NUM_NL80211_BANDS];
+ u32 pdev_id;
+ u32 mbssid_flags;
+ u32 mbssid_tx_vdev_id;
+};
+
+struct wmi_vdev_create_cmd {
+ u32 tlv_header;
+ u32 vdev_id;
+ u32 vdev_type;
+ u32 vdev_subtype;
+ struct wmi_mac_addr vdev_macaddr;
+ u32 num_cfg_txrx_streams;
+ u32 pdev_id;
+ u32 mbssid_flags;
+ u32 mbssid_tx_vdev_id;
+} __packed;
+
+struct wmi_vdev_txrx_streams {
+ u32 tlv_header;
+ u32 band;
+ u32 supported_tx_streams;
+ u32 supported_rx_streams;
+} __packed;
+
+struct wmi_vdev_delete_cmd {
+ u32 tlv_header;
+ u32 vdev_id;
+} __packed;
+
+struct wmi_vdev_up_cmd {
+ u32 tlv_header;
+ u32 vdev_id;
+ u32 vdev_assoc_id;
+ struct wmi_mac_addr vdev_bssid;
+ struct wmi_mac_addr tx_vdev_bssid;
+ u32 nontx_profile_idx;
+ u32 nontx_profile_cnt;
+} __packed;
+
+struct wmi_vdev_stop_cmd {
+ u32 tlv_header;
+ u32 vdev_id;
+} __packed;
+
+struct wmi_vdev_down_cmd {
+ u32 tlv_header;
+ u32 vdev_id;
+} __packed;
+
+#define WMI_VDEV_START_HIDDEN_SSID BIT(0)
+#define WMI_VDEV_START_PMF_ENABLED BIT(1)
+#define WMI_VDEV_START_LDPC_RX_ENABLED BIT(3)
+#define WMI_VDEV_START_HW_ENCRYPTION_DISABLED BIT(4)
+
+struct wmi_ssid {
+ u32 ssid_len;
+ u32 ssid[8];
+} __packed;
+
+#define ATH11K_VDEV_SETUP_TIMEOUT_HZ (1 * HZ)
+
+struct wmi_vdev_start_request_cmd {
+ u32 tlv_header;
+ u32 vdev_id;
+ u32 requestor_id;
+ u32 beacon_interval;
+ u32 dtim_period;
+ u32 flags;
+ struct wmi_ssid ssid;
+ u32 bcn_tx_rate;
+ u32 bcn_txpower;
+ u32 num_noa_descriptors;
+ u32 disable_hw_ack;
+ u32 preferred_tx_streams;
+ u32 preferred_rx_streams;
+ u32 he_ops;
+ u32 cac_duration_ms;
+ u32 regdomain;
+ u32 min_data_rate;
+ u32 mbssid_flags;
+ u32 mbssid_tx_vdev_id;
+} __packed;
+
+#define MGMT_TX_DL_FRM_LEN 64
+#define WMI_MAC_MAX_SSID_LENGTH 32
+struct mac_ssid {
+ u8 length;
+ u8 mac_ssid[WMI_MAC_MAX_SSID_LENGTH];
+} __packed;
+
+struct wmi_p2p_noa_descriptor {
+ u32 type_count;
+ u32 duration;
+ u32 interval;
+ u32 start_time;
+};
+
+struct channel_param {
+ u8 chan_id;
+ u8 pwr;
+ u32 mhz;
+ u32 half_rate:1,
+ quarter_rate:1,
+ dfs_set:1,
+ dfs_set_cfreq2:1,
+ is_chan_passive:1,
+ allow_ht:1,
+ allow_vht:1,
+ allow_he:1,
+ set_agile:1,
+ psc_channel:1;
+ u32 phy_mode;
+ u32 cfreq1;
+ u32 cfreq2;
+ char maxpower;
+ char minpower;
+ char maxregpower;
+ u8 antennamax;
+ u8 reg_class_id;
+} __packed;
+
+enum wmi_phy_mode {
+ MODE_11A = 0,
+ MODE_11G = 1, /* 11b/g Mode */
+ MODE_11B = 2, /* 11b Mode */
+ MODE_11GONLY = 3, /* 11g only Mode */
+ MODE_11NA_HT20 = 4,
+ MODE_11NG_HT20 = 5,
+ MODE_11NA_HT40 = 6,
+ MODE_11NG_HT40 = 7,
+ MODE_11AC_VHT20 = 8,
+ MODE_11AC_VHT40 = 9,
+ MODE_11AC_VHT80 = 10,
+ MODE_11AC_VHT20_2G = 11,
+ MODE_11AC_VHT40_2G = 12,
+ MODE_11AC_VHT80_2G = 13,
+ MODE_11AC_VHT80_80 = 14,
+ MODE_11AC_VHT160 = 15,
+ MODE_11AX_HE20 = 16,
+ MODE_11AX_HE40 = 17,
+ MODE_11AX_HE80 = 18,
+ MODE_11AX_HE80_80 = 19,
+ MODE_11AX_HE160 = 20,
+ MODE_11AX_HE20_2G = 21,
+ MODE_11AX_HE40_2G = 22,
+ MODE_11AX_HE80_2G = 23,
+ MODE_UNKNOWN = 24,
+ MODE_MAX = 24
+};
+
+static inline const char *ath11k_wmi_phymode_str(enum wmi_phy_mode mode)
+{
+ switch (mode) {
+ case MODE_11A:
+ return "11a";
+ case MODE_11G:
+ return "11g";
+ case MODE_11B:
+ return "11b";
+ case MODE_11GONLY:
+ return "11gonly";
+ case MODE_11NA_HT20:
+ return "11na-ht20";
+ case MODE_11NG_HT20:
+ return "11ng-ht20";
+ case MODE_11NA_HT40:
+ return "11na-ht40";
+ case MODE_11NG_HT40:
+ return "11ng-ht40";
+ case MODE_11AC_VHT20:
+ return "11ac-vht20";
+ case MODE_11AC_VHT40:
+ return "11ac-vht40";
+ case MODE_11AC_VHT80:
+ return "11ac-vht80";
+ case MODE_11AC_VHT160:
+ return "11ac-vht160";
+ case MODE_11AC_VHT80_80:
+ return "11ac-vht80+80";
+ case MODE_11AC_VHT20_2G:
+ return "11ac-vht20-2g";
+ case MODE_11AC_VHT40_2G:
+ return "11ac-vht40-2g";
+ case MODE_11AC_VHT80_2G:
+ return "11ac-vht80-2g";
+ case MODE_11AX_HE20:
+ return "11ax-he20";
+ case MODE_11AX_HE40:
+ return "11ax-he40";
+ case MODE_11AX_HE80:
+ return "11ax-he80";
+ case MODE_11AX_HE80_80:
+ return "11ax-he80+80";
+ case MODE_11AX_HE160:
+ return "11ax-he160";
+ case MODE_11AX_HE20_2G:
+ return "11ax-he20-2g";
+ case MODE_11AX_HE40_2G:
+ return "11ax-he40-2g";
+ case MODE_11AX_HE80_2G:
+ return "11ax-he80-2g";
+ case MODE_UNKNOWN:
+ /* skip */
+ break;
+
+ /* no default handler to allow compiler to check that the
+ * enum is fully handled
+ */
+ }
+
+ return "<unknown>";
+}
+
+struct wmi_channel_arg {
+ u32 freq;
+ u32 band_center_freq1;
+ u32 band_center_freq2;
+ bool passive;
+ bool allow_ibss;
+ bool allow_ht;
+ bool allow_vht;
+ bool ht40plus;
+ bool chan_radar;
+ bool freq2_radar;
+ bool allow_he;
+ u32 min_power;
+ u32 max_power;
+ u32 max_reg_power;
+ u32 max_antenna_gain;
+ enum wmi_phy_mode mode;
+};
+
+struct wmi_vdev_start_req_arg {
+ u32 vdev_id;
+ struct wmi_channel_arg channel;
+ u32 bcn_intval;
+ u32 dtim_period;
+ u8 *ssid;
+ u32 ssid_len;
+ u32 bcn_tx_rate;
+ u32 bcn_tx_power;
+ bool disable_hw_ack;
+ bool hidden_ssid;
+ bool pmf_enabled;
+ u32 he_ops;
+ u32 cac_duration_ms;
+ u32 regdomain;
+ u32 pref_rx_streams;
+ u32 pref_tx_streams;
+ u32 num_noa_descriptors;
+ u32 min_data_rate;
+ u32 mbssid_flags;
+ u32 mbssid_tx_vdev_id;
+};
+
+struct peer_create_params {
+ const u8 *peer_addr;
+ u32 peer_type;
+ u32 vdev_id;
+};
+
+struct peer_delete_params {
+ u8 vdev_id;
+};
+
+struct peer_flush_params {
+ u32 peer_tid_bitmap;
+ u8 vdev_id;
+};
+
+struct pdev_set_regdomain_params {
+ u16 current_rd_in_use;
+ u16 current_rd_2g;
+ u16 current_rd_5g;
+ u32 ctl_2g;
+ u32 ctl_5g;
+ u8 dfs_domain;
+ u32 pdev_id;
+};
+
+struct rx_reorder_queue_remove_params {
+ u8 *peer_macaddr;
+ u16 vdev_id;
+ u32 peer_tid_bitmap;
+};
+
+#define WMI_HOST_PDEV_ID_SOC 0xFF
+#define WMI_HOST_PDEV_ID_0 0
+#define WMI_HOST_PDEV_ID_1 1
+#define WMI_HOST_PDEV_ID_2 2
+
+#define WMI_PDEV_ID_SOC 0
+#define WMI_PDEV_ID_1ST 1
+#define WMI_PDEV_ID_2ND 2
+#define WMI_PDEV_ID_3RD 3
+
+/* Freq units in MHz */
+#define REG_RULE_START_FREQ 0x0000ffff
+#define REG_RULE_END_FREQ 0xffff0000
+#define REG_RULE_FLAGS 0x0000ffff
+#define REG_RULE_MAX_BW 0x0000ffff
+#define REG_RULE_REG_PWR 0x00ff0000
+#define REG_RULE_ANT_GAIN 0xff000000
+#define REG_RULE_PSD_INFO BIT(0)
+#define REG_RULE_PSD_EIRP 0xff0000
+
+#define WMI_VDEV_PARAM_TXBF_SU_TX_BFEE BIT(0)
+#define WMI_VDEV_PARAM_TXBF_MU_TX_BFEE BIT(1)
+#define WMI_VDEV_PARAM_TXBF_SU_TX_BFER BIT(2)
+#define WMI_VDEV_PARAM_TXBF_MU_TX_BFER BIT(3)
+
+#define HE_PHYCAP_BYTE_0 0
+#define HE_PHYCAP_BYTE_1 1
+#define HE_PHYCAP_BYTE_2 2
+#define HE_PHYCAP_BYTE_3 3
+#define HE_PHYCAP_BYTE_4 4
+
+#define HECAP_PHY_SU_BFER BIT(7)
+#define HECAP_PHY_SU_BFEE BIT(0)
+#define HECAP_PHY_MU_BFER BIT(1)
+#define HECAP_PHY_UL_MUMIMO BIT(6)
+#define HECAP_PHY_UL_MUOFDMA BIT(7)
+
+#define HECAP_PHY_SUBFMR_GET(hecap_phy) \
+ FIELD_GET(HECAP_PHY_SU_BFER, hecap_phy[HE_PHYCAP_BYTE_3])
+
+#define HECAP_PHY_SUBFME_GET(hecap_phy) \
+ FIELD_GET(HECAP_PHY_SU_BFEE, hecap_phy[HE_PHYCAP_BYTE_4])
+
+#define HECAP_PHY_MUBFMR_GET(hecap_phy) \
+ FIELD_GET(HECAP_PHY_MU_BFER, hecap_phy[HE_PHYCAP_BYTE_4])
+
+#define HECAP_PHY_ULMUMIMO_GET(hecap_phy) \
+ FIELD_GET(HECAP_PHY_UL_MUMIMO, hecap_phy[HE_PHYCAP_BYTE_2])
+
+#define HECAP_PHY_ULOFDMA_GET(hecap_phy) \
+ FIELD_GET(HECAP_PHY_UL_MUOFDMA, hecap_phy[HE_PHYCAP_BYTE_2])
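+
+/* Illustrative sketch only, not part of the original header: the GET
+ * helpers above index octets of an HE PHY capability byte array (such as
+ * the phy_cap_info octets of the HE capability element); which array the
+ * caller passes in is an assumption here.
+ */
+static inline bool ath11k_wmi_example_he_su_bfer(const u8 *hecap_phy)
+{
+ return HECAP_PHY_SUBFMR_GET(hecap_phy);
+}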
+
+#define HE_MODE_SU_TX_BFEE BIT(0)
+#define HE_MODE_SU_TX_BFER BIT(1)
+#define HE_MODE_MU_TX_BFEE BIT(2)
+#define HE_MODE_MU_TX_BFER BIT(3)
+#define HE_MODE_DL_OFDMA BIT(4)
+#define HE_MODE_UL_OFDMA BIT(5)
+#define HE_MODE_UL_MUMIMO BIT(6)
+
+#define HE_DL_MUOFDMA_ENABLE 1
+#define HE_UL_MUOFDMA_ENABLE 1
+#define HE_DL_MUMIMO_ENABLE 1
+#define HE_UL_MUMIMO_ENABLE 1
+#define HE_MU_BFEE_ENABLE 1
+#define HE_SU_BFEE_ENABLE 1
+#define HE_MU_BFER_ENABLE 1
+#define HE_SU_BFER_ENABLE 1
+
+#define HE_VHT_SOUNDING_MODE_ENABLE 1
+#define HE_SU_MU_SOUNDING_MODE_ENABLE 1
+#define HE_TRIG_NONTRIG_SOUNDING_MODE_ENABLE 1
+
+/* HE or VHT Sounding */
+#define HE_VHT_SOUNDING_MODE BIT(0)
+/* SU or MU Sounding */
+#define HE_SU_MU_SOUNDING_MODE BIT(2)
+/* Trig or Non-Trig Sounding */
+#define HE_TRIG_NONTRIG_SOUNDING_MODE BIT(3)
+
+#define WMI_TXBF_STS_CAP_OFFSET_LSB 4
+#define WMI_TXBF_STS_CAP_OFFSET_MASK 0x70
+#define WMI_BF_SOUND_DIM_OFFSET_LSB 8
+#define WMI_BF_SOUND_DIM_OFFSET_MASK 0x700
+
+struct pdev_params {
+ u32 param_id;
+ u32 param_value;
+};
+
+enum wmi_peer_type {
+ WMI_PEER_TYPE_DEFAULT = 0,
+ WMI_PEER_TYPE_BSS = 1,
+ WMI_PEER_TYPE_TDLS = 2,
+};
+
+struct wmi_peer_create_cmd {
+ u32 tlv_header;
+ u32 vdev_id;
+ struct wmi_mac_addr peer_macaddr;
+ u32 peer_type;
+} __packed;
+
+struct wmi_peer_delete_cmd {
+ u32 tlv_header;
+ u32 vdev_id;
+ struct wmi_mac_addr peer_macaddr;
+} __packed;
+
+struct wmi_peer_reorder_queue_setup_cmd {
+ u32 tlv_header;
+ u32 vdev_id;
+ struct wmi_mac_addr peer_macaddr;
+ u32 tid;
+ u32 queue_ptr_lo;
+ u32 queue_ptr_hi;
+ u32 queue_no;
+ u32 ba_window_size_valid;
+ u32 ba_window_size;
+} __packed;
+
+struct wmi_peer_reorder_queue_remove_cmd {
+ u32 tlv_header;
+ u32 vdev_id;
+ struct wmi_mac_addr peer_macaddr;
+ u32 tid_mask;
+} __packed;
+
+struct gpio_config_params {
+ u32 gpio_num;
+ u32 input;
+ u32 pull_type;
+ u32 intr_mode;
+};
+
+enum wmi_gpio_type {
+ WMI_GPIO_PULL_NONE,
+ WMI_GPIO_PULL_UP,
+ WMI_GPIO_PULL_DOWN
+};
+
+enum wmi_gpio_intr_type {
+ WMI_GPIO_INTTYPE_DISABLE,
+ WMI_GPIO_INTTYPE_RISING_EDGE,
+ WMI_GPIO_INTTYPE_FALLING_EDGE,
+ WMI_GPIO_INTTYPE_BOTH_EDGE,
+ WMI_GPIO_INTTYPE_LEVEL_LOW,
+ WMI_GPIO_INTTYPE_LEVEL_HIGH
+};
+
+enum wmi_bss_chan_info_req_type {
+ WMI_BSS_SURVEY_REQ_TYPE_READ = 1,
+ WMI_BSS_SURVEY_REQ_TYPE_READ_CLEAR,
+};
+
+struct wmi_gpio_config_cmd_param {
+ u32 tlv_header;
+ u32 gpio_num;
+ u32 input;
+ u32 pull_type;
+ u32 intr_mode;
+};
+
+struct gpio_output_params {
+ u32 gpio_num;
+ u32 set;
+};
+
+struct wmi_gpio_output_cmd_param {
+ u32 tlv_header;
+ u32 gpio_num;
+ u32 set;
+};
+
+struct set_fwtest_params {
+ u32 arg;
+ u32 value;
+};
+
+struct wmi_fwtest_set_param_cmd_param {
+ u32 tlv_header;
+ u32 param_id;
+ u32 param_value;
+};
+
+struct wmi_pdev_set_param_cmd {
+ u32 tlv_header;
+ u32 pdev_id;
+ u32 param_id;
+ u32 param_value;
+} __packed;
+
+struct wmi_pdev_set_ps_mode_cmd {
+ u32 tlv_header;
+ u32 vdev_id;
+ u32 sta_ps_mode;
+} __packed;
+
+struct wmi_pdev_suspend_cmd {
+ u32 tlv_header;
+ u32 pdev_id;
+ u32 suspend_opt;
+} __packed;
+
+struct wmi_pdev_resume_cmd {
+ u32 tlv_header;
+ u32 pdev_id;
+} __packed;
+
+struct wmi_pdev_bss_chan_info_req_cmd {
+ u32 tlv_header;
+ /* ref wmi_bss_chan_info_req_type */
+ u32 req_type;
+ u32 pdev_id;
+} __packed;
+
+struct wmi_ap_ps_peer_cmd {
+ u32 tlv_header;
+ u32 vdev_id;
+ struct wmi_mac_addr peer_macaddr;
+ u32 param;
+ u32 value;
+} __packed;
+
+struct wmi_sta_powersave_param_cmd {
+ u32 tlv_header;
+ u32 vdev_id;
+ u32 param;
+ u32 value;
+} __packed;
+
+struct wmi_pdev_set_regdomain_cmd {
+ u32 tlv_header;
+ u32 pdev_id;
+ u32 reg_domain;
+ u32 reg_domain_2g;
+ u32 reg_domain_5g;
+ u32 conformance_test_limit_2g;
+ u32 conformance_test_limit_5g;
+ u32 dfs_domain;
+} __packed;
+
+struct wmi_peer_set_param_cmd {
+ u32 tlv_header;
+ u32 vdev_id;
+ struct wmi_mac_addr peer_macaddr;
+ u32 param_id;
+ u32 param_value;
+} __packed;
+
+struct wmi_peer_flush_tids_cmd {
+ u32 tlv_header;
+ u32 vdev_id;
+ struct wmi_mac_addr peer_macaddr;
+ u32 peer_tid_bitmap;
+} __packed;
+
+struct wmi_dfs_phyerr_offload_cmd {
+ u32 tlv_header;
+ u32 pdev_id;
+} __packed;
+
+struct wmi_bcn_offload_ctrl_cmd {
+ u32 tlv_header;
+ u32 vdev_id;
+ u32 bcn_ctrl_op;
+} __packed;
+
+enum scan_dwelltime_adaptive_mode {
+ SCAN_DWELL_MODE_DEFAULT = 0,
+ SCAN_DWELL_MODE_CONSERVATIVE = 1,
+ SCAN_DWELL_MODE_MODERATE = 2,
+ SCAN_DWELL_MODE_AGGRESSIVE = 3,
+ SCAN_DWELL_MODE_STATIC = 4
+};
+
+#define WLAN_SSID_MAX_LEN 32
+
+struct element_info {
+ u32 len;
+ u8 *ptr;
+};
+
+struct wlan_ssid {
+ u8 length;
+ u8 ssid[WLAN_SSID_MAX_LEN];
+};
+
+#define WMI_IE_BITMAP_SIZE 8
+
+/* prefix used by scan requestor ids on the host */
+#define WMI_HOST_SCAN_REQUESTOR_ID_PREFIX 0xA000
+
+/* prefix used by scan request ids generated on the host */
+/* host cycles through the lower 12 bits to generate ids */
+#define WMI_HOST_SCAN_REQ_ID_PREFIX 0xA000
+
+/* Values lower than this may be refused by some firmware revisions, which
+ * then report scan completion with a "timedout" reason.
+ */
+#define WMI_SCAN_CHAN_MIN_TIME_MSEC 40
+
+/* Scan priority numbers must be sequential, starting with 0 */
+enum wmi_scan_priority {
+ WMI_SCAN_PRIORITY_VERY_LOW = 0,
+ WMI_SCAN_PRIORITY_LOW,
+ WMI_SCAN_PRIORITY_MEDIUM,
+ WMI_SCAN_PRIORITY_HIGH,
+ WMI_SCAN_PRIORITY_VERY_HIGH,
+ WMI_SCAN_PRIORITY_COUNT /* number of priorities supported */
+};
+
+enum wmi_scan_event_type {
+ WMI_SCAN_EVENT_STARTED = BIT(0),
+ WMI_SCAN_EVENT_COMPLETED = BIT(1),
+ WMI_SCAN_EVENT_BSS_CHANNEL = BIT(2),
+ WMI_SCAN_EVENT_FOREIGN_CHAN = BIT(3),
+ WMI_SCAN_EVENT_DEQUEUED = BIT(4),
+ /* possibly by high-prio scan */
+ WMI_SCAN_EVENT_PREEMPTED = BIT(5),
+ WMI_SCAN_EVENT_START_FAILED = BIT(6),
+ WMI_SCAN_EVENT_RESTARTED = BIT(7),
+ WMI_SCAN_EVENT_FOREIGN_CHAN_EXIT = BIT(8),
+ WMI_SCAN_EVENT_SUSPENDED = BIT(9),
+ WMI_SCAN_EVENT_RESUMED = BIT(10),
+ WMI_SCAN_EVENT_MAX = BIT(15),
+};
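+
+/* Sketch (illustration only): notify_scan_events in the start-scan command
+ * is assumed to carry a bitmask of the event types above that the host
+ * wants reported, e.g. start/completion plus channel transitions:
+ *
+ *    u32 notify = WMI_SCAN_EVENT_STARTED |
+ *                 WMI_SCAN_EVENT_COMPLETED |
+ *                 WMI_SCAN_EVENT_BSS_CHANNEL |
+ *                 WMI_SCAN_EVENT_FOREIGN_CHAN;
+ */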
+
+enum wmi_scan_completion_reason {
+ WMI_SCAN_REASON_COMPLETED,
+ WMI_SCAN_REASON_CANCELLED,
+ WMI_SCAN_REASON_PREEMPTED,
+ WMI_SCAN_REASON_TIMEDOUT,
+ WMI_SCAN_REASON_INTERNAL_FAILURE,
+ WMI_SCAN_REASON_MAX,
+};
+
+struct wmi_start_scan_cmd {
+ u32 tlv_header;
+ u32 scan_id;
+ u32 scan_req_id;
+ u32 vdev_id;
+ u32 scan_priority;
+ u32 notify_scan_events;
+ u32 dwell_time_active;
+ u32 dwell_time_passive;
+ u32 min_rest_time;
+ u32 max_rest_time;
+ u32 repeat_probe_time;
+ u32 probe_spacing_time;
+ u32 idle_time;
+ u32 max_scan_time;
+ u32 probe_delay;
+ u32 scan_ctrl_flags;
+ u32 burst_duration;
+ u32 num_chan;
+ u32 num_bssid;
+ u32 num_ssids;
+ u32 ie_len;
+ u32 n_probes;
+ struct wmi_mac_addr mac_addr;
+ struct wmi_mac_addr mac_mask;
+ u32 ie_bitmap[WMI_IE_BITMAP_SIZE];
+ u32 num_vendor_oui;
+ u32 scan_ctrl_flags_ext;
+ u32 dwell_time_active_2g;
+ u32 dwell_time_active_6g;
+ u32 dwell_time_passive_6g;
+ u32 scan_start_offset;
+} __packed;
+
+#define WMI_SCAN_FLAG_PASSIVE 0x1
+#define WMI_SCAN_ADD_BCAST_PROBE_REQ 0x2
+#define WMI_SCAN_ADD_CCK_RATES 0x4
+#define WMI_SCAN_ADD_OFDM_RATES 0x8
+#define WMI_SCAN_CHAN_STAT_EVENT 0x10
+#define WMI_SCAN_FILTER_PROBE_REQ 0x20
+#define WMI_SCAN_BYPASS_DFS_CHN 0x40
+#define WMI_SCAN_CONTINUE_ON_ERROR 0x80
+#define WMI_SCAN_FILTER_PROMISCUOS 0x100
+#define WMI_SCAN_FLAG_FORCE_ACTIVE_ON_DFS 0x200
+#define WMI_SCAN_ADD_TPC_IE_IN_PROBE_REQ 0x400
+#define WMI_SCAN_ADD_DS_IE_IN_PROBE_REQ 0x800
+#define WMI_SCAN_ADD_SPOOF_MAC_IN_PROBE_REQ 0x1000
+#define WMI_SCAN_OFFCHAN_MGMT_TX 0x2000
+#define WMI_SCAN_OFFCHAN_DATA_TX 0x4000
+#define WMI_SCAN_CAPTURE_PHY_ERROR 0x8000
+#define WMI_SCAN_FLAG_STRICT_PASSIVE_ON_PCHN 0x10000
+#define WMI_SCAN_FLAG_HALF_RATE_SUPPORT 0x20000
+#define WMI_SCAN_FLAG_QUARTER_RATE_SUPPORT 0x40000
+#define WMI_SCAN_RANDOM_SEQ_NO_IN_PROBE_REQ 0x80000
+#define WMI_SCAN_ENABLE_IE_WHTELIST_IN_PROBE_REQ 0x100000
+
+#define WMI_SCAN_DWELL_MODE_MASK 0x00E00000
+#define WMI_SCAN_DWELL_MODE_SHIFT 21
+#define WMI_SCAN_FLAG_EXT_PASSIVE_SCAN_START_TIME_ENHANCE 0x00000800
+
+#define WMI_SCAN_CONFIG_PER_CHANNEL_MASK GENMASK(19, 0)
+#define WMI_SCAN_CH_FLAG_SCAN_ONLY_IF_RNR_FOUND BIT(20)
+
+enum {
+ WMI_SCAN_DWELL_MODE_DEFAULT = 0,
+ WMI_SCAN_DWELL_MODE_CONSERVATIVE = 1,
+ WMI_SCAN_DWELL_MODE_MODERATE = 2,
+ WMI_SCAN_DWELL_MODE_AGGRESSIVE = 3,
+ WMI_SCAN_DWELL_MODE_STATIC = 4,
+};
+
+#define WMI_SCAN_SET_DWELL_MODE(flag, mode) \
+ ((flag) |= (((mode) << WMI_SCAN_DWELL_MODE_SHIFT) & \
+ WMI_SCAN_DWELL_MODE_MASK))
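+
+/* Usage sketch (illustration only): WMI_SCAN_SET_DWELL_MODE() ORs the
+ * adaptive dwell mode into an existing scan_ctrl_flags word, masked into
+ * bits 23:21:
+ *
+ *    u32 ctrl_flags = WMI_SCAN_ADD_CCK_RATES | WMI_SCAN_ADD_OFDM_RATES;
+ *
+ *    WMI_SCAN_SET_DWELL_MODE(ctrl_flags, WMI_SCAN_DWELL_MODE_AGGRESSIVE);
+ */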
+
+struct hint_short_ssid {
+ u32 freq_flags;
+ u32 short_ssid;
+};
+
+struct hint_bssid {
+ u32 freq_flags;
+ struct wmi_mac_addr bssid;
+};
+
+struct scan_req_params {
+ u32 scan_id;
+ u32 scan_req_id;
+ u32 vdev_id;
+ u32 pdev_id;
+ enum wmi_scan_priority scan_priority;
+ union {
+ struct {
+ u32 scan_ev_started:1,
+ scan_ev_completed:1,
+ scan_ev_bss_chan:1,
+ scan_ev_foreign_chan:1,
+ scan_ev_dequeued:1,
+ scan_ev_preempted:1,
+ scan_ev_start_failed:1,
+ scan_ev_restarted:1,
+ scan_ev_foreign_chn_exit:1,
+ scan_ev_invalid:1,
+ scan_ev_gpio_timeout:1,
+ scan_ev_suspended:1,
+ scan_ev_resumed:1;
+ };
+ u32 scan_events;
+ };
+ u32 scan_ctrl_flags_ext;
+ u32 dwell_time_active;
+ u32 dwell_time_active_2g;
+ u32 dwell_time_passive;
+ u32 dwell_time_active_6g;
+ u32 dwell_time_passive_6g;
+ u32 min_rest_time;
+ u32 max_rest_time;
+ u32 repeat_probe_time;
+ u32 probe_spacing_time;
+ u32 idle_time;
+ u32 max_scan_time;
+ u32 probe_delay;
+ union {
+ struct {
+ u32 scan_f_passive:1,
+ scan_f_bcast_probe:1,
+ scan_f_cck_rates:1,
+ scan_f_ofdm_rates:1,
+ scan_f_chan_stat_evnt:1,
+ scan_f_filter_prb_req:1,
+ scan_f_bypass_dfs_chn:1,
+ scan_f_continue_on_err:1,
+ scan_f_offchan_mgmt_tx:1,
+ scan_f_offchan_data_tx:1,
+ scan_f_promisc_mode:1,
+ scan_f_capture_phy_err:1,
+ scan_f_strict_passive_pch:1,
+ scan_f_half_rate:1,
+ scan_f_quarter_rate:1,
+ scan_f_force_active_dfs_chn:1,
+ scan_f_add_tpc_ie_in_probe:1,
+ scan_f_add_ds_ie_in_probe:1,
+ scan_f_add_spoofed_mac_in_probe:1,
+ scan_f_add_rand_seq_in_probe:1,
+ scan_f_en_ie_whitelist_in_probe:1,
+ scan_f_forced:1,
+ scan_f_2ghz:1,
+ scan_f_5ghz:1,
+ scan_f_80mhz:1;
+ };
+ u32 scan_flags;
+ };
+ enum scan_dwelltime_adaptive_mode adaptive_dwell_time_mode;
+ u32 burst_duration;
+ u32 num_chan;
+ u32 num_bssid;
+ u32 num_ssids;
+ u32 n_probes;
+ u32 *chan_list;
+ u32 notify_scan_events;
+ struct wlan_ssid ssid[WLAN_SCAN_PARAMS_MAX_SSID];
+ struct wmi_mac_addr bssid_list[WLAN_SCAN_PARAMS_MAX_BSSID];
+ struct element_info extraie;
+ struct element_info htcap;
+ struct element_info vhtcap;
+ u32 num_hint_s_ssid;
+ u32 num_hint_bssid;
+ struct hint_short_ssid hint_s_ssid[WLAN_SCAN_MAX_HINT_S_SSID];
+ struct hint_bssid hint_bssid[WLAN_SCAN_MAX_HINT_BSSID];
+ struct wmi_mac_addr mac_addr;
+ struct wmi_mac_addr mac_mask;
+};
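+
+/* Sketch: the anonymous unions above let a caller either set individual
+ * bits by name or assign the whole word at once; both views alias the
+ * same storage. For example (hypothetical values):
+ *
+ *    struct scan_req_params params = {};
+ *
+ *    params.scan_f_passive = 1;      // one bit of params.scan_flags
+ *    params.scan_events = 0;         // clears every scan_ev_* bit
+ */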
+
+struct wmi_ssid_arg {
+ int len;
+ const u8 *ssid;
+};
+
+struct wmi_bssid_arg {
+ const u8 *bssid;
+};
+
+struct wmi_start_scan_arg {
+ u32 scan_id;
+ u32 scan_req_id;
+ u32 vdev_id;
+ u32 scan_priority;
+ u32 notify_scan_events;
+ u32 dwell_time_active;
+ u32 dwell_time_passive;
+ u32 min_rest_time;
+ u32 max_rest_time;
+ u32 repeat_probe_time;
+ u32 probe_spacing_time;
+ u32 idle_time;
+ u32 max_scan_time;
+ u32 probe_delay;
+ u32 scan_ctrl_flags;
+
+ u32 ie_len;
+ u32 n_channels;
+ u32 n_ssids;
+ u32 n_bssids;
+
+ u8 ie[WLAN_SCAN_PARAMS_MAX_IE_LEN];
+ u32 channels[64];
+ struct wmi_ssid_arg ssids[WLAN_SCAN_PARAMS_MAX_SSID];
+ struct wmi_bssid_arg bssids[WLAN_SCAN_PARAMS_MAX_BSSID];
+};
+
+#define WMI_SCAN_STOP_ONE 0x00000000
+#define WMI_SCN_STOP_VAP_ALL 0x01000000
+#define WMI_SCAN_STOP_ALL 0x04000000
+
+/* Prefix 0xA000 indicates that the scan request
+ * is triggered by the host
+ */
+#define ATH11K_SCAN_ID 0xA000
+
+enum scan_cancel_req_type {
+ WLAN_SCAN_CANCEL_SINGLE = 1,
+ WLAN_SCAN_CANCEL_VDEV_ALL,
+ WLAN_SCAN_CANCEL_PDEV_ALL,
+};
+
+struct scan_cancel_param {
+ u32 requester;
+ u32 scan_id;
+ enum scan_cancel_req_type req_type;
+ u32 vdev_id;
+ u32 pdev_id;
+};
+
+struct wmi_bcn_send_from_host_cmd {
+ u32 tlv_header;
+ u32 vdev_id;
+ u32 data_len;
+ union {
+ u32 frag_ptr;
+ u32 frag_ptr_lo;
+ };
+ u32 frame_ctrl;
+ u32 dtim_flag;
+ u32 bcn_antenna;
+ u32 frag_ptr_hi;
+};
+
+#define WMI_CHAN_INFO_MODE GENMASK(5, 0)
+#define WMI_CHAN_INFO_HT40_PLUS BIT(6)
+#define WMI_CHAN_INFO_PASSIVE BIT(7)
+#define WMI_CHAN_INFO_ADHOC_ALLOWED BIT(8)
+#define WMI_CHAN_INFO_AP_DISABLED BIT(9)
+#define WMI_CHAN_INFO_DFS BIT(10)
+#define WMI_CHAN_INFO_ALLOW_HT BIT(11)
+#define WMI_CHAN_INFO_ALLOW_VHT BIT(12)
+#define WMI_CHAN_INFO_CHAN_CHANGE_CAUSE_CSA BIT(13)
+#define WMI_CHAN_INFO_HALF_RATE BIT(14)
+#define WMI_CHAN_INFO_QUARTER_RATE BIT(15)
+#define WMI_CHAN_INFO_DFS_FREQ2 BIT(16)
+#define WMI_CHAN_INFO_ALLOW_HE BIT(17)
+#define WMI_CHAN_INFO_PSC BIT(18)
+
+#define WMI_CHAN_REG_INFO1_MIN_PWR GENMASK(7, 0)
+#define WMI_CHAN_REG_INFO1_MAX_PWR GENMASK(15, 8)
+#define WMI_CHAN_REG_INFO1_MAX_REG_PWR GENMASK(23, 16)
+#define WMI_CHAN_REG_INFO1_REG_CLS GENMASK(31, 24)
+
+#define WMI_CHAN_REG_INFO2_ANT_MAX GENMASK(7, 0)
+#define WMI_CHAN_REG_INFO2_MAX_TX_PWR GENMASK(15, 8)
+
+struct wmi_channel {
+ u32 tlv_header;
+ u32 mhz;
+ u32 band_center_freq1;
+ u32 band_center_freq2;
+ u32 info;
+ u32 reg_info_1;
+ u32 reg_info_2;
+} __packed;
+
+struct wmi_mgmt_params {
+ void *tx_frame;
+ u16 frm_len;
+ u8 vdev_id;
+ u16 chanfreq;
+ void *pdata;
+ u16 desc_id;
+ u8 *macaddr;
+};
+
+enum wmi_sta_ps_mode {
+ WMI_STA_PS_MODE_DISABLED = 0,
+ WMI_STA_PS_MODE_ENABLED = 1,
+};
+
+#define WMI_SMPS_MASK_LOWER_16BITS 0xFF
+#define WMI_SMPS_MASK_UPPER_3BITS 0x7
+#define WMI_SMPS_PARAM_VALUE_SHIFT 29
+
+#define ATH11K_WMI_FW_HANG_ASSERT_TYPE 1
+#define ATH11K_WMI_FW_HANG_DELAY 0
+
+/* type: 0 = unused, 1 = assert, 2 = do not respond to commands
+ * (used to test command-timeout detection)
+ * delay_time_ms: delay in milliseconds before the simulated hang
+ */
+
+struct wmi_force_fw_hang_cmd {
+ u32 tlv_header;
+ u32 type;
+ u32 delay_time_ms;
+};
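+
+/* Usage sketch (illustration only, TLV header setup omitted): requesting
+ * an immediate firmware assert for debugging, using the defines above:
+ *
+ *    struct wmi_force_fw_hang_cmd cmd = {
+ *        .type = ATH11K_WMI_FW_HANG_ASSERT_TYPE,
+ *        .delay_time_ms = ATH11K_WMI_FW_HANG_DELAY,
+ *    };
+ */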
+
+struct wmi_vdev_set_param_cmd {
+ u32 tlv_header;
+ u32 vdev_id;
+ u32 param_id;
+ u32 param_value;
+} __packed;
+
+enum wmi_stats_id {
+ WMI_REQUEST_PEER_STAT = BIT(0),
+ WMI_REQUEST_AP_STAT = BIT(1),
+ WMI_REQUEST_PDEV_STAT = BIT(2),
+ WMI_REQUEST_VDEV_STAT = BIT(3),
+ WMI_REQUEST_BCNFLT_STAT = BIT(4),
+ WMI_REQUEST_VDEV_RATE_STAT = BIT(5),
+ WMI_REQUEST_INST_STAT = BIT(6),
+ WMI_REQUEST_MIB_STAT = BIT(7),
+ WMI_REQUEST_RSSI_PER_CHAIN_STAT = BIT(8),
+ WMI_REQUEST_CONGESTION_STAT = BIT(9),
+ WMI_REQUEST_PEER_EXTD_STAT = BIT(10),
+ WMI_REQUEST_BCN_STAT = BIT(11),
+ WMI_REQUEST_BCN_STAT_RESET = BIT(12),
+ WMI_REQUEST_PEER_EXTD2_STAT = BIT(13),
+};
+
+struct wmi_request_stats_cmd {
+ u32 tlv_header;
+ enum wmi_stats_id stats_id;
+ u32 vdev_id;
+ struct wmi_mac_addr peer_macaddr;
+ u32 pdev_id;
+} __packed;
+
+struct wmi_get_pdev_temperature_cmd {
+ u32 tlv_header;
+ u32 param;
+ u32 pdev_id;
+} __packed;
+
+struct wmi_ftm_seg_hdr {
+ u32 len;
+ u32 msgref;
+ u32 segmentinfo;
+ u32 pdev_id;
+} __packed;
+
+struct wmi_ftm_cmd {
+ u32 tlv_header;
+ struct wmi_ftm_seg_hdr seg_hdr;
+ u8 data[];
+} __packed;
+
+struct wmi_ftm_event_msg {
+ struct wmi_ftm_seg_hdr seg_hdr;
+ u8 data[];
+} __packed;
+
+#define WMI_BEACON_TX_BUFFER_SIZE 512
+
+#define WMI_EMA_TMPL_IDX_SHIFT 8
+#define WMI_EMA_FIRST_TMPL_SHIFT 16
+#define WMI_EMA_LAST_TMPL_SHIFT 24
+
+struct wmi_bcn_tmpl_cmd {
+ u32 tlv_header;
+ u32 vdev_id;
+ u32 tim_ie_offset;
+ u32 buf_len;
+ u32 csa_switch_count_offset;
+ u32 ext_csa_switch_count_offset;
+ u32 csa_event_bitmap;
+ u32 mbssid_ie_offset;
+ u32 esp_ie_offset;
+ u32 csc_switch_count_offset;
+ u32 csc_event_bitmap;
+ u32 mu_edca_ie_offset;
+ u32 feature_enable_bitmap;
+ u32 ema_params;
+} __packed;
+
+struct wmi_key_seq_counter {
+ u32 key_seq_counter_l;
+ u32 key_seq_counter_h;
+} __packed;
+
+struct wmi_vdev_install_key_cmd {
+ u32 tlv_header;
+ u32 vdev_id;
+ struct wmi_mac_addr peer_macaddr;
+ u32 key_idx;
+ u32 key_flags;
+ u32 key_cipher;
+ struct wmi_key_seq_counter key_rsc_counter;
+ struct wmi_key_seq_counter key_global_rsc_counter;
+ struct wmi_key_seq_counter key_tsc_counter;
+ u8 wpi_key_rsc_counter[16];
+ u8 wpi_key_tsc_counter[16];
+ u32 key_len;
+ u32 key_txmic_len;
+ u32 key_rxmic_len;
+ u32 is_group_key_id_valid;
+ u32 group_key_id;
+
+ /* Followed by key_data containing key followed by
+ * tx mic and then rx mic
+ */
+} __packed;
+
+struct wmi_vdev_install_key_arg {
+ u32 vdev_id;
+ const u8 *macaddr;
+ u32 key_idx;
+ u32 key_flags;
+ u32 key_cipher;
+ u32 key_len;
+ u32 key_txmic_len;
+ u32 key_rxmic_len;
+ u64 key_rsc_counter;
+ const void *key_data;
+};
+
+#define WMI_MAX_SUPPORTED_RATES 128
+#define WMI_HOST_MAX_HECAP_PHY_SIZE 3
+#define WMI_HOST_MAX_HE_RATE_SET 3
+#define WMI_HECAP_TXRX_MCS_NSS_IDX_80 0
+#define WMI_HECAP_TXRX_MCS_NSS_IDX_160 1
+#define WMI_HECAP_TXRX_MCS_NSS_IDX_80_80 2
+
+struct wmi_rate_set_arg {
+ u32 num_rates;
+ u8 rates[WMI_MAX_SUPPORTED_RATES];
+};
+
+struct peer_assoc_params {
+ struct wmi_mac_addr peer_macaddr;
+ u32 vdev_id;
+ u32 peer_new_assoc;
+ u32 peer_associd;
+ u32 peer_flags;
+ u32 peer_caps;
+ u32 peer_listen_intval;
+ u32 peer_ht_caps;
+ u32 peer_max_mpdu;
+ u32 peer_mpdu_density;
+ u32 peer_rate_caps;
+ u32 peer_nss;
+ u32 peer_vht_caps;
+ u32 peer_phymode;
+ u32 peer_ht_info[2];
+ struct wmi_rate_set_arg peer_legacy_rates;
+ struct wmi_rate_set_arg peer_ht_rates;
+ u32 rx_max_rate;
+ u32 rx_mcs_set;
+ u32 tx_max_rate;
+ u32 tx_mcs_set;
+ u8 vht_capable;
+ u8 min_data_rate;
+ u32 tx_max_mcs_nss;
+ u32 peer_bw_rxnss_override;
+ bool is_pmf_enabled;
+ bool is_wme_set;
+ bool qos_flag;
+ bool apsd_flag;
+ bool ht_flag;
+ bool bw_40;
+ bool bw_80;
+ bool bw_160;
+ bool stbc_flag;
+ bool ldpc_flag;
+ bool static_mimops_flag;
+ bool dynamic_mimops_flag;
+ bool spatial_mux_flag;
+ bool vht_flag;
+ bool vht_ng_flag;
+ bool need_ptk_4_way;
+ bool need_gtk_2_way;
+ bool auth_flag;
+ bool safe_mode_enabled;
+ bool amsdu_disable;
+ /* Use common structure */
+ u8 peer_mac[ETH_ALEN];
+
+ bool he_flag;
+ u32 peer_he_cap_macinfo[2];
+ u32 peer_he_cap_macinfo_internal;
+ u32 peer_he_caps_6ghz;
+ u32 peer_he_ops;
+ u32 peer_he_cap_phyinfo[WMI_HOST_MAX_HECAP_PHY_SIZE];
+ u32 peer_he_mcs_count;
+ u32 peer_he_rx_mcs_set[WMI_HOST_MAX_HE_RATE_SET];
+ u32 peer_he_tx_mcs_set[WMI_HOST_MAX_HE_RATE_SET];
+ bool twt_responder;
+ bool twt_requester;
+ bool is_assoc;
+ struct ath11k_ppe_threshold peer_ppet;
+};
+
+struct wmi_peer_assoc_complete_cmd {
+ u32 tlv_header;
+ struct wmi_mac_addr peer_macaddr;
+ u32 vdev_id;
+ u32 peer_new_assoc;
+ u32 peer_associd;
+ u32 peer_flags;
+ u32 peer_caps;
+ u32 peer_listen_intval;
+ u32 peer_ht_caps;
+ u32 peer_max_mpdu;
+ u32 peer_mpdu_density;
+ u32 peer_rate_caps;
+ u32 peer_nss;
+ u32 peer_vht_caps;
+ u32 peer_phymode;
+ u32 peer_ht_info[2];
+ u32 num_peer_legacy_rates;
+ u32 num_peer_ht_rates;
+ u32 peer_bw_rxnss_override;
+ struct wmi_ppe_threshold peer_ppet;
+ u32 peer_he_cap_info;
+ u32 peer_he_ops;
+ u32 peer_he_cap_phy[WMI_MAX_HECAP_PHY_SIZE];
+ u32 peer_he_mcs;
+ u32 peer_he_cap_info_ext;
+ u32 peer_he_cap_info_internal;
+ u32 min_data_rate;
+ u32 peer_he_caps_6ghz;
+} __packed;
+
+struct wmi_stop_scan_cmd {
+ u32 tlv_header;
+ u32 requestor;
+ u32 scan_id;
+ u32 req_type;
+ u32 vdev_id;
+ u32 pdev_id;
+};
+
+struct scan_chan_list_params {
+ u32 pdev_id;
+ u16 nallchans;
+ struct channel_param ch_param[];
+};
+
+struct wmi_scan_chan_list_cmd {
+ u32 tlv_header;
+ u32 num_scan_chans;
+ u32 flags;
+ u32 pdev_id;
+} __packed;
+
+struct wmi_scan_prob_req_oui_cmd {
+ u32 tlv_header;
+ u32 prob_req_oui;
+} __packed;
+
+#define WMI_MGMT_SEND_DOWNLD_LEN 64
+
+#define WMI_TX_PARAMS_DWORD0_POWER GENMASK(7, 0)
+#define WMI_TX_PARAMS_DWORD0_MCS_MASK GENMASK(19, 8)
+#define WMI_TX_PARAMS_DWORD0_NSS_MASK GENMASK(27, 20)
+#define WMI_TX_PARAMS_DWORD0_RETRY_LIMIT GENMASK(31, 28)
+
+#define WMI_TX_PARAMS_DWORD1_CHAIN_MASK GENMASK(7, 0)
+#define WMI_TX_PARAMS_DWORD1_BW_MASK GENMASK(14, 8)
+#define WMI_TX_PARAMS_DWORD1_PREAMBLE_TYPE GENMASK(19, 15)
+#define WMI_TX_PARAMS_DWORD1_FRAME_TYPE BIT(20)
+#define WMI_TX_PARAMS_DWORD1_RSVD GENMASK(31, 21)
+
+struct wmi_mgmt_send_params {
+ u32 tlv_header;
+ u32 tx_params_dword0;
+ u32 tx_params_dword1;
+};
+
+struct wmi_mgmt_send_cmd {
+ u32 tlv_header;
+ u32 vdev_id;
+ u32 desc_id;
+ u32 chanfreq;
+ u32 paddr_lo;
+ u32 paddr_hi;
+ u32 frame_len;
+ u32 buf_len;
+ u32 tx_params_valid;
+
+ /* This TLV is followed by struct wmi_mgmt_frame */
+
+ /* Followed by struct wmi_mgmt_send_params */
+} __packed;
+
+struct wmi_sta_powersave_mode_cmd {
+ u32 tlv_header;
+ u32 vdev_id;
+ u32 sta_ps_mode;
+};
+
+struct wmi_sta_smps_force_mode_cmd {
+ u32 tlv_header;
+ u32 vdev_id;
+ u32 forced_mode;
+};
+
+struct wmi_sta_smps_param_cmd {
+ u32 tlv_header;
+ u32 vdev_id;
+ u32 param;
+ u32 value;
+};
+
+struct wmi_bcn_prb_info {
+ u32 tlv_header;
+ u32 caps;
+ u32 erp;
+} __packed;
+
+enum {
+ WMI_PDEV_SUSPEND,
+ WMI_PDEV_SUSPEND_AND_DISABLE_INTR,
+};
+
+struct green_ap_ps_params {
+ u32 value;
+};
+
+struct wmi_pdev_green_ap_ps_enable_cmd_param {
+ u32 tlv_header;
+ u32 pdev_id;
+ u32 enable;
+};
+
+struct ap_ps_params {
+ u32 vdev_id;
+ u32 param;
+ u32 value;
+};
+
+struct vdev_set_params {
+ u32 if_id;
+ u32 param_id;
+ u32 param_value;
+};
+
+struct stats_request_params {
+ u32 stats_id;
+ u32 vdev_id;
+ u32 pdev_id;
+};
+
+struct wmi_set_current_country_params {
+ u8 alpha2[3];
+};
+
+struct wmi_set_current_country_cmd {
+ u32 tlv_header;
+ u32 pdev_id;
+ u32 new_alpha2;
+} __packed;
+
+enum set_init_cc_type {
+ WMI_COUNTRY_INFO_TYPE_ALPHA,
+ WMI_COUNTRY_INFO_TYPE_COUNTRY_CODE,
+ WMI_COUNTRY_INFO_TYPE_REGDOMAIN,
+};
+
+enum set_init_cc_flags {
+ INVALID_CC,
+ CC_IS_SET,
+ REGDMN_IS_SET,
+ ALPHA_IS_SET,
+};
+
+struct wmi_init_country_params {
+ union {
+ u16 country_code;
+ u16 regdom_id;
+ u8 alpha2[3];
+ } cc_info;
+ enum set_init_cc_flags flags;
+};
+
+struct wmi_init_country_cmd {
+ u32 tlv_header;
+ u32 pdev_id;
+ u32 init_cc_type;
+ union {
+ u32 country_code;
+ u32 regdom_id;
+ u32 alpha2;
+ } cc_info;
+} __packed;
+
+struct wmi_11d_scan_start_params {
+ u32 vdev_id;
+ u32 scan_period_msec;
+ u32 start_interval_msec;
+};
+
+struct wmi_11d_scan_start_cmd {
+ u32 tlv_header;
+ u32 vdev_id;
+ u32 scan_period_msec;
+ u32 start_interval_msec;
+} __packed;
+
+struct wmi_11d_scan_stop_cmd {
+ u32 tlv_header;
+ u32 vdev_id;
+} __packed;
+
+struct wmi_11d_new_cc_ev {
+ u32 new_alpha2;
+} __packed;
+
+#define THERMAL_LEVELS 1
+struct tt_level_config {
+ u32 tmplwm;
+ u32 tmphwm;
+ u32 dcoffpercent;
+ u32 priority;
+};
+
+struct thermal_mitigation_params {
+ u32 pdev_id;
+ u32 enable;
+ u32 dc;
+ u32 dc_per_event;
+ struct tt_level_config levelconf[THERMAL_LEVELS];
+};
+
+struct wmi_therm_throt_config_request_cmd {
+ u32 tlv_header;
+ u32 pdev_id;
+ u32 enable;
+ u32 dc;
+ u32 dc_per_event;
+ u32 therm_throt_levels;
+} __packed;
+
+struct wmi_therm_throt_level_config_info {
+ u32 tlv_header;
+ u32 temp_lwm;
+ u32 temp_hwm;
+ u32 dc_off_percent;
+ u32 prio;
+} __packed;
+
+struct wmi_delba_send_cmd {
+ u32 tlv_header;
+ u32 vdev_id;
+ struct wmi_mac_addr peer_macaddr;
+ u32 tid;
+ u32 initiator;
+ u32 reasoncode;
+} __packed;
+
+struct wmi_addba_setresponse_cmd {
+ u32 tlv_header;
+ u32 vdev_id;
+ struct wmi_mac_addr peer_macaddr;
+ u32 tid;
+ u32 statuscode;
+} __packed;
+
+struct wmi_addba_send_cmd {
+ u32 tlv_header;
+ u32 vdev_id;
+ struct wmi_mac_addr peer_macaddr;
+ u32 tid;
+ u32 buffersize;
+} __packed;
+
+struct wmi_addba_clear_resp_cmd {
+ u32 tlv_header;
+ u32 vdev_id;
+ struct wmi_mac_addr peer_macaddr;
+} __packed;
+
+struct wmi_pdev_pktlog_filter_info {
+ u32 tlv_header;
+ struct wmi_mac_addr peer_macaddr;
+} __packed;
+
+struct wmi_pdev_pktlog_filter_cmd {
+ u32 tlv_header;
+ u32 pdev_id;
+ u32 enable;
+ u32 filter_type;
+ u32 num_mac;
+} __packed;
+
+enum ath11k_wmi_pktlog_enable {
+ ATH11K_WMI_PKTLOG_ENABLE_AUTO = 0,
+ ATH11K_WMI_PKTLOG_ENABLE_FORCE = 1,
+};
+
+struct wmi_pktlog_enable_cmd {
+ u32 tlv_header;
+ u32 pdev_id;
+ u32 evlist; /* WMI_PKTLOG_EVENT */
+ u32 enable;
+} __packed;
+
+struct wmi_pktlog_disable_cmd {
+ u32 tlv_header;
+ u32 pdev_id;
+} __packed;
+
+#define DFS_PHYERR_UNIT_TEST_CMD 0
+#define DFS_UNIT_TEST_MODULE 0x2b
+#define DFS_UNIT_TEST_TOKEN 0xAA
+
+enum dfs_test_args_idx {
+ DFS_TEST_CMDID = 0,
+ DFS_TEST_PDEV_ID,
+ DFS_TEST_RADAR_PARAM,
+ DFS_MAX_TEST_ARGS,
+};
+
+struct wmi_dfs_unit_test_arg {
+ u32 cmd_id;
+ u32 pdev_id;
+ u32 radar_param;
+};
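+
+/* Usage sketch (illustration only): arguments for a simulated radar pulse,
+ * using the defines above; pdev_id and the radar_param value are
+ * hypothetical. The args are carried in wmi_unit_test_cmd (below) with
+ * module_id DFS_UNIT_TEST_MODULE and diag_token DFS_UNIT_TEST_TOKEN:
+ *
+ *    struct wmi_dfs_unit_test_arg arg = {
+ *        .cmd_id = DFS_PHYERR_UNIT_TEST_CMD,
+ *        .pdev_id = pdev_id,
+ *        .radar_param = 0,
+ *    };
+ */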
+
+struct wmi_unit_test_cmd {
+ u32 tlv_header;
+ u32 vdev_id;
+ u32 module_id;
+ u32 num_args;
+ u32 diag_token;
+ /* Followed by test args */
+} __packed;
+
+#define MAX_SUPPORTED_RATES 128
+
+#define WMI_PEER_AUTH 0x00000001
+#define WMI_PEER_QOS 0x00000002
+#define WMI_PEER_NEED_PTK_4_WAY 0x00000004
+#define WMI_PEER_NEED_GTK_2_WAY 0x00000010
+#define WMI_PEER_HE 0x00000400
+#define WMI_PEER_APSD 0x00000800
+#define WMI_PEER_HT 0x00001000
+#define WMI_PEER_40MHZ 0x00002000
+#define WMI_PEER_STBC 0x00008000
+#define WMI_PEER_LDPC 0x00010000
+#define WMI_PEER_DYN_MIMOPS 0x00020000
+#define WMI_PEER_STATIC_MIMOPS 0x00040000
+#define WMI_PEER_SPATIAL_MUX 0x00200000
+#define WMI_PEER_TWT_REQ 0x00400000
+#define WMI_PEER_TWT_RESP 0x00800000
+#define WMI_PEER_VHT 0x02000000
+#define WMI_PEER_80MHZ 0x04000000
+#define WMI_PEER_PMF 0x08000000
+/* TODO: Placeholder for WLAN_PEER_F_PS_PRESEND_REQUIRED = 0x10000000.
+ * Needs to be cleaned up.
+ */
+#define WMI_PEER_IS_P2P_CAPABLE 0x20000000
+#define WMI_PEER_160MHZ 0x40000000
+#define WMI_PEER_SAFEMODE_EN 0x80000000
+
+struct beacon_tmpl_params {
+ u8 vdev_id;
+ u32 tim_ie_offset;
+ u32 tmpl_len;
+ u32 tmpl_len_aligned;
+ u32 csa_switch_count_offset;
+ u32 ext_csa_switch_count_offset;
+ u8 *frm;
+};
+
+struct wmi_rate_set {
+ u32 num_rates;
+ u32 rates[(MAX_SUPPORTED_RATES / 4) + 1];
+};
+
+struct wmi_vht_rate_set {
+ u32 tlv_header;
+ u32 rx_max_rate;
+ u32 rx_mcs_set;
+ u32 tx_max_rate;
+ u32 tx_mcs_set;
+ u32 tx_max_mcs_nss;
+} __packed;
+
+struct wmi_he_rate_set {
+ u32 tlv_header;
+
+ /* MCS at which the peer can receive */
+ u32 rx_mcs_set;
+
+ /* MCS at which the peer can transmit */
+ u32 tx_mcs_set;
+} __packed;
+
+#define MAX_REG_RULES 10
+#define REG_ALPHA2_LEN 2
+#define MAX_6GHZ_REG_RULES 5
+
+enum wmi_start_event_param {
+ WMI_VDEV_START_RESP_EVENT = 0,
+ WMI_VDEV_RESTART_RESP_EVENT,
+};
+
+struct wmi_vdev_start_resp_event {
+ u32 vdev_id;
+ u32 requestor_id;
+ enum wmi_start_event_param resp_type;
+ u32 status;
+ u32 chain_mask;
+ u32 smps_mode;
+ union {
+ u32 mac_id;
+ u32 pdev_id;
+ };
+ u32 cfgd_tx_streams;
+ u32 cfgd_rx_streams;
+} __packed;
+
+/* VDEV start response status codes */
+enum wmi_vdev_start_resp_status_code {
+ WMI_VDEV_START_RESPONSE_STATUS_SUCCESS = 0,
+ WMI_VDEV_START_RESPONSE_INVALID_VDEVID = 1,
+ WMI_VDEV_START_RESPONSE_NOT_SUPPORTED = 2,
+ WMI_VDEV_START_RESPONSE_DFS_VIOLATION = 3,
+ WMI_VDEV_START_RESPONSE_INVALID_REGDOMAIN = 4,
+};
+
+/* Regulatory Rule Flags Passed by FW */
+#define REGULATORY_CHAN_DISABLED BIT(0)
+#define REGULATORY_CHAN_NO_IR BIT(1)
+#define REGULATORY_CHAN_RADAR BIT(3)
+#define REGULATORY_CHAN_NO_OFDM BIT(6)
+#define REGULATORY_CHAN_INDOOR_ONLY BIT(9)
+
+#define REGULATORY_CHAN_NO_HT40 BIT(4)
+#define REGULATORY_CHAN_NO_80MHZ BIT(7)
+#define REGULATORY_CHAN_NO_160MHZ BIT(8)
+#define REGULATORY_CHAN_NO_20MHZ BIT(11)
+#define REGULATORY_CHAN_NO_10MHZ BIT(12)
+
+enum wmi_reg_chan_list_cmd_type {
+ WMI_REG_CHAN_LIST_CC_ID = 0,
+ WMI_REG_CHAN_LIST_CC_EXT_ID = 1,
+};
+
+enum wmi_reg_cc_setting_code {
+ WMI_REG_SET_CC_STATUS_PASS = 0,
+ WMI_REG_CURRENT_ALPHA2_NOT_FOUND = 1,
+ WMI_REG_INIT_ALPHA2_NOT_FOUND = 2,
+ WMI_REG_SET_CC_CHANGE_NOT_ALLOWED = 3,
+ WMI_REG_SET_CC_STATUS_NO_MEMORY = 4,
+ WMI_REG_SET_CC_STATUS_FAIL = 5,
+
+ /* add new setting code above, update in
+ * @enum cc_setting_code as well.
+ * Also handle it in ath11k_wmi_cc_setting_code_to_reg()
+ */
+};
+
+enum cc_setting_code {
+ REG_SET_CC_STATUS_PASS = 0,
+ REG_CURRENT_ALPHA2_NOT_FOUND = 1,
+ REG_INIT_ALPHA2_NOT_FOUND = 2,
+ REG_SET_CC_CHANGE_NOT_ALLOWED = 3,
+ REG_SET_CC_STATUS_NO_MEMORY = 4,
+ REG_SET_CC_STATUS_FAIL = 5,
+
+ /* add new setting code above, update in
+ * @enum wmi_reg_cc_setting_code as well.
+ * Also handle it in ath11k_cc_status_to_str()
+ */
+};
+
+static inline enum cc_setting_code
+ath11k_wmi_cc_setting_code_to_reg(enum wmi_reg_cc_setting_code status_code)
+{
+ switch (status_code) {
+ case WMI_REG_SET_CC_STATUS_PASS:
+ return REG_SET_CC_STATUS_PASS;
+ case WMI_REG_CURRENT_ALPHA2_NOT_FOUND:
+ return REG_CURRENT_ALPHA2_NOT_FOUND;
+ case WMI_REG_INIT_ALPHA2_NOT_FOUND:
+ return REG_INIT_ALPHA2_NOT_FOUND;
+ case WMI_REG_SET_CC_CHANGE_NOT_ALLOWED:
+ return REG_SET_CC_CHANGE_NOT_ALLOWED;
+ case WMI_REG_SET_CC_STATUS_NO_MEMORY:
+ return REG_SET_CC_STATUS_NO_MEMORY;
+ case WMI_REG_SET_CC_STATUS_FAIL:
+ return REG_SET_CC_STATUS_FAIL;
+ }
+
+ return REG_SET_CC_STATUS_FAIL;
+}
+
+static inline const char *ath11k_cc_status_to_str(enum cc_setting_code code)
+{
+ switch (code) {
+ case REG_SET_CC_STATUS_PASS:
+ return "REG_SET_CC_STATUS_PASS";
+ case REG_CURRENT_ALPHA2_NOT_FOUND:
+ return "REG_CURRENT_ALPHA2_NOT_FOUND";
+ case REG_INIT_ALPHA2_NOT_FOUND:
+ return "REG_INIT_ALPHA2_NOT_FOUND";
+ case REG_SET_CC_CHANGE_NOT_ALLOWED:
+ return "REG_SET_CC_CHANGE_NOT_ALLOWED";
+ case REG_SET_CC_STATUS_NO_MEMORY:
+ return "REG_SET_CC_STATUS_NO_MEMORY";
+ case REG_SET_CC_STATUS_FAIL:
+ return "REG_SET_CC_STATUS_FAIL";
+ }
+
+ return "Unknown CC status";
+}
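+
+/* Usage sketch (illustration only): the two helpers above are meant to be
+ * chained when logging the status of a regulatory event; ev below is a
+ * hypothetical decoded event carrying a wmi_reg_cc_setting_code:
+ *
+ *    enum cc_setting_code code =
+ *        ath11k_wmi_cc_setting_code_to_reg(ev->status_code);
+ *
+ *    pr_debug("reg set cc status: %s\n", ath11k_cc_status_to_str(code));
+ */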
+
+enum wmi_reg_6ghz_ap_type {
+ WMI_REG_INDOOR_AP = 0,
+ WMI_REG_STANDARD_POWER_AP = 1,
+ WMI_REG_VERY_LOW_POWER_AP = 2,
+
+ /* add AP type above, handle in ath11k_6ghz_ap_type_to_str()
+ */
+ WMI_REG_CURRENT_MAX_AP_TYPE,
+ WMI_REG_MAX_AP_TYPE = 7,
+};
+
+static inline const char *
+ath11k_6ghz_ap_type_to_str(enum wmi_reg_6ghz_ap_type type)
+{
+ switch (type) {
+ case WMI_REG_INDOOR_AP:
+ return "INDOOR AP";
+ case WMI_REG_STANDARD_POWER_AP:
+ return "STANDARD POWER AP";
+ case WMI_REG_VERY_LOW_POWER_AP:
+ return "VERY LOW POWER AP";
+ case WMI_REG_CURRENT_MAX_AP_TYPE:
+ return "CURRENT_MAX_AP_TYPE";
+ case WMI_REG_MAX_AP_TYPE:
+ return "MAX_AP_TYPE";
+ }
+
+ return "unknown 6 GHz AP type";
+}
+
+enum wmi_reg_6ghz_client_type {
+ WMI_REG_DEFAULT_CLIENT = 0,
+ WMI_REG_SUBORDINATE_CLIENT = 1,
+ WMI_REG_MAX_CLIENT_TYPE = 2,
+
+ /* add client type above, handle it in
+ * ath11k_6ghz_client_type_to_str()
+ */
+};
+
+static inline const char *
+ath11k_6ghz_client_type_to_str(enum wmi_reg_6ghz_client_type type)
+{
+ switch (type) {
+ case WMI_REG_DEFAULT_CLIENT:
+ return "DEFAULT CLIENT";
+ case WMI_REG_SUBORDINATE_CLIENT:
+ return "SUBORDINATE CLIENT";
+ case WMI_REG_MAX_CLIENT_TYPE:
+ return "MAX_CLIENT_TYPE";
+ }
+
+ return "unknown 6 GHz client type";
+}
+
+enum reg_subdomains_6ghz {
+ EMPTY_6GHZ = 0x0,
+ FCC1_CLIENT_LPI_REGULAR_6GHZ = 0x01,
+ FCC1_CLIENT_SP_6GHZ = 0x02,
+ FCC1_AP_LPI_6GHZ = 0x03,
+ FCC1_CLIENT_LPI_SUBORDINATE = FCC1_AP_LPI_6GHZ,
+ FCC1_AP_SP_6GHZ = 0x04,
+ ETSI1_LPI_6GHZ = 0x10,
+ ETSI1_VLP_6GHZ = 0x11,
+ ETSI2_LPI_6GHZ = 0x12,
+ ETSI2_VLP_6GHZ = 0x13,
+ APL1_LPI_6GHZ = 0x20,
+ APL1_VLP_6GHZ = 0x21,
+
+ /* add sub-domain above, handle it in
+ * ath11k_sub_reg_6ghz_to_str()
+ */
+};
+
+static inline const char *
+ath11k_sub_reg_6ghz_to_str(enum reg_subdomains_6ghz sub_id)
+{
+ switch (sub_id) {
+ case EMPTY_6GHZ:
+ return "N/A";
+ case FCC1_CLIENT_LPI_REGULAR_6GHZ:
+ return "FCC1_CLIENT_LPI_REGULAR_6GHZ";
+ case FCC1_CLIENT_SP_6GHZ:
+ return "FCC1_CLIENT_SP_6GHZ";
+ case FCC1_AP_LPI_6GHZ:
+ return "FCC1_AP_LPI_6GHZ/FCC1_CLIENT_LPI_SUBORDINATE";
+ case FCC1_AP_SP_6GHZ:
+ return "FCC1_AP_SP_6GHZ";
+ case ETSI1_LPI_6GHZ:
+ return "ETSI1_LPI_6GHZ";
+ case ETSI1_VLP_6GHZ:
+ return "ETSI1_VLP_6GHZ";
+ case ETSI2_LPI_6GHZ:
+ return "ETSI2_LPI_6GHZ";
+ case ETSI2_VLP_6GHZ:
+ return "ETSI2_VLP_6GHZ";
+ case APL1_LPI_6GHZ:
+ return "APL1_LPI_6GHZ";
+ case APL1_VLP_6GHZ:
+ return "APL1_VLP_6GHZ";
+ }
+
+ return "unknown sub reg id";
+}
+
+enum reg_super_domain_6ghz {
+ FCC1_6GHZ = 0x01,
+ ETSI1_6GHZ = 0x02,
+ ETSI2_6GHZ = 0x03,
+ APL1_6GHZ = 0x04,
+ FCC1_6GHZ_CL = 0x05,
+
+ /* add super domain above, handle it in
+ * ath11k_super_reg_6ghz_to_str()
+ */
+};
+
+static inline const char *
+ath11k_super_reg_6ghz_to_str(enum reg_super_domain_6ghz domain_id)
+{
+ switch (domain_id) {
+ case FCC1_6GHZ:
+ return "FCC1_6GHZ";
+ case ETSI1_6GHZ:
+ return "ETSI1_6GHZ";
+ case ETSI2_6GHZ:
+ return "ETSI2_6GHZ";
+ case APL1_6GHZ:
+ return "APL1_6GHZ";
+ case FCC1_6GHZ_CL:
+ return "FCC1_6GHZ_CL";
+ }
+
+ return "unknown domain id";
+}
+
+struct cur_reg_rule {
+ u16 start_freq;
+ u16 end_freq;
+ u16 max_bw;
+ u8 reg_power;
+ u8 ant_gain;
+ u16 flags;
+ bool psd_flag;
+ s8 psd_eirp;
+};
+
+struct cur_regulatory_info {
+ enum cc_setting_code status_code;
+ u8 num_phy;
+ u8 phy_id;
+ u16 reg_dmn_pair;
+ u16 ctry_code;
+ u8 alpha2[REG_ALPHA2_LEN + 1];
+ u32 dfs_region;
+ u32 phybitmap;
+ u32 min_bw_2ghz;
+ u32 max_bw_2ghz;
+ u32 min_bw_5ghz;
+ u32 max_bw_5ghz;
+ u32 num_2ghz_reg_rules;
+ u32 num_5ghz_reg_rules;
+ struct cur_reg_rule *reg_rules_2ghz_ptr;
+ struct cur_reg_rule *reg_rules_5ghz_ptr;
+ bool is_ext_reg_event;
+ enum wmi_reg_6ghz_client_type client_type;
+ bool rnr_tpe_usable;
+ bool unspecified_ap_usable;
+ u8 domain_code_6ghz_ap[WMI_REG_CURRENT_MAX_AP_TYPE];
+ u8 domain_code_6ghz_client[WMI_REG_CURRENT_MAX_AP_TYPE][WMI_REG_MAX_CLIENT_TYPE];
+ u32 domain_code_6ghz_super_id;
+ u32 min_bw_6ghz_ap[WMI_REG_CURRENT_MAX_AP_TYPE];
+ u32 max_bw_6ghz_ap[WMI_REG_CURRENT_MAX_AP_TYPE];
+ u32 min_bw_6ghz_client[WMI_REG_CURRENT_MAX_AP_TYPE][WMI_REG_MAX_CLIENT_TYPE];
+ u32 max_bw_6ghz_client[WMI_REG_CURRENT_MAX_AP_TYPE][WMI_REG_MAX_CLIENT_TYPE];
+ u32 num_6ghz_rules_ap[WMI_REG_CURRENT_MAX_AP_TYPE];
+ u32 num_6ghz_rules_client[WMI_REG_CURRENT_MAX_AP_TYPE][WMI_REG_MAX_CLIENT_TYPE];
+ struct cur_reg_rule *reg_rules_6ghz_ap_ptr[WMI_REG_CURRENT_MAX_AP_TYPE];
+ struct cur_reg_rule *reg_rules_6ghz_client_ptr
+ [WMI_REG_CURRENT_MAX_AP_TYPE][WMI_REG_MAX_CLIENT_TYPE];
+};
+
+struct wmi_reg_chan_list_cc_event {
+ u32 status_code;
+ u32 phy_id;
+ u32 alpha2;
+ u32 num_phy;
+ u32 country_id;
+ u32 domain_code;
+ u32 dfs_region;
+ u32 phybitmap;
+ u32 min_bw_2ghz;
+ u32 max_bw_2ghz;
+ u32 min_bw_5ghz;
+ u32 max_bw_5ghz;
+ u32 num_2ghz_reg_rules;
+ u32 num_5ghz_reg_rules;
+} __packed;
+
+struct wmi_regulatory_rule_struct {
+ u32 tlv_header;
+ u32 freq_info;
+ u32 bw_pwr_info;
+ u32 flag_info;
+};
+
+#define WMI_REG_CLIENT_MAX 4
+
+struct wmi_reg_chan_list_cc_ext_event {
+ u32 status_code;
+ u32 phy_id;
+ u32 alpha2;
+ u32 num_phy;
+ u32 country_id;
+ u32 domain_code;
+ u32 dfs_region;
+ u32 phybitmap;
+ u32 min_bw_2ghz;
+ u32 max_bw_2ghz;
+ u32 min_bw_5ghz;
+ u32 max_bw_5ghz;
+ u32 num_2ghz_reg_rules;
+ u32 num_5ghz_reg_rules;
+ u32 client_type;
+ u32 rnr_tpe_usable;
+ u32 unspecified_ap_usable;
+ u32 domain_code_6ghz_ap_lpi;
+ u32 domain_code_6ghz_ap_sp;
+ u32 domain_code_6ghz_ap_vlp;
+ u32 domain_code_6ghz_client_lpi[WMI_REG_CLIENT_MAX];
+ u32 domain_code_6ghz_client_sp[WMI_REG_CLIENT_MAX];
+ u32 domain_code_6ghz_client_vlp[WMI_REG_CLIENT_MAX];
+ u32 domain_code_6ghz_super_id;
+ u32 min_bw_6ghz_ap_sp;
+ u32 max_bw_6ghz_ap_sp;
+ u32 min_bw_6ghz_ap_lpi;
+ u32 max_bw_6ghz_ap_lpi;
+ u32 min_bw_6ghz_ap_vlp;
+ u32 max_bw_6ghz_ap_vlp;
+ u32 min_bw_6ghz_client_sp[WMI_REG_CLIENT_MAX];
+ u32 max_bw_6ghz_client_sp[WMI_REG_CLIENT_MAX];
+ u32 min_bw_6ghz_client_lpi[WMI_REG_CLIENT_MAX];
+ u32 max_bw_6ghz_client_lpi[WMI_REG_CLIENT_MAX];
+ u32 min_bw_6ghz_client_vlp[WMI_REG_CLIENT_MAX];
+ u32 max_bw_6ghz_client_vlp[WMI_REG_CLIENT_MAX];
+ u32 num_6ghz_reg_rules_ap_sp;
+ u32 num_6ghz_reg_rules_ap_lpi;
+ u32 num_6ghz_reg_rules_ap_vlp;
+ u32 num_6ghz_reg_rules_client_sp[WMI_REG_CLIENT_MAX];
+ u32 num_6ghz_reg_rules_client_lpi[WMI_REG_CLIENT_MAX];
+ u32 num_6ghz_reg_rules_client_vlp[WMI_REG_CLIENT_MAX];
+} __packed;
+
+struct wmi_regulatory_ext_rule {
+ u32 tlv_header;
+ u32 freq_info;
+ u32 bw_pwr_info;
+ u32 flag_info;
+ u32 psd_power_info;
+} __packed;
+
+struct wmi_vdev_delete_resp_event {
+ u32 vdev_id;
+} __packed;
+
+struct wmi_peer_delete_resp_event {
+ u32 vdev_id;
+ struct wmi_mac_addr peer_macaddr;
+} __packed;
+
+struct wmi_bcn_tx_status_event {
+ u32 vdev_id;
+ u32 tx_status;
+} __packed;
+
+struct wmi_vdev_stopped_event {
+ u32 vdev_id;
+} __packed;
+
+struct wmi_pdev_bss_chan_info_event {
+ u32 freq; /* unit: MHz */
+ u32 noise_floor; /* unit: dBm */
+ /* rx clear - how often the channel was unused */
+ u32 rx_clear_count_low;
+ u32 rx_clear_count_high;
+ /* cycle count - elapsed time during measured period, in clock ticks */
+ u32 cycle_count_low;
+ u32 cycle_count_high;
+ /* tx cycle count - elapsed time spent in tx, in clock ticks */
+ u32 tx_cycle_count_low;
+ u32 tx_cycle_count_high;
+ /* rx cycle count - elapsed time spent in rx, in clock ticks */
+ u32 rx_cycle_count_low;
+ u32 rx_cycle_count_high;
+ /* rx cycle count for my BSS in 64-bit format */
+ u32 rx_bss_cycle_count_low;
+ u32 rx_bss_cycle_count_high;
+ u32 pdev_id;
+} __packed;
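+
+/* Sketch (assumption, not upstream code): each *_low/*_high pair above is
+ * read as one 64-bit tick count, from which channel busy time can be
+ * derived as a fraction of the total cycle count:
+ *
+ *    u64 rx_clear = ((u64)ev->rx_clear_count_high << 32) |
+ *                   ev->rx_clear_count_low;
+ *    u64 cycle = ((u64)ev->cycle_count_high << 32) | ev->cycle_count_low;
+ *    u64 busy_pct = cycle ? div64_u64(rx_clear * 100, cycle) : 0;
+ */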
+
+#define WMI_VDEV_INSTALL_KEY_COMPL_STATUS_SUCCESS 0
+
+struct wmi_vdev_install_key_compl_event {
+ u32 vdev_id;
+ struct wmi_mac_addr peer_macaddr;
+ u32 key_idx;
+ u32 key_flags;
+ u32 status;
+} __packed;
+
+struct wmi_vdev_install_key_complete_arg {
+ u32 vdev_id;
+ const u8 *macaddr;
+ u32 key_idx;
+ u32 key_flags;
+ u32 status;
+};
+
+struct wmi_peer_assoc_conf_event {
+ u32 vdev_id;
+ struct wmi_mac_addr peer_macaddr;
+} __packed;
+
+struct wmi_peer_assoc_conf_arg {
+ u32 vdev_id;
+ const u8 *macaddr;
+};
+
+struct wmi_fils_discovery_event {
+ u32 vdev_id;
+ u32 fils_tt;
+ u32 tbtt;
+} __packed;
+
+struct wmi_probe_resp_tx_status_event {
+ u32 vdev_id;
+ u32 tx_status;
+} __packed;
+
+/*
+ * PDEV statistics
+ */
+struct wmi_pdev_stats_base {
+ s32 chan_nf;
+ u32 tx_frame_count; /* Cycles spent transmitting frames */
+ u32 rx_frame_count; /* Cycles spent receiving frames */
+ u32 rx_clear_count; /* Total channel busy time, evidently */
+ u32 cycle_count; /* Total on-channel time */
+ u32 phy_err_count;
+ u32 chan_tx_pwr;
+} __packed;
+
+struct wmi_pdev_stats_extra {
+ u32 ack_rx_bad;
+ u32 rts_bad;
+ u32 rts_good;
+ u32 fcs_bad;
+ u32 no_beacons;
+ u32 mib_int_count;
+} __packed;
+
+struct wmi_pdev_stats_tx {
+ /* Num HTT cookies queued to dispatch list */
+ s32 comp_queued;
+
+ /* Num HTT cookies dispatched */
+ s32 comp_delivered;
+
+ /* Num MSDU queued to WAL */
+ s32 msdu_enqued;
+
+ /* Num MPDU queue to WAL */
+ s32 mpdu_enqued;
+
+ /* Num MSDUs dropped by WMM limit */
+ s32 wmm_drop;
+
+ /* Num Local frames queued */
+ s32 local_enqued;
+
+ /* Num Local frames done */
+ s32 local_freed;
+
+ /* Num queued to HW */
+ s32 hw_queued;
+
+ /* Num PPDU reaped from HW */
+ s32 hw_reaped;
+
+ /* Num underruns */
+ s32 underrun;
+
+ /* Num hw paused */
+ u32 hw_paused;
+
+ /* Num PPDUs cleaned up in TX abort */
+ s32 tx_abort;
+
+ /* Num MPDUs requeued by SW */
+ s32 mpdus_requeued;
+
+ /* excessive retries */
+ u32 tx_ko;
+
+ u32 tx_xretry;
+
+ /* data hw rate code */
+ u32 data_rc;
+
+ /* Scheduler self triggers */
+ u32 self_triggers;
+
+ /* frames dropped due to excessive sw retries */
+ u32 sw_retry_failure;
+
+ /* illegal rate phy errors */
+ u32 illgl_rate_phy_err;
+
+ /* wal pdev continuous xretry */
+ u32 pdev_cont_xretry;
+
+ /* wal pdev tx timeouts */
+ u32 pdev_tx_timeout;
+
+ /* wal pdev resets */
+ u32 pdev_resets;
+
+ /* frames dropped due to non-availability of stateless TIDs */
+ u32 stateless_tid_alloc_failure;
+
+ /* PHY/BB underrun */
+ u32 phy_underrun;
+
+ /* MPDU is more than txop limit */
+ u32 txop_ovf;
+
+ /* Num sequences posted */
+ u32 seq_posted;
+
+ /* Num sequences failed in queueing */
+ u32 seq_failed_queueing;
+
+ /* Num sequences completed */
+ u32 seq_completed;
+
+ /* Num sequences restarted */
+ u32 seq_restarted;
+
+ /* Num of MU sequences posted */
+ u32 mu_seq_posted;
+
+ /* Num MPDUs flushed by SW, HWPAUSED, SW TXABORT
+ * (Reset, channel change)
+ */
+ s32 mpdus_sw_flush;
+
+ /* Num MPDUs filtered by HW, all filter conditions (TTL expired) */
+ s32 mpdus_hw_filter;
+
+ /* Num MPDUs truncated by PDG (TXOP, TBTT,
+ * PPDU_duration based on rate, dyn_bw)
+ */
+ s32 mpdus_truncated;
+
+ /* Num MPDUs that were tried but didn't receive an ACK or BA */
+ s32 mpdus_ack_failed;
+
+ /* Num MPDUs that were dropped due to expiry. */
+ s32 mpdus_expired;
+} __packed;
+
+struct wmi_pdev_stats_rx {
+ /* Counts any change in ring routing mid-PPDU */
+ s32 mid_ppdu_route_change;
+
+ /* Total number of statuses processed */
+ s32 status_rcvd;
+
+ /* Extra frags on rings 0-3 */
+ s32 r0_frags;
+ s32 r1_frags;
+ s32 r2_frags;
+ s32 r3_frags;
+
+ /* MSDUs / MPDUs delivered to HTT */
+ s32 htt_msdus;
+ s32 htt_mpdus;
+
+ /* MSDUs / MPDUs delivered to local stack */
+ s32 loc_msdus;
+ s32 loc_mpdus;
+
+ /* AMSDUs that have more MSDUs than the status ring size */
+ s32 oversize_amsdu;
+
+ /* Number of PHY errors */
+ s32 phy_errs;
+
+ /* Number of PHY errors drops */
+ s32 phy_err_drop;
+
+ /* Number of mpdu errors - FCS, MIC, ENC etc. */
+ s32 mpdu_errs;
+
+ /* Num overflow errors */
+ s32 rx_ovfl_errs;
+} __packed;
+
+struct wmi_pdev_stats {
+ struct wmi_pdev_stats_base base;
+ struct wmi_pdev_stats_tx tx;
+ struct wmi_pdev_stats_rx rx;
+} __packed;
+
+#define WLAN_MAX_AC 4
+#define MAX_TX_RATE_VALUES 10
+
+struct wmi_vdev_stats {
+ u32 vdev_id;
+ u32 beacon_snr;
+ u32 data_snr;
+ u32 num_tx_frames[WLAN_MAX_AC];
+ u32 num_rx_frames;
+ u32 num_tx_frames_retries[WLAN_MAX_AC];
+ u32 num_tx_frames_failures[WLAN_MAX_AC];
+ u32 num_rts_fail;
+ u32 num_rts_success;
+ u32 num_rx_err;
+ u32 num_rx_discard;
+ u32 num_tx_not_acked;
+ u32 tx_rate_history[MAX_TX_RATE_VALUES];
+ u32 beacon_rssi_history[MAX_TX_RATE_VALUES];
+} __packed;
+
+struct wmi_bcn_stats {
+ u32 vdev_id;
+ u32 tx_bcn_succ_cnt;
+ u32 tx_bcn_outage_cnt;
+} __packed;
+
+struct wmi_stats_event {
+ u32 stats_id;
+ u32 num_pdev_stats;
+ u32 num_vdev_stats;
+ u32 num_peer_stats;
+ u32 num_bcnflt_stats;
+ u32 num_chan_stats;
+ u32 num_mib_stats;
+ u32 pdev_id;
+ u32 num_bcn_stats;
+ u32 num_peer_extd_stats;
+ u32 num_peer_extd2_stats;
+} __packed;
+
+struct wmi_rssi_stats {
+ u32 vdev_id;
+ u32 rssi_avg_beacon[WMI_MAX_CHAINS];
+ u32 rssi_avg_data[WMI_MAX_CHAINS];
+ struct wmi_mac_addr peer_macaddr;
+} __packed;
+
+struct wmi_per_chain_rssi_stats {
+ u32 num_per_chain_rssi_stats;
+} __packed;
+
+struct wmi_pdev_ctl_failsafe_chk_event {
+ u32 pdev_id;
+ u32 ctl_failsafe_status;
+} __packed;
+
+struct wmi_pdev_csa_switch_ev {
+ u32 pdev_id;
+ u32 current_switch_count;
+ u32 num_vdevs;
+} __packed;
+
+struct wmi_pdev_radar_ev {
+ u32 pdev_id;
+ u32 detection_mode;
+ u32 chan_freq;
+ u32 chan_width;
+ u32 detector_id;
+ u32 segment_id;
+ u32 timestamp;
+ u32 is_chirp;
+ s32 freq_offset;
+ s32 sidx;
+} __packed;
+
+struct wmi_pdev_temperature_event {
+ /* temperature value in degrees Celsius */
+ s32 temp;
+ u32 pdev_id;
+} __packed;
+
+#define WMI_RX_STATUS_OK 0x00
+#define WMI_RX_STATUS_ERR_CRC 0x01
+#define WMI_RX_STATUS_ERR_DECRYPT 0x08
+#define WMI_RX_STATUS_ERR_MIC 0x10
+#define WMI_RX_STATUS_ERR_KEY_CACHE_MISS 0x20
+
+#define WLAN_MGMT_TXRX_HOST_MAX_ANTENNA 4
+
+struct mgmt_rx_event_params {
+ u32 chan_freq;
+ u32 channel;
+ u32 snr;
+ u8 rssi_ctl[WLAN_MGMT_TXRX_HOST_MAX_ANTENNA];
+ u32 rate;
+ enum wmi_phy_mode phy_mode;
+ u32 buf_len;
+ int status;
+ u32 flags;
+ int rssi;
+ u32 tsf_delta;
+ u8 pdev_id;
+};
+
+#define ATH_MAX_ANTENNA 4
+
+struct wmi_mgmt_rx_hdr {
+ u32 channel;
+ u32 snr;
+ u32 rate;
+ u32 phy_mode;
+ u32 buf_len;
+ u32 status;
+ u32 rssi_ctl[ATH_MAX_ANTENNA];
+ u32 flags;
+ int rssi;
+ u32 tsf_delta;
+ u32 rx_tsf_l32;
+ u32 rx_tsf_u32;
+ u32 pdev_id;
+ u32 chan_freq;
+} __packed;
+
+#define MAX_ANTENNA_EIGHT 8
+
+struct wmi_rssi_ctl_ext {
+ u32 tlv_header;
+ u32 rssi_ctl_ext[MAX_ANTENNA_EIGHT - ATH_MAX_ANTENNA];
+};
+
+struct wmi_mgmt_tx_compl_event {
+ u32 desc_id;
+ u32 status;
+ u32 pdev_id;
+ u32 ppdu_id;
+ u32 ack_rssi;
+} __packed;
+
+struct wmi_scan_event {
+ u32 event_type; /* %WMI_SCAN_EVENT_ */
+ u32 reason; /* %WMI_SCAN_REASON_ */
+ u32 channel_freq; /* only valid for WMI_SCAN_EVENT_FOREIGN_CHANNEL */
+ u32 scan_req_id;
+ u32 scan_id;
+ u32 vdev_id;
+ /* TSF Timestamp when the scan event (%WMI_SCAN_EVENT_) is completed
+ * In case of AP it is TSF of the AP vdev
+ * In case of STA connected state, this is the TSF of the AP
+ * In case of STA not connected, it will be the free running HW timer
+ */
+ u32 tsf_timestamp;
+} __packed;
+
+struct wmi_peer_sta_kickout_arg {
+ const u8 *mac_addr;
+};
+
+struct wmi_peer_sta_kickout_event {
+ struct wmi_mac_addr peer_macaddr;
+} __packed;
+
+enum wmi_roam_reason {
+ WMI_ROAM_REASON_BETTER_AP = 1,
+ WMI_ROAM_REASON_BEACON_MISS = 2,
+ WMI_ROAM_REASON_LOW_RSSI = 3,
+ WMI_ROAM_REASON_SUITABLE_AP_FOUND = 4,
+ WMI_ROAM_REASON_HO_FAILED = 5,
+
+ /* keep last */
+ WMI_ROAM_REASON_MAX,
+};
+
+struct wmi_roam_event {
+ u32 vdev_id;
+ u32 reason;
+ u32 rssi;
+} __packed;
+
+#define WMI_CHAN_INFO_START_RESP 0
+#define WMI_CHAN_INFO_END_RESP 1
+
+struct wmi_chan_info_event {
+ u32 err_code;
+ u32 freq;
+ u32 cmd_flags;
+ u32 noise_floor;
+ u32 rx_clear_count;
+ u32 cycle_count;
+ u32 chan_tx_pwr_range;
+ u32 chan_tx_pwr_tp;
+ u32 rx_frame_count;
+ u32 my_bss_rx_cycle_count;
+ u32 rx_11b_mode_data_duration;
+ u32 tx_frame_cnt;
+ u32 mac_clk_mhz;
+ u32 vdev_id;
+} __packed;
+
+struct ath11k_targ_cap {
+ u32 phy_capability;
+ u32 max_frag_entry;
+ u32 num_rf_chains;
+ u32 ht_cap_info;
+ u32 vht_cap_info;
+ u32 vht_supp_mcs;
+ u32 hw_min_tx_power;
+ u32 hw_max_tx_power;
+ u32 sys_cap_info;
+ u32 min_pkt_size_enable;
+ u32 max_bcn_ie_size;
+ u32 max_num_scan_channels;
+ u32 max_supported_macs;
+ u32 wmi_fw_sub_feat_caps;
+ u32 txrx_chainmask;
+ u32 default_dbs_hw_mode_index;
+ u32 num_msdu_desc;
+};
+
+enum wmi_vdev_type {
+ WMI_VDEV_TYPE_AP = 1,
+ WMI_VDEV_TYPE_STA = 2,
+ WMI_VDEV_TYPE_IBSS = 3,
+ WMI_VDEV_TYPE_MONITOR = 4,
+};
+
+enum wmi_vdev_subtype {
+ WMI_VDEV_SUBTYPE_NONE,
+ WMI_VDEV_SUBTYPE_P2P_DEVICE,
+ WMI_VDEV_SUBTYPE_P2P_CLIENT,
+ WMI_VDEV_SUBTYPE_P2P_GO,
+ WMI_VDEV_SUBTYPE_PROXY_STA,
+ WMI_VDEV_SUBTYPE_MESH_NON_11S,
+ WMI_VDEV_SUBTYPE_MESH_11S,
+};
+
+enum wmi_sta_powersave_param {
+ WMI_STA_PS_PARAM_RX_WAKE_POLICY = 0,
+ WMI_STA_PS_PARAM_TX_WAKE_THRESHOLD = 1,
+ WMI_STA_PS_PARAM_PSPOLL_COUNT = 2,
+ WMI_STA_PS_PARAM_INACTIVITY_TIME = 3,
+ WMI_STA_PS_PARAM_UAPSD = 4,
+};
+
+#define WMI_UAPSD_AC_TYPE_DELI 0
+#define WMI_UAPSD_AC_TYPE_TRIG 1
+
+#define WMI_UAPSD_AC_BIT_MASK(ac, type) \
+ ((type == WMI_UAPSD_AC_TYPE_DELI) ? \
+ (1 << (ac << 1)) : (1 << ((ac << 1) + 1)))
+
+enum wmi_sta_ps_param_uapsd {
+ WMI_STA_PS_UAPSD_AC0_DELIVERY_EN = (1 << 0),
+ WMI_STA_PS_UAPSD_AC0_TRIGGER_EN = (1 << 1),
+ WMI_STA_PS_UAPSD_AC1_DELIVERY_EN = (1 << 2),
+ WMI_STA_PS_UAPSD_AC1_TRIGGER_EN = (1 << 3),
+ WMI_STA_PS_UAPSD_AC2_DELIVERY_EN = (1 << 4),
+ WMI_STA_PS_UAPSD_AC2_TRIGGER_EN = (1 << 5),
+ WMI_STA_PS_UAPSD_AC3_DELIVERY_EN = (1 << 6),
+ WMI_STA_PS_UAPSD_AC3_TRIGGER_EN = (1 << 7),
+};
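+
+/* Sketch (illustration only): WMI_UAPSD_AC_BIT_MASK() above maps an access
+ * category to the per-AC bit pairs in this enum, e.g. for AC 2:
+ *
+ *    WMI_UAPSD_AC_BIT_MASK(2, WMI_UAPSD_AC_TYPE_DELI)
+ *        == WMI_STA_PS_UAPSD_AC2_DELIVERY_EN    // 1 << 4
+ *    WMI_UAPSD_AC_BIT_MASK(2, WMI_UAPSD_AC_TYPE_TRIG)
+ *        == WMI_STA_PS_UAPSD_AC2_TRIGGER_EN     // 1 << 5
+ */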
+
+#define WMI_STA_UAPSD_MAX_INTERVAL_MSEC UINT_MAX
+
+struct wmi_sta_uapsd_auto_trig_param {
+ u32 wmm_ac;
+ u32 user_priority;
+ u32 service_interval;
+ u32 suspend_interval;
+ u32 delay_interval;
+};
+
+struct wmi_sta_uapsd_auto_trig_cmd_fixed_param {
+ u32 vdev_id;
+ struct wmi_mac_addr peer_macaddr;
+ u32 num_ac;
+};
+
+struct wmi_sta_uapsd_auto_trig_arg {
+ u32 wmm_ac;
+ u32 user_priority;
+ u32 service_interval;
+ u32 suspend_interval;
+ u32 delay_interval;
+};
+
+enum wmi_sta_ps_param_tx_wake_threshold {
+ WMI_STA_PS_TX_WAKE_THRESHOLD_NEVER = 0,
+ WMI_STA_PS_TX_WAKE_THRESHOLD_ALWAYS = 1,
+
+ /* A value greater than one specifies how many TX attempts are made per
+ * beacon interval before the STA wakes up.
+ */
+};
+
+/* The maximum number of PS-Poll frames the FW will send in response to
+ * traffic advertised in the TIM before waking up (by sending a null frame
+ * with PS = 0). Value 0 has a special meaning: there is no maximum count
+ * and the FW will send as many PS-Polls as necessary to retrieve the
+ * buffered BUs. This parameter is used when the RX wake policy is
+ * WMI_STA_PS_RX_WAKE_POLICY_POLL_UAPSD and ignored when the RX wake
+ * policy is WMI_STA_PS_RX_WAKE_POLICY_WAKE.
+ */
+enum wmi_sta_ps_param_pspoll_count {
+ WMI_STA_PS_PSPOLL_COUNT_NO_MAX = 0,
+ /* Values greater than 0 indicate the maximum number of PS-Poll frames
+ * FW will send before waking up.
+ */
+};
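+
+/* Usage sketch (illustration only, TLV header setup omitted): configuring
+ * the PS-Poll policy through the generic STA powersave parameter command
+ * defined earlier in this file; vdev_id is hypothetical:
+ *
+ *    struct wmi_sta_powersave_param_cmd cmd = {
+ *        .vdev_id = vdev_id,
+ *        .param = WMI_STA_PS_PARAM_PSPOLL_COUNT,
+ *        .value = WMI_STA_PS_PSPOLL_COUNT_NO_MAX,
+ *    };
+ */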
+
+/* U-APSD configuration of peer station from (re)assoc request and TSPECs */
+enum wmi_ap_ps_param_uapsd {
+ WMI_AP_PS_UAPSD_AC0_DELIVERY_EN = (1 << 0),
+ WMI_AP_PS_UAPSD_AC0_TRIGGER_EN = (1 << 1),
+ WMI_AP_PS_UAPSD_AC1_DELIVERY_EN = (1 << 2),
+ WMI_AP_PS_UAPSD_AC1_TRIGGER_EN = (1 << 3),
+ WMI_AP_PS_UAPSD_AC2_DELIVERY_EN = (1 << 4),
+ WMI_AP_PS_UAPSD_AC2_TRIGGER_EN = (1 << 5),
+ WMI_AP_PS_UAPSD_AC3_DELIVERY_EN = (1 << 6),
+ WMI_AP_PS_UAPSD_AC3_TRIGGER_EN = (1 << 7),
+};
+
+/* U-APSD maximum service period of peer station */
+enum wmi_ap_ps_peer_param_max_sp {
+ WMI_AP_PS_PEER_PARAM_MAX_SP_UNLIMITED = 0,
+ WMI_AP_PS_PEER_PARAM_MAX_SP_2 = 1,
+ WMI_AP_PS_PEER_PARAM_MAX_SP_4 = 2,
+ WMI_AP_PS_PEER_PARAM_MAX_SP_6 = 3,
+ MAX_WMI_AP_PS_PEER_PARAM_MAX_SP,
+};
+
+enum wmi_ap_ps_peer_param {
+ /** Set the UAPSD configuration for a given peer.
+ *
+ * This includes the delivery and trigger enabled state for each AC.
+ * The host MLME needs to set this based on the AP capability and the
+ * station's request set in the association request received from it.
+ *
+ * The lower 8 bits of the value specify the UAPSD configuration
+ * (see enum wmi_ap_ps_param_uapsd).
+ * The default value is 0.
+ */
+ WMI_AP_PS_PEER_PARAM_UAPSD = 0,
+
+ /**
+ * Set the service period for a UAPSD capable station
+ *
+ * The service period from the WME IE in the (re)assoc request frame.
+ *
+ * (see enum wmi_ap_ps_peer_param_max_sp)
+ */
+ WMI_AP_PS_PEER_PARAM_MAX_SP = 1,
+
+ /** Time in seconds for aging out buffered frames
+ * for STA in power save
+ */
+ WMI_AP_PS_PEER_PARAM_AGEOUT_TIME = 2,
+
+ /** Specify frame types that are considered SIFS
+ * RESP trigger frame
+ */
+ WMI_AP_PS_PEER_PARAM_SIFS_RESP_FRMTYPE = 3,
+
+ /** Specifies the trigger state of TID.
+ * Valid only for UAPSD frame type
+ */
+ WMI_AP_PS_PEER_PARAM_SIFS_RESP_UAPSD = 4,
+
+ /* Specifies the WNM sleep state of a STA */
+ WMI_AP_PS_PEER_PARAM_WNM_SLEEP = 5,
+};
+
+#define DISABLE_SIFS_RESPONSE_TRIGGER 0
+
+#define WMI_MAX_KEY_INDEX 3
+#define WMI_MAX_KEY_LEN 32
+
+#define WMI_KEY_PAIRWISE 0x00
+#define WMI_KEY_GROUP 0x01
+
+#define WMI_CIPHER_NONE 0x0 /* clear key */
+#define WMI_CIPHER_WEP 0x1
+#define WMI_CIPHER_TKIP 0x2
+#define WMI_CIPHER_AES_OCB 0x3
+#define WMI_CIPHER_AES_CCM 0x4
+#define WMI_CIPHER_WAPI 0x5
+#define WMI_CIPHER_CKIP 0x6
+#define WMI_CIPHER_AES_CMAC 0x7
+#define WMI_CIPHER_ANY 0x8
+#define WMI_CIPHER_AES_GCM 0x9
+#define WMI_CIPHER_AES_GMAC 0xa
+
+/* Value to disable fixed rate setting */
+#define WMI_FIXED_RATE_NONE (0xffff)
+
+#define ATH11K_RC_VERSION_OFFSET 28
+#define ATH11K_RC_PREAMBLE_OFFSET 8
+#define ATH11K_RC_NSS_OFFSET 5
+
+#define ATH11K_HW_RATE_CODE(rate, nss, preamble) \
+ ((1 << ATH11K_RC_VERSION_OFFSET) | \
+ ((nss) << ATH11K_RC_NSS_OFFSET) | \
+ ((preamble) << ATH11K_RC_PREAMBLE_OFFSET) | \
+ (rate))
+
+/* Preamble types to be used with VDEV fixed rate configuration */
+enum wmi_rate_preamble {
+ WMI_RATE_PREAMBLE_OFDM,
+ WMI_RATE_PREAMBLE_CCK,
+ WMI_RATE_PREAMBLE_HT,
+ WMI_RATE_PREAMBLE_VHT,
+ WMI_RATE_PREAMBLE_HE,
+};
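+
+/* Usage sketch (assumption, not upstream code): composing a fixed-rate
+ * code with ATH11K_HW_RATE_CODE(), here VHT MCS 7; the nss argument is
+ * assumed to be the stream count minus one:
+ *
+ *    u32 rate_code = ATH11K_HW_RATE_CODE(7, 0, WMI_RATE_PREAMBLE_VHT);
+ */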
+
+/**
+ * enum wmi_rtscts_prot_mode - Enable/Disable RTS/CTS and CTS2Self Protection.
+ * @WMI_RTS_CTS_DISABLED: RTS/CTS protection is disabled.
+ * @WMI_USE_RTS_CTS: RTS/CTS Enabled.
+ * @WMI_USE_CTS2SELF: CTS to self protection Enabled.
+ */
+enum wmi_rtscts_prot_mode {
+ WMI_RTS_CTS_DISABLED = 0,
+ WMI_USE_RTS_CTS = 1,
+ WMI_USE_CTS2SELF = 2,
+};
+
+/**
+ * enum wmi_rtscts_profile - Selection of RTS CTS profile along with enabling
+ * protection mode.
+ * @WMI_RTSCTS_FOR_NO_RATESERIES: Neither of rate-series should use RTS-CTS
+ * @WMI_RTSCTS_FOR_SECOND_RATESERIES: Only second rate-series will use RTS-CTS
+ * @WMI_RTSCTS_ACROSS_SW_RETRIES: Only the second rate-series will use RTS-CTS,
+ * but if there's a sw retry, both the rate
+ * series will use RTS-CTS.
+ * @WMI_RTSCTS_ERP: RTS/CTS used for ERP protection for every PPDU.
+ * @WMI_RTSCTS_FOR_ALL_RATESERIES: Enable RTS-CTS for all rate series.
+ */
+enum wmi_rtscts_profile {
+ WMI_RTSCTS_FOR_NO_RATESERIES = 0,
+ WMI_RTSCTS_FOR_SECOND_RATESERIES = 1,
+ WMI_RTSCTS_ACROSS_SW_RETRIES = 2,
+ WMI_RTSCTS_ERP = 3,
+ WMI_RTSCTS_FOR_ALL_RATESERIES = 4,
+};
+
+struct ath11k_hal_reg_cap {
+ u32 eeprom_rd;
+ u32 eeprom_rd_ext;
+ u32 regcap1;
+ u32 regcap2;
+ u32 wireless_modes;
+ u32 low_2ghz_chan;
+ u32 high_2ghz_chan;
+ u32 low_5ghz_chan;
+ u32 high_5ghz_chan;
+};
+
+struct ath11k_mem_chunk {
+ void *vaddr;
+ dma_addr_t paddr;
+ u32 len;
+ u32 req_id;
+};
+
+#define WMI_SKB_HEADROOM sizeof(struct wmi_cmd_hdr)
+
+enum wmi_sta_ps_param_rx_wake_policy {
+ WMI_STA_PS_RX_WAKE_POLICY_WAKE = 0,
+ WMI_STA_PS_RX_WAKE_POLICY_POLL_UAPSD = 1,
+};
+
+/* Do not change existing values! Used by the ath11k_frame_mode
+ * module parameter.
+ */
+enum ath11k_hw_txrx_mode {
+ ATH11K_HW_TXRX_RAW = 0,
+ ATH11K_HW_TXRX_NATIVE_WIFI = 1,
+ ATH11K_HW_TXRX_ETHERNET = 2,
+};
+
+struct wmi_wmm_params {
+ u32 tlv_header;
+ u32 cwmin;
+ u32 cwmax;
+ u32 aifs;
+ u32 txoplimit;
+ u32 acm;
+ u32 no_ack;
+} __packed;
+
+struct wmi_wmm_params_arg {
+ u8 acm;
+ u8 aifs;
+ u16 cwmin;
+ u16 cwmax;
+ u16 txop;
+ u8 no_ack;
+};
+
+struct wmi_vdev_set_wmm_params_cmd {
+ u32 tlv_header;
+ u32 vdev_id;
+ struct wmi_wmm_params wmm_params[4];
+ u32 wmm_param_type;
+} __packed;
+
+struct wmi_wmm_params_all_arg {
+ struct wmi_wmm_params_arg ac_be;
+ struct wmi_wmm_params_arg ac_bk;
+ struct wmi_wmm_params_arg ac_vi;
+ struct wmi_wmm_params_arg ac_vo;
+};
+
+#define ATH11K_TWT_DEF_STA_CONG_TIMER_MS 5000
+#define ATH11K_TWT_DEF_DEFAULT_SLOT_SIZE 10
+#define ATH11K_TWT_DEF_CONGESTION_THRESH_SETUP 50
+#define ATH11K_TWT_DEF_CONGESTION_THRESH_TEARDOWN 20
+#define ATH11K_TWT_DEF_CONGESTION_THRESH_CRITICAL 100
+#define ATH11K_TWT_DEF_INTERFERENCE_THRESH_TEARDOWN 80
+#define ATH11K_TWT_DEF_INTERFERENCE_THRESH_SETUP 50
+#define ATH11K_TWT_DEF_MIN_NO_STA_SETUP 10
+#define ATH11K_TWT_DEF_MIN_NO_STA_TEARDOWN 2
+#define ATH11K_TWT_DEF_NO_OF_BCAST_MCAST_SLOTS 2
+#define ATH11K_TWT_DEF_MIN_NO_TWT_SLOTS 2
+#define ATH11K_TWT_DEF_MAX_NO_STA_TWT 500
+#define ATH11K_TWT_DEF_MODE_CHECK_INTERVAL 10000
+#define ATH11K_TWT_DEF_ADD_STA_SLOT_INTERVAL 1000
+#define ATH11K_TWT_DEF_REMOVE_STA_SLOT_INTERVAL 5000
+
+struct wmi_twt_enable_params {
+ u32 sta_cong_timer_ms;
+ u32 mbss_support;
+ u32 default_slot_size;
+ u32 congestion_thresh_setup;
+ u32 congestion_thresh_teardown;
+ u32 congestion_thresh_critical;
+ u32 interference_thresh_teardown;
+ u32 interference_thresh_setup;
+ u32 min_no_sta_setup;
+ u32 min_no_sta_teardown;
+ u32 no_of_bcast_mcast_slots;
+ u32 min_no_twt_slots;
+ u32 max_no_sta_twt;
+ u32 mode_check_interval;
+ u32 add_sta_slot_interval;
+ u32 remove_sta_slot_interval;
+};
+
+struct wmi_twt_enable_params_cmd {
+ u32 tlv_header;
+ u32 pdev_id;
+ u32 sta_cong_timer_ms;
+ u32 mbss_support;
+ u32 default_slot_size;
+ u32 congestion_thresh_setup;
+ u32 congestion_thresh_teardown;
+ u32 congestion_thresh_critical;
+ u32 interference_thresh_teardown;
+ u32 interference_thresh_setup;
+ u32 min_no_sta_setup;
+ u32 min_no_sta_teardown;
+ u32 no_of_bcast_mcast_slots;
+ u32 min_no_twt_slots;
+ u32 max_no_sta_twt;
+ u32 mode_check_interval;
+ u32 add_sta_slot_interval;
+ u32 remove_sta_slot_interval;
+} __packed;
+
+struct wmi_twt_disable_params_cmd {
+ u32 tlv_header;
+ u32 pdev_id;
+} __packed;
+
+enum WMI_HOST_TWT_COMMAND {
+ WMI_HOST_TWT_COMMAND_REQUEST_TWT = 0,
+ WMI_HOST_TWT_COMMAND_SUGGEST_TWT,
+ WMI_HOST_TWT_COMMAND_DEMAND_TWT,
+ WMI_HOST_TWT_COMMAND_TWT_GROUPING,
+ WMI_HOST_TWT_COMMAND_ACCEPT_TWT,
+ WMI_HOST_TWT_COMMAND_ALTERNATE_TWT,
+ WMI_HOST_TWT_COMMAND_DICTATE_TWT,
+ WMI_HOST_TWT_COMMAND_REJECT_TWT,
+};
+
+#define WMI_TWT_ADD_DIALOG_FLAG_BCAST BIT(8)
+#define WMI_TWT_ADD_DIALOG_FLAG_TRIGGER BIT(9)
+#define WMI_TWT_ADD_DIALOG_FLAG_FLOW_TYPE BIT(10)
+#define WMI_TWT_ADD_DIALOG_FLAG_PROTECTION BIT(11)
+
+struct wmi_twt_add_dialog_params_cmd {
+ u32 tlv_header;
+ u32 vdev_id;
+ struct wmi_mac_addr peer_macaddr;
+ u32 dialog_id;
+ u32 wake_intvl_us;
+ u32 wake_intvl_mantis;
+ u32 wake_dura_us;
+ u32 sp_offset_us;
+ u32 flags;
+} __packed;
+
+struct wmi_twt_add_dialog_params {
+ u32 vdev_id;
+ u8 peer_macaddr[ETH_ALEN];
+ u32 dialog_id;
+ u32 wake_intvl_us;
+ u32 wake_intvl_mantis;
+ u32 wake_dura_us;
+ u32 sp_offset_us;
+ u8 twt_cmd;
+ u8 flag_bcast;
+ u8 flag_trigger;
+ u8 flag_flow_type;
+ u8 flag_protection;
+} __packed;
+
+enum wmi_twt_add_dialog_status {
+ WMI_ADD_TWT_STATUS_OK,
+ WMI_ADD_TWT_STATUS_TWT_NOT_ENABLED,
+ WMI_ADD_TWT_STATUS_USED_DIALOG_ID,
+ WMI_ADD_TWT_STATUS_INVALID_PARAM,
+ WMI_ADD_TWT_STATUS_NOT_READY,
+ WMI_ADD_TWT_STATUS_NO_RESOURCE,
+ WMI_ADD_TWT_STATUS_NO_ACK,
+ WMI_ADD_TWT_STATUS_NO_RESPONSE,
+ WMI_ADD_TWT_STATUS_DENIED,
+ WMI_ADD_TWT_STATUS_UNKNOWN_ERROR,
+};
+
+struct wmi_twt_add_dialog_event {
+ u32 vdev_id;
+ struct wmi_mac_addr peer_macaddr;
+ u32 dialog_id;
+ u32 status;
+} __packed;
+
+struct wmi_twt_del_dialog_params {
+ u32 vdev_id;
+ u8 peer_macaddr[ETH_ALEN];
+ u32 dialog_id;
+} __packed;
+
+struct wmi_twt_del_dialog_params_cmd {
+ u32 tlv_header;
+ u32 vdev_id;
+ struct wmi_mac_addr peer_macaddr;
+ u32 dialog_id;
+} __packed;
+
+struct wmi_twt_pause_dialog_params {
+ u32 vdev_id;
+ u8 peer_macaddr[ETH_ALEN];
+ u32 dialog_id;
+} __packed;
+
+struct wmi_twt_pause_dialog_params_cmd {
+ u32 tlv_header;
+ u32 vdev_id;
+ struct wmi_mac_addr peer_macaddr;
+ u32 dialog_id;
+} __packed;
+
+struct wmi_twt_resume_dialog_params {
+ u32 vdev_id;
+ u8 peer_macaddr[ETH_ALEN];
+ u32 dialog_id;
+ u32 sp_offset_us;
+ u32 next_twt_size;
+} __packed;
+
+struct wmi_twt_resume_dialog_params_cmd {
+ u32 tlv_header;
+ u32 vdev_id;
+ struct wmi_mac_addr peer_macaddr;
+ u32 dialog_id;
+ u32 sp_offset_us;
+ u32 next_twt_size;
+} __packed;
+
+struct wmi_obss_spatial_reuse_params_cmd {
+ u32 tlv_header;
+ u32 pdev_id;
+ u32 enable;
+ s32 obss_min;
+ s32 obss_max;
+ u32 vdev_id;
+} __packed;
+
+struct wmi_pdev_obss_pd_bitmap_cmd {
+ u32 tlv_header;
+ u32 pdev_id;
+ u32 bitmap[2];
+} __packed;
+
+#define ATH11K_BSS_COLOR_COLLISION_SCAN_PERIOD_MS 200
+#define ATH11K_OBSS_COLOR_COLLISION_DETECTION_DISABLE 0
+#define ATH11K_OBSS_COLOR_COLLISION_DETECTION 1
+
+#define ATH11K_BSS_COLOR_COLLISION_DETECTION_STA_PERIOD_MS 10000
+#define ATH11K_BSS_COLOR_COLLISION_DETECTION_AP_PERIOD_MS 5000
+
+enum wmi_bss_color_collision {
+ WMI_BSS_COLOR_COLLISION_DISABLE = 0,
+ WMI_BSS_COLOR_COLLISION_DETECTION,
+ WMI_BSS_COLOR_FREE_SLOT_TIMER_EXPIRY,
+ WMI_BSS_COLOR_FREE_SLOT_AVAILABLE,
+};
+
+struct wmi_obss_color_collision_cfg_params_cmd {
+ u32 tlv_header;
+ u32 vdev_id;
+ u32 flags;
+ u32 evt_type;
+ u32 current_bss_color;
+ u32 detection_period_ms;
+ u32 scan_period_ms;
+ u32 free_slot_expiry_time_ms;
+} __packed;
+
+struct wmi_bss_color_change_enable_params_cmd {
+ u32 tlv_header;
+ u32 vdev_id;
+ u32 enable;
+} __packed;
+
+struct wmi_obss_color_collision_event {
+ u32 vdev_id;
+ u32 evt_type;
+ u64 obss_color_bitmap;
+} __packed;
+
+#define ATH11K_IPV4_TH_SEED_SIZE 5
+#define ATH11K_IPV6_TH_SEED_SIZE 11
+
+struct ath11k_wmi_pdev_lro_config_cmd {
+ u32 tlv_header;
+ u32 lro_enable;
+ u32 res;
+ u32 th_4[ATH11K_IPV4_TH_SEED_SIZE];
+ u32 th_6[ATH11K_IPV6_TH_SEED_SIZE];
+ u32 pdev_id;
+} __packed;
+
+#define ATH11K_WMI_SPECTRAL_COUNT_DEFAULT 0
+#define ATH11K_WMI_SPECTRAL_PERIOD_DEFAULT 224
+#define ATH11K_WMI_SPECTRAL_PRIORITY_DEFAULT 1
+#define ATH11K_WMI_SPECTRAL_FFT_SIZE_DEFAULT 7
+#define ATH11K_WMI_SPECTRAL_GC_ENA_DEFAULT 1
+#define ATH11K_WMI_SPECTRAL_RESTART_ENA_DEFAULT 0
+#define ATH11K_WMI_SPECTRAL_NOISE_FLOOR_REF_DEFAULT -96
+#define ATH11K_WMI_SPECTRAL_INIT_DELAY_DEFAULT 80
+#define ATH11K_WMI_SPECTRAL_NB_TONE_THR_DEFAULT 12
+#define ATH11K_WMI_SPECTRAL_STR_BIN_THR_DEFAULT 8
+#define ATH11K_WMI_SPECTRAL_WB_RPT_MODE_DEFAULT 0
+#define ATH11K_WMI_SPECTRAL_RSSI_RPT_MODE_DEFAULT 0
+#define ATH11K_WMI_SPECTRAL_RSSI_THR_DEFAULT 0xf0
+#define ATH11K_WMI_SPECTRAL_PWR_FORMAT_DEFAULT 0
+#define ATH11K_WMI_SPECTRAL_RPT_MODE_DEFAULT 2
+#define ATH11K_WMI_SPECTRAL_BIN_SCALE_DEFAULT 1
+#define ATH11K_WMI_SPECTRAL_DBM_ADJ_DEFAULT 1
+#define ATH11K_WMI_SPECTRAL_CHN_MASK_DEFAULT 1
+
+struct ath11k_wmi_vdev_spectral_conf_param {
+ u32 vdev_id;
+ u32 scan_count;
+ u32 scan_period;
+ u32 scan_priority;
+ u32 scan_fft_size;
+ u32 scan_gc_ena;
+ u32 scan_restart_ena;
+ u32 scan_noise_floor_ref;
+ u32 scan_init_delay;
+ u32 scan_nb_tone_thr;
+ u32 scan_str_bin_thr;
+ u32 scan_wb_rpt_mode;
+ u32 scan_rssi_rpt_mode;
+ u32 scan_rssi_thr;
+ u32 scan_pwr_format;
+ u32 scan_rpt_mode;
+ u32 scan_bin_scale;
+ u32 scan_dbm_adj;
+ u32 scan_chn_mask;
+} __packed;
+
+struct ath11k_wmi_vdev_spectral_conf_cmd {
+ u32 tlv_header;
+ struct ath11k_wmi_vdev_spectral_conf_param param;
+} __packed;
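+
+/* Usage sketch (illustration only): a spectral scan configuration built
+ * from the defaults above; the remaining scan_* fields follow the same
+ * pattern, and vdev_id is hypothetical:
+ *
+ *    struct ath11k_wmi_vdev_spectral_conf_param param = {
+ *        .vdev_id = vdev_id,
+ *        .scan_count = ATH11K_WMI_SPECTRAL_COUNT_DEFAULT,
+ *        .scan_period = ATH11K_WMI_SPECTRAL_PERIOD_DEFAULT,
+ *        .scan_priority = ATH11K_WMI_SPECTRAL_PRIORITY_DEFAULT,
+ *        .scan_fft_size = ATH11K_WMI_SPECTRAL_FFT_SIZE_DEFAULT,
+ *    };
+ */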
+
+#define ATH11K_WMI_SPECTRAL_TRIGGER_CMD_TRIGGER 1
+#define ATH11K_WMI_SPECTRAL_TRIGGER_CMD_CLEAR 2
+#define ATH11K_WMI_SPECTRAL_ENABLE_CMD_ENABLE 1
+#define ATH11K_WMI_SPECTRAL_ENABLE_CMD_DISABLE 2
+
+struct ath11k_wmi_vdev_spectral_enable_cmd {
+ u32 tlv_header;
+ u32 vdev_id;
+ u32 trigger_cmd;
+ u32 enable_cmd;
+} __packed;
+
+struct ath11k_wmi_pdev_dma_ring_cfg_req_cmd {
+ u32 tlv_header;
+ u32 pdev_id;
+ u32 module_id; /* see enum wmi_direct_buffer_module */
+ u32 base_paddr_lo;
+ u32 base_paddr_hi;
+ u32 head_idx_paddr_lo;
+ u32 head_idx_paddr_hi;
+ u32 tail_idx_paddr_lo;
+ u32 tail_idx_paddr_hi;
+ u32 num_elems; /* Number of elems in the ring */
+ u32 buf_size; /* size of allocated buffer in bytes */
+
+ /* Number of wmi_dma_buf_release_entry packed together */
+ u32 num_resp_per_event;
+
+ /* Target should time out and send whatever response
+ * it has if this time expires; units in milliseconds
+ */
+ u32 event_timeout_ms;
+} __packed;
+
+struct ath11k_wmi_dma_buf_release_fixed_param {
+ u32 pdev_id;
+ u32 module_id;
+ u32 num_buf_release_entry;
+ u32 num_meta_data_entry;
+} __packed;
+
+struct wmi_dma_buf_release_entry {
+ u32 tlv_header;
+ u32 paddr_lo;
+
+ /* Bits 11:0: address of data
+ * Bits 31:12: host context data
+ */
+ u32 paddr_hi;
+} __packed;
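+
+/* Sketch (assumption, not upstream code): recovering the full DMA address
+ * and the host context from the split fields, per the bit layout
+ * documented above (FIELD_GET() is from <linux/bitfield.h>):
+ *
+ *    u64 paddr = ((u64)(entry->paddr_hi & GENMASK(11, 0)) << 32) |
+ *                entry->paddr_lo;
+ *    u32 host_data = FIELD_GET(GENMASK(31, 12), entry->paddr_hi);
+ */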
+
+#define WMI_SPECTRAL_META_INFO1_FREQ1 GENMASK(15, 0)
+#define WMI_SPECTRAL_META_INFO1_FREQ2 GENMASK(31, 16)
+
+#define WMI_SPECTRAL_META_INFO2_CHN_WIDTH GENMASK(7, 0)
+
+struct wmi_dma_buf_release_meta_data {
+ u32 tlv_header;
+ s32 noise_floor[WMI_MAX_CHAINS];
+ u32 reset_delay;
+ u32 freq1;
+ u32 freq2;
+ u32 ch_width;
+} __packed;
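+
+/* Sketch only, assuming the packed layout implied by the masks above
+ * (i.e. that freq1 carries the meta_info1 word holding both centre
+ * frequencies and ch_width carries the meta_info2 word): a consumer
+ * would unpack the fields with FIELD_GET() from <linux/bitfield.h>:
+ *
+ *	u16 cfreq1 = FIELD_GET(WMI_SPECTRAL_META_INFO1_FREQ1, meta->freq1);
+ *	u16 cfreq2 = FIELD_GET(WMI_SPECTRAL_META_INFO1_FREQ2, meta->freq1);
+ *	u8 width = FIELD_GET(WMI_SPECTRAL_META_INFO2_CHN_WIDTH, meta->ch_width);
+ */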
+
+enum wmi_fils_discovery_cmd_type {
+ WMI_FILS_DISCOVERY_CMD,
+ WMI_UNSOL_BCAST_PROBE_RESP,
+};
+
+struct wmi_fils_discovery_cmd {
+ u32 tlv_header;
+ u32 vdev_id;
+ u32 interval;
+ u32 config; /* enum wmi_fils_discovery_cmd_type */
+} __packed;
+
+struct wmi_fils_discovery_tmpl_cmd {
+ u32 tlv_header;
+ u32 vdev_id;
+ u32 buf_len;
+} __packed;
+
+struct wmi_probe_tmpl_cmd {
+ u32 tlv_header;
+ u32 vdev_id;
+ u32 buf_len;
+} __packed;
+
+struct target_resource_config {
+ u32 num_vdevs;
+ u32 num_peers;
+ u32 num_active_peers;
+ u32 num_offload_peers;
+ u32 num_offload_reorder_buffs;
+ u32 num_peer_keys;
+ u32 num_tids;
+ u32 ast_skid_limit;
+ u32 tx_chain_mask;
+ u32 rx_chain_mask;
+ u32 rx_timeout_pri[4];
+ u32 rx_decap_mode;
+ u32 scan_max_pending_req;
+ u32 bmiss_offload_max_vdev;
+ u32 roam_offload_max_vdev;
+ u32 roam_offload_max_ap_profiles;
+ u32 num_mcast_groups;
+ u32 num_mcast_table_elems;
+ u32 mcast2ucast_mode;
+ u32 tx_dbg_log_size;
+ u32 num_wds_entries;
+ u32 dma_burst_size;
+ u32 mac_aggr_delim;
+ u32 rx_skip_defrag_timeout_dup_detection_check;
+ u32 vow_config;
+ u32 gtk_offload_max_vdev;
+ u32 num_msdu_desc;
+ u32 max_frag_entries;
+ u32 max_peer_ext_stats;
+ u32 smart_ant_cap;
+ u32 bk_minfree;
+ u32 be_minfree;
+ u32 vi_minfree;
+ u32 vo_minfree;
+ u32 rx_batchmode;
+ u32 tt_support;
+ u32 flag1;
+ u32 iphdr_pad_config;
+ u32 qwrap_config:16,
+ alloc_frag_desc_for_data_pkt:16;
+ u32 num_tdls_vdevs;
+ u32 num_tdls_conn_table_entries;
+ u32 beacon_tx_offload_max_vdev;
+ u32 num_multicast_filter_entries;
+ u32 num_wow_filters;
+ u32 num_keep_alive_pattern;
+ u32 keep_alive_pattern_size;
+ u32 max_tdls_concurrent_sleep_sta;
+ u32 max_tdls_concurrent_buffer_sta;
+ u32 wmi_send_separate;
+ u32 num_ocb_vdevs;
+ u32 num_ocb_channels;
+ u32 num_ocb_schedules;
+ u32 num_ns_ext_tuples_cfg;
+ u32 bpf_instruction_size;
+ u32 max_bssid_rx_filters;
+ u32 use_pdev_id;
+ u32 peer_map_unmap_v2_support;
+ u32 sched_params;
+ u32 twt_ap_pdev_count;
+ u32 twt_ap_sta_count;
+ u8 is_reg_cc_ext_event_supported;
+ u32 ema_max_vap_cnt;
+ u32 ema_max_profile_period;
+};
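+
+/* target_resource_config above is the host-side staging copy of the
+ * firmware resource configuration: it is filled in by
+ * ath11k_wmi_cmd_init() (declared below) and encoded into the TLV
+ * WMI init command before WMI_INIT is issued.
+ */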
+
+enum wmi_debug_log_param {
+ WMI_DEBUG_LOG_PARAM_LOG_LEVEL = 0x1,
+ WMI_DEBUG_LOG_PARAM_VDEV_ENABLE,
+ WMI_DEBUG_LOG_PARAM_VDEV_DISABLE,
+ WMI_DEBUG_LOG_PARAM_VDEV_ENABLE_BITMAP,
+ WMI_DEBUG_LOG_PARAM_MOD_ENABLE_BITMAP,
+ WMI_DEBUG_LOG_PARAM_WOW_MOD_ENABLE_BITMAP,
+};
+
+struct wmi_debug_log_config_cmd_fixed_param {
+ u32 tlv_header;
+ u32 dbg_log_param;
+ u32 value;
+} __packed;
+
+#define WMI_MAX_MEM_REQS 32
+
+#define MAX_RADIOS 3
+
+#define WMI_SERVICE_READY_TIMEOUT_HZ (5 * HZ)
+#define WMI_SEND_TIMEOUT_HZ (3 * HZ)
+
+enum ath11k_wmi_peer_ps_state {
+ WMI_PEER_PS_STATE_OFF,
+ WMI_PEER_PS_STATE_ON,
+ WMI_PEER_PS_STATE_DISABLED,
+};
+
+enum wmi_peer_ps_supported_bitmap {
+ /* Used to indicate that power save state change is valid */
+ WMI_PEER_PS_VALID = 0x1,
+ WMI_PEER_PS_STATE_TIMESTAMP = 0x2,
+};
+
+struct wmi_peer_sta_ps_state_chg_event {
+ struct wmi_mac_addr peer_macaddr;
+ u32 peer_ps_state;
+ u32 ps_supported_bitmap;
+ u32 peer_ps_valid;
+ u32 peer_ps_timestamp;
+} __packed;
+
+struct ath11k_wmi_base {
+ struct ath11k_base *ab;
+ struct ath11k_pdev_wmi wmi[MAX_RADIOS];
+ enum ath11k_htc_ep_id wmi_endpoint_id[MAX_RADIOS];
+ u32 max_msg_len[MAX_RADIOS];
+
+ struct completion service_ready;
+ struct completion unified_ready;
+ DECLARE_BITMAP(svc_map, WMI_MAX_EXT2_SERVICE);
+ wait_queue_head_t tx_credits_wq;
+ const struct wmi_peer_flags_map *peer_flags;
+ u32 num_mem_chunks;
+ u32 rx_decap_mode;
+ struct wmi_host_mem_chunk mem_chunks[WMI_MAX_MEM_REQS];
+
+ enum wmi_host_hw_mode_config_type preferred_hw_mode;
+ struct target_resource_config wlan_resource_config;
+
+ struct ath11k_targ_cap *targ_cap;
+};
+
+/* Definition of HW data filtering */
+enum hw_data_filter_type {
+ WMI_HW_DATA_FILTER_DROP_NON_ARP_BC = BIT(0),
+ WMI_HW_DATA_FILTER_DROP_NON_ICMPV6_MC = BIT(1),
+};
+
+struct wmi_hw_data_filter_cmd {
+ u32 tlv_header;
+ u32 vdev_id;
+ u32 enable;
+ u32 hw_filter_bitmap;
+} __packed;
+
+/* WOW structures */
+enum wmi_wow_wakeup_event {
+ WOW_BMISS_EVENT = 0,
+ WOW_BETTER_AP_EVENT,
+ WOW_DEAUTH_RECVD_EVENT,
+ WOW_MAGIC_PKT_RECVD_EVENT,
+ WOW_GTK_ERR_EVENT,
+ WOW_FOURWAY_HSHAKE_EVENT,
+ WOW_EAPOL_RECVD_EVENT,
+ WOW_NLO_DETECTED_EVENT,
+ WOW_DISASSOC_RECVD_EVENT,
+ WOW_PATTERN_MATCH_EVENT,
+ WOW_CSA_IE_EVENT,
+ WOW_PROBE_REQ_WPS_IE_EVENT,
+ WOW_AUTH_REQ_EVENT,
+ WOW_ASSOC_REQ_EVENT,
+ WOW_HTT_EVENT,
+ WOW_RA_MATCH_EVENT,
+ WOW_HOST_AUTO_SHUTDOWN_EVENT,
+ WOW_IOAC_MAGIC_EVENT,
+ WOW_IOAC_SHORT_EVENT,
+ WOW_IOAC_EXTEND_EVENT,
+ WOW_IOAC_TIMER_EVENT,
+ WOW_DFS_PHYERR_RADAR_EVENT,
+ WOW_BEACON_EVENT,
+ WOW_CLIENT_KICKOUT_EVENT,
+ WOW_EVENT_MAX,
+};
+
+enum wmi_wow_interface_cfg {
+ WOW_IFACE_PAUSE_ENABLED,
+ WOW_IFACE_PAUSE_DISABLED
+};
+
+#define C2S(x) case x: return #x
+
+static inline const char *wow_wakeup_event(enum wmi_wow_wakeup_event ev)
+{
+ switch (ev) {
+ C2S(WOW_BMISS_EVENT);
+ C2S(WOW_BETTER_AP_EVENT);
+ C2S(WOW_DEAUTH_RECVD_EVENT);
+ C2S(WOW_MAGIC_PKT_RECVD_EVENT);
+ C2S(WOW_GTK_ERR_EVENT);
+ C2S(WOW_FOURWAY_HSHAKE_EVENT);
+ C2S(WOW_EAPOL_RECVD_EVENT);
+ C2S(WOW_NLO_DETECTED_EVENT);
+ C2S(WOW_DISASSOC_RECVD_EVENT);
+ C2S(WOW_PATTERN_MATCH_EVENT);
+ C2S(WOW_CSA_IE_EVENT);
+ C2S(WOW_PROBE_REQ_WPS_IE_EVENT);
+ C2S(WOW_AUTH_REQ_EVENT);
+ C2S(WOW_ASSOC_REQ_EVENT);
+ C2S(WOW_HTT_EVENT);
+ C2S(WOW_RA_MATCH_EVENT);
+ C2S(WOW_HOST_AUTO_SHUTDOWN_EVENT);
+ C2S(WOW_IOAC_MAGIC_EVENT);
+ C2S(WOW_IOAC_SHORT_EVENT);
+ C2S(WOW_IOAC_EXTEND_EVENT);
+ C2S(WOW_IOAC_TIMER_EVENT);
+ C2S(WOW_DFS_PHYERR_RADAR_EVENT);
+ C2S(WOW_BEACON_EVENT);
+ C2S(WOW_CLIENT_KICKOUT_EVENT);
+ C2S(WOW_EVENT_MAX);
+ default:
+ return NULL;
+ }
+}
+
+enum wmi_wow_wake_reason {
+ WOW_REASON_UNSPECIFIED = -1,
+ WOW_REASON_NLOD = 0,
+ WOW_REASON_AP_ASSOC_LOST,
+ WOW_REASON_LOW_RSSI,
+ WOW_REASON_DEAUTH_RECVD,
+ WOW_REASON_DISASSOC_RECVD,
+ WOW_REASON_GTK_HS_ERR,
+ WOW_REASON_EAP_REQ,
+ WOW_REASON_FOURWAY_HS_RECV,
+ WOW_REASON_TIMER_INTR_RECV,
+ WOW_REASON_PATTERN_MATCH_FOUND,
+ WOW_REASON_RECV_MAGIC_PATTERN,
+ WOW_REASON_P2P_DISC,
+ WOW_REASON_WLAN_HB,
+ WOW_REASON_CSA_EVENT,
+ WOW_REASON_PROBE_REQ_WPS_IE_RECV,
+ WOW_REASON_AUTH_REQ_RECV,
+ WOW_REASON_ASSOC_REQ_RECV,
+ WOW_REASON_HTT_EVENT,
+ WOW_REASON_RA_MATCH,
+ WOW_REASON_HOST_AUTO_SHUTDOWN,
+ WOW_REASON_IOAC_MAGIC_EVENT,
+ WOW_REASON_IOAC_SHORT_EVENT,
+ WOW_REASON_IOAC_EXTEND_EVENT,
+ WOW_REASON_IOAC_TIMER_EVENT,
+ WOW_REASON_ROAM_HO,
+ WOW_REASON_DFS_PHYERR_RADADR_EVENT,
+ WOW_REASON_BEACON_RECV,
+ WOW_REASON_CLIENT_KICKOUT_EVENT,
+ WOW_REASON_PAGE_FAULT = 0x3a,
+ WOW_REASON_DEBUG_TEST = 0xFF,
+};
+
+static inline const char *wow_reason(enum wmi_wow_wake_reason reason)
+{
+ switch (reason) {
+ C2S(WOW_REASON_UNSPECIFIED);
+ C2S(WOW_REASON_NLOD);
+ C2S(WOW_REASON_AP_ASSOC_LOST);
+ C2S(WOW_REASON_LOW_RSSI);
+ C2S(WOW_REASON_DEAUTH_RECVD);
+ C2S(WOW_REASON_DISASSOC_RECVD);
+ C2S(WOW_REASON_GTK_HS_ERR);
+ C2S(WOW_REASON_EAP_REQ);
+ C2S(WOW_REASON_FOURWAY_HS_RECV);
+ C2S(WOW_REASON_TIMER_INTR_RECV);
+ C2S(WOW_REASON_PATTERN_MATCH_FOUND);
+ C2S(WOW_REASON_RECV_MAGIC_PATTERN);
+ C2S(WOW_REASON_P2P_DISC);
+ C2S(WOW_REASON_WLAN_HB);
+ C2S(WOW_REASON_CSA_EVENT);
+ C2S(WOW_REASON_PROBE_REQ_WPS_IE_RECV);
+ C2S(WOW_REASON_AUTH_REQ_RECV);
+ C2S(WOW_REASON_ASSOC_REQ_RECV);
+ C2S(WOW_REASON_HTT_EVENT);
+ C2S(WOW_REASON_RA_MATCH);
+ C2S(WOW_REASON_HOST_AUTO_SHUTDOWN);
+ C2S(WOW_REASON_IOAC_MAGIC_EVENT);
+ C2S(WOW_REASON_IOAC_SHORT_EVENT);
+ C2S(WOW_REASON_IOAC_EXTEND_EVENT);
+ C2S(WOW_REASON_IOAC_TIMER_EVENT);
+ C2S(WOW_REASON_ROAM_HO);
+ C2S(WOW_REASON_DFS_PHYERR_RADADR_EVENT);
+ C2S(WOW_REASON_BEACON_RECV);
+ C2S(WOW_REASON_CLIENT_KICKOUT_EVENT);
+ C2S(WOW_REASON_PAGE_FAULT);
+ C2S(WOW_REASON_DEBUG_TEST);
+ default:
+ return NULL;
+ }
+}
+
+#undef C2S
+
+struct wmi_wow_ev_arg {
+ u32 vdev_id;
+ u32 flag;
+ enum wmi_wow_wake_reason wake_reason;
+ u32 data_len;
+};
+
+enum wmi_tlv_pattern_type {
+ WOW_PATTERN_MIN = 0,
+ WOW_BITMAP_PATTERN = WOW_PATTERN_MIN,
+ WOW_IPV4_SYNC_PATTERN,
+ WOW_IPV6_SYNC_PATTERN,
+ WOW_WILD_CARD_PATTERN,
+ WOW_TIMER_PATTERN,
+ WOW_MAGIC_PATTERN,
+ WOW_IPV6_RA_PATTERN,
+ WOW_IOAC_PKT_PATTERN,
+ WOW_IOAC_TMR_PATTERN,
+ WOW_PATTERN_MAX
+};
+
+#define WOW_DEFAULT_BITMAP_PATTERN_SIZE 148
+#define WOW_DEFAULT_BITMASK_SIZE 148
+
+#define WOW_MIN_PATTERN_SIZE 1
+#define WOW_MAX_PATTERN_SIZE 148
+#define WOW_MAX_PKT_OFFSET 128
+#define WOW_HDR_LEN (sizeof(struct ieee80211_hdr_3addr) + \
+ sizeof(struct rfc1042_hdr))
+#define WOW_MAX_REDUCE (WOW_HDR_LEN - sizeof(struct ethhdr) - \
+ offsetof(struct ieee80211_hdr_3addr, addr1))
+
+struct wmi_wow_add_del_event_cmd {
+ u32 tlv_header;
+ u32 vdev_id;
+ u32 is_add;
+ u32 event_bitmap;
+} __packed;
+
+struct wmi_wow_enable_cmd {
+ u32 tlv_header;
+ u32 enable;
+ u32 pause_iface_config;
+ u32 flags;
+} __packed;
+
+struct wmi_wow_host_wakeup_ind {
+ u32 tlv_header;
+ u32 reserved;
+} __packed;
+
+struct wmi_tlv_wow_event_info {
+ u32 vdev_id;
+ u32 flag;
+ u32 wake_reason;
+ u32 data_len;
+} __packed;
+
+struct wmi_wow_bitmap_pattern {
+ u32 tlv_header;
+ u8 patternbuf[WOW_DEFAULT_BITMAP_PATTERN_SIZE];
+ u8 bitmaskbuf[WOW_DEFAULT_BITMASK_SIZE];
+ u32 pattern_offset;
+ u32 pattern_len;
+ u32 bitmask_len;
+ u32 pattern_id;
+} __packed;
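+
+/* The pattern handed to ath11k_wmi_wow_add_pattern() (declared below)
+ * is copied into patternbuf together with a per-byte mask in
+ * bitmaskbuf; pattern_offset is taken relative to the start of the
+ * frame as the firmware sees it (802.11 format in native-wifi decap).
+ */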
+
+struct wmi_wow_add_pattern_cmd {
+ u32 tlv_header;
+ u32 vdev_id;
+ u32 pattern_id;
+ u32 pattern_type;
+} __packed;
+
+struct wmi_wow_del_pattern_cmd {
+ u32 tlv_header;
+ u32 vdev_id;
+ u32 pattern_id;
+ u32 pattern_type;
+} __packed;
+
+#define WMI_PNO_MAX_SCHED_SCAN_PLANS 2
+#define WMI_PNO_MAX_SCHED_SCAN_PLAN_INT 7200
+#define WMI_PNO_MAX_SCHED_SCAN_PLAN_ITRNS 100
+#define WMI_PNO_MAX_NETW_CHANNELS 26
+#define WMI_PNO_MAX_NETW_CHANNELS_EX 60
+#define WMI_PNO_MAX_SUPP_NETWORKS WLAN_SCAN_PARAMS_MAX_SSID
+#define WMI_PNO_MAX_IE_LENGTH WLAN_SCAN_PARAMS_MAX_IE_LEN
+
+/* size based on the dot11 declaration without extra IEs, as we will not carry those for PNO */
+#define WMI_PNO_MAX_PB_REQ_SIZE 450
+
+#define WMI_PNO_24G_DEFAULT_CH 1
+#define WMI_PNO_5G_DEFAULT_CH 36
+
+#define WMI_ACTIVE_MAX_CHANNEL_TIME 40
+#define WMI_PASSIVE_MAX_CHANNEL_TIME 110
+
+/* SSID broadcast type */
+enum wmi_ssid_bcast_type {
+ BCAST_UNKNOWN = 0,
+ BCAST_NORMAL = 1,
+ BCAST_HIDDEN = 2,
+};
+
+#define WMI_NLO_MAX_SSIDS 16
+#define WMI_NLO_MAX_CHAN 48
+
+#define WMI_NLO_CONFIG_STOP BIT(0)
+#define WMI_NLO_CONFIG_START BIT(1)
+#define WMI_NLO_CONFIG_RESET BIT(2)
+#define WMI_NLO_CONFIG_SLOW_SCAN BIT(4)
+#define WMI_NLO_CONFIG_FAST_SCAN BIT(5)
+#define WMI_NLO_CONFIG_SSID_HIDE_EN BIT(6)
+
+/* This bit is used to indicate if EPNO or supplicant PNO is enabled.
+ * Only one of them can be enabled at a given time
+ */
+#define WMI_NLO_CONFIG_ENLO BIT(7)
+#define WMI_NLO_CONFIG_SCAN_PASSIVE BIT(8)
+#define WMI_NLO_CONFIG_ENLO_RESET BIT(9)
+#define WMI_NLO_CONFIG_SPOOFED_MAC_IN_PROBE_REQ BIT(10)
+#define WMI_NLO_CONFIG_RANDOM_SEQ_NO_IN_PROBE_REQ BIT(11)
+#define WMI_NLO_CONFIG_ENABLE_IE_WHITELIST_IN_PROBE_REQ BIT(12)
+#define WMI_NLO_CONFIG_ENABLE_CNLO_RSSI_CONFIG BIT(13)
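+
+/* A start request typically sets WMI_NLO_CONFIG_START together with
+ * WMI_NLO_CONFIG_SSID_HIDE_EN, while a stop request sets
+ * WMI_NLO_CONFIG_STOP; see ath11k_wmi_wow_config_pno().
+ */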
+
+struct wmi_nlo_ssid_param {
+ u32 valid;
+ struct wmi_ssid ssid;
+} __packed;
+
+struct wmi_nlo_enc_param {
+ u32 valid;
+ u32 enc_type;
+} __packed;
+
+struct wmi_nlo_auth_param {
+ u32 valid;
+ u32 auth_type;
+} __packed;
+
+struct wmi_nlo_bcast_nw_param {
+ u32 valid;
+ u32 bcast_nw_type;
+} __packed;
+
+struct wmi_nlo_rssi_param {
+ u32 valid;
+ s32 rssi;
+} __packed;
+
+struct nlo_configured_parameters {
+ /* TLV tag and len */
+ u32 tlv_header;
+ struct wmi_nlo_ssid_param ssid;
+ struct wmi_nlo_enc_param enc_type;
+ struct wmi_nlo_auth_param auth_type;
+ struct wmi_nlo_rssi_param rssi_cond;
+
+ /* indicates if the SSID is hidden or not */
+ struct wmi_nlo_bcast_nw_param bcast_nw_type;
+} __packed;
+
+struct wmi_network_type {
+ struct wmi_ssid ssid;
+ u32 authentication;
+ u32 encryption;
+ u32 bcast_nw_type;
+ u8 channel_count;
+ u16 channels[WMI_PNO_MAX_NETW_CHANNELS_EX];
+ s32 rssi_threshold;
+};
+
+struct wmi_pno_scan_req {
+ u8 enable;
+ u8 vdev_id;
+ u8 uc_networks_count;
+ struct wmi_network_type a_networks[WMI_PNO_MAX_SUPP_NETWORKS];
+ u32 fast_scan_period;
+ u32 slow_scan_period;
+ u8 fast_scan_max_cycles;
+
+ bool do_passive_scan;
+
+ u32 delay_start_time;
+ u32 active_min_time;
+ u32 active_max_time;
+ u32 passive_min_time;
+ u32 passive_max_time;
+
+ /* mac address randomization attributes */
+ u32 enable_pno_scan_randomization;
+ u8 mac_addr[ETH_ALEN];
+ u8 mac_addr_mask[ETH_ALEN];
+};
+
+struct wmi_wow_nlo_config_cmd {
+ u32 tlv_header;
+ u32 flags;
+ u32 vdev_id;
+ u32 fast_scan_max_cycles;
+ u32 active_dwell_time;
+ u32 passive_dwell_time;
+ u32 probe_bundle_size;
+
+ /* ART = IRT */
+ u32 rest_time;
+
+ /* Max value that can be reached after SBM */
+ u32 max_rest_time;
+
+ /* SBM */
+ u32 scan_backoff_multiplier;
+
+ /* SCBM */
+ u32 fast_scan_period;
+
+ /* specific to Windows */
+ u32 slow_scan_period;
+
+ u32 no_of_ssids;
+
+ u32 num_of_channels;
+
+ /* NLO scan start delay time in milliseconds */
+ u32 delay_start_time;
+
+ /* MAC Address to use in Probe Req as SA */
+ struct wmi_mac_addr mac_addr;
+
+ /* Mask on which MAC has to be randomized */
+ struct wmi_mac_addr mac_mask;
+
+ /* IE bitmap to use in Probe Req */
+ u32 ie_bitmap[8];
+
+ /* Number of vendor OUIs in the vendor_oui[] TLV that follows */
+ u32 num_vendor_oui;
+
+ /* Number of connected NLO band preferences */
+ u32 num_cnlo_band_pref;
+
+ /* The TLVs will follow.
+ * nlo_configured_parameters nlo_list[];
+ * u32 channel_list[num_of_channels];
+ */
+} __packed;
+
+#define WMI_MAX_NS_OFFLOADS 2
+#define WMI_MAX_ARP_OFFLOADS 2
+
+#define WMI_ARPOL_FLAGS_VALID BIT(0)
+#define WMI_ARPOL_FLAGS_MAC_VALID BIT(1)
+#define WMI_ARPOL_FLAGS_REMOTE_IP_VALID BIT(2)
+
+struct wmi_arp_offload_tuple {
+ u32 tlv_header;
+ u32 flags;
+ u8 target_ipaddr[4];
+ u8 remote_ipaddr[4];
+ struct wmi_mac_addr target_mac;
+} __packed;
+
+#define WMI_NSOL_FLAGS_VALID BIT(0)
+#define WMI_NSOL_FLAGS_MAC_VALID BIT(1)
+#define WMI_NSOL_FLAGS_REMOTE_IP_VALID BIT(2)
+#define WMI_NSOL_FLAGS_IS_IPV6_ANYCAST BIT(3)
+
+#define WMI_NSOL_MAX_TARGET_IPS 2
+
+struct wmi_ns_offload_tuple {
+ u32 tlv_header;
+ u32 flags;
+ u8 target_ipaddr[WMI_NSOL_MAX_TARGET_IPS][16];
+ u8 solicitation_ipaddr[16];
+ u8 remote_ipaddr[16];
+ struct wmi_mac_addr target_mac;
+} __packed;
+
+struct wmi_set_arp_ns_offload_cmd {
+ u32 tlv_header;
+ u32 flags;
+ u32 vdev_id;
+ u32 num_ns_ext_tuples;
+ /* The TLVs follow:
+ * wmi_ns_offload_tuple ns_tuples[WMI_MAX_NS_OFFLOADS];
+ * wmi_arp_offload_tuple arp_tuples[WMI_MAX_ARP_OFFLOADS];
+ * wmi_ns_offload_tuple ns_ext_tuples[num_ns_ext_tuples];
+ */
+} __packed;
+
+#define GTK_OFFLOAD_OPCODE_MASK 0xFF000000
+#define GTK_OFFLOAD_ENABLE_OPCODE 0x01000000
+#define GTK_OFFLOAD_DISABLE_OPCODE 0x02000000
+#define GTK_OFFLOAD_REQUEST_STATUS_OPCODE 0x04000000
+
+#define GTK_OFFLOAD_KEK_BYTES 16
+#define GTK_OFFLOAD_KCK_BYTES 16
+#define GTK_REPLAY_COUNTER_BYTES 8
+#define WMI_MAX_KEY_LEN 32
+#define IGTK_PN_SIZE 6
+
+struct wmi_replayc_cnt {
+ union {
+ u8 counter[GTK_REPLAY_COUNTER_BYTES];
+ struct {
+ u32 word0;
+ u32 word1;
+ } __packed;
+ } __packed;
+} __packed;
+
+struct wmi_gtk_offload_status_event {
+ u32 vdev_id;
+ u32 flags;
+ u32 refresh_cnt;
+ struct wmi_replayc_cnt replay_ctr;
+ u8 igtk_key_index;
+ u8 igtk_key_length;
+ u8 igtk_key_rsc[IGTK_PN_SIZE];
+ u8 igtk_key[WMI_MAX_KEY_LEN];
+ u8 gtk_key_index;
+ u8 gtk_key_length;
+ u8 gtk_key_rsc[GTK_REPLAY_COUNTER_BYTES];
+ u8 gtk_key[WMI_MAX_KEY_LEN];
+} __packed;
+
+struct wmi_gtk_rekey_offload_cmd {
+ u32 tlv_header;
+ u32 vdev_id;
+ u32 flags;
+ u8 kek[GTK_OFFLOAD_KEK_BYTES];
+ u8 kck[GTK_OFFLOAD_KCK_BYTES];
+ u8 replay_ctr[GTK_REPLAY_COUNTER_BYTES];
+} __packed;
+
+#define BIOS_SAR_TABLE_LEN (22)
+#define BIOS_SAR_RSVD1_LEN (6)
+#define BIOS_SAR_RSVD2_LEN (18)
+
+struct wmi_pdev_set_sar_table_cmd {
+ u32 tlv_header;
+ u32 pdev_id;
+ u32 sar_len;
+ u32 rsvd_len;
+} __packed;
+
+struct wmi_pdev_set_geo_table_cmd {
+ u32 tlv_header;
+ u32 pdev_id;
+ u32 rsvd_len;
+} __packed;
+
+struct wmi_sta_keepalive_cmd {
+ u32 tlv_header;
+ u32 vdev_id;
+ u32 enabled;
+
+ /* WMI_STA_KEEPALIVE_METHOD_ */
+ u32 method;
+
+ /* in seconds */
+ u32 interval;
+
+ /* following this structure is the TLV for struct
+ * wmi_sta_keepalive_arp_resp
+ */
+} __packed;
+
+struct wmi_sta_keepalive_arp_resp {
+ u32 tlv_header;
+ u32 src_ip4_addr;
+ u32 dest_ip4_addr;
+ struct wmi_mac_addr dest_mac_addr;
+} __packed;
+
+struct wmi_sta_keepalive_arg {
+ u32 vdev_id;
+ u32 enabled;
+ u32 method;
+ u32 interval;
+ u32 src_ip4_addr;
+ u32 dest_ip4_addr;
+ const u8 dest_mac_addr[ETH_ALEN];
+};
+
+enum wmi_sta_keepalive_method {
+ WMI_STA_KEEPALIVE_METHOD_NULL_FRAME = 1,
+ WMI_STA_KEEPALIVE_METHOD_UNSOLICITED_ARP_RESPONSE = 2,
+ WMI_STA_KEEPALIVE_METHOD_ETHERNET_LOOPBACK = 3,
+ WMI_STA_KEEPALIVE_METHOD_GRATUITOUS_ARP_REQUEST = 4,
+ WMI_STA_KEEPALIVE_METHOD_MGMT_VENDOR_ACTION = 5,
+};
+
+#define WMI_STA_KEEPALIVE_INTERVAL_DEFAULT 30
+#define WMI_STA_KEEPALIVE_INTERVAL_DISABLE 0
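+
+/* Illustrative sketch only (vdev_id is a placeholder): arming the
+ * NULL-frame keepalive with the default interval through
+ * ath11k_wmi_sta_keepalive(), declared below:
+ *
+ *	struct wmi_sta_keepalive_arg arg = {
+ *		.vdev_id  = vdev_id,
+ *		.enabled  = 1,
+ *		.method   = WMI_STA_KEEPALIVE_METHOD_NULL_FRAME,
+ *		.interval = WMI_STA_KEEPALIVE_INTERVAL_DEFAULT,
+ *	};
+ *	ret = ath11k_wmi_sta_keepalive(ar, &arg);
+ */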
+
+const void **ath11k_wmi_tlv_parse_alloc(struct ath11k_base *ab, const void *ptr,
+ size_t len, gfp_t gfp);
+int ath11k_wmi_cmd_send(struct ath11k_pdev_wmi *wmi, struct sk_buff *skb,
+ u32 cmd_id);
+struct sk_buff *ath11k_wmi_alloc_skb(struct ath11k_wmi_base *wmi_sc, u32 len);
+int ath11k_wmi_mgmt_send(struct ath11k *ar, u32 vdev_id, u32 buf_id,
+ struct sk_buff *frame);
+int ath11k_wmi_bcn_tmpl(struct ath11k *ar, u32 vdev_id,
+ struct ieee80211_mutable_offsets *offs,
+ struct sk_buff *bcn, u32 ema_param);
+int ath11k_wmi_vdev_down(struct ath11k *ar, u8 vdev_id);
+int ath11k_wmi_vdev_up(struct ath11k *ar, u32 vdev_id, u32 aid,
+ const u8 *bssid, u8 *tx_bssid, u32 nontx_profile_idx,
+ u32 nontx_profile_cnt);
+int ath11k_wmi_vdev_stop(struct ath11k *ar, u8 vdev_id);
+int ath11k_wmi_vdev_start(struct ath11k *ar, struct wmi_vdev_start_req_arg *arg,
+ bool restart);
+int ath11k_wmi_set_peer_param(struct ath11k *ar, const u8 *peer_addr,
+ u32 vdev_id, u32 param_id, u32 param_val);
+int ath11k_wmi_pdev_set_param(struct ath11k *ar, u32 param_id,
+ u32 param_value, u8 pdev_id);
+int ath11k_wmi_pdev_set_ps_mode(struct ath11k *ar, int vdev_id,
+ enum wmi_sta_ps_mode psmode);
+int ath11k_wmi_wait_for_unified_ready(struct ath11k_base *ab);
+int ath11k_wmi_cmd_init(struct ath11k_base *ab);
+int ath11k_wmi_wait_for_service_ready(struct ath11k_base *ab);
+int ath11k_wmi_connect(struct ath11k_base *ab);
+int ath11k_wmi_pdev_attach(struct ath11k_base *ab,
+ u8 pdev_id);
+int ath11k_wmi_attach(struct ath11k_base *ab);
+void ath11k_wmi_detach(struct ath11k_base *ab);
+int ath11k_wmi_vdev_create(struct ath11k *ar, u8 *macaddr,
+ struct vdev_create_params *param);
+int ath11k_wmi_peer_rx_reorder_queue_setup(struct ath11k *ar, int vdev_id,
+ const u8 *addr, dma_addr_t paddr,
+ u8 tid, u8 ba_window_size_valid,
+ u32 ba_window_size);
+int ath11k_wmi_send_peer_create_cmd(struct ath11k *ar,
+ struct peer_create_params *param);
+int ath11k_wmi_vdev_set_param_cmd(struct ath11k *ar, u32 vdev_id,
+ u32 param_id, u32 param_value);
+
+int ath11k_wmi_set_sta_ps_param(struct ath11k *ar, u32 vdev_id,
+ u32 param, u32 param_value);
+int ath11k_wmi_force_fw_hang_cmd(struct ath11k *ar, u32 type, u32 delay_time_ms);
+int ath11k_wmi_send_peer_delete_cmd(struct ath11k *ar,
+ const u8 *peer_addr, u8 vdev_id);
+int ath11k_wmi_vdev_delete(struct ath11k *ar, u8 vdev_id);
+void ath11k_wmi_start_scan_init(struct ath11k *ar, struct scan_req_params *arg);
+int ath11k_wmi_send_scan_start_cmd(struct ath11k *ar,
+ struct scan_req_params *params);
+int ath11k_wmi_send_scan_stop_cmd(struct ath11k *ar,
+ struct scan_cancel_param *param);
+int ath11k_wmi_send_wmm_update_cmd_tlv(struct ath11k *ar, u32 vdev_id,
+ struct wmi_wmm_params_all_arg *param);
+int ath11k_wmi_pdev_suspend(struct ath11k *ar, u32 suspend_opt,
+ u32 pdev_id);
+int ath11k_wmi_pdev_resume(struct ath11k *ar, u32 pdev_id);
+
+int ath11k_wmi_send_peer_assoc_cmd(struct ath11k *ar,
+ struct peer_assoc_params *param);
+int ath11k_wmi_vdev_install_key(struct ath11k *ar,
+ struct wmi_vdev_install_key_arg *arg);
+int ath11k_wmi_pdev_bss_chan_info_request(struct ath11k *ar,
+ enum wmi_bss_chan_info_req_type type);
+int ath11k_wmi_send_stats_request_cmd(struct ath11k *ar,
+ struct stats_request_params *param);
+int ath11k_wmi_send_pdev_temperature_cmd(struct ath11k *ar);
+int ath11k_wmi_send_peer_flush_tids_cmd(struct ath11k *ar,
+ u8 peer_addr[ETH_ALEN],
+ struct peer_flush_params *param);
+int ath11k_wmi_send_set_ap_ps_param_cmd(struct ath11k *ar, u8 *peer_addr,
+ struct ap_ps_params *param);
+int ath11k_wmi_send_scan_chan_list_cmd(struct ath11k *ar,
+ struct scan_chan_list_params *chan_list);
+int ath11k_wmi_send_dfs_phyerr_offload_enable_cmd(struct ath11k *ar,
+ u32 pdev_id);
+int ath11k_wmi_addba_clear_resp(struct ath11k *ar, u32 vdev_id, const u8 *mac);
+int ath11k_wmi_addba_send(struct ath11k *ar, u32 vdev_id, const u8 *mac,
+ u32 tid, u32 buf_size);
+int ath11k_wmi_addba_set_resp(struct ath11k *ar, u32 vdev_id, const u8 *mac,
+ u32 tid, u32 status);
+int ath11k_wmi_delba_send(struct ath11k *ar, u32 vdev_id, const u8 *mac,
+ u32 tid, u32 initiator, u32 reason);
+int ath11k_wmi_send_bcn_offload_control_cmd(struct ath11k *ar,
+ u32 vdev_id, u32 bcn_ctrl_op);
+int ath11k_wmi_send_set_current_country_cmd(struct ath11k *ar,
+ struct wmi_set_current_country_params *param);
+int
+ath11k_wmi_send_init_country_cmd(struct ath11k *ar,
+ struct wmi_init_country_params init_cc_param);
+
+int ath11k_wmi_send_11d_scan_start_cmd(struct ath11k *ar,
+ struct wmi_11d_scan_start_params *param);
+int ath11k_wmi_send_11d_scan_stop_cmd(struct ath11k *ar, u32 vdev_id);
+
+int
+ath11k_wmi_send_thermal_mitigation_param_cmd(struct ath11k *ar,
+ struct thermal_mitigation_params *param);
+int ath11k_wmi_pdev_pktlog_enable(struct ath11k *ar, u32 pktlog_filter);
+int ath11k_wmi_pdev_pktlog_disable(struct ath11k *ar);
+int ath11k_wmi_pdev_peer_pktlog_filter(struct ath11k *ar, u8 *addr, u8 enable);
+int
+ath11k_wmi_rx_reord_queue_remove(struct ath11k *ar,
+ struct rx_reorder_queue_remove_params *param);
+int ath11k_wmi_send_pdev_set_regdomain(struct ath11k *ar,
+ struct pdev_set_regdomain_params *param);
+int ath11k_wmi_pull_fw_stats(struct ath11k_base *ab, struct sk_buff *skb,
+ struct ath11k_fw_stats *stats);
+void ath11k_wmi_fw_stats_fill(struct ath11k *ar,
+ struct ath11k_fw_stats *fw_stats, u32 stats_id,
+ char *buf);
+int ath11k_wmi_simulate_radar(struct ath11k *ar);
+void ath11k_wmi_fill_default_twt_params(struct wmi_twt_enable_params *twt_params);
+int ath11k_wmi_send_twt_enable_cmd(struct ath11k *ar, u32 pdev_id,
+ struct wmi_twt_enable_params *params);
+int ath11k_wmi_send_twt_disable_cmd(struct ath11k *ar, u32 pdev_id);
+int ath11k_wmi_send_twt_add_dialog_cmd(struct ath11k *ar,
+ struct wmi_twt_add_dialog_params *params);
+int ath11k_wmi_send_twt_del_dialog_cmd(struct ath11k *ar,
+ struct wmi_twt_del_dialog_params *params);
+int ath11k_wmi_send_twt_pause_dialog_cmd(struct ath11k *ar,
+ struct wmi_twt_pause_dialog_params *params);
+int ath11k_wmi_send_twt_resume_dialog_cmd(struct ath11k *ar,
+ struct wmi_twt_resume_dialog_params *params);
+int ath11k_wmi_send_obss_spr_cmd(struct ath11k *ar, u32 vdev_id,
+ struct ieee80211_he_obss_pd *he_obss_pd);
+int ath11k_wmi_pdev_set_srg_bss_color_bitmap(struct ath11k *ar, u32 *bitmap);
+int ath11k_wmi_pdev_set_srg_patial_bssid_bitmap(struct ath11k *ar, u32 *bitmap);
+int ath11k_wmi_pdev_srg_obss_color_enable_bitmap(struct ath11k *ar,
+ u32 *bitmap);
+int ath11k_wmi_pdev_srg_obss_bssid_enable_bitmap(struct ath11k *ar,
+ u32 *bitmap);
+int ath11k_wmi_pdev_non_srg_obss_color_enable_bitmap(struct ath11k *ar,
+ u32 *bitmap);
+int ath11k_wmi_pdev_non_srg_obss_bssid_enable_bitmap(struct ath11k *ar,
+ u32 *bitmap);
+int ath11k_wmi_send_obss_color_collision_cfg_cmd(struct ath11k *ar, u32 vdev_id,
+ u8 bss_color, u32 period,
+ bool enable);
+int ath11k_wmi_send_bss_color_change_enable_cmd(struct ath11k *ar, u32 vdev_id,
+ bool enable);
+int ath11k_wmi_pdev_lro_cfg(struct ath11k *ar, int pdev_id);
+int ath11k_wmi_pdev_dma_ring_cfg(struct ath11k *ar,
+ struct ath11k_wmi_pdev_dma_ring_cfg_req_cmd *param);
+int ath11k_wmi_vdev_spectral_enable(struct ath11k *ar, u32 vdev_id,
+ u32 trigger, u32 enable);
+int ath11k_wmi_vdev_spectral_conf(struct ath11k *ar,
+ struct ath11k_wmi_vdev_spectral_conf_param *param);
+int ath11k_wmi_fils_discovery_tmpl(struct ath11k *ar, u32 vdev_id,
+ struct sk_buff *tmpl);
+int ath11k_wmi_fils_discovery(struct ath11k *ar, u32 vdev_id, u32 interval,
+ bool unsol_bcast_probe_resp_enabled);
+int ath11k_wmi_probe_resp_tmpl(struct ath11k *ar, u32 vdev_id,
+ struct sk_buff *tmpl);
+int ath11k_wmi_set_hw_mode(struct ath11k_base *ab,
+ enum wmi_host_hw_mode_config_type mode);
+int ath11k_wmi_wow_host_wakeup_ind(struct ath11k *ar);
+int ath11k_wmi_wow_enable(struct ath11k *ar);
+int ath11k_wmi_scan_prob_req_oui(struct ath11k *ar,
+ const u8 mac_addr[ETH_ALEN]);
+int ath11k_wmi_fw_dbglog_cfg(struct ath11k *ar, u32 *module_id_bitmap,
+ struct ath11k_fw_dbglog *dbglog);
+int ath11k_wmi_wow_config_pno(struct ath11k *ar, u32 vdev_id,
+ struct wmi_pno_scan_req *pno_scan);
+int ath11k_wmi_wow_del_pattern(struct ath11k *ar, u32 vdev_id, u32 pattern_id);
+int ath11k_wmi_wow_add_pattern(struct ath11k *ar, u32 vdev_id, u32 pattern_id,
+ const u8 *pattern, const u8 *mask,
+ int pattern_len, int pattern_offset);
+int ath11k_wmi_wow_add_wakeup_event(struct ath11k *ar, u32 vdev_id,
+ enum wmi_wow_wakeup_event event,
+ u32 enable);
+int ath11k_wmi_hw_data_filter_cmd(struct ath11k *ar, u32 vdev_id,
+ u32 filter_bitmap, bool enable);
+int ath11k_wmi_arp_ns_offload(struct ath11k *ar,
+ struct ath11k_vif *arvif, bool enable);
+int ath11k_wmi_gtk_rekey_offload(struct ath11k *ar,
+ struct ath11k_vif *arvif, bool enable);
+int ath11k_wmi_gtk_rekey_getinfo(struct ath11k *ar,
+ struct ath11k_vif *arvif);
+int ath11k_wmi_pdev_set_bios_sar_table_param(struct ath11k *ar, const u8 *sar_val);
+int ath11k_wmi_pdev_set_bios_geo_table_param(struct ath11k *ar);
+int ath11k_wmi_sta_keepalive(struct ath11k *ar,
+ const struct wmi_sta_keepalive_arg *arg);
+
+#endif
diff --git a/drivers/net/wireless/ath/ath11k/wow.c b/drivers/net/wireless/ath/ath11k/wow.c
new file mode 100644
index 0000000000..99d8ba45a7
--- /dev/null
+++ b/drivers/net/wireless/ath/ath11k/wow.c
@@ -0,0 +1,878 @@
+// SPDX-License-Identifier: BSD-3-Clause-Clear
+/*
+ * Copyright (c) 2020 The Linux Foundation. All rights reserved.
+ * Copyright (c) 2022-2023 Qualcomm Innovation Center, Inc. All rights reserved.
+ */
+
+#include <linux/delay.h>
+
+#include "mac.h"
+
+#include <net/mac80211.h>
+#include "core.h"
+#include "hif.h"
+#include "debug.h"
+#include "wmi.h"
+#include "wow.h"
+#include "dp_rx.h"
+
+static const struct wiphy_wowlan_support ath11k_wowlan_support = {
+ .flags = WIPHY_WOWLAN_DISCONNECT |
+ WIPHY_WOWLAN_MAGIC_PKT |
+ WIPHY_WOWLAN_SUPPORTS_GTK_REKEY |
+ WIPHY_WOWLAN_GTK_REKEY_FAILURE,
+ .pattern_min_len = WOW_MIN_PATTERN_SIZE,
+ .pattern_max_len = WOW_MAX_PATTERN_SIZE,
+ .max_pkt_offset = WOW_MAX_PKT_OFFSET,
+};
+
+int ath11k_wow_enable(struct ath11k_base *ab)
+{
+ struct ath11k *ar = ath11k_ab_to_ar(ab, 0);
+ int i, ret;
+
+ clear_bit(ATH11K_FLAG_HTC_SUSPEND_COMPLETE, &ab->dev_flags);
+
+ for (i = 0; i < ATH11K_WOW_RETRY_NUM; i++) {
+ reinit_completion(&ab->htc_suspend);
+
+ ret = ath11k_wmi_wow_enable(ar);
+ if (ret) {
+ ath11k_warn(ab, "failed to issue wow enable: %d\n", ret);
+ return ret;
+ }
+
+ ret = wait_for_completion_timeout(&ab->htc_suspend, 3 * HZ);
+ if (ret == 0) {
+ ath11k_warn(ab,
+ "timed out while waiting for htc suspend completion\n");
+ return -ETIMEDOUT;
+ }
+
+ if (test_bit(ATH11K_FLAG_HTC_SUSPEND_COMPLETE, &ab->dev_flags))
+ /* success, suspend complete received */
+ return 0;
+
+ ath11k_warn(ab, "htc suspend not complete, retrying (try %d)\n",
+ i);
+ msleep(ATH11K_WOW_RETRY_WAIT_MS);
+ }
+
+ ath11k_warn(ab, "htc suspend not complete, failing after %d tries\n", i);
+
+ return -ETIMEDOUT;
+}
+
+int ath11k_wow_wakeup(struct ath11k_base *ab)
+{
+ struct ath11k *ar = ath11k_ab_to_ar(ab, 0);
+ int ret;
+
+ /* In the case of WCN6750, WoW wakeup is done
+ * by sending an SMP2P power save exit message
+ * to the target processor.
+ */
+ if (ab->hw_params.smp2p_wow_exit)
+ return 0;
+
+ reinit_completion(&ab->wow.wakeup_completed);
+
+ ret = ath11k_wmi_wow_host_wakeup_ind(ar);
+ if (ret) {
+ ath11k_warn(ab, "failed to send wow wakeup indication: %d\n",
+ ret);
+ return ret;
+ }
+
+ ret = wait_for_completion_timeout(&ab->wow.wakeup_completed, 3 * HZ);
+ if (ret == 0) {
+ ath11k_warn(ab, "timed out while waiting for wow wakeup completion\n");
+ return -ETIMEDOUT;
+ }
+
+ return 0;
+}
+
+static int ath11k_wow_vif_cleanup(struct ath11k_vif *arvif)
+{
+ struct ath11k *ar = arvif->ar;
+ int i, ret;
+
+ for (i = 0; i < WOW_EVENT_MAX; i++) {
+ ret = ath11k_wmi_wow_add_wakeup_event(ar, arvif->vdev_id, i, 0);
+ if (ret) {
+ ath11k_warn(ar->ab, "failed to issue wow wakeup for event %s on vdev %i: %d\n",
+ wow_wakeup_event(i), arvif->vdev_id, ret);
+ return ret;
+ }
+ }
+
+ for (i = 0; i < ar->wow.max_num_patterns; i++) {
+ ret = ath11k_wmi_wow_del_pattern(ar, arvif->vdev_id, i);
+ if (ret) {
+ ath11k_warn(ar->ab, "failed to delete wow pattern %d for vdev %i: %d\n",
+ i, arvif->vdev_id, ret);
+ return ret;
+ }
+ }
+
+ return 0;
+}
+
+static int ath11k_wow_cleanup(struct ath11k *ar)
+{
+ struct ath11k_vif *arvif;
+ int ret;
+
+ lockdep_assert_held(&ar->conf_mutex);
+
+ list_for_each_entry(arvif, &ar->arvifs, list) {
+ ret = ath11k_wow_vif_cleanup(arvif);
+ if (ret) {
+ ath11k_warn(ar->ab, "failed to clean wow wakeups on vdev %i: %d\n",
+ arvif->vdev_id, ret);
+ return ret;
+ }
+ }
+
+ return 0;
+}
+
+/* Convert an 802.3 frame to an 802.11 frame.
+ * +------------+-----------+--------+----------------+
+ * 802.3: |dest mac(6B)|src mac(6B)|type(2B)| body... |
+ * +------------+-----------+--------+----------------+
+ * |__ |_______ |____________ |________
+ * | | | |
+ * +--+------------+----+-----------+---------------+-----------+
+ * 802.11: |4B|dest mac(6B)| 6B |src mac(6B)| 8B |type(2B)| body... |
+ * +--+------------+----+-----------+---------------+-----------+
+ */
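+/* Worked example: a match on the destination MAC (802.3 offset 0) moves
+ * to 802.11 offset 4 (addr1); a match on the EtherType (802.3 offset 12)
+ * moves to offset 30, just past the RFC 1042 SNAP header.
+ */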
+static void ath11k_wow_convert_8023_to_80211(struct cfg80211_pkt_pattern *new,
+ const struct cfg80211_pkt_pattern *old)
+{
+ u8 hdr_8023_pattern[ETH_HLEN] = {};
+ u8 hdr_8023_bit_mask[ETH_HLEN] = {};
+ u8 hdr_80211_pattern[WOW_HDR_LEN] = {};
+ u8 hdr_80211_bit_mask[WOW_HDR_LEN] = {};
+
+ int total_len = old->pkt_offset + old->pattern_len;
+ int hdr_80211_end_offset;
+
+ struct ieee80211_hdr_3addr *new_hdr_pattern =
+ (struct ieee80211_hdr_3addr *)hdr_80211_pattern;
+ struct ieee80211_hdr_3addr *new_hdr_mask =
+ (struct ieee80211_hdr_3addr *)hdr_80211_bit_mask;
+ struct ethhdr *old_hdr_pattern = (struct ethhdr *)hdr_8023_pattern;
+ struct ethhdr *old_hdr_mask = (struct ethhdr *)hdr_8023_bit_mask;
+ int hdr_len = sizeof(*new_hdr_pattern);
+
+ struct rfc1042_hdr *new_rfc_pattern =
+ (struct rfc1042_hdr *)(hdr_80211_pattern + hdr_len);
+ struct rfc1042_hdr *new_rfc_mask =
+ (struct rfc1042_hdr *)(hdr_80211_bit_mask + hdr_len);
+ int rfc_len = sizeof(*new_rfc_pattern);
+
+ memcpy(hdr_8023_pattern + old->pkt_offset,
+ old->pattern, ETH_HLEN - old->pkt_offset);
+ memcpy(hdr_8023_bit_mask + old->pkt_offset,
+ old->mask, ETH_HLEN - old->pkt_offset);
+
+ /* Copy destination address */
+ memcpy(new_hdr_pattern->addr1, old_hdr_pattern->h_dest, ETH_ALEN);
+ memcpy(new_hdr_mask->addr1, old_hdr_mask->h_dest, ETH_ALEN);
+
+ /* Copy source address */
+ memcpy(new_hdr_pattern->addr3, old_hdr_pattern->h_source, ETH_ALEN);
+ memcpy(new_hdr_mask->addr3, old_hdr_mask->h_source, ETH_ALEN);
+
+ /* Copy logic link type */
+ memcpy(&new_rfc_pattern->snap_type,
+ &old_hdr_pattern->h_proto,
+ sizeof(old_hdr_pattern->h_proto));
+ memcpy(&new_rfc_mask->snap_type,
+ &old_hdr_mask->h_proto,
+ sizeof(old_hdr_mask->h_proto));
+
+ /* Compute new pkt_offset */
+ if (old->pkt_offset < ETH_ALEN)
+ new->pkt_offset = old->pkt_offset +
+ offsetof(struct ieee80211_hdr_3addr, addr1);
+ else if (old->pkt_offset < offsetof(struct ethhdr, h_proto))
+ new->pkt_offset = old->pkt_offset +
+ offsetof(struct ieee80211_hdr_3addr, addr3) -
+ offsetof(struct ethhdr, h_source);
+ else
+ new->pkt_offset = old->pkt_offset + hdr_len + rfc_len - ETH_HLEN;
+
+ /* Compute new hdr end offset */
+ if (total_len > ETH_HLEN)
+ hdr_80211_end_offset = hdr_len + rfc_len;
+ else if (total_len > offsetof(struct ethhdr, h_proto))
+ hdr_80211_end_offset = hdr_len + rfc_len + total_len - ETH_HLEN;
+ else if (total_len > ETH_ALEN)
+ hdr_80211_end_offset = total_len - ETH_ALEN +
+ offsetof(struct ieee80211_hdr_3addr, addr3);
+ else
+ hdr_80211_end_offset = total_len +
+ offsetof(struct ieee80211_hdr_3addr, addr1);
+
+ new->pattern_len = hdr_80211_end_offset - new->pkt_offset;
+
+ memcpy((u8 *)new->pattern,
+ hdr_80211_pattern + new->pkt_offset,
+ new->pattern_len);
+ memcpy((u8 *)new->mask,
+ hdr_80211_bit_mask + new->pkt_offset,
+ new->pattern_len);
+
+ if (total_len > ETH_HLEN) {
+ /* Copy frame body */
+ memcpy((u8 *)new->pattern + new->pattern_len,
+ (void *)old->pattern + ETH_HLEN - old->pkt_offset,
+ total_len - ETH_HLEN);
+ memcpy((u8 *)new->mask + new->pattern_len,
+ (void *)old->mask + ETH_HLEN - old->pkt_offset,
+ total_len - ETH_HLEN);
+
+ new->pattern_len += total_len - ETH_HLEN;
+ }
+}
+
+static int ath11k_wmi_pno_check_and_convert(struct ath11k *ar, u32 vdev_id,
+ struct cfg80211_sched_scan_request *nd_config,
+ struct wmi_pno_scan_req *pno)
+{
+ int i, j;
+ u8 ssid_len;
+
+ pno->enable = 1;
+ pno->vdev_id = vdev_id;
+ pno->uc_networks_count = nd_config->n_match_sets;
+
+ if (!pno->uc_networks_count ||
+ pno->uc_networks_count > WMI_PNO_MAX_SUPP_NETWORKS)
+ return -EINVAL;
+
+ if (nd_config->n_channels > WMI_PNO_MAX_NETW_CHANNELS_EX)
+ return -EINVAL;
+
+ /* Fill in the per-profile params */
+ for (i = 0; i < pno->uc_networks_count; i++) {
+ ssid_len = nd_config->match_sets[i].ssid.ssid_len;
+
+ if (ssid_len == 0 || ssid_len > 32)
+ return -EINVAL;
+
+ pno->a_networks[i].ssid.ssid_len = ssid_len;
+
+ memcpy(pno->a_networks[i].ssid.ssid,
+ nd_config->match_sets[i].ssid.ssid,
+ nd_config->match_sets[i].ssid.ssid_len);
+ pno->a_networks[i].authentication = 0;
+ pno->a_networks[i].encryption = 0;
+ pno->a_networks[i].bcast_nw_type = 0;
+
+ /* Copy the list of valid channels into the request */
+ pno->a_networks[i].channel_count = nd_config->n_channels;
+ pno->a_networks[i].rssi_threshold = nd_config->match_sets[i].rssi_thold;
+
+ for (j = 0; j < nd_config->n_channels; j++) {
+ pno->a_networks[i].channels[j] =
+ nd_config->channels[j]->center_freq;
+ }
+ }
+
+ /* set scan to passive if no SSIDs are specified in the request */
+ if (nd_config->n_ssids == 0)
+ pno->do_passive_scan = true;
+ else
+ pno->do_passive_scan = false;
+
+ for (i = 0; i < nd_config->n_ssids; i++) {
+ j = 0;
+ while (j < pno->uc_networks_count) {
+ if (pno->a_networks[j].ssid.ssid_len ==
+ nd_config->ssids[i].ssid_len &&
+ (memcmp(pno->a_networks[j].ssid.ssid,
+ nd_config->ssids[i].ssid,
+ pno->a_networks[j].ssid.ssid_len) == 0)) {
+ pno->a_networks[j].bcast_nw_type = BCAST_HIDDEN;
+ break;
+ }
+ j++;
+ }
+ }
+
+ if (nd_config->n_scan_plans == 2) {
+ pno->fast_scan_period = nd_config->scan_plans[0].interval * MSEC_PER_SEC;
+ pno->fast_scan_max_cycles = nd_config->scan_plans[0].iterations;
+ pno->slow_scan_period =
+ nd_config->scan_plans[1].interval * MSEC_PER_SEC;
+ } else if (nd_config->n_scan_plans == 1) {
+ pno->fast_scan_period = nd_config->scan_plans[0].interval * MSEC_PER_SEC;
+ pno->fast_scan_max_cycles = 1;
+ pno->slow_scan_period = nd_config->scan_plans[0].interval * MSEC_PER_SEC;
+ } else {
+ ath11k_warn(ar->ab, "Invalid number of scan plans %d !!",
+ nd_config->n_scan_plans);
+ }
+
+ if (nd_config->flags & NL80211_SCAN_FLAG_RANDOM_ADDR) {
+ /* enable mac randomization */
+ pno->enable_pno_scan_randomization = 1;
+ memcpy(pno->mac_addr, nd_config->mac_addr, ETH_ALEN);
+ memcpy(pno->mac_addr_mask, nd_config->mac_addr_mask, ETH_ALEN);
+ }
+
+ pno->delay_start_time = nd_config->delay;
+
+ /* Current FW does not support min-max range for dwell time */
+ pno->active_max_time = WMI_ACTIVE_MAX_CHANNEL_TIME;
+ pno->passive_max_time = WMI_PASSIVE_MAX_CHANNEL_TIME;
+
+ return 0;
+}
+
+static int ath11k_vif_wow_set_wakeups(struct ath11k_vif *arvif,
+ struct cfg80211_wowlan *wowlan)
+{
+ int ret, i;
+ unsigned long wow_mask = 0;
+ struct ath11k *ar = arvif->ar;
+ const struct cfg80211_pkt_pattern *patterns = wowlan->patterns;
+ int pattern_id = 0;
+
+ /* Setup requested WOW features */
+ switch (arvif->vdev_type) {
+ case WMI_VDEV_TYPE_IBSS:
+ __set_bit(WOW_BEACON_EVENT, &wow_mask);
+ fallthrough;
+ case WMI_VDEV_TYPE_AP:
+ __set_bit(WOW_DEAUTH_RECVD_EVENT, &wow_mask);
+ __set_bit(WOW_DISASSOC_RECVD_EVENT, &wow_mask);
+ __set_bit(WOW_PROBE_REQ_WPS_IE_EVENT, &wow_mask);
+ __set_bit(WOW_AUTH_REQ_EVENT, &wow_mask);
+ __set_bit(WOW_ASSOC_REQ_EVENT, &wow_mask);
+ __set_bit(WOW_HTT_EVENT, &wow_mask);
+ __set_bit(WOW_RA_MATCH_EVENT, &wow_mask);
+ break;
+ case WMI_VDEV_TYPE_STA:
+ if (wowlan->disconnect) {
+ __set_bit(WOW_DEAUTH_RECVD_EVENT, &wow_mask);
+ __set_bit(WOW_DISASSOC_RECVD_EVENT, &wow_mask);
+ __set_bit(WOW_BMISS_EVENT, &wow_mask);
+ __set_bit(WOW_CSA_IE_EVENT, &wow_mask);
+ }
+
+ if (wowlan->magic_pkt)
+ __set_bit(WOW_MAGIC_PKT_RECVD_EVENT, &wow_mask);
+
+ if (wowlan->nd_config) {
+ struct wmi_pno_scan_req *pno;
+ int ret;
+
+ pno = kzalloc(sizeof(*pno), GFP_KERNEL);
+ if (!pno)
+ return -ENOMEM;
+
+ ar->nlo_enabled = true;
+
+ ret = ath11k_wmi_pno_check_and_convert(ar, arvif->vdev_id,
+ wowlan->nd_config, pno);
+ if (!ret) {
+ ath11k_wmi_wow_config_pno(ar, arvif->vdev_id, pno);
+ __set_bit(WOW_NLO_DETECTED_EVENT, &wow_mask);
+ }
+
+ kfree(pno);
+ }
+ break;
+ default:
+ break;
+ }
+
+ for (i = 0; i < wowlan->n_patterns; i++) {
+ u8 bitmask[WOW_MAX_PATTERN_SIZE] = {};
+ u8 ath_pattern[WOW_MAX_PATTERN_SIZE] = {};
+ u8 ath_bitmask[WOW_MAX_PATTERN_SIZE] = {};
+ struct cfg80211_pkt_pattern new_pattern = {};
+ struct cfg80211_pkt_pattern old_pattern = patterns[i];
+ int j;
+
+ new_pattern.pattern = ath_pattern;
+ new_pattern.mask = ath_bitmask;
+ if (patterns[i].pattern_len > WOW_MAX_PATTERN_SIZE)
+ continue;
+ /* expand cfg80211's one-bit-per-byte mask into the
+ * one-byte-per-byte mask the firmware expects
+ */
+ for (j = 0; j < patterns[i].pattern_len; j++)
+ if (patterns[i].mask[j / 8] & BIT(j % 8))
+ bitmask[j] = 0xff;
+ old_pattern.mask = bitmask;
+
+ if (ar->wmi->wmi_ab->wlan_resource_config.rx_decap_mode ==
+ ATH11K_HW_TXRX_NATIVE_WIFI) {
+ if (patterns[i].pkt_offset < ETH_HLEN) {
+ u8 pattern_ext[WOW_MAX_PATTERN_SIZE] = {};
+
+ memcpy(pattern_ext, old_pattern.pattern,
+ old_pattern.pattern_len);
+ old_pattern.pattern = pattern_ext;
+ ath11k_wow_convert_8023_to_80211(&new_pattern,
+ &old_pattern);
+ } else {
+ new_pattern = old_pattern;
+ new_pattern.pkt_offset += WOW_HDR_LEN - ETH_HLEN;
+ }
+ }
+
+ if (WARN_ON(new_pattern.pattern_len > WOW_MAX_PATTERN_SIZE))
+ return -EINVAL;
+
+ ret = ath11k_wmi_wow_add_pattern(ar, arvif->vdev_id,
+ pattern_id,
+ new_pattern.pattern,
+ new_pattern.mask,
+ new_pattern.pattern_len,
+ new_pattern.pkt_offset);
+ if (ret) {
+ ath11k_warn(ar->ab, "failed to add pattern %i to vdev %i: %d\n",
+ pattern_id,
+ arvif->vdev_id, ret);
+ return ret;
+ }
+
+ pattern_id++;
+ __set_bit(WOW_PATTERN_MATCH_EVENT, &wow_mask);
+ }
+
+ for (i = 0; i < WOW_EVENT_MAX; i++) {
+ if (!test_bit(i, &wow_mask))
+ continue;
+ ret = ath11k_wmi_wow_add_wakeup_event(ar, arvif->vdev_id, i, 1);
+ if (ret) {
+ ath11k_warn(ar->ab, "failed to enable wakeup event %s on vdev %i: %d\n",
+ wow_wakeup_event(i), arvif->vdev_id, ret);
+ return ret;
+ }
+ }
+
+ return 0;
+}
+
+static int ath11k_wow_set_wakeups(struct ath11k *ar,
+ struct cfg80211_wowlan *wowlan)
+{
+ struct ath11k_vif *arvif;
+ int ret;
+
+ lockdep_assert_held(&ar->conf_mutex);
+
+ list_for_each_entry(arvif, &ar->arvifs, list) {
+ ret = ath11k_vif_wow_set_wakeups(arvif, wowlan);
+ if (ret) {
+ ath11k_warn(ar->ab, "failed to set wow wakeups on vdev %i: %d\n",
+ arvif->vdev_id, ret);
+ return ret;
+ }
+ }
+
+ return 0;
+}
+
+static int ath11k_vif_wow_clean_nlo(struct ath11k_vif *arvif)
+{
+ int ret = 0;
+ struct ath11k *ar = arvif->ar;
+
+ switch (arvif->vdev_type) {
+ case WMI_VDEV_TYPE_STA:
+ if (ar->nlo_enabled) {
+ struct wmi_pno_scan_req *pno;
+
+ pno = kzalloc(sizeof(*pno), GFP_KERNEL);
+ if (!pno)
+ return -ENOMEM;
+
+ pno->enable = 0;
+ ar->nlo_enabled = false;
+ ret = ath11k_wmi_wow_config_pno(ar, arvif->vdev_id, pno);
+ kfree(pno);
+ }
+ break;
+ default:
+ break;
+ }
+ return ret;
+}
+
+static int ath11k_wow_nlo_cleanup(struct ath11k *ar)
+{
+ struct ath11k_vif *arvif;
+ int ret;
+
+ lockdep_assert_held(&ar->conf_mutex);
+
+ list_for_each_entry(arvif, &ar->arvifs, list) {
+ ret = ath11k_vif_wow_clean_nlo(arvif);
+ if (ret) {
+ ath11k_warn(ar->ab, "failed to clean nlo settings on vdev %i: %d\n",
+ arvif->vdev_id, ret);
+ return ret;
+ }
+ }
+
+ return 0;
+}
+
+static int ath11k_wow_set_hw_filter(struct ath11k *ar)
+{
+ struct ath11k_vif *arvif;
+ u32 bitmap;
+ int ret;
+
+ lockdep_assert_held(&ar->conf_mutex);
+
+ list_for_each_entry(arvif, &ar->arvifs, list) {
+ bitmap = WMI_HW_DATA_FILTER_DROP_NON_ICMPV6_MC |
+ WMI_HW_DATA_FILTER_DROP_NON_ARP_BC;
+ ret = ath11k_wmi_hw_data_filter_cmd(ar, arvif->vdev_id,
+ bitmap,
+ true);
+ if (ret) {
+ ath11k_warn(ar->ab, "failed to set hw data filter on vdev %i: %d\n",
+ arvif->vdev_id, ret);
+ return ret;
+ }
+ }
+
+ return 0;
+}
+
+static int ath11k_wow_clear_hw_filter(struct ath11k *ar)
+{
+ struct ath11k_vif *arvif;
+ int ret;
+
+ lockdep_assert_held(&ar->conf_mutex);
+
+ list_for_each_entry(arvif, &ar->arvifs, list) {
+ ret = ath11k_wmi_hw_data_filter_cmd(ar, arvif->vdev_id, 0, false);
+
+ if (ret) {
+ ath11k_warn(ar->ab, "failed to clear hw data filter on vdev %i: %d\n",
+ arvif->vdev_id, ret);
+ return ret;
+ }
+ }
+
+ return 0;
+}
+
+static int ath11k_wow_arp_ns_offload(struct ath11k *ar, bool enable)
+{
+ struct ath11k_vif *arvif;
+ int ret;
+
+ lockdep_assert_held(&ar->conf_mutex);
+
+ list_for_each_entry(arvif, &ar->arvifs, list) {
+ if (arvif->vdev_type != WMI_VDEV_TYPE_STA)
+ continue;
+
+ ret = ath11k_wmi_arp_ns_offload(ar, arvif, enable);
+
+ if (ret) {
+ ath11k_warn(ar->ab, "failed to set arp ns offload vdev %i: enable %d, ret %d\n",
+ arvif->vdev_id, enable, ret);
+ return ret;
+ }
+ }
+
+ return 0;
+}
+
+static int ath11k_gtk_rekey_offload(struct ath11k *ar, bool enable)
+{
+ struct ath11k_vif *arvif;
+ int ret;
+
+ lockdep_assert_held(&ar->conf_mutex);
+
+ list_for_each_entry(arvif, &ar->arvifs, list) {
+ if (arvif->vdev_type != WMI_VDEV_TYPE_STA ||
+ !arvif->is_up ||
+ !arvif->rekey_data.enable_offload)
+ continue;
+
+ /* get rekey info before disable rekey offload */
+ if (!enable) {
+ ret = ath11k_wmi_gtk_rekey_getinfo(ar, arvif);
+ if (ret) {
+ ath11k_warn(ar->ab, "failed to request rekey info vdev %i, ret %d\n",
+ arvif->vdev_id, ret);
+ return ret;
+ }
+ }
+
+ ret = ath11k_wmi_gtk_rekey_offload(ar, arvif, enable);
+
+ if (ret) {
+ ath11k_warn(ar->ab, "failed to offload gtk reky vdev %i: enable %d, ret %d\n",
+ arvif->vdev_id, enable, ret);
+ return ret;
+ }
+ }
+
+ return 0;
+}
+
+static int ath11k_wow_protocol_offload(struct ath11k *ar, bool enable)
+{
+ int ret;
+
+ ret = ath11k_wow_arp_ns_offload(ar, enable);
+ if (ret) {
+ ath11k_warn(ar->ab, "failed to offload ARP and NS %d %d\n",
+ enable, ret);
+ return ret;
+ }
+
+ ret = ath11k_gtk_rekey_offload(ar, enable);
+ if (ret) {
+ ath11k_warn(ar->ab, "failed to offload gtk rekey %d %d\n",
+ enable, ret);
+ return ret;
+ }
+
+ return 0;
+}
+
+static int ath11k_wow_set_keepalive(struct ath11k *ar,
+ enum wmi_sta_keepalive_method method,
+ u32 interval)
+{
+ struct ath11k_vif *arvif;
+ int ret;
+
+ lockdep_assert_held(&ar->conf_mutex);
+
+ list_for_each_entry(arvif, &ar->arvifs, list) {
+ ret = ath11k_mac_vif_set_keepalive(arvif, method, interval);
+ if (ret)
+ return ret;
+ }
+
+ return 0;
+}
+
+int ath11k_wow_op_suspend(struct ieee80211_hw *hw,
+ struct cfg80211_wowlan *wowlan)
+{
+ struct ath11k *ar = hw->priv;
+ int ret;
+
+ ret = ath11k_mac_wait_tx_complete(ar);
+ if (ret) {
+ ath11k_warn(ar->ab, "failed to wait tx complete: %d\n", ret);
+ return ret;
+ }
+
+ mutex_lock(&ar->conf_mutex);
+
+ ret = ath11k_dp_rx_pktlog_stop(ar->ab, true);
+ if (ret) {
+ ath11k_warn(ar->ab,
+ "failed to stop dp rx (and timer) pktlog during wow suspend: %d\n",
+ ret);
+ goto exit;
+ }
+
+ ret = ath11k_wow_cleanup(ar);
+ if (ret) {
+ ath11k_warn(ar->ab, "failed to clear wow wakeup events: %d\n",
+ ret);
+ goto exit;
+ }
+
+ ret = ath11k_wow_set_wakeups(ar, wowlan);
+ if (ret) {
+ ath11k_warn(ar->ab, "failed to set wow wakeup events: %d\n",
+ ret);
+ goto cleanup;
+ }
+
+ ret = ath11k_wow_protocol_offload(ar, true);
+ if (ret) {
+ ath11k_warn(ar->ab, "failed to set wow protocol offload events: %d\n",
+ ret);
+ goto cleanup;
+ }
+
+ ret = ath11k_wow_set_hw_filter(ar);
+ if (ret) {
+ ath11k_warn(ar->ab, "failed to set hw filter: %d\n",
+ ret);
+ goto cleanup;
+ }
+
+ ret = ath11k_wow_set_keepalive(ar,
+ WMI_STA_KEEPALIVE_METHOD_NULL_FRAME,
+ WMI_STA_KEEPALIVE_INTERVAL_DEFAULT);
+ if (ret) {
+ ath11k_warn(ar->ab, "failed to enable wow keepalive: %d\n", ret);
+ goto cleanup;
+ }
+
+ ret = ath11k_wow_enable(ar->ab);
+ if (ret) {
+ ath11k_warn(ar->ab, "failed to start wow: %d\n", ret);
+ goto cleanup;
+ }
+
+ ret = ath11k_dp_rx_pktlog_stop(ar->ab, false);
+ if (ret) {
+ ath11k_warn(ar->ab,
+ "failed to stop dp rx pktlog during wow suspend: %d\n",
+ ret);
+ goto cleanup;
+ }
+
+ ath11k_ce_stop_shadow_timers(ar->ab);
+ ath11k_dp_stop_shadow_timers(ar->ab);
+
+ ath11k_hif_irq_disable(ar->ab);
+ ath11k_hif_ce_irq_disable(ar->ab);
+
+ ret = ath11k_hif_suspend(ar->ab);
+ if (ret) {
+ ath11k_warn(ar->ab, "failed to suspend hif: %d\n", ret);
+ goto wakeup;
+ }
+
+ goto exit;
+
+wakeup:
+ ath11k_wow_wakeup(ar->ab);
+
+cleanup:
+ ath11k_wow_cleanup(ar);
+
+exit:
+ mutex_unlock(&ar->conf_mutex);
+ return ret ? 1 : 0;
+}
+
+void ath11k_wow_op_set_wakeup(struct ieee80211_hw *hw, bool enabled)
+{
+ struct ath11k *ar = hw->priv;
+
+ mutex_lock(&ar->conf_mutex);
+ device_set_wakeup_enable(ar->ab->dev, enabled);
+ mutex_unlock(&ar->conf_mutex);
+}
+
+int ath11k_wow_op_resume(struct ieee80211_hw *hw)
+{
+ struct ath11k *ar = hw->priv;
+ int ret;
+
+ mutex_lock(&ar->conf_mutex);
+
+ ret = ath11k_hif_resume(ar->ab);
+ if (ret) {
+ ath11k_warn(ar->ab, "failed to resume hif: %d\n", ret);
+ goto exit;
+ }
+
+ ath11k_hif_ce_irq_enable(ar->ab);
+ ath11k_hif_irq_enable(ar->ab);
+
+ ret = ath11k_dp_rx_pktlog_start(ar->ab);
+ if (ret) {
+ ath11k_warn(ar->ab, "failed to start rx pktlog from wow: %d\n", ret);
+ goto exit;
+ }
+
+ ret = ath11k_wow_wakeup(ar->ab);
+ if (ret) {
+ ath11k_warn(ar->ab, "failed to wakeup from wow: %d\n", ret);
+ goto exit;
+ }
+
+ ret = ath11k_wow_nlo_cleanup(ar);
+ if (ret) {
+ ath11k_warn(ar->ab, "failed to cleanup nlo: %d\n", ret);
+ goto exit;
+ }
+
+ ret = ath11k_wow_clear_hw_filter(ar);
+ if (ret) {
+ ath11k_warn(ar->ab, "failed to clear hw filter: %d\n", ret);
+ goto exit;
+ }
+
+ ret = ath11k_wow_protocol_offload(ar, false);
+ if (ret) {
+ ath11k_warn(ar->ab, "failed to clear wow protocol offload events: %d\n",
+ ret);
+ goto exit;
+ }
+
+ ret = ath11k_wow_set_keepalive(ar,
+ WMI_STA_KEEPALIVE_METHOD_NULL_FRAME,
+ WMI_STA_KEEPALIVE_INTERVAL_DISABLE);
+ if (ret) {
+ ath11k_warn(ar->ab, "failed to disable wow keepalive: %d\n", ret);
+ goto exit;
+ }
+
+exit:
+ if (ret) {
+ switch (ar->state) {
+ case ATH11K_STATE_ON:
+ ar->state = ATH11K_STATE_RESTARTING;
+ ret = 1;
+ break;
+ case ATH11K_STATE_OFF:
+ case ATH11K_STATE_RESTARTING:
+ case ATH11K_STATE_RESTARTED:
+ case ATH11K_STATE_WEDGED:
+ case ATH11K_STATE_FTM:
+ ath11k_warn(ar->ab, "encountered unexpected device state %d on resume, cannot recover\n",
+ ar->state);
+ ret = -EIO;
+ break;
+ }
+ }
+
+ mutex_unlock(&ar->conf_mutex);
+ return ret;
+}
+
+int ath11k_wow_init(struct ath11k *ar)
+{
+ if (!test_bit(WMI_TLV_SERVICE_WOW, ar->wmi->wmi_ab->svc_map))
+ return 0;
+
+ ar->wow.wowlan_support = ath11k_wowlan_support;
+
+ if (ar->wmi->wmi_ab->wlan_resource_config.rx_decap_mode ==
+ ATH11K_HW_TXRX_NATIVE_WIFI) {
+ ar->wow.wowlan_support.pattern_max_len -= WOW_MAX_REDUCE;
+ ar->wow.wowlan_support.max_pkt_offset -= WOW_MAX_REDUCE;
+ }
+
+ if (test_bit(WMI_TLV_SERVICE_NLO, ar->wmi->wmi_ab->svc_map)) {
+ ar->wow.wowlan_support.flags |= WIPHY_WOWLAN_NET_DETECT;
+ ar->wow.wowlan_support.max_nd_match_sets = WMI_PNO_MAX_SUPP_NETWORKS;
+ }
+
+ ar->wow.max_num_patterns = ATH11K_WOW_PATTERNS;
+ ar->wow.wowlan_support.n_patterns = ar->wow.max_num_patterns;
+ ar->hw->wiphy->wowlan = &ar->wow.wowlan_support;
+
+ device_set_wakeup_capable(ar->ab->dev, true);
+
+ return 0;
+}
diff --git a/drivers/net/wireless/ath/ath11k/wow.h b/drivers/net/wireless/ath/ath11k/wow.h
new file mode 100644
index 0000000000..553ba850d9
--- /dev/null
+++ b/drivers/net/wireless/ath/ath11k/wow.h
@@ -0,0 +1,55 @@
+/* SPDX-License-Identifier: BSD-3-Clause-Clear */
+/*
+ * Copyright (c) 2020 The Linux Foundation. All rights reserved.
+ */
+
+#ifndef _WOW_H_
+#define _WOW_H_
+
+struct ath11k_wow {
+ u32 max_num_patterns;
+ struct completion wakeup_completed;
+ struct wiphy_wowlan_support wowlan_support;
+};
+
+struct rfc1042_hdr {
+ u8 llc_dsap;
+ u8 llc_ssap;
+ u8 llc_ctrl;
+ u8 snap_oui[3];
+ __be16 snap_type;
+} __packed;
+
+#define ATH11K_WOW_RETRY_NUM 3
+#define ATH11K_WOW_RETRY_WAIT_MS 200
+#define ATH11K_WOW_PATTERNS 22
+
+#ifdef CONFIG_PM
+
+int ath11k_wow_init(struct ath11k *ar);
+int ath11k_wow_op_suspend(struct ieee80211_hw *hw,
+ struct cfg80211_wowlan *wowlan);
+int ath11k_wow_op_resume(struct ieee80211_hw *hw);
+void ath11k_wow_op_set_wakeup(struct ieee80211_hw *hw, bool enabled);
+int ath11k_wow_enable(struct ath11k_base *ab);
+int ath11k_wow_wakeup(struct ath11k_base *ab);
+
+#else
+
+static inline int ath11k_wow_init(struct ath11k *ar)
+{
+ return 0;
+}
+
+static inline int ath11k_wow_enable(struct ath11k_base *ab)
+{
+ return 0;
+}
+
+static inline int ath11k_wow_wakeup(struct ath11k_base *ab)
+{
+ return 0;
+}
+
+#endif /* CONFIG_PM */
+#endif /* _WOW_H_ */