path: root/drivers/net/wireless/mediatek/mt76/mt7915
author     Daniel Baumann <daniel.baumann@progress-linux.org>  2024-04-11 08:27:49 +0000
committer  Daniel Baumann <daniel.baumann@progress-linux.org>  2024-04-11 08:27:49 +0000
commit     ace9429bb58fd418f0c81d4c2835699bddf6bde6
tree       b2d64bc10158fdd5497876388cd68142ca374ed3  /drivers/net/wireless/mediatek/mt76/mt7915
parent     Initial commit.
Adding upstream version 6.6.15. (upstream/6.6.15)
Signed-off-by: Daniel Baumann <daniel.baumann@progress-linux.org>
Diffstat (limited to 'drivers/net/wireless/mediatek/mt76/mt7915')
-rw-r--r--  drivers/net/wireless/mediatek/mt76/mt7915/Kconfig      |   25
-rw-r--r--  drivers/net/wireless/mediatek/mt76/mt7915/Makefile     |   10
-rw-r--r--  drivers/net/wireless/mediatek/mt76/mt7915/coredump.c   |  411
-rw-r--r--  drivers/net/wireless/mediatek/mt76/mt7915/coredump.h   |  136
-rw-r--r--  drivers/net/wireless/mediatek/mt76/mt7915/debugfs.c    | 1399
-rw-r--r--  drivers/net/wireless/mediatek/mt76/mt7915/dma.c        |  671
-rw-r--r--  drivers/net/wireless/mediatek/mt76/mt7915/eeprom.c     |  355
-rw-r--r--  drivers/net/wireless/mediatek/mt76/mt7915/eeprom.h     |  161
-rw-r--r--  drivers/net/wireless/mediatek/mt76/mt7915/init.c       | 1261
-rw-r--r--  drivers/net/wireless/mediatek/mt76/mt7915/mac.c        | 2410
-rw-r--r--  drivers/net/wireless/mediatek/mt76/mt7915/mac.h        |   75
-rw-r--r--  drivers/net/wireless/mediatek/mt76/mt7915/main.c       | 1700
-rw-r--r--  drivers/net/wireless/mediatek/mt76/mt7915/mcu.c        | 3879
-rw-r--r--  drivers/net/wireless/mediatek/mt76/mt7915/mcu.h        |  530
-rw-r--r--  drivers/net/wireless/mediatek/mt76/mt7915/mmio.c       | 1067
-rw-r--r--  drivers/net/wireless/mediatek/mt76/mt7915/mt7915.h     |  596
-rw-r--r--  drivers/net/wireless/mediatek/mt76/mt7915/pci.c        |  241
-rw-r--r--  drivers/net/wireless/mediatek/mt76/mt7915/regs.h       | 1221
-rw-r--r--  drivers/net/wireless/mediatek/mt76/mt7915/soc.c        | 1322
-rw-r--r--  drivers/net/wireless/mediatek/mt76/mt7915/testmode.c   |  788
-rw-r--r--  drivers/net/wireless/mediatek/mt76/mt7915/testmode.h   |  105
21 files changed, 18363 insertions, 0 deletions
diff --git a/drivers/net/wireless/mediatek/mt76/mt7915/Kconfig b/drivers/net/wireless/mediatek/mt76/mt7915/Kconfig
new file mode 100644
index 0000000000..193112c49b
--- /dev/null
+++ b/drivers/net/wireless/mediatek/mt76/mt7915/Kconfig
@@ -0,0 +1,25 @@
+# SPDX-License-Identifier: ISC
+config MT7915E
+ tristate "MediaTek MT7915E (PCIe) support"
+ select MT76_CONNAC_LIB
+ select WANT_DEV_COREDUMP
+ depends on MAC80211
+ depends on PCI
+ select RELAY
+ help
+ This adds support for MT7915-based PCIe wireless devices,
+ which support concurrent dual-band operation at both 5GHz
+ and 2.4GHz IEEE 802.11ax 4x4:4SS 1024-QAM, 160MHz channels,
+ OFDMA, spatial reuse and dual carrier modulation.
+
+ To compile this driver as a module, choose M here.
+
+config MT798X_WMAC
+ bool "MT798x (SoC) WMAC support"
+ depends on MT7915E
+ depends on ARCH_MEDIATEK || COMPILE_TEST
+ select REGMAP
+ help
+ This adds support for the built-in WMAC on the MT7981 and MT7986
+ SoCs, which have the same feature set as the MT7915 but additionally
+ enable Wi-Fi 6E (6GHz) support.
diff --git a/drivers/net/wireless/mediatek/mt76/mt7915/Makefile b/drivers/net/wireless/mediatek/mt76/mt7915/Makefile
new file mode 100644
index 0000000000..e0ca638c91
--- /dev/null
+++ b/drivers/net/wireless/mediatek/mt76/mt7915/Makefile
@@ -0,0 +1,10 @@
+# SPDX-License-Identifier: ISC
+
+obj-$(CONFIG_MT7915E) += mt7915e.o
+
+mt7915e-y := pci.o init.o dma.o eeprom.o main.o mcu.o mac.o \
+ debugfs.o mmio.o
+
+mt7915e-$(CONFIG_NL80211_TESTMODE) += testmode.o
+mt7915e-$(CONFIG_MT798X_WMAC) += soc.o
+mt7915e-$(CONFIG_DEV_COREDUMP) += coredump.o
diff --git a/drivers/net/wireless/mediatek/mt76/mt7915/coredump.c b/drivers/net/wireless/mediatek/mt76/mt7915/coredump.c
new file mode 100644
index 0000000000..5daf2258df
--- /dev/null
+++ b/drivers/net/wireless/mediatek/mt76/mt7915/coredump.c
@@ -0,0 +1,411 @@
+// SPDX-License-Identifier: ISC
+/* Copyright (C) 2022 MediaTek Inc. */
+
+#include <linux/devcoredump.h>
+#include <linux/kernel.h>
+#include <linux/types.h>
+#include <linux/utsname.h>
+#include "coredump.h"
+
+static bool coredump_memdump;
+module_param(coredump_memdump, bool, 0644);
+MODULE_PARM_DESC(coredump_memdump, "Optional ability to dump firmware memory");
+
+static const struct mt7915_mem_region mt7915_mem_regions[] = {
+ {
+ .start = 0xe003b400,
+ .len = 0x00003bff,
+ .name = "CRAM",
+ },
+};
+
+static const struct mt7915_mem_region mt7916_mem_regions[] = {
+ {
+ .start = 0x00800000,
+ .len = 0x0005ffff,
+ .name = "ROM",
+ },
+ {
+ .start = 0x00900000,
+ .len = 0x00013fff,
+ .name = "ULM1",
+ },
+ {
+ .start = 0x02200000,
+ .len = 0x0004ffff,
+ .name = "ULM2",
+ },
+ {
+ .start = 0x02300000,
+ .len = 0x0004ffff,
+ .name = "ULM3",
+ },
+ {
+ .start = 0x00400000,
+ .len = 0x00027fff,
+ .name = "SRAM",
+ },
+ {
+ .start = 0xe0000000,
+ .len = 0x00157fff,
+ .name = "CRAM",
+ },
+};
+
+static const struct mt7915_mem_region mt798x_mem_regions[] = {
+ {
+ .start = 0x00800000,
+ .len = 0x0005ffff,
+ .name = "ROM",
+ },
+ {
+ .start = 0x00900000,
+ .len = 0x0000ffff,
+ .name = "ULM1",
+ },
+ {
+ .start = 0x02200000,
+ .len = 0x0004ffff,
+ .name = "ULM2",
+ },
+ {
+ .start = 0x02300000,
+ .len = 0x0004ffff,
+ .name = "ULM3",
+ },
+ {
+ .start = 0x00400000,
+ .len = 0x00017fff,
+ .name = "SRAM",
+ },
+ {
+ .start = 0xe0000000,
+ .len = 0x00113fff,
+ .name = "CRAM",
+ },
+};
+
+const struct mt7915_mem_region*
+mt7915_coredump_get_mem_layout(struct mt7915_dev *dev, u32 *num)
+{
+ switch (mt76_chip(&dev->mt76)) {
+ case 0x7915:
+ *num = ARRAY_SIZE(mt7915_mem_regions);
+ return &mt7915_mem_regions[0];
+ case 0x7981:
+ case 0x7986:
+ *num = ARRAY_SIZE(mt798x_mem_regions);
+ return &mt798x_mem_regions[0];
+ case 0x7916:
+ *num = ARRAY_SIZE(mt7916_mem_regions);
+ return &mt7916_mem_regions[0];
+ default:
+ return NULL;
+ }
+}
+
+static int mt7915_coredump_get_mem_size(struct mt7915_dev *dev)
+{
+ const struct mt7915_mem_region *mem_region;
+ size_t size = 0;
+ u32 num;
+ int i;
+
+ mem_region = mt7915_coredump_get_mem_layout(dev, &num);
+ if (!mem_region)
+ return 0;
+
+ for (i = 0; i < num; i++) {
+ size += mem_region->len;
+ mem_region++;
+ }
+
+ /* reserve space for the headers */
+ size += num * sizeof(struct mt7915_mem_hdr);
+ /* make sure it is aligned 4 bytes for debug message print out */
+ size = ALIGN(size, 4);
+
+ return size;
+}
+
+struct mt7915_crash_data *mt7915_coredump_new(struct mt7915_dev *dev)
+{
+ struct mt7915_crash_data *crash_data = dev->coredump.crash_data;
+
+ lockdep_assert_held(&dev->dump_mutex);
+
+ guid_gen(&crash_data->guid);
+ ktime_get_real_ts64(&crash_data->timestamp);
+
+ return crash_data;
+}
+
+static void
+mt7915_coredump_fw_state(struct mt7915_dev *dev, struct mt7915_coredump *dump,
+ bool *exception)
+{
+ u32 state, count, type;
+
+ type = (u32)mt76_get_field(dev, MT_FW_EXCEPT_TYPE, GENMASK(7, 0));
+ state = (u32)mt76_get_field(dev, MT_FW_ASSERT_STAT, GENMASK(7, 0));
+ count = is_mt7915(&dev->mt76) ?
+ (u32)mt76_get_field(dev, MT_FW_EXCEPT_COUNT, GENMASK(15, 8)) :
+ (u32)mt76_get_field(dev, MT_FW_EXCEPT_COUNT, GENMASK(7, 0));
+
+ /* normal mode: driver can manually trigger assert for detail info */
+ if (!count)
+ strscpy(dump->fw_state, "normal", sizeof(dump->fw_state));
+ else if (state > 1 && (count == 1) && type == 5)
+ strscpy(dump->fw_state, "assert", sizeof(dump->fw_state));
+ else if ((state > 1 && count == 1) || count > 1)
+ strscpy(dump->fw_state, "exception", sizeof(dump->fw_state));
+
+ *exception = !!count;
+}
+
+static void
+mt7915_coredump_fw_trace(struct mt7915_dev *dev, struct mt7915_coredump *dump,
+ bool exception)
+{
+ u32 n, irq, sch, base = MT_FW_EINT_INFO;
+
+ /* trap or run? */
+ dump->last_msg_id = mt76_rr(dev, MT_FW_LAST_MSG_ID);
+
+ n = is_mt7915(&dev->mt76) ?
+ (u32)mt76_get_field(dev, base, GENMASK(7, 0)) :
+ (u32)mt76_get_field(dev, base, GENMASK(15, 8));
+ dump->eint_info_idx = n;
+
+ irq = mt76_rr(dev, base + 0x8);
+ n = is_mt7915(&dev->mt76) ?
+ FIELD_GET(GENMASK(7, 0), irq) : FIELD_GET(GENMASK(23, 16), irq);
+ dump->irq_info_idx = n;
+
+ sch = mt76_rr(dev, MT_FW_SCHED_INFO);
+ n = is_mt7915(&dev->mt76) ?
+ FIELD_GET(GENMASK(7, 0), sch) : FIELD_GET(GENMASK(15, 8), sch);
+ dump->sched_info_idx = n;
+
+ if (exception) {
+ u32 i, y;
+
+ /* sched trace */
+ n = is_mt7915(&dev->mt76) ?
+ FIELD_GET(GENMASK(15, 8), sch) : FIELD_GET(GENMASK(7, 0), sch);
+ n = n > 60 ? 60 : n;
+
+ strscpy(dump->trace_sched, "(sched_info) id, time",
+ sizeof(dump->trace_sched));
+
+ for (y = dump->sched_info_idx, i = 0; i < n; i++, y++) {
+ mt7915_memcpy_fromio(dev, dump->sched, base + 0xc + y * 12,
+ sizeof(dump->sched));
+ y = y >= n ? 0 : y;
+ }
+
+ /* irq trace */
+ n = is_mt7915(&dev->mt76) ?
+ FIELD_GET(GENMASK(15, 8), irq) : FIELD_GET(GENMASK(7, 0), irq);
+ n = n > 60 ? 60 : n;
+
+ strscpy(dump->trace_irq, "(irq_info) id, time",
+ sizeof(dump->trace_irq));
+
+ for (y = dump->irq_info_idx, i = 0; i < n; i++, y++) {
+ mt7915_memcpy_fromio(dev, dump->irq, base + 0x4 + y * 16,
+ sizeof(dump->irq));
+ y = y >= n ? 0 : y;
+ }
+ }
+}
+
+static void
+mt7915_coredump_fw_stack(struct mt7915_dev *dev, struct mt7915_coredump *dump,
+ bool exception)
+{
+ u32 oldest, i, idx;
+
+ /* stop call stack record */
+ if (!exception)
+ mt76_clear(dev, 0x89050200, BIT(0));
+
+ oldest = (u32)mt76_get_field(dev, 0x89050200, GENMASK(20, 16)) + 2;
+ for (i = 0; i < 16; i++) {
+ idx = ((oldest + 2 * i + 1) % 32);
+ dump->call_stack[i] = mt76_rr(dev, 0x89050204 + idx * 4);
+ }
+
+ /* start call stack record */
+ if (!exception)
+ mt76_set(dev, 0x89050200, BIT(0));
+}
+
+static void
+mt7915_coredump_fw_task(struct mt7915_dev *dev, struct mt7915_coredump *dump)
+{
+ u32 offs = is_mt7915(&dev->mt76) ? 0xe0 : 0x170;
+
+ strscpy(dump->task_qid, "(task queue id) read, write",
+ sizeof(dump->task_qid));
+
+ dump->taskq[0].read = mt76_rr(dev, MT_FW_TASK_QID1);
+ dump->taskq[0].write = mt76_rr(dev, MT_FW_TASK_QID1 - 4);
+ dump->taskq[1].read = mt76_rr(dev, MT_FW_TASK_QID2);
+ dump->taskq[1].write = mt76_rr(dev, MT_FW_TASK_QID2 - 4);
+
+ strscpy(dump->task_info, "(task stack) start, end, size",
+ sizeof(dump->task_info));
+
+ dump->taski[0].start = mt76_rr(dev, MT_FW_TASK_START);
+ dump->taski[0].end = mt76_rr(dev, MT_FW_TASK_END);
+ dump->taski[0].size = mt76_rr(dev, MT_FW_TASK_SIZE);
+ dump->taski[1].start = mt76_rr(dev, MT_FW_TASK_START + offs);
+ dump->taski[1].end = mt76_rr(dev, MT_FW_TASK_END + offs);
+ dump->taski[1].size = mt76_rr(dev, MT_FW_TASK_SIZE + offs);
+}
+
+static void
+mt7915_coredump_fw_context(struct mt7915_dev *dev, struct mt7915_coredump *dump)
+{
+ u32 count, idx, id;
+
+ count = mt76_rr(dev, MT_FW_CIRQ_COUNT);
+
+ /* current context */
+ if (!count) {
+ strscpy(dump->fw_context, "(context) interrupt",
+ sizeof(dump->fw_context));
+
+ idx = is_mt7915(&dev->mt76) ?
+ (u32)mt76_get_field(dev, MT_FW_CIRQ_IDX, GENMASK(31, 16)) :
+ (u32)mt76_get_field(dev, MT_FW_CIRQ_IDX, GENMASK(15, 0));
+ dump->context.idx = idx;
+ dump->context.handler = mt76_rr(dev, MT_FW_CIRQ_LISR);
+ } else {
+ idx = mt76_rr(dev, MT_FW_TASK_IDX);
+ id = mt76_rr(dev, MT_FW_TASK_ID);
+
+ if (!id && idx == 3) {
+ strscpy(dump->fw_context, "(context) idle",
+ sizeof(dump->fw_context));
+ } else if (id && idx != 3) {
+ strscpy(dump->fw_context, "(context) task",
+ sizeof(dump->fw_context));
+
+ dump->context.idx = idx;
+ dump->context.handler = id;
+ }
+ }
+}
+
+static struct mt7915_coredump *mt7915_coredump_build(struct mt7915_dev *dev)
+{
+ struct mt7915_crash_data *crash_data = dev->coredump.crash_data;
+ struct mt7915_coredump *dump;
+ struct mt7915_coredump_mem *dump_mem;
+ size_t len, sofar = 0, hdr_len = sizeof(*dump);
+ unsigned char *buf;
+ bool exception;
+
+ len = hdr_len;
+
+ if (coredump_memdump && crash_data->memdump_buf_len)
+ len += sizeof(*dump_mem) + crash_data->memdump_buf_len;
+
+ sofar += hdr_len;
+
+ /* this is going to get big when we start dumping memory and such,
+ * so go ahead and use vmalloc.
+ */
+ buf = vzalloc(len);
+ if (!buf)
+ return NULL;
+
+ mutex_lock(&dev->dump_mutex);
+
+ dump = (struct mt7915_coredump *)(buf);
+ dump->len = len;
+
+ /* plain text */
+ strscpy(dump->magic, "mt76-crash-dump", sizeof(dump->magic));
+ strscpy(dump->kernel, init_utsname()->release, sizeof(dump->kernel));
+ strscpy(dump->fw_ver, dev->mt76.hw->wiphy->fw_version,
+ sizeof(dump->fw_ver));
+
+ guid_copy(&dump->guid, &crash_data->guid);
+ dump->tv_sec = crash_data->timestamp.tv_sec;
+ dump->tv_nsec = crash_data->timestamp.tv_nsec;
+ dump->device_id = mt76_chip(&dev->mt76);
+
+ mt7915_coredump_fw_state(dev, dump, &exception);
+ mt7915_coredump_fw_trace(dev, dump, exception);
+ mt7915_coredump_fw_task(dev, dump);
+ mt7915_coredump_fw_context(dev, dump);
+ mt7915_coredump_fw_stack(dev, dump, exception);
+
+ /* gather memory content */
+ dump_mem = (struct mt7915_coredump_mem *)(buf + sofar);
+ dump_mem->len = crash_data->memdump_buf_len;
+ if (coredump_memdump && crash_data->memdump_buf_len)
+ memcpy(dump_mem->data, crash_data->memdump_buf,
+ crash_data->memdump_buf_len);
+
+ mutex_unlock(&dev->dump_mutex);
+
+ return dump;
+}
+
+int mt7915_coredump_submit(struct mt7915_dev *dev)
+{
+ struct mt7915_coredump *dump;
+
+ dump = mt7915_coredump_build(dev);
+ if (!dump) {
+ dev_warn(dev->mt76.dev, "no crash dump data found\n");
+ return -ENODATA;
+ }
+
+ dev_coredumpv(dev->mt76.dev, dump, dump->len, GFP_KERNEL);
+
+ return 0;
+}
+
+int mt7915_coredump_register(struct mt7915_dev *dev)
+{
+ struct mt7915_crash_data *crash_data;
+
+ crash_data = vzalloc(sizeof(*dev->coredump.crash_data));
+ if (!crash_data)
+ return -ENOMEM;
+
+ dev->coredump.crash_data = crash_data;
+
+ if (coredump_memdump) {
+ crash_data->memdump_buf_len = mt7915_coredump_get_mem_size(dev);
+ if (!crash_data->memdump_buf_len)
+ /* no memory content */
+ return 0;
+
+ crash_data->memdump_buf = vzalloc(crash_data->memdump_buf_len);
+ if (!crash_data->memdump_buf) {
+ vfree(crash_data);
+ return -ENOMEM;
+ }
+ }
+
+ return 0;
+}
+
+void mt7915_coredump_unregister(struct mt7915_dev *dev)
+{
+ if (dev->coredump.crash_data->memdump_buf) {
+ vfree(dev->coredump.crash_data->memdump_buf);
+ dev->coredump.crash_data->memdump_buf = NULL;
+ dev->coredump.crash_data->memdump_buf_len = 0;
+ }
+
+ vfree(dev->coredump.crash_data);
+ dev->coredump.crash_data = NULL;
+}
+
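The coredump helpers above only collect and package firmware state; the actual trigger path lives in mac.c, which is not part of this hunk. Below is a minimal sketch of the expected call sequence, assuming the module was loaded with coredump_memdump=1 and that a firmware-exception handler of roughly this shape exists (the function name and the way the memdump buffer gets filled are illustrative, not the driver's real code):

static void example_fw_exception_path(struct mt7915_dev *dev)
{
	struct mt7915_crash_data *crash_data;

	/* mt7915_coredump_register() is assumed to have run at probe time */
	mutex_lock(&dev->dump_mutex);
	crash_data = mt7915_coredump_new(dev);	/* stamps GUID + timestamp */
	if (crash_data->memdump_buf) {
		/* fill crash_data->memdump_buf with one mt7915_mem_hdr per
		 * region returned by mt7915_coredump_get_mem_layout(),
		 * each followed by that region's contents read from the chip
		 */
	}
	mutex_unlock(&dev->dump_mutex);

	mt7915_coredump_submit(dev);	/* builds the blob, hands it to devcoredump */
}

mt7915_coredump_unregister() then frees the crash data on device teardown.
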
diff --git a/drivers/net/wireless/mediatek/mt76/mt7915/coredump.h b/drivers/net/wireless/mediatek/mt76/mt7915/coredump.h
new file mode 100644
index 0000000000..709f8e9c79
--- /dev/null
+++ b/drivers/net/wireless/mediatek/mt76/mt7915/coredump.h
@@ -0,0 +1,136 @@
+/* SPDX-License-Identifier: ISC */
+/* Copyright (C) 2022 MediaTek Inc. */
+
+#ifndef _COREDUMP_H_
+#define _COREDUMP_H_
+
+#include "mt7915.h"
+
+struct trace {
+ u32 id;
+ u32 timestamp;
+};
+
+struct mt7915_coredump {
+ char magic[16];
+
+ u32 len;
+
+ guid_t guid;
+
+ /* time-of-day stamp */
+ u64 tv_sec;
+ /* time-of-day stamp, nano-seconds */
+ u64 tv_nsec;
+ /* kernel version */
+ char kernel[64];
+ /* firmware version */
+ char fw_ver[ETHTOOL_FWVERS_LEN];
+
+ u32 device_id;
+
+ /* exception state */
+ char fw_state[12];
+
+ u32 last_msg_id;
+ u32 eint_info_idx;
+ u32 irq_info_idx;
+ u32 sched_info_idx;
+
+ /* schedule info */
+ char trace_sched[32];
+ struct {
+ struct trace t;
+ u32 pc;
+ } sched[60];
+
+ /* irq info */
+ char trace_irq[32];
+ struct trace irq[60];
+
+ /* task queue status */
+ char task_qid[32];
+ struct {
+ u32 read;
+ u32 write;
+ } taskq[2];
+
+ /* task stack info */
+ char task_info[32];
+ struct {
+ u32 start;
+ u32 end;
+ u32 size;
+ } taski[2];
+
+ /* firmware context */
+ char fw_context[24];
+ struct {
+ u32 idx;
+ u32 handler;
+ } context;
+
+ /* link registers calltrace */
+ u32 call_stack[16];
+
+ /* memory content */
+ u8 data[];
+} __packed;
+
+struct mt7915_coredump_mem {
+ u32 len;
+ u8 data[];
+} __packed;
+
+struct mt7915_mem_hdr {
+ u32 start;
+ u32 len;
+ u8 data[];
+};
+
+struct mt7915_mem_region {
+ u32 start;
+ size_t len;
+
+ const char *name;
+};
+
+#ifdef CONFIG_DEV_COREDUMP
+
+const struct mt7915_mem_region *
+mt7915_coredump_get_mem_layout(struct mt7915_dev *dev, u32 *num);
+struct mt7915_crash_data *mt7915_coredump_new(struct mt7915_dev *dev);
+int mt7915_coredump_submit(struct mt7915_dev *dev);
+int mt7915_coredump_register(struct mt7915_dev *dev);
+void mt7915_coredump_unregister(struct mt7915_dev *dev);
+
+#else /* CONFIG_DEV_COREDUMP */
+
+static inline const struct mt7915_mem_region *
+mt7915_coredump_get_mem_layout(struct mt7915_dev *dev, u32 *num)
+{
+ return NULL;
+}
+
+static inline int mt7915_coredump_submit(struct mt7915_dev *dev)
+{
+ return 0;
+}
+
+static inline struct mt7915_crash_data *mt7915_coredump_new(struct mt7915_dev *dev)
+{
+ return NULL;
+}
+
+static inline int mt7915_coredump_register(struct mt7915_dev *dev)
+{
+ return 0;
+}
+
+static inline void mt7915_coredump_unregister(struct mt7915_dev *dev)
+{
+}
+
+#endif /* CONFIG_DEV_COREDUMP */
+
+#endif /* _COREDUMP_H_ */
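Putting these structures together: the blob handed to dev_coredumpv() is the packed struct mt7915_coredump header, optionally followed by a struct mt7915_coredump_mem whose data area holds a sequence of mt7915_mem_hdr records, each followed by its region contents (this layout is implied by mt7915_coredump_get_mem_size(), which reserves one header per region). A hedged, reader-side sketch of walking such a blob under that assumption; illustrative code, not part of the driver:

static void example_walk_dump(const void *blob, size_t blob_len)
{
	const struct mt7915_coredump *dump = blob;
	const struct mt7915_coredump_mem *mem;
	size_t pos = 0;

	if (blob_len < sizeof(*dump) || dump->len > blob_len ||
	    dump->len <= sizeof(*dump) + sizeof(*mem))
		return;	/* truncated, or header only with no memory section */

	mem = (const void *)dump->data;
	while (pos + sizeof(struct mt7915_mem_hdr) <= mem->len) {
		const struct mt7915_mem_hdr *hdr = (const void *)&mem->data[pos];

		/* hdr->start/hdr->len describe one firmware region; its
		 * contents follow immediately after this header
		 */
		pos += sizeof(*hdr) + hdr->len;
	}
}
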
diff --git a/drivers/net/wireless/mediatek/mt76/mt7915/debugfs.c b/drivers/net/wireless/mediatek/mt76/mt7915/debugfs.c
new file mode 100644
index 0000000000..6c3696c8c7
--- /dev/null
+++ b/drivers/net/wireless/mediatek/mt76/mt7915/debugfs.c
@@ -0,0 +1,1399 @@
+// SPDX-License-Identifier: ISC
+/* Copyright (C) 2020 MediaTek Inc. */
+
+#include <linux/relay.h>
+#include "mt7915.h"
+#include "eeprom.h"
+#include "mcu.h"
+#include "mac.h"
+
+#define FW_BIN_LOG_MAGIC 0x44e98caf
+
+/** global debugfs **/
+
+struct hw_queue_map {
+ const char *name;
+ u8 index;
+ u8 pid;
+ u8 qid;
+};
+
+static int
+mt7915_implicit_txbf_set(void *data, u64 val)
+{
+ struct mt7915_dev *dev = data;
+
+ /* Existing connected stations must reconnect to apply the new
+ * implicit txbf configuration.
+ */
+ dev->ibf = !!val;
+
+ return mt7915_mcu_set_txbf(dev, MT_BF_TYPE_UPDATE);
+}
+
+static int
+mt7915_implicit_txbf_get(void *data, u64 *val)
+{
+ struct mt7915_dev *dev = data;
+
+ *val = dev->ibf;
+
+ return 0;
+}
+
+DEFINE_DEBUGFS_ATTRIBUTE(fops_implicit_txbf, mt7915_implicit_txbf_get,
+ mt7915_implicit_txbf_set, "%lld\n");
+
+/* test knob of system error recovery */
+static ssize_t
+mt7915_sys_recovery_set(struct file *file, const char __user *user_buf,
+ size_t count, loff_t *ppos)
+{
+ struct mt7915_phy *phy = file->private_data;
+ struct mt7915_dev *dev = phy->dev;
+ bool band = phy->mt76->band_idx;
+ char buf[16];
+ int ret = 0;
+ u16 val;
+
+ if (count >= sizeof(buf))
+ return -EINVAL;
+
+ if (copy_from_user(buf, user_buf, count))
+ return -EFAULT;
+
+ if (count && buf[count - 1] == '\n')
+ buf[count - 1] = '\0';
+ else
+ buf[count] = '\0';
+
+ if (kstrtou16(buf, 0, &val))
+ return -EINVAL;
+
+ switch (val) {
+ /*
+ * 0: grab firmware current SER state.
+ * 1: trigger & enable system error L1 recovery.
+ * 2: trigger & enable system error L2 recovery.
+ * 3: trigger & enable system error L3 rx abort.
+ * 4: trigger & enable system error L3 tx abort
+ * 5: trigger & enable system error L3 tx disable.
+ * 6: trigger & enable system error L3 bf recovery.
+ * 7: trigger & enable system error full recovery.
+ * 8: trigger firmware crash.
+ */
+ case SER_QUERY:
+ ret = mt7915_mcu_set_ser(dev, 0, 0, band);
+ break;
+ case SER_SET_RECOVER_L1:
+ case SER_SET_RECOVER_L2:
+ case SER_SET_RECOVER_L3_RX_ABORT:
+ case SER_SET_RECOVER_L3_TX_ABORT:
+ case SER_SET_RECOVER_L3_TX_DISABLE:
+ case SER_SET_RECOVER_L3_BF:
+ ret = mt7915_mcu_set_ser(dev, SER_ENABLE, BIT(val), band);
+ if (ret)
+ return ret;
+
+ ret = mt7915_mcu_set_ser(dev, SER_RECOVER, val, band);
+ break;
+
+ /* enable full chip reset */
+ case SER_SET_RECOVER_FULL:
+ mt76_set(dev, MT_WFDMA0_MCU_HOST_INT_ENA, MT_MCU_CMD_WDT_MASK);
+ ret = mt7915_mcu_set_ser(dev, 1, 3, band);
+ if (ret)
+ return ret;
+
+ dev->recovery.state |= MT_MCU_CMD_WDT_MASK;
+ mt7915_reset(dev);
+ break;
+
+ /* WARNING: trigger firmware crash */
+ case SER_SET_SYSTEM_ASSERT:
+ mt76_wr(dev, MT_MCU_WM_CIRQ_EINT_MASK_CLR_ADDR, BIT(18));
+ mt76_wr(dev, MT_MCU_WM_CIRQ_EINT_SOFT_ADDR, BIT(18));
+ break;
+ default:
+ break;
+ }
+
+ return ret ? ret : count;
+}
+
+static ssize_t
+mt7915_sys_recovery_get(struct file *file, char __user *user_buf,
+ size_t count, loff_t *ppos)
+{
+ struct mt7915_phy *phy = file->private_data;
+ struct mt7915_dev *dev = phy->dev;
+ char *buff;
+ int desc = 0;
+ ssize_t ret;
+ static const size_t bufsz = 1024;
+
+ buff = kmalloc(bufsz, GFP_KERNEL);
+ if (!buff)
+ return -ENOMEM;
+
+ /* HELP */
+ desc += scnprintf(buff + desc, bufsz - desc,
+ "Please echo the correct value ...\n");
+ desc += scnprintf(buff + desc, bufsz - desc,
+ "0: grab firmware transient SER state\n");
+ desc += scnprintf(buff + desc, bufsz - desc,
+ "1: trigger system error L1 recovery\n");
+ desc += scnprintf(buff + desc, bufsz - desc,
+ "2: trigger system error L2 recovery\n");
+ desc += scnprintf(buff + desc, bufsz - desc,
+ "3: trigger system error L3 rx abort\n");
+ desc += scnprintf(buff + desc, bufsz - desc,
+ "4: trigger system error L3 tx abort\n");
+ desc += scnprintf(buff + desc, bufsz - desc,
+ "5: trigger system error L3 tx disable\n");
+ desc += scnprintf(buff + desc, bufsz - desc,
+ "6: trigger system error L3 bf recovery\n");
+ desc += scnprintf(buff + desc, bufsz - desc,
+ "7: trigger system error full recovery\n");
+ desc += scnprintf(buff + desc, bufsz - desc,
+ "8: trigger firmware crash\n");
+
+ /* SER statistics */
+ desc += scnprintf(buff + desc, bufsz - desc,
+ "\nlet's dump firmware SER statistics...\n");
+ desc += scnprintf(buff + desc, bufsz - desc,
+ "::E R , SER_STATUS = 0x%08x\n",
+ mt76_rr(dev, MT_SWDEF_SER_STATS));
+ desc += scnprintf(buff + desc, bufsz - desc,
+ "::E R , SER_PLE_ERR = 0x%08x\n",
+ mt76_rr(dev, MT_SWDEF_PLE_STATS));
+ desc += scnprintf(buff + desc, bufsz - desc,
+ "::E R , SER_PLE_ERR_1 = 0x%08x\n",
+ mt76_rr(dev, MT_SWDEF_PLE1_STATS));
+ desc += scnprintf(buff + desc, bufsz - desc,
+ "::E R , SER_PLE_ERR_AMSDU = 0x%08x\n",
+ mt76_rr(dev, MT_SWDEF_PLE_AMSDU_STATS));
+ desc += scnprintf(buff + desc, bufsz - desc,
+ "::E R , SER_PSE_ERR = 0x%08x\n",
+ mt76_rr(dev, MT_SWDEF_PSE_STATS));
+ desc += scnprintf(buff + desc, bufsz - desc,
+ "::E R , SER_PSE_ERR_1 = 0x%08x\n",
+ mt76_rr(dev, MT_SWDEF_PSE1_STATS));
+ desc += scnprintf(buff + desc, bufsz - desc,
+ "::E R , SER_LMAC_WISR6_B0 = 0x%08x\n",
+ mt76_rr(dev, MT_SWDEF_LAMC_WISR6_BN0_STATS));
+ desc += scnprintf(buff + desc, bufsz - desc,
+ "::E R , SER_LMAC_WISR6_B1 = 0x%08x\n",
+ mt76_rr(dev, MT_SWDEF_LAMC_WISR6_BN1_STATS));
+ desc += scnprintf(buff + desc, bufsz - desc,
+ "::E R , SER_LMAC_WISR7_B0 = 0x%08x\n",
+ mt76_rr(dev, MT_SWDEF_LAMC_WISR7_BN0_STATS));
+ desc += scnprintf(buff + desc, bufsz - desc,
+ "::E R , SER_LMAC_WISR7_B1 = 0x%08x\n",
+ mt76_rr(dev, MT_SWDEF_LAMC_WISR7_BN1_STATS));
+ desc += scnprintf(buff + desc, bufsz - desc,
+ "\nSYS_RESET_COUNT: WM %d, WA %d\n",
+ dev->recovery.wm_reset_count,
+ dev->recovery.wa_reset_count);
+
+ ret = simple_read_from_buffer(user_buf, count, ppos, buff, desc);
+ kfree(buff);
+ return ret;
+}
+
+static const struct file_operations mt7915_sys_recovery_ops = {
+ .write = mt7915_sys_recovery_set,
+ .read = mt7915_sys_recovery_get,
+ .open = simple_open,
+ .llseek = default_llseek,
+};
+
+static int
+mt7915_radar_trigger(void *data, u64 val)
+{
+ struct mt7915_dev *dev = data;
+
+ if (val > MT_RX_SEL2)
+ return -EINVAL;
+
+ return mt76_connac_mcu_rdd_cmd(&dev->mt76, RDD_RADAR_EMULATE,
+ val, 0, 0);
+}
+
+DEFINE_DEBUGFS_ATTRIBUTE(fops_radar_trigger, NULL,
+ mt7915_radar_trigger, "%lld\n");
+
+static int
+mt7915_muru_debug_set(void *data, u64 val)
+{
+ struct mt7915_dev *dev = data;
+
+ dev->muru_debug = val;
+ mt7915_mcu_muru_debug_set(dev, dev->muru_debug);
+
+ return 0;
+}
+
+static int
+mt7915_muru_debug_get(void *data, u64 *val)
+{
+ struct mt7915_dev *dev = data;
+
+ *val = dev->muru_debug;
+
+ return 0;
+}
+
+DEFINE_DEBUGFS_ATTRIBUTE(fops_muru_debug, mt7915_muru_debug_get,
+ mt7915_muru_debug_set, "%lld\n");
+
+static int mt7915_muru_stats_show(struct seq_file *file, void *data)
+{
+ struct mt7915_phy *phy = file->private;
+ struct mt7915_dev *dev = phy->dev;
+ static const char * const dl_non_he_type[] = {
+ "CCK", "OFDM", "HT MIX", "HT GF",
+ "VHT SU", "VHT 2MU", "VHT 3MU", "VHT 4MU"
+ };
+ static const char * const dl_he_type[] = {
+ "HE SU", "HE EXT", "HE 2MU", "HE 3MU", "HE 4MU",
+ "HE 2RU", "HE 3RU", "HE 4RU", "HE 5-8RU", "HE 9-16RU",
+ "HE >16RU"
+ };
+ static const char * const ul_he_type[] = {
+ "HE 2MU", "HE 3MU", "HE 4MU", "HE SU", "HE 2RU",
+ "HE 3RU", "HE 4RU", "HE 5-8RU", "HE 9-16RU", "HE >16RU"
+ };
+ int ret, i;
+ u64 total_ppdu_cnt, sub_total_cnt;
+
+ if (!dev->muru_debug) {
+ seq_puts(file, "Please enable muru_debug first.\n");
+ return 0;
+ }
+
+ mutex_lock(&dev->mt76.mutex);
+
+ ret = mt7915_mcu_muru_debug_get(phy);
+ if (ret)
+ goto exit;
+
+ /* Non-HE Downlink*/
+ seq_puts(file, "[Non-HE]\nDownlink\nData Type: ");
+
+ for (i = 0; i < 5; i++)
+ seq_printf(file, "%8s | ", dl_non_he_type[i]);
+
+ seq_puts(file, "\nTotal Count:");
+ seq_printf(file, "%8u | %8u | %8u | %8u | %8u | ",
+ phy->mib.dl_cck_cnt,
+ phy->mib.dl_ofdm_cnt,
+ phy->mib.dl_htmix_cnt,
+ phy->mib.dl_htgf_cnt,
+ phy->mib.dl_vht_su_cnt);
+
+ seq_puts(file, "\nDownlink MU-MIMO\nData Type: ");
+
+ for (i = 5; i < 8; i++)
+ seq_printf(file, "%8s | ", dl_non_he_type[i]);
+
+ seq_puts(file, "\nTotal Count:");
+ seq_printf(file, "%8u | %8u | %8u | ",
+ phy->mib.dl_vht_2mu_cnt,
+ phy->mib.dl_vht_3mu_cnt,
+ phy->mib.dl_vht_4mu_cnt);
+
+ sub_total_cnt = phy->mib.dl_vht_2mu_cnt +
+ phy->mib.dl_vht_3mu_cnt +
+ phy->mib.dl_vht_4mu_cnt;
+
+ seq_printf(file, "\nTotal non-HE MU-MIMO DL PPDU count: %lld",
+ sub_total_cnt);
+
+ total_ppdu_cnt = sub_total_cnt +
+ phy->mib.dl_cck_cnt +
+ phy->mib.dl_ofdm_cnt +
+ phy->mib.dl_htmix_cnt +
+ phy->mib.dl_htgf_cnt +
+ phy->mib.dl_vht_su_cnt;
+
+ seq_printf(file, "\nAll non-HE DL PPDU count: %lld", total_ppdu_cnt);
+
+ /* HE Downlink */
+ seq_puts(file, "\n\n[HE]\nDownlink\nData Type: ");
+
+ for (i = 0; i < 2; i++)
+ seq_printf(file, "%8s | ", dl_he_type[i]);
+
+ seq_puts(file, "\nTotal Count:");
+ seq_printf(file, "%8u | %8u | ",
+ phy->mib.dl_he_su_cnt, phy->mib.dl_he_ext_su_cnt);
+
+ seq_puts(file, "\nDownlink MU-MIMO\nData Type: ");
+
+ for (i = 2; i < 5; i++)
+ seq_printf(file, "%8s | ", dl_he_type[i]);
+
+ seq_puts(file, "\nTotal Count:");
+ seq_printf(file, "%8u | %8u | %8u | ",
+ phy->mib.dl_he_2mu_cnt, phy->mib.dl_he_3mu_cnt,
+ phy->mib.dl_he_4mu_cnt);
+
+ seq_puts(file, "\nDownlink OFDMA\nData Type: ");
+
+ for (i = 5; i < 11; i++)
+ seq_printf(file, "%8s | ", dl_he_type[i]);
+
+ seq_puts(file, "\nTotal Count:");
+ seq_printf(file, "%8u | %8u | %8u | %8u | %9u | %8u | ",
+ phy->mib.dl_he_2ru_cnt,
+ phy->mib.dl_he_3ru_cnt,
+ phy->mib.dl_he_4ru_cnt,
+ phy->mib.dl_he_5to8ru_cnt,
+ phy->mib.dl_he_9to16ru_cnt,
+ phy->mib.dl_he_gtr16ru_cnt);
+
+ sub_total_cnt = phy->mib.dl_he_2mu_cnt +
+ phy->mib.dl_he_3mu_cnt +
+ phy->mib.dl_he_4mu_cnt;
+ total_ppdu_cnt = sub_total_cnt;
+
+ seq_printf(file, "\nTotal HE MU-MIMO DL PPDU count: %lld",
+ sub_total_cnt);
+
+ sub_total_cnt = phy->mib.dl_he_2ru_cnt +
+ phy->mib.dl_he_3ru_cnt +
+ phy->mib.dl_he_4ru_cnt +
+ phy->mib.dl_he_5to8ru_cnt +
+ phy->mib.dl_he_9to16ru_cnt +
+ phy->mib.dl_he_gtr16ru_cnt;
+ total_ppdu_cnt += sub_total_cnt;
+
+ seq_printf(file, "\nTotal HE OFDMA DL PPDU count: %lld",
+ sub_total_cnt);
+
+ total_ppdu_cnt += phy->mib.dl_he_su_cnt + phy->mib.dl_he_ext_su_cnt;
+
+ seq_printf(file, "\nAll HE DL PPDU count: %lld", total_ppdu_cnt);
+
+ /* HE Uplink */
+ seq_puts(file, "\n\nUplink");
+ seq_puts(file, "\nTrigger-based Uplink MU-MIMO\nData Type: ");
+
+ for (i = 0; i < 3; i++)
+ seq_printf(file, "%8s | ", ul_he_type[i]);
+
+ seq_puts(file, "\nTotal Count:");
+ seq_printf(file, "%8u | %8u | %8u | ",
+ phy->mib.ul_hetrig_2mu_cnt,
+ phy->mib.ul_hetrig_3mu_cnt,
+ phy->mib.ul_hetrig_4mu_cnt);
+
+ seq_puts(file, "\nTrigger-based Uplink OFDMA\nData Type: ");
+
+ for (i = 3; i < 10; i++)
+ seq_printf(file, "%8s | ", ul_he_type[i]);
+
+ seq_puts(file, "\nTotal Count:");
+ seq_printf(file, "%8u | %8u | %8u | %8u | %8u | %9u | %7u | ",
+ phy->mib.ul_hetrig_su_cnt,
+ phy->mib.ul_hetrig_2ru_cnt,
+ phy->mib.ul_hetrig_3ru_cnt,
+ phy->mib.ul_hetrig_4ru_cnt,
+ phy->mib.ul_hetrig_5to8ru_cnt,
+ phy->mib.ul_hetrig_9to16ru_cnt,
+ phy->mib.ul_hetrig_gtr16ru_cnt);
+
+ sub_total_cnt = phy->mib.ul_hetrig_2mu_cnt +
+ phy->mib.ul_hetrig_3mu_cnt +
+ phy->mib.ul_hetrig_4mu_cnt;
+ total_ppdu_cnt = sub_total_cnt;
+
+ seq_printf(file, "\nTotal HE MU-MIMO UL TB PPDU count: %lld",
+ sub_total_cnt);
+
+ sub_total_cnt = phy->mib.ul_hetrig_2ru_cnt +
+ phy->mib.ul_hetrig_3ru_cnt +
+ phy->mib.ul_hetrig_4ru_cnt +
+ phy->mib.ul_hetrig_5to8ru_cnt +
+ phy->mib.ul_hetrig_9to16ru_cnt +
+ phy->mib.ul_hetrig_gtr16ru_cnt;
+ total_ppdu_cnt += sub_total_cnt;
+
+ seq_printf(file, "\nTotal HE OFDMA UL TB PPDU count: %lld",
+ sub_total_cnt);
+
+ total_ppdu_cnt += phy->mib.ul_hetrig_su_cnt;
+
+ seq_printf(file, "\nAll HE UL TB PPDU count: %lld\n", total_ppdu_cnt);
+
+exit:
+ mutex_unlock(&dev->mt76.mutex);
+
+ return ret;
+}
+DEFINE_SHOW_ATTRIBUTE(mt7915_muru_stats);
+
+static int
+mt7915_rdd_monitor(struct seq_file *s, void *data)
+{
+ struct mt7915_dev *dev = dev_get_drvdata(s->private);
+ struct cfg80211_chan_def *chandef = &dev->rdd2_chandef;
+ const char *bw;
+ int ret = 0;
+
+ mutex_lock(&dev->mt76.mutex);
+
+ if (!cfg80211_chandef_valid(chandef)) {
+ ret = -EINVAL;
+ goto out;
+ }
+
+ if (!dev->rdd2_phy) {
+ seq_puts(s, "not running\n");
+ goto out;
+ }
+
+ switch (chandef->width) {
+ case NL80211_CHAN_WIDTH_40:
+ bw = "40";
+ break;
+ case NL80211_CHAN_WIDTH_80:
+ bw = "80";
+ break;
+ case NL80211_CHAN_WIDTH_160:
+ bw = "160";
+ break;
+ case NL80211_CHAN_WIDTH_80P80:
+ bw = "80P80";
+ break;
+ default:
+ bw = "20";
+ break;
+ }
+
+ seq_printf(s, "channel %d (%d MHz) width %s MHz center1: %d MHz\n",
+ chandef->chan->hw_value, chandef->chan->center_freq,
+ bw, chandef->center_freq1);
+out:
+ mutex_unlock(&dev->mt76.mutex);
+
+ return ret;
+}
+
+static int
+mt7915_fw_debug_wm_set(void *data, u64 val)
+{
+ struct mt7915_dev *dev = data;
+ enum {
+ DEBUG_TXCMD = 62,
+ DEBUG_CMD_RPT_TX,
+ DEBUG_CMD_RPT_TRIG,
+ DEBUG_SPL,
+ DEBUG_RPT_RX,
+ } debug;
+ bool tx, rx, en;
+ int ret;
+
+ dev->fw.debug_wm = val ? MCU_FW_LOG_TO_HOST : 0;
+
+ if (dev->fw.debug_bin)
+ val = 16;
+ else
+ val = dev->fw.debug_wm;
+
+ tx = dev->fw.debug_wm || (dev->fw.debug_bin & BIT(1));
+ rx = dev->fw.debug_wm || (dev->fw.debug_bin & BIT(2));
+ en = dev->fw.debug_wm || (dev->fw.debug_bin & BIT(0));
+
+ ret = mt7915_mcu_fw_log_2_host(dev, MCU_FW_LOG_WM, val);
+ if (ret)
+ goto out;
+
+ for (debug = DEBUG_TXCMD; debug <= DEBUG_RPT_RX; debug++) {
+ if (debug == DEBUG_RPT_RX)
+ val = en && rx;
+ else
+ val = en && tx;
+
+ ret = mt7915_mcu_fw_dbg_ctrl(dev, debug, val);
+ if (ret)
+ goto out;
+ }
+
+ /* WM CPU info record control */
+ mt76_clear(dev, MT_CPU_UTIL_CTRL, BIT(0));
+ mt76_wr(dev, MT_DIC_CMD_REG_CMD, BIT(2) | BIT(13) | !dev->fw.debug_wm);
+ mt76_wr(dev, MT_MCU_WM_CIRQ_IRQ_MASK_CLR_ADDR, BIT(5));
+ mt76_wr(dev, MT_MCU_WM_CIRQ_IRQ_SOFT_ADDR, BIT(5));
+
+out:
+ if (ret)
+ dev->fw.debug_wm = 0;
+
+ return ret;
+}
+
+static int
+mt7915_fw_debug_wm_get(void *data, u64 *val)
+{
+ struct mt7915_dev *dev = data;
+
+ *val = dev->fw.debug_wm;
+
+ return 0;
+}
+
+DEFINE_DEBUGFS_ATTRIBUTE(fops_fw_debug_wm, mt7915_fw_debug_wm_get,
+ mt7915_fw_debug_wm_set, "%lld\n");
+
+static int
+mt7915_fw_debug_wa_set(void *data, u64 val)
+{
+ struct mt7915_dev *dev = data;
+ int ret;
+
+ dev->fw.debug_wa = val ? MCU_FW_LOG_TO_HOST : 0;
+
+ ret = mt7915_mcu_fw_log_2_host(dev, MCU_FW_LOG_WA, dev->fw.debug_wa);
+ if (ret)
+ goto out;
+
+ ret = mt7915_mcu_wa_cmd(dev, MCU_WA_PARAM_CMD(SET),
+ MCU_WA_PARAM_PDMA_RX, !!dev->fw.debug_wa, 0);
+out:
+ if (ret)
+ dev->fw.debug_wa = 0;
+
+ return ret;
+}
+
+static int
+mt7915_fw_debug_wa_get(void *data, u64 *val)
+{
+ struct mt7915_dev *dev = data;
+
+ *val = dev->fw.debug_wa;
+
+ return 0;
+}
+
+DEFINE_DEBUGFS_ATTRIBUTE(fops_fw_debug_wa, mt7915_fw_debug_wa_get,
+ mt7915_fw_debug_wa_set, "%lld\n");
+
+static struct dentry *
+create_buf_file_cb(const char *filename, struct dentry *parent, umode_t mode,
+ struct rchan_buf *buf, int *is_global)
+{
+ struct dentry *f;
+
+ f = debugfs_create_file("fwlog_data", mode, parent, buf,
+ &relay_file_operations);
+ if (IS_ERR(f))
+ return NULL;
+
+ *is_global = 1;
+
+ return f;
+}
+
+static int
+remove_buf_file_cb(struct dentry *f)
+{
+ debugfs_remove(f);
+
+ return 0;
+}
+
+static int
+mt7915_fw_debug_bin_set(void *data, u64 val)
+{
+ static struct rchan_callbacks relay_cb = {
+ .create_buf_file = create_buf_file_cb,
+ .remove_buf_file = remove_buf_file_cb,
+ };
+ struct mt7915_dev *dev = data;
+
+ if (!dev->relay_fwlog)
+ dev->relay_fwlog = relay_open("fwlog_data", dev->debugfs_dir,
+ 1500, 512, &relay_cb, NULL);
+ if (!dev->relay_fwlog)
+ return -ENOMEM;
+
+ dev->fw.debug_bin = val;
+
+ relay_reset(dev->relay_fwlog);
+
+ return mt7915_fw_debug_wm_set(dev, dev->fw.debug_wm);
+}
+
+static int
+mt7915_fw_debug_bin_get(void *data, u64 *val)
+{
+ struct mt7915_dev *dev = data;
+
+ *val = dev->fw.debug_bin;
+
+ return 0;
+}
+
+DEFINE_DEBUGFS_ATTRIBUTE(fops_fw_debug_bin, mt7915_fw_debug_bin_get,
+ mt7915_fw_debug_bin_set, "%lld\n");
+
+static int
+mt7915_fw_util_wm_show(struct seq_file *file, void *data)
+{
+ struct mt7915_dev *dev = file->private;
+
+ seq_printf(file, "Program counter: 0x%x\n", mt76_rr(dev, MT_WM_MCU_PC));
+
+ if (dev->fw.debug_wm) {
+ seq_printf(file, "Busy: %u%% Peak busy: %u%%\n",
+ mt76_rr(dev, MT_CPU_UTIL_BUSY_PCT),
+ mt76_rr(dev, MT_CPU_UTIL_PEAK_BUSY_PCT));
+ seq_printf(file, "Idle count: %u Peak idle count: %u\n",
+ mt76_rr(dev, MT_CPU_UTIL_IDLE_CNT),
+ mt76_rr(dev, MT_CPU_UTIL_PEAK_IDLE_CNT));
+ }
+
+ return 0;
+}
+
+DEFINE_SHOW_ATTRIBUTE(mt7915_fw_util_wm);
+
+static int
+mt7915_fw_util_wa_show(struct seq_file *file, void *data)
+{
+ struct mt7915_dev *dev = file->private;
+
+ seq_printf(file, "Program counter: 0x%x\n", mt76_rr(dev, MT_WA_MCU_PC));
+
+ if (dev->fw.debug_wa)
+ return mt7915_mcu_wa_cmd(dev, MCU_WA_PARAM_CMD(QUERY),
+ MCU_WA_PARAM_CPU_UTIL, 0, 0);
+
+ return 0;
+}
+
+DEFINE_SHOW_ATTRIBUTE(mt7915_fw_util_wa);
+
+static void
+mt7915_ampdu_stat_read_phy(struct mt7915_phy *phy,
+ struct seq_file *file)
+{
+ struct mt7915_dev *dev = phy->dev;
+ bool ext_phy = phy != &dev->phy;
+ int bound[15], range[4], i;
+ u8 band = phy->mt76->band_idx;
+
+ /* Tx ampdu stat */
+ for (i = 0; i < ARRAY_SIZE(range); i++)
+ range[i] = mt76_rr(dev, MT_MIB_ARNG(band, i));
+
+ for (i = 0; i < ARRAY_SIZE(bound); i++)
+ bound[i] = MT_MIB_ARNCR_RANGE(range[i / 4], i % 4) + 1;
+
+ seq_printf(file, "\nPhy %d, Phy band %d\n", ext_phy, band);
+
+ seq_printf(file, "Length: %8d | ", bound[0]);
+ for (i = 0; i < ARRAY_SIZE(bound) - 1; i++)
+ seq_printf(file, "%3d -%3d | ",
+ bound[i] + 1, bound[i + 1]);
+
+ seq_puts(file, "\nCount: ");
+ for (i = 0; i < ARRAY_SIZE(bound); i++)
+ seq_printf(file, "%8d | ", phy->mt76->aggr_stats[i]);
+ seq_puts(file, "\n");
+
+ seq_printf(file, "BA miss count: %d\n", phy->mib.ba_miss_cnt);
+}
+
+static void
+mt7915_txbf_stat_read_phy(struct mt7915_phy *phy, struct seq_file *s)
+{
+ struct mt76_mib_stats *mib = &phy->mib;
+ static const char * const bw[] = {
+ "BW20", "BW40", "BW80", "BW160"
+ };
+
+ /* Tx Beamformer monitor */
+ seq_puts(s, "\nTx Beamformer applied PPDU counts: ");
+
+ seq_printf(s, "iBF: %d, eBF: %d\n",
+ mib->tx_bf_ibf_ppdu_cnt,
+ mib->tx_bf_ebf_ppdu_cnt);
+
+ /* Tx Beamformer Rx feedback monitor */
+ seq_puts(s, "Tx Beamformer Rx feedback statistics: ");
+
+ seq_printf(s, "All: %d, HE: %d, VHT: %d, HT: %d, ",
+ mib->tx_bf_rx_fb_all_cnt,
+ mib->tx_bf_rx_fb_he_cnt,
+ mib->tx_bf_rx_fb_vht_cnt,
+ mib->tx_bf_rx_fb_ht_cnt);
+
+ seq_printf(s, "%s, NC: %d, NR: %d\n",
+ bw[mib->tx_bf_rx_fb_bw],
+ mib->tx_bf_rx_fb_nc_cnt,
+ mib->tx_bf_rx_fb_nr_cnt);
+
+ /* Tx Beamformee Rx NDPA & Tx feedback report */
+ seq_printf(s, "Tx Beamformee successful feedback frames: %d\n",
+ mib->tx_bf_fb_cpl_cnt);
+ seq_printf(s, "Tx Beamformee feedback triggered counts: %d\n",
+ mib->tx_bf_fb_trig_cnt);
+
+ /* Tx SU & MU counters */
+ seq_printf(s, "Tx multi-user Beamforming counts: %d\n",
+ mib->tx_bf_cnt);
+ seq_printf(s, "Tx multi-user MPDU counts: %d\n", mib->tx_mu_mpdu_cnt);
+ seq_printf(s, "Tx multi-user successful MPDU counts: %d\n",
+ mib->tx_mu_acked_mpdu_cnt);
+ seq_printf(s, "Tx single-user successful MPDU counts: %d\n",
+ mib->tx_su_acked_mpdu_cnt);
+
+ seq_puts(s, "\n");
+}
+
+static int
+mt7915_tx_stats_show(struct seq_file *file, void *data)
+{
+ struct mt7915_phy *phy = file->private;
+ struct mt7915_dev *dev = phy->dev;
+ struct mt76_mib_stats *mib = &phy->mib;
+ int i;
+
+ mutex_lock(&dev->mt76.mutex);
+
+ mt7915_ampdu_stat_read_phy(phy, file);
+ mt7915_mac_update_stats(phy);
+ mt7915_txbf_stat_read_phy(phy, file);
+
+ /* Tx amsdu info */
+ seq_puts(file, "Tx MSDU statistics:\n");
+ for (i = 0; i < ARRAY_SIZE(mib->tx_amsdu); i++) {
+ seq_printf(file, "AMSDU pack count of %d MSDU in TXD: %8d ",
+ i + 1, mib->tx_amsdu[i]);
+ if (mib->tx_amsdu_cnt)
+ seq_printf(file, "(%3d%%)\n",
+ mib->tx_amsdu[i] * 100 / mib->tx_amsdu_cnt);
+ else
+ seq_puts(file, "\n");
+ }
+
+ mutex_unlock(&dev->mt76.mutex);
+
+ return 0;
+}
+
+DEFINE_SHOW_ATTRIBUTE(mt7915_tx_stats);
+
+static void
+mt7915_hw_queue_read(struct seq_file *s, u32 size,
+ const struct hw_queue_map *map)
+{
+ struct mt7915_phy *phy = s->private;
+ struct mt7915_dev *dev = phy->dev;
+ u32 i, val;
+
+ val = mt76_rr(dev, MT_FL_Q_EMPTY);
+ for (i = 0; i < size; i++) {
+ u32 ctrl, head, tail, queued;
+
+ if (val & BIT(map[i].index))
+ continue;
+
+ ctrl = BIT(31) | (map[i].pid << 10) | ((u32)map[i].qid << 24);
+ mt76_wr(dev, MT_FL_Q0_CTRL, ctrl);
+
+ head = mt76_get_field(dev, MT_FL_Q2_CTRL,
+ GENMASK(11, 0));
+ tail = mt76_get_field(dev, MT_FL_Q2_CTRL,
+ GENMASK(27, 16));
+ queued = mt76_get_field(dev, MT_FL_Q3_CTRL,
+ GENMASK(11, 0));
+
+ seq_printf(s, "\t%s: ", map[i].name);
+ seq_printf(s, "queued:0x%03x head:0x%03x tail:0x%03x\n",
+ queued, head, tail);
+ }
+}
+
+static void
+mt7915_sta_hw_queue_read(void *data, struct ieee80211_sta *sta)
+{
+ struct mt7915_sta *msta = (struct mt7915_sta *)sta->drv_priv;
+ struct mt7915_dev *dev = msta->vif->phy->dev;
+ struct seq_file *s = data;
+ u8 ac;
+
+ for (ac = 0; ac < 4; ac++) {
+ u32 qlen, ctrl, val;
+ u32 idx = msta->wcid.idx >> 5;
+ u8 offs = msta->wcid.idx & GENMASK(4, 0);
+
+ ctrl = BIT(31) | BIT(11) | (ac << 24);
+ val = mt76_rr(dev, MT_PLE_AC_QEMPTY(ac, idx));
+
+ if (val & BIT(offs))
+ continue;
+
+ mt76_wr(dev, MT_FL_Q0_CTRL, ctrl | msta->wcid.idx);
+ qlen = mt76_get_field(dev, MT_FL_Q3_CTRL,
+ GENMASK(11, 0));
+ seq_printf(s, "\tSTA %pM wcid %d: AC%d%d queued:%d\n",
+ sta->addr, msta->wcid.idx,
+ msta->vif->mt76.wmm_idx, ac, qlen);
+ }
+}
+
+static int
+mt7915_hw_queues_show(struct seq_file *file, void *data)
+{
+ struct mt7915_phy *phy = file->private;
+ struct mt7915_dev *dev = phy->dev;
+ static const struct hw_queue_map ple_queue_map[] = {
+ { "CPU_Q0", 0, 1, MT_CTX0 },
+ { "CPU_Q1", 1, 1, MT_CTX0 + 1 },
+ { "CPU_Q2", 2, 1, MT_CTX0 + 2 },
+ { "CPU_Q3", 3, 1, MT_CTX0 + 3 },
+ { "ALTX_Q0", 8, 2, MT_LMAC_ALTX0 },
+ { "BMC_Q0", 9, 2, MT_LMAC_BMC0 },
+ { "BCN_Q0", 10, 2, MT_LMAC_BCN0 },
+ { "PSMP_Q0", 11, 2, MT_LMAC_PSMP0 },
+ { "ALTX_Q1", 12, 2, MT_LMAC_ALTX0 + 4 },
+ { "BMC_Q1", 13, 2, MT_LMAC_BMC0 + 4 },
+ { "BCN_Q1", 14, 2, MT_LMAC_BCN0 + 4 },
+ { "PSMP_Q1", 15, 2, MT_LMAC_PSMP0 + 4 },
+ };
+ static const struct hw_queue_map pse_queue_map[] = {
+ { "CPU Q0", 0, 1, MT_CTX0 },
+ { "CPU Q1", 1, 1, MT_CTX0 + 1 },
+ { "CPU Q2", 2, 1, MT_CTX0 + 2 },
+ { "CPU Q3", 3, 1, MT_CTX0 + 3 },
+ { "HIF_Q0", 8, 0, MT_HIF0 },
+ { "HIF_Q1", 9, 0, MT_HIF0 + 1 },
+ { "HIF_Q2", 10, 0, MT_HIF0 + 2 },
+ { "HIF_Q3", 11, 0, MT_HIF0 + 3 },
+ { "HIF_Q4", 12, 0, MT_HIF0 + 4 },
+ { "HIF_Q5", 13, 0, MT_HIF0 + 5 },
+ { "LMAC_Q", 16, 2, 0 },
+ { "MDP_TXQ", 17, 2, 1 },
+ { "MDP_RXQ", 18, 2, 2 },
+ { "SEC_TXQ", 19, 2, 3 },
+ { "SEC_RXQ", 20, 2, 4 },
+ };
+ u32 val, head, tail;
+
+ /* ple queue */
+ val = mt76_rr(dev, MT_PLE_FREEPG_CNT);
+ head = mt76_get_field(dev, MT_PLE_FREEPG_HEAD_TAIL, GENMASK(11, 0));
+ tail = mt76_get_field(dev, MT_PLE_FREEPG_HEAD_TAIL, GENMASK(27, 16));
+ seq_puts(file, "PLE page info:\n");
+ seq_printf(file,
+ "\tTotal free page: 0x%08x head: 0x%03x tail: 0x%03x\n",
+ val, head, tail);
+
+ val = mt76_rr(dev, MT_PLE_PG_HIF_GROUP);
+ head = mt76_get_field(dev, MT_PLE_HIF_PG_INFO, GENMASK(11, 0));
+ tail = mt76_get_field(dev, MT_PLE_HIF_PG_INFO, GENMASK(27, 16));
+ seq_printf(file, "\tHIF free page: 0x%03x res: 0x%03x used: 0x%03x\n",
+ val, head, tail);
+
+ seq_puts(file, "PLE non-empty queue info:\n");
+ mt7915_hw_queue_read(file, ARRAY_SIZE(ple_queue_map),
+ &ple_queue_map[0]);
+
+ /* iterate per-sta ple queue */
+ ieee80211_iterate_stations_atomic(phy->mt76->hw,
+ mt7915_sta_hw_queue_read, file);
+ /* pse queue */
+ seq_puts(file, "PSE non-empty queue info:\n");
+ mt7915_hw_queue_read(file, ARRAY_SIZE(pse_queue_map),
+ &pse_queue_map[0]);
+
+ return 0;
+}
+
+DEFINE_SHOW_ATTRIBUTE(mt7915_hw_queues);
+
+static int
+mt7915_xmit_queues_show(struct seq_file *file, void *data)
+{
+ struct mt7915_phy *phy = file->private;
+ struct mt7915_dev *dev = phy->dev;
+ struct {
+ struct mt76_queue *q;
+ char *queue;
+ } queue_map[] = {
+ { phy->mt76->q_tx[MT_TXQ_BE], " MAIN" },
+ { dev->mt76.q_mcu[MT_MCUQ_WM], " MCUWM" },
+ { dev->mt76.q_mcu[MT_MCUQ_WA], " MCUWA" },
+ { dev->mt76.q_mcu[MT_MCUQ_FWDL], "MCUFWDL" },
+ };
+ int i;
+
+ seq_puts(file, " queue | hw-queued | head | tail |\n");
+ for (i = 0; i < ARRAY_SIZE(queue_map); i++) {
+ struct mt76_queue *q = queue_map[i].q;
+
+ if (!q)
+ continue;
+
+ seq_printf(file, " %s | %9d | %9d | %9d |\n",
+ queue_map[i].queue, q->queued, q->head,
+ q->tail);
+ }
+
+ return 0;
+}
+
+DEFINE_SHOW_ATTRIBUTE(mt7915_xmit_queues);
+
+#define mt7915_txpower_puts(rate) \
+({ \
+ len += scnprintf(buf + len, sz - len, "%-16s:", #rate " (TMAC)"); \
+ for (i = 0; i < mt7915_sku_group_len[SKU_##rate]; i++, offs++) \
+ len += scnprintf(buf + len, sz - len, " %6d", txpwr[offs]); \
+ len += scnprintf(buf + len, sz - len, "\n"); \
+})
+
+#define mt7915_txpower_sets(rate, pwr, flag) \
+({ \
+ offs += len; \
+ len = mt7915_sku_group_len[rate]; \
+ if (mode == flag) { \
+ for (i = 0; i < len; i++) \
+ req.txpower_sku[offs + i] = pwr; \
+ } \
+})
+
+static ssize_t
+mt7915_rate_txpower_get(struct file *file, char __user *user_buf,
+ size_t count, loff_t *ppos)
+{
+ struct mt7915_phy *phy = file->private_data;
+ struct mt7915_dev *dev = phy->dev;
+ s8 txpwr[MT7915_SKU_RATE_NUM];
+ static const size_t sz = 2048;
+ u8 band = phy->mt76->band_idx;
+ int i, offs = 0, len = 0;
+ ssize_t ret;
+ char *buf;
+ u32 reg;
+
+ buf = kzalloc(sz, GFP_KERNEL);
+ if (!buf)
+ return -ENOMEM;
+
+ ret = mt7915_mcu_get_txpower_sku(phy, txpwr, sizeof(txpwr));
+ if (ret)
+ goto out;
+
+ /* Txpower propagation path: TMAC -> TXV -> BBP */
+ len += scnprintf(buf + len, sz - len,
+ "\nPhy%d Tx power table (channel %d)\n",
+ phy != &dev->phy, phy->mt76->chandef.chan->hw_value);
+ len += scnprintf(buf + len, sz - len, "%-16s %6s %6s %6s %6s\n",
+ " ", "1m", "2m", "5m", "11m");
+ mt7915_txpower_puts(CCK);
+
+ len += scnprintf(buf + len, sz - len,
+ "%-16s %6s %6s %6s %6s %6s %6s %6s %6s\n",
+ " ", "6m", "9m", "12m", "18m", "24m", "36m", "48m",
+ "54m");
+ mt7915_txpower_puts(OFDM);
+
+ len += scnprintf(buf + len, sz - len,
+ "%-16s %6s %6s %6s %6s %6s %6s %6s %6s\n",
+ " ", "mcs0", "mcs1", "mcs2", "mcs3", "mcs4",
+ "mcs5", "mcs6", "mcs7");
+ mt7915_txpower_puts(HT_BW20);
+
+ len += scnprintf(buf + len, sz - len,
+ "%-16s %6s %6s %6s %6s %6s %6s %6s %6s %6s\n",
+ " ", "mcs0", "mcs1", "mcs2", "mcs3", "mcs4", "mcs5",
+ "mcs6", "mcs7", "mcs32");
+ mt7915_txpower_puts(HT_BW40);
+
+ len += scnprintf(buf + len, sz - len,
+ "%-16s %6s %6s %6s %6s %6s %6s %6s %6s %6s %6s %6s %6s\n",
+ " ", "mcs0", "mcs1", "mcs2", "mcs3", "mcs4", "mcs5",
+ "mcs6", "mcs7", "mcs8", "mcs9", "mcs10", "mcs11");
+ mt7915_txpower_puts(VHT_BW20);
+ mt7915_txpower_puts(VHT_BW40);
+ mt7915_txpower_puts(VHT_BW80);
+ mt7915_txpower_puts(VHT_BW160);
+ mt7915_txpower_puts(HE_RU26);
+ mt7915_txpower_puts(HE_RU52);
+ mt7915_txpower_puts(HE_RU106);
+ mt7915_txpower_puts(HE_RU242);
+ mt7915_txpower_puts(HE_RU484);
+ mt7915_txpower_puts(HE_RU996);
+ mt7915_txpower_puts(HE_RU2x996);
+
+ reg = is_mt7915(&dev->mt76) ? MT_WF_PHY_TPC_CTRL_STAT(band) :
+ MT_WF_PHY_TPC_CTRL_STAT_MT7916(band);
+
+ len += scnprintf(buf + len, sz - len, "\nTx power (bbp) : %6ld\n",
+ mt76_get_field(dev, reg, MT_WF_PHY_TPC_POWER));
+
+ ret = simple_read_from_buffer(user_buf, count, ppos, buf, len);
+
+out:
+ kfree(buf);
+ return ret;
+}
+
+static ssize_t
+mt7915_rate_txpower_set(struct file *file, const char __user *user_buf,
+ size_t count, loff_t *ppos)
+{
+ struct mt7915_phy *phy = file->private_data;
+ struct mt7915_dev *dev = phy->dev;
+ struct mt76_phy *mphy = phy->mt76;
+ struct mt7915_mcu_txpower_sku req = {
+ .format_id = TX_POWER_LIMIT_TABLE,
+ .band_idx = phy->mt76->band_idx,
+ };
+ char buf[100];
+ int i, ret, pwr160 = 0, pwr80 = 0, pwr40 = 0, pwr20 = 0;
+ enum mac80211_rx_encoding mode;
+ u32 offs = 0, len = 0;
+
+ if (count >= sizeof(buf))
+ return -EINVAL;
+
+ if (copy_from_user(buf, user_buf, count))
+ return -EFAULT;
+
+ if (count && buf[count - 1] == '\n')
+ buf[count - 1] = '\0';
+ else
+ buf[count] = '\0';
+
+ if (sscanf(buf, "%u %u %u %u %u",
+ &mode, &pwr160, &pwr80, &pwr40, &pwr20) != 5) {
+ dev_warn(dev->mt76.dev,
+ "per bandwidth power limit: Mode BW160 BW80 BW40 BW20");
+ return -EINVAL;
+ }
+
+ if (mode > RX_ENC_HE)
+ return -EINVAL;
+
+ if (pwr160)
+ pwr160 = mt7915_get_power_bound(phy, pwr160);
+ if (pwr80)
+ pwr80 = mt7915_get_power_bound(phy, pwr80);
+ if (pwr40)
+ pwr40 = mt7915_get_power_bound(phy, pwr40);
+ if (pwr20)
+ pwr20 = mt7915_get_power_bound(phy, pwr20);
+
+ if (pwr160 < 0 || pwr80 < 0 || pwr40 < 0 || pwr20 < 0)
+ return -EINVAL;
+
+ mutex_lock(&dev->mt76.mutex);
+ ret = mt7915_mcu_get_txpower_sku(phy, req.txpower_sku,
+ sizeof(req.txpower_sku));
+ if (ret)
+ goto out;
+
+ mt7915_txpower_sets(SKU_CCK, pwr20, RX_ENC_LEGACY);
+ mt7915_txpower_sets(SKU_OFDM, pwr20, RX_ENC_LEGACY);
+ if (mode == RX_ENC_LEGACY)
+ goto skip;
+
+ mt7915_txpower_sets(SKU_HT_BW20, pwr20, RX_ENC_HT);
+ mt7915_txpower_sets(SKU_HT_BW40, pwr40, RX_ENC_HT);
+ if (mode == RX_ENC_HT)
+ goto skip;
+
+ mt7915_txpower_sets(SKU_VHT_BW20, pwr20, RX_ENC_VHT);
+ mt7915_txpower_sets(SKU_VHT_BW40, pwr40, RX_ENC_VHT);
+ mt7915_txpower_sets(SKU_VHT_BW80, pwr80, RX_ENC_VHT);
+ mt7915_txpower_sets(SKU_VHT_BW160, pwr160, RX_ENC_VHT);
+ if (mode == RX_ENC_VHT)
+ goto skip;
+
+ mt7915_txpower_sets(SKU_HE_RU26, pwr20, RX_ENC_HE + 1);
+ mt7915_txpower_sets(SKU_HE_RU52, pwr20, RX_ENC_HE + 1);
+ mt7915_txpower_sets(SKU_HE_RU106, pwr20, RX_ENC_HE + 1);
+ mt7915_txpower_sets(SKU_HE_RU242, pwr20, RX_ENC_HE);
+ mt7915_txpower_sets(SKU_HE_RU484, pwr40, RX_ENC_HE);
+ mt7915_txpower_sets(SKU_HE_RU996, pwr80, RX_ENC_HE);
+ mt7915_txpower_sets(SKU_HE_RU2x996, pwr160, RX_ENC_HE);
+skip:
+ ret = mt76_mcu_send_msg(&dev->mt76, MCU_EXT_CMD(TX_POWER_FEATURE_CTRL),
+ &req, sizeof(req), true);
+ if (ret)
+ goto out;
+
+ mphy->txpower_cur = max(mphy->txpower_cur,
+ max(pwr160, max(pwr80, max(pwr40, pwr20))));
+out:
+ mutex_unlock(&dev->mt76.mutex);
+
+ return ret ? ret : count;
+}
+
+static const struct file_operations mt7915_rate_txpower_fops = {
+ .write = mt7915_rate_txpower_set,
+ .read = mt7915_rate_txpower_get,
+ .open = simple_open,
+ .owner = THIS_MODULE,
+ .llseek = default_llseek,
+};
+
+static int
+mt7915_twt_stats(struct seq_file *s, void *data)
+{
+ struct mt7915_dev *dev = dev_get_drvdata(s->private);
+ struct mt7915_twt_flow *iter;
+
+ rcu_read_lock();
+
+ seq_puts(s, " wcid | id | flags | exp | mantissa");
+ seq_puts(s, " | duration | tsf |\n");
+ list_for_each_entry_rcu(iter, &dev->twt_list, list)
+ seq_printf(s,
+ "%9d | %8d | %5c%c%c%c | %8d | %8d | %8d | %14lld |\n",
+ iter->wcid, iter->id,
+ iter->sched ? 's' : 'u',
+ iter->protection ? 'p' : '-',
+ iter->trigger ? 't' : '-',
+ iter->flowtype ? '-' : 'a',
+ iter->exp, iter->mantissa,
+ iter->duration, iter->tsf);
+
+ rcu_read_unlock();
+
+ return 0;
+}
+
+/* The index of RF registers uses the generic regidx, which combines two
+ * parts: WF selection [31:24] and offset [23:0].
+ */
+static int
+mt7915_rf_regval_get(void *data, u64 *val)
+{
+ struct mt7915_dev *dev = data;
+ u32 regval;
+ int ret;
+
+ ret = mt7915_mcu_rf_regval(dev, dev->mt76.debugfs_reg, &regval, false);
+ if (ret)
+ return ret;
+
+ *val = regval;
+
+ return 0;
+}
+
+static int
+mt7915_rf_regval_set(void *data, u64 val)
+{
+ struct mt7915_dev *dev = data;
+ u32 val32 = val;
+
+ return mt7915_mcu_rf_regval(dev, dev->mt76.debugfs_reg, &val32, true);
+}
+
+DEFINE_DEBUGFS_ATTRIBUTE(fops_rf_regval, mt7915_rf_regval_get,
+ mt7915_rf_regval_set, "0x%08llx\n");
+
+int mt7915_init_debugfs(struct mt7915_phy *phy)
+{
+ struct mt7915_dev *dev = phy->dev;
+ bool ext_phy = phy != &dev->phy;
+ struct dentry *dir;
+
+ dir = mt76_register_debugfs_fops(phy->mt76, NULL);
+ if (!dir)
+ return -ENOMEM;
+ debugfs_create_file("muru_debug", 0600, dir, dev, &fops_muru_debug);
+ debugfs_create_file("muru_stats", 0400, dir, phy,
+ &mt7915_muru_stats_fops);
+ debugfs_create_file("hw-queues", 0400, dir, phy,
+ &mt7915_hw_queues_fops);
+ debugfs_create_file("xmit-queues", 0400, dir, phy,
+ &mt7915_xmit_queues_fops);
+ debugfs_create_file("tx_stats", 0400, dir, phy, &mt7915_tx_stats_fops);
+ debugfs_create_file("sys_recovery", 0600, dir, phy,
+ &mt7915_sys_recovery_ops);
+ debugfs_create_file("fw_debug_wm", 0600, dir, dev, &fops_fw_debug_wm);
+ debugfs_create_file("fw_debug_wa", 0600, dir, dev, &fops_fw_debug_wa);
+ debugfs_create_file("fw_debug_bin", 0600, dir, dev, &fops_fw_debug_bin);
+ debugfs_create_file("fw_util_wm", 0400, dir, dev,
+ &mt7915_fw_util_wm_fops);
+ debugfs_create_file("fw_util_wa", 0400, dir, dev,
+ &mt7915_fw_util_wa_fops);
+ debugfs_create_file("implicit_txbf", 0600, dir, dev,
+ &fops_implicit_txbf);
+ debugfs_create_file("txpower_sku", 0400, dir, phy,
+ &mt7915_rate_txpower_fops);
+ debugfs_create_devm_seqfile(dev->mt76.dev, "twt_stats", dir,
+ mt7915_twt_stats);
+ debugfs_create_file("rf_regval", 0600, dir, dev, &fops_rf_regval);
+
+ if (!dev->dbdc_support || phy->mt76->band_idx) {
+ debugfs_create_u32("dfs_hw_pattern", 0400, dir,
+ &dev->hw_pattern);
+ debugfs_create_file("radar_trigger", 0200, dir, dev,
+ &fops_radar_trigger);
+ debugfs_create_devm_seqfile(dev->mt76.dev, "rdd_monitor", dir,
+ mt7915_rdd_monitor);
+ }
+
+ if (!ext_phy)
+ dev->debugfs_dir = dir;
+
+ return 0;
+}
+
+static void
+mt7915_debugfs_write_fwlog(struct mt7915_dev *dev, const void *hdr, int hdrlen,
+ const void *data, int len)
+{
+ static DEFINE_SPINLOCK(lock);
+ unsigned long flags;
+ void *dest;
+
+ spin_lock_irqsave(&lock, flags);
+ dest = relay_reserve(dev->relay_fwlog, hdrlen + len + 4);
+ if (dest) {
+ *(u32 *)dest = hdrlen + len;
+ dest += 4;
+
+ if (hdrlen) {
+ memcpy(dest, hdr, hdrlen);
+ dest += hdrlen;
+ }
+
+ memcpy(dest, data, len);
+ relay_flush(dev->relay_fwlog);
+ }
+ spin_unlock_irqrestore(&lock, flags);
+}
+
+void mt7915_debugfs_rx_fw_monitor(struct mt7915_dev *dev, const void *data, int len)
+{
+ struct {
+ __le32 magic;
+ __le32 timestamp;
+ __le16 msg_type;
+ __le16 len;
+ } hdr = {
+ .magic = cpu_to_le32(FW_BIN_LOG_MAGIC),
+ .msg_type = cpu_to_le16(PKT_TYPE_RX_FW_MONITOR),
+ };
+
+ if (!dev->relay_fwlog)
+ return;
+
+ hdr.timestamp = cpu_to_le32(mt76_rr(dev, MT_LPON_FRCR(0)));
+ hdr.len = *(__le16 *)data;
+ mt7915_debugfs_write_fwlog(dev, &hdr, sizeof(hdr), data, len);
+}
+
+bool mt7915_debugfs_rx_log(struct mt7915_dev *dev, const void *data, int len)
+{
+ if (get_unaligned_le32(data) != FW_BIN_LOG_MAGIC)
+ return false;
+
+ if (dev->relay_fwlog)
+ mt7915_debugfs_write_fwlog(dev, NULL, 0, data, len);
+
+ return true;
+}
+
+#ifdef CONFIG_MAC80211_DEBUGFS
+/** per-station debugfs **/
+
+static ssize_t mt7915_sta_fixed_rate_set(struct file *file,
+ const char __user *user_buf,
+ size_t count, loff_t *ppos)
+{
+ struct ieee80211_sta *sta = file->private_data;
+ struct mt7915_sta *msta = (struct mt7915_sta *)sta->drv_priv;
+ struct mt7915_dev *dev = msta->vif->phy->dev;
+ struct ieee80211_vif *vif;
+ struct sta_phy phy = {};
+ char buf[100];
+ int ret;
+ u32 field;
+ u8 i, gi, he_ltf;
+
+ if (count >= sizeof(buf))
+ return -EINVAL;
+
+ if (copy_from_user(buf, user_buf, count))
+ return -EFAULT;
+
+ if (count && buf[count - 1] == '\n')
+ buf[count - 1] = '\0';
+ else
+ buf[count] = '\0';
+
+ /* mode - cck: 0, ofdm: 1, ht: 2, gf: 3, vht: 4, he_su: 8, he_er: 9
+ * bw - bw20: 0, bw40: 1, bw80: 2, bw160: 3
+ * nss - vht: 1~4, he: 1~4, others: ignore
+ * mcs - cck: 0~4, ofdm: 0~7, ht: 0~32, vht: 0~9, he_su: 0~11, he_er: 0~2
+ * gi - (ht/vht) lgi: 0, sgi: 1; (he) 0.8us: 0, 1.6us: 1, 3.2us: 2
+ * ldpc - off: 0, on: 1
+ * stbc - off: 0, on: 1
+ * he_ltf - 1xltf: 0, 2xltf: 1, 4xltf: 2
+ */
+ if (sscanf(buf, "%hhu %hhu %hhu %hhu %hhu %hhu %hhu %hhu",
+ &phy.type, &phy.bw, &phy.nss, &phy.mcs, &gi,
+ &phy.ldpc, &phy.stbc, &he_ltf) != 8) {
+ dev_warn(dev->mt76.dev,
+ "format: Mode BW NSS MCS (HE)GI LDPC STBC HE_LTF\n");
+ field = RATE_PARAM_AUTO;
+ goto out;
+ }
+
+ phy.ldpc = (phy.bw || phy.ldpc) * GENMASK(2, 0);
+ for (i = 0; i <= phy.bw; i++) {
+ phy.sgi |= gi << (i << sta->deflink.he_cap.has_he);
+ phy.he_ltf |= he_ltf << (i << sta->deflink.he_cap.has_he);
+ }
+ field = RATE_PARAM_FIXED;
+
+out:
+ vif = container_of((void *)msta->vif, struct ieee80211_vif, drv_priv);
+ ret = mt7915_mcu_set_fixed_rate_ctrl(dev, vif, sta, &phy, field);
+ if (ret)
+ return -EFAULT;
+
+ return count;
+}
+
+static const struct file_operations fops_fixed_rate = {
+ .write = mt7915_sta_fixed_rate_set,
+ .open = simple_open,
+ .owner = THIS_MODULE,
+ .llseek = default_llseek,
+};
+
+static int
+mt7915_queues_show(struct seq_file *s, void *data)
+{
+ struct ieee80211_sta *sta = s->private;
+
+ mt7915_sta_hw_queue_read(s, sta);
+
+ return 0;
+}
+
+DEFINE_SHOW_ATTRIBUTE(mt7915_queues);
+
+void mt7915_sta_add_debugfs(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
+ struct ieee80211_sta *sta, struct dentry *dir)
+{
+ debugfs_create_file("fixed_rate", 0600, dir, sta, &fops_fixed_rate);
+ debugfs_create_file("hw-queues", 0400, dir, sta, &mt7915_queues_fops);
+}
+
+#endif
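One note on the firmware log path: mt7915_debugfs_write_fwlog() frames every record pushed into the fwlog_data relay channel as a host-order u32 length prefix followed by that many bytes (the optional monitor header plus payload), and relay_open() above uses 512 sub-buffers of 1500 bytes. A minimal reader-side sketch of stepping through a captured fwlog_data buffer, assuming that framing (illustrative only, not driver code):

static const void *example_next_fwlog_rec(const void *buf, size_t avail,
					   size_t *rec_len)
{
	u32 len;

	if (avail < sizeof(len))
		return NULL;

	memcpy(&len, buf, sizeof(len));		/* host-order length prefix */
	if (!len || sizeof(len) + len > avail)
		return NULL;			/* truncated or end of capture */

	*rec_len = len;
	return (const u8 *)buf + sizeof(len);	/* monitor header (if any) + payload */
}

The caller then advances by 4 + *rec_len bytes to reach the next record.
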
diff --git a/drivers/net/wireless/mediatek/mt76/mt7915/dma.c b/drivers/net/wireless/mediatek/mt76/mt7915/dma.c
new file mode 100644
index 0000000000..59a44d79aa
--- /dev/null
+++ b/drivers/net/wireless/mediatek/mt76/mt7915/dma.c
@@ -0,0 +1,671 @@
+// SPDX-License-Identifier: ISC
+/* Copyright (C) 2020 MediaTek Inc. */
+
+#include "mt7915.h"
+#include "../dma.h"
+#include "mac.h"
+
+static int
+mt7915_init_tx_queues(struct mt7915_phy *phy, int idx, int n_desc, int ring_base)
+{
+ struct mt7915_dev *dev = phy->dev;
+
+ if (mtk_wed_device_active(&phy->dev->mt76.mmio.wed)) {
+ if (is_mt798x(&dev->mt76))
+ ring_base += MT_TXQ_ID(0) * MT_RING_SIZE;
+ else
+ ring_base = MT_WED_TX_RING_BASE;
+
+ idx -= MT_TXQ_ID(0);
+ }
+
+ return mt76_connac_init_tx_queues(phy->mt76, idx, n_desc, ring_base,
+ MT_WED_Q_TX(idx));
+}
+
+static int mt7915_poll_tx(struct napi_struct *napi, int budget)
+{
+ struct mt7915_dev *dev;
+
+ dev = container_of(napi, struct mt7915_dev, mt76.tx_napi);
+
+ mt76_connac_tx_cleanup(&dev->mt76);
+ if (napi_complete_done(napi, 0))
+ mt7915_irq_enable(dev, MT_INT_TX_DONE_MCU);
+
+ return 0;
+}
+
+static void mt7915_dma_config(struct mt7915_dev *dev)
+{
+#define Q_CONFIG(q, wfdma, int, id) do { \
+ if (wfdma) \
+ dev->wfdma_mask |= (1 << (q)); \
+ dev->q_int_mask[(q)] = int; \
+ dev->q_id[(q)] = id; \
+ } while (0)
+
+#define MCUQ_CONFIG(q, wfdma, int, id) Q_CONFIG(q, (wfdma), (int), (id))
+#define RXQ_CONFIG(q, wfdma, int, id) Q_CONFIG(__RXQ(q), (wfdma), (int), (id))
+#define TXQ_CONFIG(q, wfdma, int, id) Q_CONFIG(__TXQ(q), (wfdma), (int), (id))
+
+ if (is_mt7915(&dev->mt76)) {
+ RXQ_CONFIG(MT_RXQ_MAIN, WFDMA0, MT_INT_RX_DONE_BAND0,
+ MT7915_RXQ_BAND0);
+ RXQ_CONFIG(MT_RXQ_MCU, WFDMA1, MT_INT_RX_DONE_WM,
+ MT7915_RXQ_MCU_WM);
+ RXQ_CONFIG(MT_RXQ_MCU_WA, WFDMA1, MT_INT_RX_DONE_WA,
+ MT7915_RXQ_MCU_WA);
+ RXQ_CONFIG(MT_RXQ_BAND1, WFDMA0, MT_INT_RX_DONE_BAND1,
+ MT7915_RXQ_BAND1);
+ RXQ_CONFIG(MT_RXQ_BAND1_WA, WFDMA1, MT_INT_RX_DONE_WA_EXT,
+ MT7915_RXQ_MCU_WA_EXT);
+ RXQ_CONFIG(MT_RXQ_MAIN_WA, WFDMA1, MT_INT_RX_DONE_WA_MAIN,
+ MT7915_RXQ_MCU_WA);
+ TXQ_CONFIG(0, WFDMA1, MT_INT_TX_DONE_BAND0, MT7915_TXQ_BAND0);
+ TXQ_CONFIG(1, WFDMA1, MT_INT_TX_DONE_BAND1, MT7915_TXQ_BAND1);
+ MCUQ_CONFIG(MT_MCUQ_WM, WFDMA1, MT_INT_TX_DONE_MCU_WM,
+ MT7915_TXQ_MCU_WM);
+ MCUQ_CONFIG(MT_MCUQ_WA, WFDMA1, MT_INT_TX_DONE_MCU_WA,
+ MT7915_TXQ_MCU_WA);
+ MCUQ_CONFIG(MT_MCUQ_FWDL, WFDMA1, MT_INT_TX_DONE_FWDL,
+ MT7915_TXQ_FWDL);
+ } else {
+ RXQ_CONFIG(MT_RXQ_MCU, WFDMA0, MT_INT_RX_DONE_WM,
+ MT7916_RXQ_MCU_WM);
+ RXQ_CONFIG(MT_RXQ_BAND1_WA, WFDMA0, MT_INT_RX_DONE_WA_EXT_MT7916,
+ MT7916_RXQ_MCU_WA_EXT);
+ MCUQ_CONFIG(MT_MCUQ_WM, WFDMA0, MT_INT_TX_DONE_MCU_WM,
+ MT7915_TXQ_MCU_WM);
+ MCUQ_CONFIG(MT_MCUQ_WA, WFDMA0, MT_INT_TX_DONE_MCU_WA_MT7916,
+ MT7915_TXQ_MCU_WA);
+ MCUQ_CONFIG(MT_MCUQ_FWDL, WFDMA0, MT_INT_TX_DONE_FWDL,
+ MT7915_TXQ_FWDL);
+
+ if (is_mt7916(&dev->mt76) && mtk_wed_device_active(&dev->mt76.mmio.wed)) {
+ RXQ_CONFIG(MT_RXQ_MAIN, WFDMA0, MT_INT_WED_RX_DONE_BAND0_MT7916,
+ MT7916_RXQ_BAND0);
+ RXQ_CONFIG(MT_RXQ_MCU_WA, WFDMA0, MT_INT_WED_RX_DONE_WA_MT7916,
+ MT7916_RXQ_MCU_WA);
+ if (dev->hif2)
+ RXQ_CONFIG(MT_RXQ_BAND1, WFDMA0,
+ MT_INT_RX_DONE_BAND1_MT7916,
+ MT7916_RXQ_BAND1);
+ else
+ RXQ_CONFIG(MT_RXQ_BAND1, WFDMA0,
+ MT_INT_WED_RX_DONE_BAND1_MT7916,
+ MT7916_RXQ_BAND1);
+ RXQ_CONFIG(MT_RXQ_MAIN_WA, WFDMA0, MT_INT_WED_RX_DONE_WA_MAIN_MT7916,
+ MT7916_RXQ_MCU_WA_MAIN);
+ TXQ_CONFIG(0, WFDMA0, MT_INT_WED_TX_DONE_BAND0,
+ MT7915_TXQ_BAND0);
+ TXQ_CONFIG(1, WFDMA0, MT_INT_WED_TX_DONE_BAND1,
+ MT7915_TXQ_BAND1);
+ } else {
+ RXQ_CONFIG(MT_RXQ_MAIN, WFDMA0, MT_INT_RX_DONE_BAND0_MT7916,
+ MT7916_RXQ_BAND0);
+ RXQ_CONFIG(MT_RXQ_MCU_WA, WFDMA0, MT_INT_RX_DONE_WA,
+ MT7916_RXQ_MCU_WA);
+ RXQ_CONFIG(MT_RXQ_BAND1, WFDMA0, MT_INT_RX_DONE_BAND1_MT7916,
+ MT7916_RXQ_BAND1);
+ RXQ_CONFIG(MT_RXQ_MAIN_WA, WFDMA0, MT_INT_RX_DONE_WA_MAIN_MT7916,
+ MT7916_RXQ_MCU_WA_MAIN);
+ TXQ_CONFIG(0, WFDMA0, MT_INT_TX_DONE_BAND0,
+ MT7915_TXQ_BAND0);
+ TXQ_CONFIG(1, WFDMA0, MT_INT_TX_DONE_BAND1,
+ MT7915_TXQ_BAND1);
+ }
+ }
+}
+
+static void __mt7915_dma_prefetch(struct mt7915_dev *dev, u32 ofs)
+{
+#define PREFETCH(_base, _depth) ((_base) << 16 | (_depth))
+ u32 base = 0;
+
+ /* prefetch SRAM wrapping boundary for tx/rx ring. */
+ mt76_wr(dev, MT_MCUQ_EXT_CTRL(MT_MCUQ_FWDL) + ofs, PREFETCH(0x0, 0x4));
+ mt76_wr(dev, MT_MCUQ_EXT_CTRL(MT_MCUQ_WM) + ofs, PREFETCH(0x40, 0x4));
+ mt76_wr(dev, MT_TXQ_EXT_CTRL(0) + ofs, PREFETCH(0x80, 0x4));
+ mt76_wr(dev, MT_TXQ_EXT_CTRL(1) + ofs, PREFETCH(0xc0, 0x4));
+ mt76_wr(dev, MT_MCUQ_EXT_CTRL(MT_MCUQ_WA) + ofs, PREFETCH(0x100, 0x4));
+
+ mt76_wr(dev, MT_RXQ_BAND1_CTRL(MT_RXQ_MCU) + ofs,
+ PREFETCH(0x140, 0x4));
+ mt76_wr(dev, MT_RXQ_BAND1_CTRL(MT_RXQ_MCU_WA) + ofs,
+ PREFETCH(0x180, 0x4));
+ if (!is_mt7915(&dev->mt76)) {
+ mt76_wr(dev, MT_RXQ_BAND1_CTRL(MT_RXQ_MAIN_WA) + ofs,
+ PREFETCH(0x1c0, 0x4));
+ base = 0x40;
+ }
+ mt76_wr(dev, MT_RXQ_BAND1_CTRL(MT_RXQ_BAND1_WA) + ofs,
+ PREFETCH(0x1c0 + base, 0x4));
+ mt76_wr(dev, MT_RXQ_BAND1_CTRL(MT_RXQ_MAIN) + ofs,
+ PREFETCH(0x200 + base, 0x4));
+ mt76_wr(dev, MT_RXQ_BAND1_CTRL(MT_RXQ_BAND1) + ofs,
+ PREFETCH(0x240 + base, 0x4));
+
+	/* for mt7915, the ring next to the last used ring must be
+	 * initialized.
+	 */
+ if (is_mt7915(&dev->mt76)) {
+ ofs += 0x4;
+ mt76_wr(dev, MT_MCUQ_EXT_CTRL(MT_MCUQ_WA) + ofs,
+ PREFETCH(0x140, 0x0));
+ mt76_wr(dev, MT_RXQ_BAND1_CTRL(MT_RXQ_BAND1_WA) + ofs,
+ PREFETCH(0x200 + base, 0x0));
+ mt76_wr(dev, MT_RXQ_BAND1_CTRL(MT_RXQ_BAND1) + ofs,
+ PREFETCH(0x280 + base, 0x0));
+ }
+}
+
+void mt7915_dma_prefetch(struct mt7915_dev *dev)
+{
+ __mt7915_dma_prefetch(dev, 0);
+ if (dev->hif2)
+ __mt7915_dma_prefetch(dev, MT_WFDMA0_PCIE1(0) - MT_WFDMA0(0));
+}
+
+static void mt7915_dma_disable(struct mt7915_dev *dev, bool rst)
+{
+ struct mt76_dev *mdev = &dev->mt76;
+ u32 hif1_ofs = 0;
+
+ if (dev->hif2)
+ hif1_ofs = MT_WFDMA0_PCIE1(0) - MT_WFDMA0(0);
+
+ /* reset */
+ if (rst) {
+ mt76_clear(dev, MT_WFDMA0_RST,
+ MT_WFDMA0_RST_DMASHDL_ALL_RST |
+ MT_WFDMA0_RST_LOGIC_RST);
+
+ mt76_set(dev, MT_WFDMA0_RST,
+ MT_WFDMA0_RST_DMASHDL_ALL_RST |
+ MT_WFDMA0_RST_LOGIC_RST);
+
+ if (is_mt7915(mdev)) {
+ mt76_clear(dev, MT_WFDMA1_RST,
+ MT_WFDMA1_RST_DMASHDL_ALL_RST |
+ MT_WFDMA1_RST_LOGIC_RST);
+
+ mt76_set(dev, MT_WFDMA1_RST,
+ MT_WFDMA1_RST_DMASHDL_ALL_RST |
+ MT_WFDMA1_RST_LOGIC_RST);
+ }
+
+ if (dev->hif2) {
+ mt76_clear(dev, MT_WFDMA0_RST + hif1_ofs,
+ MT_WFDMA0_RST_DMASHDL_ALL_RST |
+ MT_WFDMA0_RST_LOGIC_RST);
+
+ mt76_set(dev, MT_WFDMA0_RST + hif1_ofs,
+ MT_WFDMA0_RST_DMASHDL_ALL_RST |
+ MT_WFDMA0_RST_LOGIC_RST);
+
+ if (is_mt7915(mdev)) {
+ mt76_clear(dev, MT_WFDMA1_RST + hif1_ofs,
+ MT_WFDMA1_RST_DMASHDL_ALL_RST |
+ MT_WFDMA1_RST_LOGIC_RST);
+
+ mt76_set(dev, MT_WFDMA1_RST + hif1_ofs,
+ MT_WFDMA1_RST_DMASHDL_ALL_RST |
+ MT_WFDMA1_RST_LOGIC_RST);
+ }
+ }
+ }
+
+ /* disable */
+ mt76_clear(dev, MT_WFDMA0_GLO_CFG,
+ MT_WFDMA0_GLO_CFG_TX_DMA_EN |
+ MT_WFDMA0_GLO_CFG_RX_DMA_EN |
+ MT_WFDMA0_GLO_CFG_OMIT_TX_INFO |
+ MT_WFDMA0_GLO_CFG_OMIT_RX_INFO |
+ MT_WFDMA0_GLO_CFG_OMIT_RX_INFO_PFET2);
+
+ if (is_mt7915(mdev))
+ mt76_clear(dev, MT_WFDMA1_GLO_CFG,
+ MT_WFDMA1_GLO_CFG_TX_DMA_EN |
+ MT_WFDMA1_GLO_CFG_RX_DMA_EN |
+ MT_WFDMA1_GLO_CFG_OMIT_TX_INFO |
+ MT_WFDMA1_GLO_CFG_OMIT_RX_INFO |
+ MT_WFDMA1_GLO_CFG_OMIT_RX_INFO_PFET2);
+
+ if (dev->hif2) {
+ mt76_clear(dev, MT_WFDMA0_GLO_CFG + hif1_ofs,
+ MT_WFDMA0_GLO_CFG_TX_DMA_EN |
+ MT_WFDMA0_GLO_CFG_RX_DMA_EN |
+ MT_WFDMA0_GLO_CFG_OMIT_TX_INFO |
+ MT_WFDMA0_GLO_CFG_OMIT_RX_INFO |
+ MT_WFDMA0_GLO_CFG_OMIT_RX_INFO_PFET2);
+
+ if (is_mt7915(mdev))
+ mt76_clear(dev, MT_WFDMA1_GLO_CFG + hif1_ofs,
+ MT_WFDMA1_GLO_CFG_TX_DMA_EN |
+ MT_WFDMA1_GLO_CFG_RX_DMA_EN |
+ MT_WFDMA1_GLO_CFG_OMIT_TX_INFO |
+ MT_WFDMA1_GLO_CFG_OMIT_RX_INFO |
+ MT_WFDMA1_GLO_CFG_OMIT_RX_INFO_PFET2);
+ }
+}
+
+int mt7915_dma_start(struct mt7915_dev *dev, bool reset, bool wed_reset)
+{
+ struct mt76_dev *mdev = &dev->mt76;
+ u32 hif1_ofs = 0;
+ u32 irq_mask;
+
+ if (dev->hif2)
+ hif1_ofs = MT_WFDMA0_PCIE1(0) - MT_WFDMA0(0);
+
+ /* enable wpdma tx/rx */
+ if (!reset) {
+ mt76_set(dev, MT_WFDMA0_GLO_CFG,
+ MT_WFDMA0_GLO_CFG_TX_DMA_EN |
+ MT_WFDMA0_GLO_CFG_RX_DMA_EN |
+ MT_WFDMA0_GLO_CFG_OMIT_TX_INFO |
+ MT_WFDMA0_GLO_CFG_OMIT_RX_INFO_PFET2);
+
+ if (is_mt7915(mdev))
+ mt76_set(dev, MT_WFDMA1_GLO_CFG,
+ MT_WFDMA1_GLO_CFG_TX_DMA_EN |
+ MT_WFDMA1_GLO_CFG_RX_DMA_EN |
+ MT_WFDMA1_GLO_CFG_OMIT_TX_INFO |
+ MT_WFDMA1_GLO_CFG_OMIT_RX_INFO);
+
+ if (dev->hif2) {
+ mt76_set(dev, MT_WFDMA0_GLO_CFG + hif1_ofs,
+ MT_WFDMA0_GLO_CFG_TX_DMA_EN |
+ MT_WFDMA0_GLO_CFG_RX_DMA_EN |
+ MT_WFDMA0_GLO_CFG_OMIT_TX_INFO |
+ MT_WFDMA0_GLO_CFG_OMIT_RX_INFO_PFET2);
+
+ if (is_mt7915(mdev))
+ mt76_set(dev, MT_WFDMA1_GLO_CFG + hif1_ofs,
+ MT_WFDMA1_GLO_CFG_TX_DMA_EN |
+ MT_WFDMA1_GLO_CFG_RX_DMA_EN |
+ MT_WFDMA1_GLO_CFG_OMIT_TX_INFO |
+ MT_WFDMA1_GLO_CFG_OMIT_RX_INFO);
+
+ mt76_set(dev, MT_WFDMA_HOST_CONFIG,
+ MT_WFDMA_HOST_CONFIG_PDMA_BAND);
+ }
+ }
+
+ /* enable interrupts for TX/RX rings */
+ irq_mask = MT_INT_RX_DONE_MCU |
+ MT_INT_TX_DONE_MCU |
+ MT_INT_MCU_CMD;
+
+ if (!dev->phy.mt76->band_idx)
+ irq_mask |= MT_INT_BAND0_RX_DONE;
+
+ if (dev->dbdc_support || dev->phy.mt76->band_idx)
+ irq_mask |= MT_INT_BAND1_RX_DONE;
+
+ if (mtk_wed_device_active(&dev->mt76.mmio.wed) && wed_reset) {
+ u32 wed_irq_mask = irq_mask;
+ int ret;
+
+ wed_irq_mask |= MT_INT_TX_DONE_BAND0 | MT_INT_TX_DONE_BAND1;
+ if (!is_mt798x(&dev->mt76))
+ mt76_wr(dev, MT_INT_WED_MASK_CSR, wed_irq_mask);
+ else
+ mt76_wr(dev, MT_INT_MASK_CSR, wed_irq_mask);
+
+ ret = mt7915_mcu_wed_enable_rx_stats(dev);
+ if (ret)
+ return ret;
+
+ mtk_wed_device_start(&dev->mt76.mmio.wed, wed_irq_mask);
+ }
+
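+	/* while a reset is in progress, keep only the MCU command interrupt
+	 * enabled.
+	 */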
+ irq_mask = reset ? MT_INT_MCU_CMD : irq_mask;
+
+ mt7915_irq_enable(dev, irq_mask);
+ mt7915_irq_disable(dev, 0);
+
+ return 0;
+}
+
+static int mt7915_dma_enable(struct mt7915_dev *dev, bool reset)
+{
+ struct mt76_dev *mdev = &dev->mt76;
+ u32 hif1_ofs = 0;
+
+ if (dev->hif2)
+ hif1_ofs = MT_WFDMA0_PCIE1(0) - MT_WFDMA0(0);
+
+ /* reset dma idx */
+ mt76_wr(dev, MT_WFDMA0_RST_DTX_PTR, ~0);
+ if (is_mt7915(mdev))
+ mt76_wr(dev, MT_WFDMA1_RST_DTX_PTR, ~0);
+ if (dev->hif2) {
+ mt76_wr(dev, MT_WFDMA0_RST_DTX_PTR + hif1_ofs, ~0);
+ if (is_mt7915(mdev))
+ mt76_wr(dev, MT_WFDMA1_RST_DTX_PTR + hif1_ofs, ~0);
+ }
+
+ /* configure delay interrupt off */
+ mt76_wr(dev, MT_WFDMA0_PRI_DLY_INT_CFG0, 0);
+ if (is_mt7915(mdev)) {
+ mt76_wr(dev, MT_WFDMA1_PRI_DLY_INT_CFG0, 0);
+ } else {
+ mt76_wr(dev, MT_WFDMA0_PRI_DLY_INT_CFG1, 0);
+ mt76_wr(dev, MT_WFDMA0_PRI_DLY_INT_CFG2, 0);
+ }
+
+ if (dev->hif2) {
+ mt76_wr(dev, MT_WFDMA0_PRI_DLY_INT_CFG0 + hif1_ofs, 0);
+ if (is_mt7915(mdev)) {
+ mt76_wr(dev, MT_WFDMA1_PRI_DLY_INT_CFG0 +
+ hif1_ofs, 0);
+ } else {
+ mt76_wr(dev, MT_WFDMA0_PRI_DLY_INT_CFG1 +
+ hif1_ofs, 0);
+ mt76_wr(dev, MT_WFDMA0_PRI_DLY_INT_CFG2 +
+ hif1_ofs, 0);
+ }
+ }
+
+	/* configure prefetch settings */
+ mt7915_dma_prefetch(dev);
+
+ /* hif wait WFDMA idle */
+ mt76_set(dev, MT_WFDMA0_BUSY_ENA,
+ MT_WFDMA0_BUSY_ENA_TX_FIFO0 |
+ MT_WFDMA0_BUSY_ENA_TX_FIFO1 |
+ MT_WFDMA0_BUSY_ENA_RX_FIFO);
+
+ if (is_mt7915(mdev))
+ mt76_set(dev, MT_WFDMA1_BUSY_ENA,
+ MT_WFDMA1_BUSY_ENA_TX_FIFO0 |
+ MT_WFDMA1_BUSY_ENA_TX_FIFO1 |
+ MT_WFDMA1_BUSY_ENA_RX_FIFO);
+
+ if (dev->hif2) {
+ mt76_set(dev, MT_WFDMA0_BUSY_ENA + hif1_ofs,
+ MT_WFDMA0_PCIE1_BUSY_ENA_TX_FIFO0 |
+ MT_WFDMA0_PCIE1_BUSY_ENA_TX_FIFO1 |
+ MT_WFDMA0_PCIE1_BUSY_ENA_RX_FIFO);
+
+ if (is_mt7915(mdev))
+ mt76_set(dev, MT_WFDMA1_BUSY_ENA + hif1_ofs,
+ MT_WFDMA1_PCIE1_BUSY_ENA_TX_FIFO0 |
+ MT_WFDMA1_PCIE1_BUSY_ENA_TX_FIFO1 |
+ MT_WFDMA1_PCIE1_BUSY_ENA_RX_FIFO);
+ }
+
+ mt76_poll(dev, MT_WFDMA_EXT_CSR_HIF_MISC,
+ MT_WFDMA_EXT_CSR_HIF_MISC_BUSY, 0, 1000);
+
+ return mt7915_dma_start(dev, reset, true);
+}
+
+int mt7915_dma_init(struct mt7915_dev *dev, struct mt7915_phy *phy2)
+{
+ struct mt76_dev *mdev = &dev->mt76;
+ u32 wa_rx_base, wa_rx_idx;
+ u32 hif1_ofs = 0;
+ int ret;
+
+ mt7915_dma_config(dev);
+
+ mt76_dma_attach(&dev->mt76);
+
+ if (dev->hif2)
+ hif1_ofs = MT_WFDMA0_PCIE1(0) - MT_WFDMA0(0);
+
+ mt7915_dma_disable(dev, true);
+
+ if (mtk_wed_device_active(&mdev->mmio.wed)) {
+ if (!is_mt798x(mdev)) {
+ u8 wed_control_rx1 = is_mt7915(mdev) ? 1 : 2;
+
+ mt76_set(dev, MT_WFDMA_HOST_CONFIG,
+ MT_WFDMA_HOST_CONFIG_WED);
+ mt76_wr(dev, MT_WFDMA_WED_RING_CONTROL,
+ FIELD_PREP(MT_WFDMA_WED_RING_CONTROL_TX0, 18) |
+ FIELD_PREP(MT_WFDMA_WED_RING_CONTROL_TX1, 19) |
+ FIELD_PREP(MT_WFDMA_WED_RING_CONTROL_RX1,
+ wed_control_rx1));
+ if (is_mt7915(mdev))
+ mt76_rmw(dev, MT_WFDMA0_EXT0_CFG, MT_WFDMA0_EXT0_RXWB_KEEP,
+ MT_WFDMA0_EXT0_RXWB_KEEP);
+ }
+ } else {
+ mt76_clear(dev, MT_WFDMA_HOST_CONFIG, MT_WFDMA_HOST_CONFIG_WED);
+ }
+
+ /* init tx queue */
+ ret = mt7915_init_tx_queues(&dev->phy,
+ MT_TXQ_ID(dev->phy.mt76->band_idx),
+ MT7915_TX_RING_SIZE,
+ MT_TXQ_RING_BASE(0));
+ if (ret)
+ return ret;
+
+ if (phy2) {
+ ret = mt7915_init_tx_queues(phy2,
+ MT_TXQ_ID(phy2->mt76->band_idx),
+ MT7915_TX_RING_SIZE,
+ MT_TXQ_RING_BASE(1));
+ if (ret)
+ return ret;
+ }
+
+ /* command to WM */
+ ret = mt76_init_mcu_queue(&dev->mt76, MT_MCUQ_WM,
+ MT_MCUQ_ID(MT_MCUQ_WM),
+ MT7915_TX_MCU_RING_SIZE,
+ MT_MCUQ_RING_BASE(MT_MCUQ_WM));
+ if (ret)
+ return ret;
+
+ /* command to WA */
+ ret = mt76_init_mcu_queue(&dev->mt76, MT_MCUQ_WA,
+ MT_MCUQ_ID(MT_MCUQ_WA),
+ MT7915_TX_MCU_RING_SIZE,
+ MT_MCUQ_RING_BASE(MT_MCUQ_WA));
+ if (ret)
+ return ret;
+
+ /* firmware download */
+ ret = mt76_init_mcu_queue(&dev->mt76, MT_MCUQ_FWDL,
+ MT_MCUQ_ID(MT_MCUQ_FWDL),
+ MT7915_TX_FWDL_RING_SIZE,
+ MT_MCUQ_RING_BASE(MT_MCUQ_FWDL));
+ if (ret)
+ return ret;
+
+ /* event from WM */
+ ret = mt76_queue_alloc(dev, &dev->mt76.q_rx[MT_RXQ_MCU],
+ MT_RXQ_ID(MT_RXQ_MCU),
+ MT7915_RX_MCU_RING_SIZE,
+ MT_RX_BUF_SIZE,
+ MT_RXQ_RING_BASE(MT_RXQ_MCU));
+ if (ret)
+ return ret;
+
+ /* event from WA */
+ if (mtk_wed_device_active(&mdev->mmio.wed) && is_mt7915(mdev)) {
+ wa_rx_base = MT_WED_RX_RING_BASE;
+ wa_rx_idx = MT7915_RXQ_MCU_WA;
+ dev->mt76.q_rx[MT_RXQ_MCU_WA].flags = MT_WED_Q_TXFREE;
+ } else {
+ wa_rx_base = MT_RXQ_RING_BASE(MT_RXQ_MCU_WA);
+ wa_rx_idx = MT_RXQ_ID(MT_RXQ_MCU_WA);
+ }
+ ret = mt76_queue_alloc(dev, &dev->mt76.q_rx[MT_RXQ_MCU_WA],
+ wa_rx_idx, MT7915_RX_MCU_RING_SIZE,
+ MT_RX_BUF_SIZE, wa_rx_base);
+ if (ret)
+ return ret;
+
+ /* rx data queue for band0 */
+ if (!dev->phy.mt76->band_idx) {
+ if (mtk_wed_device_active(&mdev->mmio.wed) &&
+ mtk_wed_get_rx_capa(&mdev->mmio.wed)) {
+ dev->mt76.q_rx[MT_RXQ_MAIN].flags =
+ MT_WED_Q_RX(MT7915_RXQ_BAND0);
+ dev->mt76.rx_token_size += MT7915_RX_RING_SIZE;
+ }
+
+ ret = mt76_queue_alloc(dev, &dev->mt76.q_rx[MT_RXQ_MAIN],
+ MT_RXQ_ID(MT_RXQ_MAIN),
+ MT7915_RX_RING_SIZE,
+ MT_RX_BUF_SIZE,
+ MT_RXQ_RING_BASE(MT_RXQ_MAIN));
+ if (ret)
+ return ret;
+ }
+
+ /* tx free notify event from WA for band0 */
+ if (!is_mt7915(mdev)) {
+ wa_rx_base = MT_RXQ_RING_BASE(MT_RXQ_MAIN_WA);
+ wa_rx_idx = MT_RXQ_ID(MT_RXQ_MAIN_WA);
+
+ if (mtk_wed_device_active(&mdev->mmio.wed)) {
+ mdev->q_rx[MT_RXQ_MAIN_WA].flags = MT_WED_Q_TXFREE;
+ if (is_mt7916(mdev)) {
+ wa_rx_base = MT_WED_RX_RING_BASE;
+ wa_rx_idx = MT7915_RXQ_MCU_WA;
+ }
+ }
+
+ ret = mt76_queue_alloc(dev, &dev->mt76.q_rx[MT_RXQ_MAIN_WA],
+ wa_rx_idx, MT7915_RX_MCU_RING_SIZE,
+ MT_RX_BUF_SIZE, wa_rx_base);
+ if (ret)
+ return ret;
+ }
+
+ if (dev->dbdc_support || dev->phy.mt76->band_idx) {
+ if (mtk_wed_device_active(&mdev->mmio.wed) &&
+ mtk_wed_get_rx_capa(&mdev->mmio.wed)) {
+ dev->mt76.q_rx[MT_RXQ_BAND1].flags =
+ MT_WED_Q_RX(MT7915_RXQ_BAND1);
+ dev->mt76.rx_token_size += MT7915_RX_RING_SIZE;
+ }
+
+ /* rx data queue for band1 */
+ ret = mt76_queue_alloc(dev, &dev->mt76.q_rx[MT_RXQ_BAND1],
+ MT_RXQ_ID(MT_RXQ_BAND1),
+ MT7915_RX_RING_SIZE,
+ MT_RX_BUF_SIZE,
+ MT_RXQ_RING_BASE(MT_RXQ_BAND1) + hif1_ofs);
+ if (ret)
+ return ret;
+
+ /* tx free notify event from WA for band1 */
+ ret = mt76_queue_alloc(dev, &dev->mt76.q_rx[MT_RXQ_BAND1_WA],
+ MT_RXQ_ID(MT_RXQ_BAND1_WA),
+ MT7915_RX_MCU_RING_SIZE,
+ MT_RX_BUF_SIZE,
+ MT_RXQ_RING_BASE(MT_RXQ_BAND1_WA) + hif1_ofs);
+ if (ret)
+ return ret;
+ }
+
+ ret = mt76_init_queues(dev, mt76_dma_rx_poll);
+ if (ret < 0)
+ return ret;
+
+ netif_napi_add_tx(&dev->mt76.tx_napi_dev, &dev->mt76.tx_napi,
+ mt7915_poll_tx);
+ napi_enable(&dev->mt76.tx_napi);
+
+ mt7915_dma_enable(dev, false);
+
+ return 0;
+}
+
+static void mt7915_dma_wed_reset(struct mt7915_dev *dev)
+{
+ struct mt76_dev *mdev = &dev->mt76;
+
+ if (!test_bit(MT76_STATE_WED_RESET, &dev->mphy.state))
+ return;
+
+ complete(&mdev->mmio.wed_reset);
+
+ if (!wait_for_completion_timeout(&dev->mt76.mmio.wed_reset_complete,
+ 3 * HZ))
+ dev_err(dev->mt76.dev, "wed reset complete timeout\n");
+}
+
+static void
+mt7915_dma_reset_tx_queue(struct mt7915_dev *dev, struct mt76_queue *q)
+{
+ mt76_queue_reset(dev, q);
+ if (mtk_wed_device_active(&dev->mt76.mmio.wed))
+ mt76_dma_wed_setup(&dev->mt76, q, true);
+}
+
+int mt7915_dma_reset(struct mt7915_dev *dev, bool force)
+{
+ struct mt76_phy *mphy_ext = dev->mt76.phys[MT_BAND1];
+ struct mtk_wed_device *wed = &dev->mt76.mmio.wed;
+ int i;
+
+ /* clean up hw queues */
+ for (i = 0; i < ARRAY_SIZE(dev->mt76.phy.q_tx); i++) {
+ mt76_queue_tx_cleanup(dev, dev->mphy.q_tx[i], true);
+ if (mphy_ext)
+ mt76_queue_tx_cleanup(dev, mphy_ext->q_tx[i], true);
+ }
+
+ for (i = 0; i < ARRAY_SIZE(dev->mt76.q_mcu); i++)
+ mt76_queue_tx_cleanup(dev, dev->mt76.q_mcu[i], true);
+
+ mt76_for_each_q_rx(&dev->mt76, i)
+ mt76_queue_rx_cleanup(dev, &dev->mt76.q_rx[i]);
+
+ /* reset wfsys */
+ if (force)
+ mt7915_wfsys_reset(dev);
+
+ if (mtk_wed_device_active(wed))
+ mtk_wed_device_dma_reset(wed);
+
+ mt7915_dma_disable(dev, force);
+ mt7915_dma_wed_reset(dev);
+
+ /* reset hw queues */
+ for (i = 0; i < __MT_TXQ_MAX; i++) {
+ mt7915_dma_reset_tx_queue(dev, dev->mphy.q_tx[i]);
+ if (mphy_ext)
+ mt7915_dma_reset_tx_queue(dev, mphy_ext->q_tx[i]);
+ }
+
+ for (i = 0; i < __MT_MCUQ_MAX; i++)
+ mt76_queue_reset(dev, dev->mt76.q_mcu[i]);
+
+ mt76_for_each_q_rx(&dev->mt76, i) {
+ if (dev->mt76.q_rx[i].flags == MT_WED_Q_TXFREE)
+ continue;
+
+ mt76_queue_reset(dev, &dev->mt76.q_rx[i]);
+ }
+
+ mt76_tx_status_check(&dev->mt76, true);
+
+ mt76_for_each_q_rx(&dev->mt76, i)
+ mt76_queue_rx_reset(dev, i);
+
+ if (mtk_wed_device_active(wed) && is_mt7915(&dev->mt76))
+ mt76_rmw(dev, MT_WFDMA0_EXT0_CFG, MT_WFDMA0_EXT0_RXWB_KEEP,
+ MT_WFDMA0_EXT0_RXWB_KEEP);
+
+ mt7915_dma_enable(dev, !force);
+
+ return 0;
+}
+
+void mt7915_dma_cleanup(struct mt7915_dev *dev)
+{
+ mt7915_dma_disable(dev, true);
+
+ mt76_dma_cleanup(&dev->mt76);
+}
diff --git a/drivers/net/wireless/mediatek/mt76/mt7915/eeprom.c b/drivers/net/wireless/mediatek/mt76/mt7915/eeprom.c
new file mode 100644
index 0000000000..76be730846
--- /dev/null
+++ b/drivers/net/wireless/mediatek/mt76/mt7915/eeprom.c
@@ -0,0 +1,355 @@
+// SPDX-License-Identifier: ISC
+/* Copyright (C) 2020 MediaTek Inc. */
+
+#include <linux/firmware.h>
+#include "mt7915.h"
+#include "eeprom.h"
+
+static int mt7915_eeprom_load_precal(struct mt7915_dev *dev)
+{
+ struct mt76_dev *mdev = &dev->mt76;
+ u8 *eeprom = mdev->eeprom.data;
+ u32 val = eeprom[MT_EE_DO_PRE_CAL];
+ u32 offs;
+
+ if (!dev->flash_mode)
+ return 0;
+
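+	/* pre-calibration data is only usable when both group and DPD
+	 * calibration are flagged as present in the eeprom.
+	 */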
+ if (val != (MT_EE_WIFI_CAL_DPD | MT_EE_WIFI_CAL_GROUP))
+ return 0;
+
+ val = MT_EE_CAL_GROUP_SIZE + MT_EE_CAL_DPD_SIZE;
+ dev->cal = devm_kzalloc(mdev->dev, val, GFP_KERNEL);
+ if (!dev->cal)
+ return -ENOMEM;
+
+ offs = is_mt7915(&dev->mt76) ? MT_EE_PRECAL : MT_EE_PRECAL_V2;
+
+ return mt76_get_of_eeprom(mdev, dev->cal, offs, val);
+}
+
+static int mt7915_check_eeprom(struct mt7915_dev *dev)
+{
+ u8 *eeprom = dev->mt76.eeprom.data;
+ u16 val = get_unaligned_le16(eeprom);
+
+#define CHECK_EEPROM_ERR(match) (match ? 0 : -EINVAL)
+ switch (val) {
+ case 0x7915:
+ return CHECK_EEPROM_ERR(is_mt7915(&dev->mt76));
+ case 0x7916:
+ return CHECK_EEPROM_ERR(is_mt7916(&dev->mt76));
+ case 0x7981:
+ return CHECK_EEPROM_ERR(is_mt7981(&dev->mt76));
+ case 0x7986:
+ return CHECK_EEPROM_ERR(is_mt7986(&dev->mt76));
+ default:
+ return -EINVAL;
+ }
+}
+
+static char *mt7915_eeprom_name(struct mt7915_dev *dev)
+{
+ switch (mt76_chip(&dev->mt76)) {
+ case 0x7915:
+ return dev->dbdc_support ?
+ MT7915_EEPROM_DEFAULT_DBDC : MT7915_EEPROM_DEFAULT;
+ case 0x7981:
+ /* mt7981 only supports mt7976 and only in DBDC mode */
+ return MT7981_EEPROM_MT7976_DEFAULT_DBDC;
+ case 0x7986:
+ switch (mt7915_check_adie(dev, true)) {
+ case MT7976_ONE_ADIE_DBDC:
+ return MT7986_EEPROM_MT7976_DEFAULT_DBDC;
+ case MT7975_ONE_ADIE:
+ return MT7986_EEPROM_MT7975_DEFAULT;
+ case MT7976_ONE_ADIE:
+ return MT7986_EEPROM_MT7976_DEFAULT;
+ case MT7975_DUAL_ADIE:
+ return MT7986_EEPROM_MT7975_DUAL_DEFAULT;
+ case MT7976_DUAL_ADIE:
+ return MT7986_EEPROM_MT7976_DUAL_DEFAULT;
+ default:
+ break;
+ }
+ return NULL;
+ default:
+ return MT7916_EEPROM_DEFAULT;
+ }
+}
+
+static int
+mt7915_eeprom_load_default(struct mt7915_dev *dev)
+{
+ u8 *eeprom = dev->mt76.eeprom.data;
+ const struct firmware *fw = NULL;
+ int ret;
+
+ ret = request_firmware(&fw, mt7915_eeprom_name(dev), dev->mt76.dev);
+ if (ret)
+ return ret;
+
+ if (!fw || !fw->data) {
+ dev_err(dev->mt76.dev, "Invalid default bin\n");
+ ret = -EINVAL;
+ goto out;
+ }
+
+ memcpy(eeprom, fw->data, mt7915_eeprom_size(dev));
+ dev->flash_mode = true;
+
+out:
+ release_firmware(fw);
+
+ return ret;
+}
+
+static int mt7915_eeprom_load(struct mt7915_dev *dev)
+{
+ int ret;
+ u16 eeprom_size = mt7915_eeprom_size(dev);
+
+ ret = mt76_eeprom_init(&dev->mt76, eeprom_size);
+ if (ret < 0)
+ return ret;
+
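+	/* a positive return means eeprom data was already provided externally
+	 * (flash mode); otherwise read it from efuse via the MCU, block by
+	 * block.
+	 */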
+ if (ret) {
+ dev->flash_mode = true;
+ } else {
+ u8 free_block_num;
+ u32 block_num, i;
+ u32 eeprom_blk_size = MT7915_EEPROM_BLOCK_SIZE;
+
+ ret = mt7915_mcu_get_eeprom_free_block(dev, &free_block_num);
+ if (ret < 0)
+ return ret;
+
+ /* efuse info isn't enough */
+ if (free_block_num >= 29)
+ return -EINVAL;
+
+ /* read eeprom data from efuse */
+ block_num = DIV_ROUND_UP(eeprom_size, eeprom_blk_size);
+ for (i = 0; i < block_num; i++) {
+ ret = mt7915_mcu_get_eeprom(dev, i * eeprom_blk_size);
+ if (ret < 0)
+ return ret;
+ }
+ }
+
+ return mt7915_check_eeprom(dev);
+}
+
+static void mt7915_eeprom_parse_band_config(struct mt7915_phy *phy)
+{
+ struct mt7915_dev *dev = phy->dev;
+ u8 *eeprom = dev->mt76.eeprom.data;
+ u8 band = phy->mt76->band_idx;
+ u32 val;
+
+ val = eeprom[MT_EE_WIFI_CONF + band];
+ val = FIELD_GET(MT_EE_WIFI_CONF0_BAND_SEL, val);
+
+ if (!is_mt7915(&dev->mt76)) {
+ switch (val) {
+ case MT_EE_V2_BAND_SEL_5GHZ:
+ phy->mt76->cap.has_5ghz = true;
+ return;
+ case MT_EE_V2_BAND_SEL_6GHZ:
+ phy->mt76->cap.has_6ghz = true;
+ return;
+ case MT_EE_V2_BAND_SEL_5GHZ_6GHZ:
+ phy->mt76->cap.has_5ghz = true;
+ phy->mt76->cap.has_6ghz = true;
+ return;
+ default:
+ phy->mt76->cap.has_2ghz = true;
+ return;
+ }
+ } else if (val == MT_EE_BAND_SEL_DEFAULT && dev->dbdc_support) {
+ val = band ? MT_EE_BAND_SEL_5GHZ : MT_EE_BAND_SEL_2GHZ;
+ }
+
+ switch (val) {
+ case MT_EE_BAND_SEL_5GHZ:
+ phy->mt76->cap.has_5ghz = true;
+ break;
+ case MT_EE_BAND_SEL_2GHZ:
+ phy->mt76->cap.has_2ghz = true;
+ break;
+ default:
+ phy->mt76->cap.has_2ghz = true;
+ phy->mt76->cap.has_5ghz = true;
+ break;
+ }
+}
+
+void mt7915_eeprom_parse_hw_cap(struct mt7915_dev *dev,
+ struct mt7915_phy *phy)
+{
+ u8 path, nss, nss_max = 4, *eeprom = dev->mt76.eeprom.data;
+ struct mt76_phy *mphy = phy->mt76;
+ u8 band = phy->mt76->band_idx;
+
+ mt7915_eeprom_parse_band_config(phy);
+
+ /* read tx/rx path from eeprom */
+ if (is_mt7915(&dev->mt76)) {
+ path = FIELD_GET(MT_EE_WIFI_CONF0_TX_PATH,
+ eeprom[MT_EE_WIFI_CONF]);
+ } else {
+ path = FIELD_GET(MT_EE_WIFI_CONF0_TX_PATH,
+ eeprom[MT_EE_WIFI_CONF + band]);
+ }
+
+ if (!path || path > 4)
+ path = 4;
+
+ /* read tx/rx stream */
+ nss = path;
+ if (dev->dbdc_support) {
+ if (is_mt7915(&dev->mt76)) {
+ path = min_t(u8, path, 2);
+ nss = FIELD_GET(MT_EE_WIFI_CONF3_TX_PATH_B0,
+ eeprom[MT_EE_WIFI_CONF + 3]);
+ if (band)
+ nss = FIELD_GET(MT_EE_WIFI_CONF3_TX_PATH_B1,
+ eeprom[MT_EE_WIFI_CONF + 3]);
+ } else {
+ nss = FIELD_GET(MT_EE_WIFI_CONF_STREAM_NUM,
+ eeprom[MT_EE_WIFI_CONF + 2 + band]);
+ }
+
+ if (!is_mt798x(&dev->mt76))
+ nss_max = 2;
+ }
+
+ if (!nss)
+ nss = nss_max;
+ nss = min_t(u8, min_t(u8, nss_max, nss), path);
+
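+	/* band1 chains are mapped above the band0 chains, hence the shift by
+	 * the number of chains already claimed by band0.
+	 */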
+ mphy->chainmask = BIT(path) - 1;
+ if (band)
+ mphy->chainmask <<= dev->chainshift;
+ mphy->antenna_mask = BIT(nss) - 1;
+ dev->chainmask |= mphy->chainmask;
+ dev->chainshift = hweight8(dev->mphy.chainmask);
+}
+
+int mt7915_eeprom_init(struct mt7915_dev *dev)
+{
+ int ret;
+
+ ret = mt7915_eeprom_load(dev);
+ if (ret < 0) {
+ if (ret != -EINVAL)
+ return ret;
+
+ dev_warn(dev->mt76.dev, "eeprom load fail, use default bin\n");
+ ret = mt7915_eeprom_load_default(dev);
+ if (ret)
+ return ret;
+ }
+
+ ret = mt7915_eeprom_load_precal(dev);
+ if (ret)
+ return ret;
+
+ mt7915_eeprom_parse_hw_cap(dev, &dev->phy);
+ memcpy(dev->mphy.macaddr, dev->mt76.eeprom.data + MT_EE_MAC_ADDR,
+ ETH_ALEN);
+
+ mt76_eeprom_override(&dev->mphy);
+
+ return 0;
+}
+
+int mt7915_eeprom_get_target_power(struct mt7915_dev *dev,
+ struct ieee80211_channel *chan,
+ u8 chain_idx)
+{
+ u8 *eeprom = dev->mt76.eeprom.data;
+ int index, target_power;
+ bool tssi_on, is_7976;
+
+ if (chain_idx > 3)
+ return -EINVAL;
+
+ tssi_on = mt7915_tssi_enabled(dev, chan->band);
+ is_7976 = mt7915_check_adie(dev, false) || is_mt7916(&dev->mt76);
+
+ if (chan->band == NL80211_BAND_2GHZ) {
+ if (is_7976) {
+ index = MT_EE_TX0_POWER_2G_V2 + chain_idx;
+ target_power = eeprom[index];
+ } else {
+ index = MT_EE_TX0_POWER_2G + chain_idx * 3;
+ target_power = eeprom[index];
+
+ if (!tssi_on)
+ target_power += eeprom[index + 1];
+ }
+ } else if (chan->band == NL80211_BAND_5GHZ) {
+ int group = mt7915_get_channel_group_5g(chan->hw_value, is_7976);
+
+ if (is_7976) {
+ index = MT_EE_TX0_POWER_5G_V2 + chain_idx * 5;
+ target_power = eeprom[index + group];
+ } else {
+ index = MT_EE_TX0_POWER_5G + chain_idx * 12;
+ target_power = eeprom[index + group];
+
+ if (!tssi_on)
+ target_power += eeprom[index + 8];
+ }
+ } else {
+ int group = mt7915_get_channel_group_6g(chan->hw_value);
+
+ index = MT_EE_TX0_POWER_6G_V2 + chain_idx * 8;
+ target_power = is_7976 ? eeprom[index + group] : 0;
+ }
+
+ return target_power;
+}
+
+s8 mt7915_eeprom_get_power_delta(struct mt7915_dev *dev, int band)
+{
+ u8 *eeprom = dev->mt76.eeprom.data;
+ u32 val, offs;
+ s8 delta;
+ bool is_7976 = mt7915_check_adie(dev, false) || is_mt7916(&dev->mt76);
+
+ if (band == NL80211_BAND_2GHZ)
+ offs = is_7976 ? MT_EE_RATE_DELTA_2G_V2 : MT_EE_RATE_DELTA_2G;
+ else if (band == NL80211_BAND_5GHZ)
+ offs = is_7976 ? MT_EE_RATE_DELTA_5G_V2 : MT_EE_RATE_DELTA_5G;
+ else
+ offs = is_7976 ? MT_EE_RATE_DELTA_6G_V2 : 0;
+
+ val = eeprom[offs];
+
+ if (!offs || !(val & MT_EE_RATE_DELTA_EN))
+ return 0;
+
+ delta = FIELD_GET(MT_EE_RATE_DELTA_MASK, val);
+
+ return val & MT_EE_RATE_DELTA_SIGN ? delta : -delta;
+}
+
+const u8 mt7915_sku_group_len[] = {
+ [SKU_CCK] = 4,
+ [SKU_OFDM] = 8,
+ [SKU_HT_BW20] = 8,
+ [SKU_HT_BW40] = 9,
+ [SKU_VHT_BW20] = 12,
+ [SKU_VHT_BW40] = 12,
+ [SKU_VHT_BW80] = 12,
+ [SKU_VHT_BW160] = 12,
+ [SKU_HE_RU26] = 12,
+ [SKU_HE_RU52] = 12,
+ [SKU_HE_RU106] = 12,
+ [SKU_HE_RU242] = 12,
+ [SKU_HE_RU484] = 12,
+ [SKU_HE_RU996] = 12,
+ [SKU_HE_RU2x996] = 12
+};
diff --git a/drivers/net/wireless/mediatek/mt76/mt7915/eeprom.h b/drivers/net/wireless/mediatek/mt76/mt7915/eeprom.h
new file mode 100644
index 0000000000..adc26a2228
--- /dev/null
+++ b/drivers/net/wireless/mediatek/mt76/mt7915/eeprom.h
@@ -0,0 +1,161 @@
+/* SPDX-License-Identifier: ISC */
+/* Copyright (C) 2020 MediaTek Inc. */
+
+#ifndef __MT7915_EEPROM_H
+#define __MT7915_EEPROM_H
+
+#include "mt7915.h"
+
+struct cal_data {
+ u8 count;
+ u16 offset[60];
+};
+
+enum mt7915_eeprom_field {
+ MT_EE_CHIP_ID = 0x000,
+ MT_EE_VERSION = 0x002,
+ MT_EE_MAC_ADDR = 0x004,
+ MT_EE_MAC_ADDR2 = 0x00a,
+ MT_EE_DDIE_FT_VERSION = 0x050,
+ MT_EE_DO_PRE_CAL = 0x062,
+ MT_EE_WIFI_CONF = 0x190,
+ MT_EE_RATE_DELTA_2G = 0x252,
+ MT_EE_RATE_DELTA_5G = 0x29d,
+ MT_EE_TX0_POWER_2G = 0x2fc,
+ MT_EE_TX0_POWER_5G = 0x34b,
+ MT_EE_RATE_DELTA_2G_V2 = 0x7d3,
+ MT_EE_RATE_DELTA_5G_V2 = 0x81e,
+ MT_EE_RATE_DELTA_6G_V2 = 0x884, /* 6g fields only appear in eeprom v2 */
+ MT_EE_TX0_POWER_2G_V2 = 0x441,
+ MT_EE_TX0_POWER_5G_V2 = 0x445,
+ MT_EE_TX0_POWER_6G_V2 = 0x465,
+ MT_EE_ADIE_FT_VERSION = 0x9a0,
+
+ __MT_EE_MAX = 0xe00,
+ __MT_EE_MAX_V2 = 0x1000,
+ /* 0xe10 ~ 0x5780 used to save group cal data */
+ MT_EE_PRECAL = 0xe10,
+ MT_EE_PRECAL_V2 = 0x1010
+};
+
+#define MT_EE_WIFI_CAL_GROUP BIT(0)
+#define MT_EE_WIFI_CAL_DPD GENMASK(2, 1)
+#define MT_EE_CAL_UNIT 1024
+#define MT_EE_CAL_GROUP_SIZE (49 * MT_EE_CAL_UNIT + 16)
+#define MT_EE_CAL_DPD_SIZE (54 * MT_EE_CAL_UNIT)
+
+#define MT_EE_WIFI_CONF0_TX_PATH GENMASK(2, 0)
+#define MT_EE_WIFI_CONF0_BAND_SEL GENMASK(7, 6)
+#define MT_EE_WIFI_CONF1_BAND_SEL GENMASK(7, 6)
+#define MT_EE_WIFI_CONF_STREAM_NUM GENMASK(7, 5)
+#define MT_EE_WIFI_CONF3_TX_PATH_B0 GENMASK(1, 0)
+#define MT_EE_WIFI_CONF3_TX_PATH_B1 GENMASK(5, 4)
+#define MT_EE_WIFI_CONF7_TSSI0_2G BIT(0)
+#define MT_EE_WIFI_CONF7_TSSI0_5G BIT(2)
+#define MT_EE_WIFI_CONF7_TSSI1_5G BIT(4)
+
+#define MT_EE_RATE_DELTA_MASK GENMASK(5, 0)
+#define MT_EE_RATE_DELTA_SIGN BIT(6)
+#define MT_EE_RATE_DELTA_EN BIT(7)
+
+enum mt7915_adie_sku {
+ MT7976_ONE_ADIE_DBDC = 0x7,
+ MT7975_ONE_ADIE = 0x8,
+ MT7976_ONE_ADIE = 0xa,
+ MT7975_DUAL_ADIE = 0xd,
+ MT7976_DUAL_ADIE = 0xf,
+};
+
+enum mt7915_eeprom_band {
+ MT_EE_BAND_SEL_DEFAULT,
+ MT_EE_BAND_SEL_5GHZ,
+ MT_EE_BAND_SEL_2GHZ,
+ MT_EE_BAND_SEL_DUAL,
+};
+
+enum {
+ MT_EE_V2_BAND_SEL_2GHZ,
+ MT_EE_V2_BAND_SEL_5GHZ,
+ MT_EE_V2_BAND_SEL_6GHZ,
+ MT_EE_V2_BAND_SEL_5GHZ_6GHZ,
+};
+
+enum mt7915_sku_rate_group {
+ SKU_CCK,
+ SKU_OFDM,
+ SKU_HT_BW20,
+ SKU_HT_BW40,
+ SKU_VHT_BW20,
+ SKU_VHT_BW40,
+ SKU_VHT_BW80,
+ SKU_VHT_BW160,
+ SKU_HE_RU26,
+ SKU_HE_RU52,
+ SKU_HE_RU106,
+ SKU_HE_RU242,
+ SKU_HE_RU484,
+ SKU_HE_RU996,
+ SKU_HE_RU2x996,
+ MAX_SKU_RATE_GROUP_NUM,
+};
+
+static inline int
+mt7915_get_channel_group_5g(int channel, bool is_7976)
+{
+ if (is_7976) {
+ if (channel <= 64)
+ return 0;
+ if (channel <= 96)
+ return 1;
+ if (channel <= 128)
+ return 2;
+ if (channel <= 144)
+ return 3;
+ return 4;
+ }
+
+ if (channel >= 184 && channel <= 196)
+ return 0;
+ if (channel <= 48)
+ return 1;
+ if (channel <= 64)
+ return 2;
+ if (channel <= 96)
+ return 3;
+ if (channel <= 112)
+ return 4;
+ if (channel <= 128)
+ return 5;
+ if (channel <= 144)
+ return 6;
+ return 7;
+}
+
+static inline int
+mt7915_get_channel_group_6g(int channel)
+{
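+	/* group 0 covers channels up to 29, then one group per 32 channels */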
+ if (channel <= 29)
+ return 0;
+
+ return DIV_ROUND_UP(channel - 29, 32);
+}
+
+static inline bool
+mt7915_tssi_enabled(struct mt7915_dev *dev, enum nl80211_band band)
+{
+ u8 *eep = dev->mt76.eeprom.data;
+ u8 offs = is_mt7981(&dev->mt76) ? 8 : 7;
+ u8 val = eep[MT_EE_WIFI_CONF + offs];
+
+ if (band == NL80211_BAND_2GHZ)
+ return val & MT_EE_WIFI_CONF7_TSSI0_2G;
+
+ if (dev->dbdc_support)
+ return val & MT_EE_WIFI_CONF7_TSSI1_5G;
+ else
+ return val & MT_EE_WIFI_CONF7_TSSI0_5G;
+}
+
+extern const u8 mt7915_sku_group_len[MAX_SKU_RATE_GROUP_NUM];
+
+#endif
diff --git a/drivers/net/wireless/mediatek/mt76/mt7915/init.c b/drivers/net/wireless/mediatek/mt76/mt7915/init.c
new file mode 100644
index 0000000000..35fdf4f98d
--- /dev/null
+++ b/drivers/net/wireless/mediatek/mt76/mt7915/init.c
@@ -0,0 +1,1261 @@
+// SPDX-License-Identifier: ISC
+/* Copyright (C) 2020 MediaTek Inc. */
+
+#include <linux/etherdevice.h>
+#include <linux/hwmon.h>
+#include <linux/hwmon-sysfs.h>
+#include <linux/of.h>
+#include <linux/thermal.h>
+#include "mt7915.h"
+#include "mac.h"
+#include "mcu.h"
+#include "coredump.h"
+#include "eeprom.h"
+
+static const struct ieee80211_iface_limit if_limits[] = {
+ {
+ .max = 1,
+ .types = BIT(NL80211_IFTYPE_ADHOC)
+ }, {
+ .max = 16,
+ .types = BIT(NL80211_IFTYPE_AP)
+#ifdef CONFIG_MAC80211_MESH
+ | BIT(NL80211_IFTYPE_MESH_POINT)
+#endif
+ }, {
+ .max = MT7915_MAX_INTERFACES,
+ .types = BIT(NL80211_IFTYPE_STATION)
+ }
+};
+
+static const struct ieee80211_iface_combination if_comb[] = {
+ {
+ .limits = if_limits,
+ .n_limits = ARRAY_SIZE(if_limits),
+ .max_interfaces = MT7915_MAX_INTERFACES,
+ .num_different_channels = 1,
+ .beacon_int_infra_match = true,
+ .radar_detect_widths = BIT(NL80211_CHAN_WIDTH_20_NOHT) |
+ BIT(NL80211_CHAN_WIDTH_20) |
+ BIT(NL80211_CHAN_WIDTH_40) |
+ BIT(NL80211_CHAN_WIDTH_80) |
+ BIT(NL80211_CHAN_WIDTH_160),
+ }
+};
+
+static ssize_t mt7915_thermal_temp_show(struct device *dev,
+ struct device_attribute *attr,
+ char *buf)
+{
+ struct mt7915_phy *phy = dev_get_drvdata(dev);
+ int i = to_sensor_dev_attr(attr)->index;
+ int temperature;
+
+ switch (i) {
+ case 0:
+ temperature = mt7915_mcu_get_temperature(phy);
+ if (temperature < 0)
+ return temperature;
+		/* display in millidegree Celsius */
+ return sprintf(buf, "%u\n", temperature * 1000);
+ case 1:
+ case 2:
+ return sprintf(buf, "%u\n",
+ phy->throttle_temp[i - 1] * 1000);
+ case 3:
+ return sprintf(buf, "%hhu\n", phy->throttle_state);
+ default:
+ return -EINVAL;
+ }
+}
+
+static ssize_t mt7915_thermal_temp_store(struct device *dev,
+ struct device_attribute *attr,
+ const char *buf, size_t count)
+{
+ struct mt7915_phy *phy = dev_get_drvdata(dev);
+ int ret, i = to_sensor_dev_attr(attr)->index;
+ long val;
+
+ ret = kstrtol(buf, 10, &val);
+ if (ret < 0)
+ return ret;
+
+ mutex_lock(&phy->dev->mt76.mutex);
+ val = clamp_val(DIV_ROUND_CLOSEST(val, 1000), 60, 130);
+
+ if ((i - 1 == MT7915_CRIT_TEMP_IDX &&
+ val > phy->throttle_temp[MT7915_MAX_TEMP_IDX]) ||
+ (i - 1 == MT7915_MAX_TEMP_IDX &&
+ val < phy->throttle_temp[MT7915_CRIT_TEMP_IDX])) {
+ dev_err(phy->dev->mt76.dev,
+ "temp1_max shall be greater than temp1_crit.");
+ mutex_unlock(&phy->dev->mt76.mutex);
+ return -EINVAL;
+ }
+
+ phy->throttle_temp[i - 1] = val;
+ mutex_unlock(&phy->dev->mt76.mutex);
+
+ ret = mt7915_mcu_set_thermal_protect(phy);
+ if (ret)
+ return ret;
+
+ return count;
+}
+
+static SENSOR_DEVICE_ATTR_RO(temp1_input, mt7915_thermal_temp, 0);
+static SENSOR_DEVICE_ATTR_RW(temp1_crit, mt7915_thermal_temp, 1);
+static SENSOR_DEVICE_ATTR_RW(temp1_max, mt7915_thermal_temp, 2);
+static SENSOR_DEVICE_ATTR_RO(throttle1, mt7915_thermal_temp, 3);
+
+static struct attribute *mt7915_hwmon_attrs[] = {
+ &sensor_dev_attr_temp1_input.dev_attr.attr,
+ &sensor_dev_attr_temp1_crit.dev_attr.attr,
+ &sensor_dev_attr_temp1_max.dev_attr.attr,
+ &sensor_dev_attr_throttle1.dev_attr.attr,
+ NULL,
+};
+ATTRIBUTE_GROUPS(mt7915_hwmon);
+
+static int
+mt7915_thermal_get_max_throttle_state(struct thermal_cooling_device *cdev,
+ unsigned long *state)
+{
+ *state = MT7915_CDEV_THROTTLE_MAX;
+
+ return 0;
+}
+
+static int
+mt7915_thermal_get_cur_throttle_state(struct thermal_cooling_device *cdev,
+ unsigned long *state)
+{
+ struct mt7915_phy *phy = cdev->devdata;
+
+ *state = phy->cdev_state;
+
+ return 0;
+}
+
+static int
+mt7915_thermal_set_cur_throttle_state(struct thermal_cooling_device *cdev,
+ unsigned long state)
+{
+ struct mt7915_phy *phy = cdev->devdata;
+ u8 throttling = MT7915_THERMAL_THROTTLE_MAX - state;
+ int ret;
+
+ if (state > MT7915_CDEV_THROTTLE_MAX) {
+ dev_err(phy->dev->mt76.dev,
+ "please specify a valid throttling state\n");
+ return -EINVAL;
+ }
+
+ if (state == phy->cdev_state)
+ return 0;
+
+ /*
+ * cooling_device convention: 0 = no cooling, more = more cooling
+ * mcu convention: 1 = max cooling, more = less cooling
+ */
+ ret = mt7915_mcu_set_thermal_throttling(phy, throttling);
+ if (ret)
+ return ret;
+
+ phy->cdev_state = state;
+
+ return 0;
+}
+
+static const struct thermal_cooling_device_ops mt7915_thermal_ops = {
+ .get_max_state = mt7915_thermal_get_max_throttle_state,
+ .get_cur_state = mt7915_thermal_get_cur_throttle_state,
+ .set_cur_state = mt7915_thermal_set_cur_throttle_state,
+};
+
+static void mt7915_unregister_thermal(struct mt7915_phy *phy)
+{
+ struct wiphy *wiphy = phy->mt76->hw->wiphy;
+
+ if (!phy->cdev)
+ return;
+
+ sysfs_remove_link(&wiphy->dev.kobj, "cooling_device");
+ thermal_cooling_device_unregister(phy->cdev);
+}
+
+static int mt7915_thermal_init(struct mt7915_phy *phy)
+{
+ struct wiphy *wiphy = phy->mt76->hw->wiphy;
+ struct thermal_cooling_device *cdev;
+ struct device *hwmon;
+ const char *name;
+
+ name = devm_kasprintf(&wiphy->dev, GFP_KERNEL, "mt7915_%s",
+ wiphy_name(wiphy));
+
+ cdev = thermal_cooling_device_register(name, phy, &mt7915_thermal_ops);
+ if (!IS_ERR(cdev)) {
+ if (sysfs_create_link(&wiphy->dev.kobj, &cdev->device.kobj,
+ "cooling_device") < 0)
+ thermal_cooling_device_unregister(cdev);
+ else
+ phy->cdev = cdev;
+ }
+
+ /* initialize critical/maximum high temperature */
+ phy->throttle_temp[MT7915_CRIT_TEMP_IDX] = MT7915_CRIT_TEMP;
+ phy->throttle_temp[MT7915_MAX_TEMP_IDX] = MT7915_MAX_TEMP;
+
+ if (!IS_REACHABLE(CONFIG_HWMON))
+ return 0;
+
+ hwmon = devm_hwmon_device_register_with_groups(&wiphy->dev, name, phy,
+ mt7915_hwmon_groups);
+ if (IS_ERR(hwmon))
+ return PTR_ERR(hwmon);
+
+ return 0;
+}
+
+static void mt7915_led_set_config(struct led_classdev *led_cdev,
+ u8 delay_on, u8 delay_off)
+{
+ struct mt7915_dev *dev;
+ struct mt76_phy *mphy;
+ u32 val;
+
+ mphy = container_of(led_cdev, struct mt76_phy, leds.cdev);
+ dev = container_of(mphy->dev, struct mt7915_dev, mt76);
+
+ /* set PWM mode */
+ val = FIELD_PREP(MT_LED_STATUS_DURATION, 0xffff) |
+ FIELD_PREP(MT_LED_STATUS_OFF, delay_off) |
+ FIELD_PREP(MT_LED_STATUS_ON, delay_on);
+ mt76_wr(dev, MT_LED_STATUS_0(mphy->band_idx), val);
+ mt76_wr(dev, MT_LED_STATUS_1(mphy->band_idx), val);
+
+ /* enable LED */
+ mt76_wr(dev, MT_LED_EN(mphy->band_idx), 1);
+
+ /* control LED */
+ val = MT_LED_CTRL_KICK;
+ if (dev->mphy.leds.al)
+ val |= MT_LED_CTRL_POLARITY;
+ if (mphy->band_idx)
+ val |= MT_LED_CTRL_BAND;
+
+ mt76_wr(dev, MT_LED_CTRL(mphy->band_idx), val);
+ mt76_clear(dev, MT_LED_CTRL(mphy->band_idx), MT_LED_CTRL_KICK);
+}
+
+static int mt7915_led_set_blink(struct led_classdev *led_cdev,
+ unsigned long *delay_on,
+ unsigned long *delay_off)
+{
+ u16 delta_on = 0, delta_off = 0;
+
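+	/* convert the ms-based delays from the LED core into hw blink ticks */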
+#define HW_TICK 10
+#define TO_HW_TICK(_t) (((_t) > HW_TICK) ? ((_t) / HW_TICK) : HW_TICK)
+
+ if (*delay_on)
+ delta_on = TO_HW_TICK(*delay_on);
+ if (*delay_off)
+ delta_off = TO_HW_TICK(*delay_off);
+
+ mt7915_led_set_config(led_cdev, delta_on, delta_off);
+
+ return 0;
+}
+
+static void mt7915_led_set_brightness(struct led_classdev *led_cdev,
+ enum led_brightness brightness)
+{
+ if (!brightness)
+ mt7915_led_set_config(led_cdev, 0, 0xff);
+ else
+ mt7915_led_set_config(led_cdev, 0xff, 0);
+}
+
+void mt7915_init_txpower(struct mt7915_dev *dev,
+ struct ieee80211_supported_band *sband)
+{
+ int i, n_chains = hweight8(dev->mphy.antenna_mask);
+ int nss_delta = mt76_tx_power_nss_delta(n_chains);
+ int pwr_delta = mt7915_eeprom_get_power_delta(dev, sband->band);
+ struct mt76_power_limits limits;
+
+ for (i = 0; i < sband->n_channels; i++) {
+ struct ieee80211_channel *chan = &sband->channels[i];
+ u32 target_power = 0;
+ int j;
+
+ for (j = 0; j < n_chains; j++) {
+ u32 val;
+
+ val = mt7915_eeprom_get_target_power(dev, chan, j);
+ target_power = max(target_power, val);
+ }
+
+ target_power += pwr_delta;
+ target_power = mt76_get_rate_power_limits(&dev->mphy, chan,
+ &limits,
+ target_power);
+ target_power += nss_delta;
+ target_power = DIV_ROUND_UP(target_power, 2);
+ chan->max_power = min_t(int, chan->max_reg_power,
+ target_power);
+ chan->orig_mpwr = target_power;
+ }
+}
+
+static void
+mt7915_regd_notifier(struct wiphy *wiphy,
+ struct regulatory_request *request)
+{
+ struct ieee80211_hw *hw = wiphy_to_ieee80211_hw(wiphy);
+ struct mt7915_dev *dev = mt7915_hw_dev(hw);
+ struct mt76_phy *mphy = hw->priv;
+ struct mt7915_phy *phy = mphy->priv;
+
+ memcpy(dev->mt76.alpha2, request->alpha2, sizeof(dev->mt76.alpha2));
+ dev->mt76.region = request->dfs_region;
+
+ if (dev->mt76.region == NL80211_DFS_UNSET)
+ mt7915_mcu_rdd_background_enable(phy, NULL);
+
+ mt7915_init_txpower(dev, &mphy->sband_2g.sband);
+ mt7915_init_txpower(dev, &mphy->sband_5g.sband);
+ mt7915_init_txpower(dev, &mphy->sband_6g.sband);
+
+ mphy->dfs_state = MT_DFS_STATE_UNKNOWN;
+ mt7915_dfs_init_radar_detector(phy);
+}
+
+static void
+mt7915_init_wiphy(struct mt7915_phy *phy)
+{
+ struct mt76_phy *mphy = phy->mt76;
+ struct ieee80211_hw *hw = mphy->hw;
+ struct mt76_dev *mdev = &phy->dev->mt76;
+ struct wiphy *wiphy = hw->wiphy;
+ struct mt7915_dev *dev = phy->dev;
+
+ hw->queues = 4;
+ hw->max_rx_aggregation_subframes = IEEE80211_MAX_AMPDU_BUF_HE;
+ hw->max_tx_aggregation_subframes = IEEE80211_MAX_AMPDU_BUF_HE;
+ hw->netdev_features = NETIF_F_RXCSUM;
+
+ hw->radiotap_timestamp.units_pos =
+ IEEE80211_RADIOTAP_TIMESTAMP_UNIT_US;
+
+ phy->slottime = 9;
+
+ hw->sta_data_size = sizeof(struct mt7915_sta);
+ hw->vif_data_size = sizeof(struct mt7915_vif);
+
+ wiphy->iface_combinations = if_comb;
+ wiphy->n_iface_combinations = ARRAY_SIZE(if_comb);
+ wiphy->reg_notifier = mt7915_regd_notifier;
+ wiphy->flags |= WIPHY_FLAG_HAS_CHANNEL_SWITCH;
+ wiphy->mbssid_max_interfaces = 16;
+
+ wiphy_ext_feature_set(wiphy, NL80211_EXT_FEATURE_BSS_COLOR);
+ wiphy_ext_feature_set(wiphy, NL80211_EXT_FEATURE_VHT_IBSS);
+ wiphy_ext_feature_set(wiphy, NL80211_EXT_FEATURE_BEACON_RATE_LEGACY);
+ wiphy_ext_feature_set(wiphy, NL80211_EXT_FEATURE_BEACON_RATE_HT);
+ wiphy_ext_feature_set(wiphy, NL80211_EXT_FEATURE_BEACON_RATE_VHT);
+ wiphy_ext_feature_set(wiphy, NL80211_EXT_FEATURE_BEACON_RATE_HE);
+ wiphy_ext_feature_set(wiphy, NL80211_EXT_FEATURE_UNSOL_BCAST_PROBE_RESP);
+ wiphy_ext_feature_set(wiphy, NL80211_EXT_FEATURE_FILS_DISCOVERY);
+ wiphy_ext_feature_set(wiphy, NL80211_EXT_FEATURE_ACK_SIGNAL_SUPPORT);
+ wiphy_ext_feature_set(wiphy, NL80211_EXT_FEATURE_CAN_REPLACE_PTK0);
+
+ if (!is_mt7915(&dev->mt76))
+ wiphy_ext_feature_set(wiphy, NL80211_EXT_FEATURE_STA_TX_PWR);
+
+ if (!mdev->dev->of_node ||
+ !of_property_read_bool(mdev->dev->of_node,
+ "mediatek,disable-radar-background"))
+ wiphy_ext_feature_set(wiphy,
+ NL80211_EXT_FEATURE_RADAR_BACKGROUND);
+
+ ieee80211_hw_set(hw, HAS_RATE_CONTROL);
+ ieee80211_hw_set(hw, SUPPORTS_TX_ENCAP_OFFLOAD);
+ ieee80211_hw_set(hw, SUPPORTS_RX_DECAP_OFFLOAD);
+ ieee80211_hw_set(hw, SUPPORTS_MULTI_BSSID);
+ ieee80211_hw_set(hw, WANT_MONITOR_VIF);
+
+ hw->max_tx_fragments = 4;
+
+ if (phy->mt76->cap.has_2ghz) {
+ phy->mt76->sband_2g.sband.ht_cap.cap |=
+ IEEE80211_HT_CAP_LDPC_CODING |
+ IEEE80211_HT_CAP_MAX_AMSDU;
+ phy->mt76->sband_2g.sband.ht_cap.ampdu_density =
+ IEEE80211_HT_MPDU_DENSITY_4;
+ }
+
+ if (phy->mt76->cap.has_5ghz) {
+ struct ieee80211_sta_vht_cap *vht_cap;
+
+ vht_cap = &phy->mt76->sband_5g.sband.vht_cap;
+ phy->mt76->sband_5g.sband.ht_cap.cap |=
+ IEEE80211_HT_CAP_LDPC_CODING |
+ IEEE80211_HT_CAP_MAX_AMSDU;
+ phy->mt76->sband_5g.sband.ht_cap.ampdu_density =
+ IEEE80211_HT_MPDU_DENSITY_4;
+
+ if (is_mt7915(&dev->mt76)) {
+ vht_cap->cap |=
+ IEEE80211_VHT_CAP_MAX_MPDU_LENGTH_7991 |
+ IEEE80211_VHT_CAP_MAX_A_MPDU_LENGTH_EXPONENT_MASK;
+
+ if (!dev->dbdc_support)
+ vht_cap->cap |=
+ IEEE80211_VHT_CAP_SHORT_GI_160 |
+ FIELD_PREP(IEEE80211_VHT_CAP_EXT_NSS_BW_MASK, 1);
+ } else {
+ vht_cap->cap |=
+ IEEE80211_VHT_CAP_MAX_MPDU_LENGTH_11454 |
+ IEEE80211_VHT_CAP_MAX_A_MPDU_LENGTH_EXPONENT_MASK;
+
+ /* mt7916 dbdc with 2g 2x2 bw40 and 5g 2x2 bw160c */
+ vht_cap->cap |=
+ IEEE80211_VHT_CAP_SHORT_GI_160 |
+ IEEE80211_VHT_CAP_SUPP_CHAN_WIDTH_160MHZ;
+ }
+
+ if (!is_mt7915(&dev->mt76) || !dev->dbdc_support)
+ ieee80211_hw_set(hw, SUPPORTS_VHT_EXT_NSS_BW);
+ }
+
+ mt76_set_stream_caps(phy->mt76, true);
+ mt7915_set_stream_vht_txbf_caps(phy);
+ mt7915_set_stream_he_caps(phy);
+
+ wiphy->available_antennas_rx = phy->mt76->antenna_mask;
+ wiphy->available_antennas_tx = phy->mt76->antenna_mask;
+
+ /* init led callbacks */
+ if (IS_ENABLED(CONFIG_MT76_LEDS)) {
+ mphy->leds.cdev.brightness_set = mt7915_led_set_brightness;
+ mphy->leds.cdev.blink_set = mt7915_led_set_blink;
+ }
+}
+
+static void
+mt7915_mac_init_band(struct mt7915_dev *dev, u8 band)
+{
+ u32 mask, set;
+
+ mt76_rmw_field(dev, MT_TMAC_CTCR0(band),
+ MT_TMAC_CTCR0_INS_DDLMT_REFTIME, 0x3f);
+ mt76_set(dev, MT_TMAC_CTCR0(band),
+ MT_TMAC_CTCR0_INS_DDLMT_VHT_SMPDU_EN |
+ MT_TMAC_CTCR0_INS_DDLMT_EN);
+
+ mask = MT_MDP_RCFR0_MCU_RX_MGMT |
+ MT_MDP_RCFR0_MCU_RX_CTL_NON_BAR |
+ MT_MDP_RCFR0_MCU_RX_CTL_BAR;
+ set = FIELD_PREP(MT_MDP_RCFR0_MCU_RX_MGMT, MT_MDP_TO_HIF) |
+ FIELD_PREP(MT_MDP_RCFR0_MCU_RX_CTL_NON_BAR, MT_MDP_TO_HIF) |
+ FIELD_PREP(MT_MDP_RCFR0_MCU_RX_CTL_BAR, MT_MDP_TO_HIF);
+ mt76_rmw(dev, MT_MDP_BNRCFR0(band), mask, set);
+
+ mask = MT_MDP_RCFR1_MCU_RX_BYPASS |
+ MT_MDP_RCFR1_RX_DROPPED_UCAST |
+ MT_MDP_RCFR1_RX_DROPPED_MCAST;
+ set = FIELD_PREP(MT_MDP_RCFR1_MCU_RX_BYPASS, MT_MDP_TO_HIF) |
+ FIELD_PREP(MT_MDP_RCFR1_RX_DROPPED_UCAST, MT_MDP_TO_HIF) |
+ FIELD_PREP(MT_MDP_RCFR1_RX_DROPPED_MCAST, MT_MDP_TO_HIF);
+ mt76_rmw(dev, MT_MDP_BNRCFR1(band), mask, set);
+
+ mt76_rmw_field(dev, MT_DMA_DCR0(band), MT_DMA_DCR0_MAX_RX_LEN, 0x680);
+
+ /* mt7915: disable rx rate report by default due to hw issues */
+ mt76_clear(dev, MT_DMA_DCR0(band), MT_DMA_DCR0_RXD_G5_EN);
+
+ /* clear estimated value of EIFS for Rx duration & OBSS time */
+ mt76_wr(dev, MT_WF_RMAC_RSVD0(band), MT_WF_RMAC_RSVD0_EIFS_CLR);
+
+ /* clear backoff time for Rx duration */
+ mt76_clear(dev, MT_WF_RMAC_MIB_AIRTIME1(band),
+ MT_WF_RMAC_MIB_NONQOSD_BACKOFF);
+ mt76_clear(dev, MT_WF_RMAC_MIB_AIRTIME3(band),
+ MT_WF_RMAC_MIB_QOS01_BACKOFF);
+ mt76_clear(dev, MT_WF_RMAC_MIB_AIRTIME4(band),
+ MT_WF_RMAC_MIB_QOS23_BACKOFF);
+
+ /* clear backoff time and set software compensation for OBSS time */
+ mask = MT_WF_RMAC_MIB_OBSS_BACKOFF | MT_WF_RMAC_MIB_ED_OFFSET;
+ set = FIELD_PREP(MT_WF_RMAC_MIB_OBSS_BACKOFF, 0) |
+ FIELD_PREP(MT_WF_RMAC_MIB_ED_OFFSET, 4);
+ mt76_rmw(dev, MT_WF_RMAC_MIB_AIRTIME0(band), mask, set);
+
+	/* filter out non-response frames and get instantaneous signal reporting */
+ mask = MT_WTBLOFF_TOP_RSCR_RCPI_MODE | MT_WTBLOFF_TOP_RSCR_RCPI_PARAM;
+ set = FIELD_PREP(MT_WTBLOFF_TOP_RSCR_RCPI_MODE, 0) |
+ FIELD_PREP(MT_WTBLOFF_TOP_RSCR_RCPI_PARAM, 0x3);
+ mt76_rmw(dev, MT_WTBLOFF_TOP_RSCR(band), mask, set);
+
+ /* MT_TXD5_TX_STATUS_HOST (MPDU format) has higher priority than
+ * MT_AGG_ACR_PPDU_TXS2H (PPDU format) even though ACR bit is set.
+ */
+ if (mtk_wed_device_active(&dev->mt76.mmio.wed))
+ mt76_set(dev, MT_AGG_ACR4(band), MT_AGG_ACR_PPDU_TXS2H);
+}
+
+static void
+mt7915_init_led_mux(struct mt7915_dev *dev)
+{
+ if (!IS_ENABLED(CONFIG_MT76_LEDS))
+ return;
+
+ if (dev->dbdc_support) {
+ switch (mt76_chip(&dev->mt76)) {
+ case 0x7915:
+ mt76_rmw_field(dev, MT_LED_GPIO_MUX2,
+ GENMASK(11, 8), 4);
+ mt76_rmw_field(dev, MT_LED_GPIO_MUX3,
+ GENMASK(11, 8), 4);
+ break;
+ case 0x7986:
+ mt76_rmw_field(dev, MT_LED_GPIO_MUX0,
+ GENMASK(7, 4), 1);
+ mt76_rmw_field(dev, MT_LED_GPIO_MUX0,
+ GENMASK(11, 8), 1);
+ break;
+ case 0x7916:
+ mt76_rmw_field(dev, MT_LED_GPIO_MUX1,
+ GENMASK(27, 24), 3);
+ mt76_rmw_field(dev, MT_LED_GPIO_MUX1,
+ GENMASK(31, 28), 3);
+ break;
+ default:
+ break;
+ }
+ } else if (dev->mphy.leds.pin) {
+ switch (mt76_chip(&dev->mt76)) {
+ case 0x7915:
+ mt76_rmw_field(dev, MT_LED_GPIO_MUX3,
+ GENMASK(11, 8), 4);
+ break;
+ case 0x7986:
+ mt76_rmw_field(dev, MT_LED_GPIO_MUX0,
+ GENMASK(11, 8), 1);
+ break;
+ case 0x7916:
+ mt76_rmw_field(dev, MT_LED_GPIO_MUX1,
+ GENMASK(31, 28), 3);
+ break;
+ default:
+ break;
+ }
+ } else {
+ switch (mt76_chip(&dev->mt76)) {
+ case 0x7915:
+ mt76_rmw_field(dev, MT_LED_GPIO_MUX2,
+ GENMASK(11, 8), 4);
+ break;
+ case 0x7986:
+ mt76_rmw_field(dev, MT_LED_GPIO_MUX0,
+ GENMASK(7, 4), 1);
+ break;
+ case 0x7916:
+ mt76_rmw_field(dev, MT_LED_GPIO_MUX1,
+ GENMASK(27, 24), 3);
+ break;
+ default:
+ break;
+ }
+ }
+}
+
+void mt7915_mac_init(struct mt7915_dev *dev)
+{
+ int i;
+ u32 rx_len = is_mt7915(&dev->mt76) ? 0x400 : 0x680;
+
+ /* config pse qid6 wfdma port selection */
+ if (!is_mt7915(&dev->mt76) && dev->hif2)
+ mt76_rmw(dev, MT_WF_PP_TOP_RXQ_WFDMA_CF_5, 0,
+ MT_WF_PP_TOP_RXQ_QID6_WFDMA_HIF_SEL_MASK);
+
+ mt76_rmw_field(dev, MT_MDP_DCR1, MT_MDP_DCR1_MAX_RX_LEN, rx_len);
+
+ if (!is_mt7915(&dev->mt76))
+ mt76_clear(dev, MT_MDP_DCR2, MT_MDP_DCR2_RX_TRANS_SHORT);
+ else
+ mt76_clear(dev, MT_PLE_HOST_RPT0, MT_PLE_HOST_RPT0_TX_LATENCY);
+
+ /* enable hardware de-agg */
+ mt76_set(dev, MT_MDP_DCR0, MT_MDP_DCR0_DAMSDU_EN);
+
+ for (i = 0; i < mt7915_wtbl_size(dev); i++)
+ mt7915_mac_wtbl_update(dev, i,
+ MT_WTBL_UPDATE_ADM_COUNT_CLEAR);
+ for (i = 0; i < 2; i++)
+ mt7915_mac_init_band(dev, i);
+
+ mt7915_init_led_mux(dev);
+}
+
+int mt7915_txbf_init(struct mt7915_dev *dev)
+{
+ int ret;
+
+ if (dev->dbdc_support) {
+ ret = mt7915_mcu_set_txbf(dev, MT_BF_MODULE_UPDATE);
+ if (ret)
+ return ret;
+ }
+
+ /* trigger sounding packets */
+ ret = mt7915_mcu_set_txbf(dev, MT_BF_SOUNDING_ON);
+ if (ret)
+ return ret;
+
+ /* enable eBF */
+ return mt7915_mcu_set_txbf(dev, MT_BF_TYPE_UPDATE);
+}
+
+static struct mt7915_phy *
+mt7915_alloc_ext_phy(struct mt7915_dev *dev)
+{
+ struct mt7915_phy *phy;
+ struct mt76_phy *mphy;
+
+ if (!dev->dbdc_support)
+ return NULL;
+
+ mphy = mt76_alloc_phy(&dev->mt76, sizeof(*phy), &mt7915_ops, MT_BAND1);
+ if (!mphy)
+ return ERR_PTR(-ENOMEM);
+
+ phy = mphy->priv;
+ phy->dev = dev;
+ phy->mt76 = mphy;
+
+ /* Bind main phy to band0 and ext_phy to band1 for dbdc case */
+ phy->mt76->band_idx = 1;
+
+ return phy;
+}
+
+static int
+mt7915_register_ext_phy(struct mt7915_dev *dev, struct mt7915_phy *phy)
+{
+ struct mt76_phy *mphy = phy->mt76;
+ int ret;
+
+ INIT_DELAYED_WORK(&mphy->mac_work, mt7915_mac_work);
+
+ mt7915_eeprom_parse_hw_cap(dev, phy);
+
+ memcpy(mphy->macaddr, dev->mt76.eeprom.data + MT_EE_MAC_ADDR2,
+ ETH_ALEN);
+ /* Make the secondary PHY MAC address local without overlapping with
+ * the usual MAC address allocation scheme on multiple virtual interfaces
+ */
+ if (!is_valid_ether_addr(mphy->macaddr)) {
+ memcpy(mphy->macaddr, dev->mt76.eeprom.data + MT_EE_MAC_ADDR,
+ ETH_ALEN);
+ mphy->macaddr[0] |= 2;
+ mphy->macaddr[0] ^= BIT(7);
+ }
+ mt76_eeprom_override(mphy);
+
+ /* init wiphy according to mphy and phy */
+ mt7915_init_wiphy(phy);
+
+ ret = mt76_register_phy(mphy, true, mt76_rates,
+ ARRAY_SIZE(mt76_rates));
+ if (ret)
+ return ret;
+
+ ret = mt7915_thermal_init(phy);
+ if (ret)
+ goto unreg;
+
+ mt7915_init_debugfs(phy);
+
+ return 0;
+
+unreg:
+ mt76_unregister_phy(mphy);
+ return ret;
+}
+
+static void mt7915_init_work(struct work_struct *work)
+{
+ struct mt7915_dev *dev = container_of(work, struct mt7915_dev,
+ init_work);
+
+ mt7915_mcu_set_eeprom(dev);
+ mt7915_mac_init(dev);
+ mt7915_init_txpower(dev, &dev->mphy.sband_2g.sband);
+ mt7915_init_txpower(dev, &dev->mphy.sband_5g.sband);
+ mt7915_init_txpower(dev, &dev->mphy.sband_6g.sband);
+ mt7915_txbf_init(dev);
+}
+
+void mt7915_wfsys_reset(struct mt7915_dev *dev)
+{
+#define MT_MCU_DUMMY_RANDOM GENMASK(15, 0)
+#define MT_MCU_DUMMY_DEFAULT GENMASK(31, 16)
+
+ if (is_mt7915(&dev->mt76)) {
+ u32 val = MT_TOP_PWR_KEY | MT_TOP_PWR_SW_PWR_ON | MT_TOP_PWR_PWR_ON;
+
+ mt76_wr(dev, MT_MCU_WFDMA0_DUMMY_CR, MT_MCU_DUMMY_RANDOM);
+
+ /* change to software control */
+ val |= MT_TOP_PWR_SW_RST;
+ mt76_wr(dev, MT_TOP_PWR_CTRL, val);
+
+ /* reset wfsys */
+ val &= ~MT_TOP_PWR_SW_RST;
+ mt76_wr(dev, MT_TOP_PWR_CTRL, val);
+
+ /* release wfsys then mcu re-executes romcode */
+ val |= MT_TOP_PWR_SW_RST;
+ mt76_wr(dev, MT_TOP_PWR_CTRL, val);
+
+ /* switch to hw control */
+ val &= ~MT_TOP_PWR_SW_RST;
+ val |= MT_TOP_PWR_HW_CTRL;
+ mt76_wr(dev, MT_TOP_PWR_CTRL, val);
+
+ /* check whether mcu resets to default */
+ if (!mt76_poll_msec(dev, MT_MCU_WFDMA0_DUMMY_CR,
+ MT_MCU_DUMMY_DEFAULT, MT_MCU_DUMMY_DEFAULT,
+ 1000)) {
+ dev_err(dev->mt76.dev, "wifi subsystem reset failure\n");
+ return;
+ }
+
+ /* wfsys reset won't clear host registers */
+ mt76_clear(dev, MT_TOP_MISC, MT_TOP_MISC_FW_STATE);
+
+ msleep(100);
+ } else if (is_mt798x(&dev->mt76)) {
+ mt7986_wmac_disable(dev);
+ msleep(20);
+
+ mt7986_wmac_enable(dev);
+ msleep(20);
+ } else {
+ mt76_set(dev, MT_WF_SUBSYS_RST, 0x1);
+ msleep(20);
+
+ mt76_clear(dev, MT_WF_SUBSYS_RST, 0x1);
+ msleep(20);
+ }
+}
+
+static bool mt7915_band_config(struct mt7915_dev *dev)
+{
+ bool ret = true;
+
+ dev->phy.mt76->band_idx = 0;
+
+ if (is_mt798x(&dev->mt76)) {
+ u32 sku = mt7915_check_adie(dev, true);
+
+ /*
+ * for mt7986, dbdc support is determined by the number
+ * of adie chips and the main phy is bound to band1 when
+ * dbdc is disabled.
+ */
+ if (sku == MT7975_ONE_ADIE || sku == MT7976_ONE_ADIE) {
+ dev->phy.mt76->band_idx = 1;
+ ret = false;
+ }
+ } else {
+ ret = is_mt7915(&dev->mt76) ?
+ !!(mt76_rr(dev, MT_HW_BOUND) & BIT(5)) : true;
+ }
+
+ return ret;
+}
+
+static int
+mt7915_init_hardware(struct mt7915_dev *dev, struct mt7915_phy *phy2)
+{
+ int ret, idx;
+
+ mt76_wr(dev, MT_INT_MASK_CSR, 0);
+ mt76_wr(dev, MT_INT_SOURCE_CSR, ~0);
+
+ INIT_WORK(&dev->init_work, mt7915_init_work);
+
+ ret = mt7915_dma_init(dev, phy2);
+ if (ret)
+ return ret;
+
+ set_bit(MT76_STATE_INITIALIZED, &dev->mphy.state);
+
+ ret = mt7915_mcu_init(dev);
+ if (ret)
+ return ret;
+
+ ret = mt7915_eeprom_init(dev);
+ if (ret < 0)
+ return ret;
+
+ if (dev->flash_mode) {
+ ret = mt7915_mcu_apply_group_cal(dev);
+ if (ret)
+ return ret;
+ }
+
+ /* Beacon and mgmt frames should occupy wcid 0 */
+ idx = mt76_wcid_alloc(dev->mt76.wcid_mask, MT7915_WTBL_STA);
+ if (idx)
+ return -ENOSPC;
+
+ dev->mt76.global_wcid.idx = idx;
+ dev->mt76.global_wcid.hw_key_idx = -1;
+ dev->mt76.global_wcid.tx_info |= MT_WCID_TX_INFO_SET;
+ rcu_assign_pointer(dev->mt76.wcid[idx], &dev->mt76.global_wcid);
+
+ return 0;
+}
+
+void mt7915_set_stream_vht_txbf_caps(struct mt7915_phy *phy)
+{
+ int sts;
+ u32 *cap;
+
+ if (!phy->mt76->cap.has_5ghz)
+ return;
+
+ sts = hweight8(phy->mt76->chainmask);
+ cap = &phy->mt76->sband_5g.sband.vht_cap.cap;
+
+ *cap |= IEEE80211_VHT_CAP_SU_BEAMFORMEE_CAPABLE |
+ IEEE80211_VHT_CAP_MU_BEAMFORMEE_CAPABLE |
+ FIELD_PREP(IEEE80211_VHT_CAP_BEAMFORMEE_STS_MASK,
+ sts - 1);
+
+ *cap &= ~(IEEE80211_VHT_CAP_SOUNDING_DIMENSIONS_MASK |
+ IEEE80211_VHT_CAP_SU_BEAMFORMER_CAPABLE |
+ IEEE80211_VHT_CAP_MU_BEAMFORMER_CAPABLE);
+
+ if (sts < 2)
+ return;
+
+ *cap |= IEEE80211_VHT_CAP_SU_BEAMFORMER_CAPABLE |
+ IEEE80211_VHT_CAP_MU_BEAMFORMER_CAPABLE |
+ FIELD_PREP(IEEE80211_VHT_CAP_SOUNDING_DIMENSIONS_MASK,
+ sts - 1);
+}
+
+static void
+mt7915_set_stream_he_txbf_caps(struct mt7915_phy *phy,
+ struct ieee80211_sta_he_cap *he_cap, int vif)
+{
+ struct mt7915_dev *dev = phy->dev;
+ struct ieee80211_he_cap_elem *elem = &he_cap->he_cap_elem;
+ int sts = hweight8(phy->mt76->chainmask);
+ u8 c, sts_160 = sts;
+
+	/* Can do 1/2 of STS in 160MHz mode for mt7915 */
+ if (is_mt7915(&dev->mt76)) {
+ if (!dev->dbdc_support)
+ sts_160 /= 2;
+ else
+ sts_160 = 0;
+ }
+
+#ifdef CONFIG_MAC80211_MESH
+ if (vif == NL80211_IFTYPE_MESH_POINT)
+ return;
+#endif
+
+ elem->phy_cap_info[3] &= ~IEEE80211_HE_PHY_CAP3_SU_BEAMFORMER;
+ elem->phy_cap_info[4] &= ~IEEE80211_HE_PHY_CAP4_MU_BEAMFORMER;
+
+ c = IEEE80211_HE_PHY_CAP5_BEAMFORMEE_NUM_SND_DIM_UNDER_80MHZ_MASK;
+ if (sts_160)
+ c |= IEEE80211_HE_PHY_CAP5_BEAMFORMEE_NUM_SND_DIM_ABOVE_80MHZ_MASK;
+ elem->phy_cap_info[5] &= ~c;
+
+ c = IEEE80211_HE_PHY_CAP6_TRIG_SU_BEAMFORMING_FB |
+ IEEE80211_HE_PHY_CAP6_TRIG_MU_BEAMFORMING_PARTIAL_BW_FB;
+ elem->phy_cap_info[6] &= ~c;
+
+ elem->phy_cap_info[7] &= ~IEEE80211_HE_PHY_CAP7_MAX_NC_MASK;
+
+ c = IEEE80211_HE_PHY_CAP2_NDP_4x_LTF_AND_3_2US;
+ if (!is_mt7915(&dev->mt76))
+ c |= IEEE80211_HE_PHY_CAP2_UL_MU_FULL_MU_MIMO |
+ IEEE80211_HE_PHY_CAP2_UL_MU_PARTIAL_MU_MIMO;
+ elem->phy_cap_info[2] |= c;
+
+ c = IEEE80211_HE_PHY_CAP4_SU_BEAMFORMEE |
+ IEEE80211_HE_PHY_CAP4_BEAMFORMEE_MAX_STS_UNDER_80MHZ_4;
+ if (sts_160)
+ c |= IEEE80211_HE_PHY_CAP4_BEAMFORMEE_MAX_STS_ABOVE_80MHZ_4;
+ elem->phy_cap_info[4] |= c;
+
+	/* do not support NG16 since spec D4.0 changed the subcarrier index */
+ c = IEEE80211_HE_PHY_CAP6_CODEBOOK_SIZE_42_SU |
+ IEEE80211_HE_PHY_CAP6_CODEBOOK_SIZE_75_MU;
+
+ if (vif == NL80211_IFTYPE_STATION)
+ c |= IEEE80211_HE_PHY_CAP6_PARTIAL_BANDWIDTH_DL_MUMIMO;
+
+ elem->phy_cap_info[6] |= c;
+
+ if (sts < 2)
+ return;
+
+ /* the maximum cap is 4 x 3, (Nr, Nc) = (3, 2) */
+ elem->phy_cap_info[7] |= min_t(int, sts - 1, 2) << 3;
+
+ if (vif != NL80211_IFTYPE_AP)
+ return;
+
+ elem->phy_cap_info[3] |= IEEE80211_HE_PHY_CAP3_SU_BEAMFORMER;
+ elem->phy_cap_info[4] |= IEEE80211_HE_PHY_CAP4_MU_BEAMFORMER;
+
+ c = FIELD_PREP(IEEE80211_HE_PHY_CAP5_BEAMFORMEE_NUM_SND_DIM_UNDER_80MHZ_MASK,
+ sts - 1);
+ if (sts_160)
+ c |= FIELD_PREP(IEEE80211_HE_PHY_CAP5_BEAMFORMEE_NUM_SND_DIM_ABOVE_80MHZ_MASK,
+ sts_160 - 1);
+ elem->phy_cap_info[5] |= c;
+
+ c = IEEE80211_HE_PHY_CAP6_TRIG_SU_BEAMFORMING_FB |
+ IEEE80211_HE_PHY_CAP6_TRIG_MU_BEAMFORMING_PARTIAL_BW_FB;
+ elem->phy_cap_info[6] |= c;
+
+ if (!is_mt7915(&dev->mt76)) {
+ c = IEEE80211_HE_PHY_CAP7_STBC_TX_ABOVE_80MHZ |
+ IEEE80211_HE_PHY_CAP7_STBC_RX_ABOVE_80MHZ;
+ elem->phy_cap_info[7] |= c;
+ }
+}
+
+static int
+mt7915_init_he_caps(struct mt7915_phy *phy, enum nl80211_band band,
+ struct ieee80211_sband_iftype_data *data)
+{
+ struct mt7915_dev *dev = phy->dev;
+ int i, idx = 0, nss = hweight8(phy->mt76->antenna_mask);
+ u16 mcs_map = 0;
+ u16 mcs_map_160 = 0;
+ u8 nss_160;
+
+ if (!is_mt7915(&dev->mt76))
+ nss_160 = nss;
+ else if (!dev->dbdc_support)
+		/* Can do 1/2 of NSS streams in 160MHz mode for mt7915 */
+ nss_160 = nss / 2;
+ else
+ /* Can't do 160MHz with mt7915 dbdc */
+ nss_160 = 0;
+
+ for (i = 0; i < 8; i++) {
+ if (i < nss)
+ mcs_map |= (IEEE80211_HE_MCS_SUPPORT_0_11 << (i * 2));
+ else
+ mcs_map |= (IEEE80211_HE_MCS_NOT_SUPPORTED << (i * 2));
+
+ if (i < nss_160)
+ mcs_map_160 |= (IEEE80211_HE_MCS_SUPPORT_0_11 << (i * 2));
+ else
+ mcs_map_160 |= (IEEE80211_HE_MCS_NOT_SUPPORTED << (i * 2));
+ }
+
+ for (i = 0; i < NUM_NL80211_IFTYPES; i++) {
+ struct ieee80211_sta_he_cap *he_cap = &data[idx].he_cap;
+ struct ieee80211_he_cap_elem *he_cap_elem =
+ &he_cap->he_cap_elem;
+ struct ieee80211_he_mcs_nss_supp *he_mcs =
+ &he_cap->he_mcs_nss_supp;
+
+ switch (i) {
+ case NL80211_IFTYPE_STATION:
+ case NL80211_IFTYPE_AP:
+#ifdef CONFIG_MAC80211_MESH
+ case NL80211_IFTYPE_MESH_POINT:
+#endif
+ break;
+ default:
+ continue;
+ }
+
+ data[idx].types_mask = BIT(i);
+ he_cap->has_he = true;
+
+ he_cap_elem->mac_cap_info[0] =
+ IEEE80211_HE_MAC_CAP0_HTC_HE;
+ he_cap_elem->mac_cap_info[3] =
+ IEEE80211_HE_MAC_CAP3_OMI_CONTROL |
+ IEEE80211_HE_MAC_CAP3_MAX_AMPDU_LEN_EXP_EXT_3;
+ he_cap_elem->mac_cap_info[4] =
+ IEEE80211_HE_MAC_CAP4_AMSDU_IN_AMPDU;
+
+ if (band == NL80211_BAND_2GHZ)
+ he_cap_elem->phy_cap_info[0] =
+ IEEE80211_HE_PHY_CAP0_CHANNEL_WIDTH_SET_40MHZ_IN_2G;
+ else if (nss_160)
+ he_cap_elem->phy_cap_info[0] =
+ IEEE80211_HE_PHY_CAP0_CHANNEL_WIDTH_SET_40MHZ_80MHZ_IN_5G |
+ IEEE80211_HE_PHY_CAP0_CHANNEL_WIDTH_SET_160MHZ_IN_5G;
+ else
+ he_cap_elem->phy_cap_info[0] =
+ IEEE80211_HE_PHY_CAP0_CHANNEL_WIDTH_SET_40MHZ_80MHZ_IN_5G;
+
+ he_cap_elem->phy_cap_info[1] =
+ IEEE80211_HE_PHY_CAP1_LDPC_CODING_IN_PAYLOAD;
+ he_cap_elem->phy_cap_info[2] =
+ IEEE80211_HE_PHY_CAP2_STBC_TX_UNDER_80MHZ |
+ IEEE80211_HE_PHY_CAP2_STBC_RX_UNDER_80MHZ;
+
+ switch (i) {
+ case NL80211_IFTYPE_AP:
+ he_cap_elem->mac_cap_info[0] |=
+ IEEE80211_HE_MAC_CAP0_TWT_RES;
+ he_cap_elem->mac_cap_info[2] |=
+ IEEE80211_HE_MAC_CAP2_BSR;
+ he_cap_elem->mac_cap_info[4] |=
+ IEEE80211_HE_MAC_CAP4_BQR;
+ he_cap_elem->mac_cap_info[5] |=
+ IEEE80211_HE_MAC_CAP5_OM_CTRL_UL_MU_DATA_DIS_RX;
+ he_cap_elem->phy_cap_info[3] |=
+ IEEE80211_HE_PHY_CAP3_DCM_MAX_CONST_TX_QPSK |
+ IEEE80211_HE_PHY_CAP3_DCM_MAX_CONST_RX_QPSK;
+ he_cap_elem->phy_cap_info[6] |=
+ IEEE80211_HE_PHY_CAP6_PARTIAL_BW_EXT_RANGE |
+ IEEE80211_HE_PHY_CAP6_PPE_THRESHOLD_PRESENT;
+ he_cap_elem->phy_cap_info[9] |=
+ IEEE80211_HE_PHY_CAP9_TX_1024_QAM_LESS_THAN_242_TONE_RU |
+ IEEE80211_HE_PHY_CAP9_RX_1024_QAM_LESS_THAN_242_TONE_RU;
+ break;
+ case NL80211_IFTYPE_STATION:
+ he_cap_elem->mac_cap_info[1] |=
+ IEEE80211_HE_MAC_CAP1_TF_MAC_PAD_DUR_16US;
+
+ if (band == NL80211_BAND_2GHZ)
+ he_cap_elem->phy_cap_info[0] |=
+ IEEE80211_HE_PHY_CAP0_CHANNEL_WIDTH_SET_RU_MAPPING_IN_2G;
+ else
+ he_cap_elem->phy_cap_info[0] |=
+ IEEE80211_HE_PHY_CAP0_CHANNEL_WIDTH_SET_RU_MAPPING_IN_5G;
+
+ he_cap_elem->phy_cap_info[1] |=
+ IEEE80211_HE_PHY_CAP1_DEVICE_CLASS_A |
+ IEEE80211_HE_PHY_CAP1_HE_LTF_AND_GI_FOR_HE_PPDUS_0_8US;
+ he_cap_elem->phy_cap_info[3] |=
+ IEEE80211_HE_PHY_CAP3_DCM_MAX_CONST_TX_QPSK |
+ IEEE80211_HE_PHY_CAP3_DCM_MAX_CONST_RX_QPSK;
+ he_cap_elem->phy_cap_info[6] |=
+ IEEE80211_HE_PHY_CAP6_TRIG_CQI_FB |
+ IEEE80211_HE_PHY_CAP6_PARTIAL_BW_EXT_RANGE |
+ IEEE80211_HE_PHY_CAP6_PPE_THRESHOLD_PRESENT;
+ he_cap_elem->phy_cap_info[7] |=
+ IEEE80211_HE_PHY_CAP7_POWER_BOOST_FACTOR_SUPP |
+ IEEE80211_HE_PHY_CAP7_HE_SU_MU_PPDU_4XLTF_AND_08_US_GI;
+ he_cap_elem->phy_cap_info[8] |=
+ IEEE80211_HE_PHY_CAP8_20MHZ_IN_40MHZ_HE_PPDU_IN_2G |
+ IEEE80211_HE_PHY_CAP8_DCM_MAX_RU_484;
+ if (nss_160)
+ he_cap_elem->phy_cap_info[8] |=
+ IEEE80211_HE_PHY_CAP8_20MHZ_IN_160MHZ_HE_PPDU |
+ IEEE80211_HE_PHY_CAP8_80MHZ_IN_160MHZ_HE_PPDU;
+ he_cap_elem->phy_cap_info[9] |=
+ IEEE80211_HE_PHY_CAP9_LONGER_THAN_16_SIGB_OFDM_SYM |
+ IEEE80211_HE_PHY_CAP9_NON_TRIGGERED_CQI_FEEDBACK |
+ IEEE80211_HE_PHY_CAP9_TX_1024_QAM_LESS_THAN_242_TONE_RU |
+ IEEE80211_HE_PHY_CAP9_RX_1024_QAM_LESS_THAN_242_TONE_RU |
+ IEEE80211_HE_PHY_CAP9_RX_FULL_BW_SU_USING_MU_WITH_COMP_SIGB |
+ IEEE80211_HE_PHY_CAP9_RX_FULL_BW_SU_USING_MU_WITH_NON_COMP_SIGB;
+ break;
+ }
+
+ memset(he_mcs, 0, sizeof(*he_mcs));
+ he_mcs->rx_mcs_80 = cpu_to_le16(mcs_map);
+ he_mcs->tx_mcs_80 = cpu_to_le16(mcs_map);
+ he_mcs->rx_mcs_160 = cpu_to_le16(mcs_map_160);
+ he_mcs->tx_mcs_160 = cpu_to_le16(mcs_map_160);
+
+ mt7915_set_stream_he_txbf_caps(phy, he_cap, i);
+
+ memset(he_cap->ppe_thres, 0, sizeof(he_cap->ppe_thres));
+ if (he_cap_elem->phy_cap_info[6] &
+ IEEE80211_HE_PHY_CAP6_PPE_THRESHOLD_PRESENT) {
+ mt76_connac_gen_ppe_thresh(he_cap->ppe_thres, nss);
+ } else {
+ he_cap_elem->phy_cap_info[9] |=
+ u8_encode_bits(IEEE80211_HE_PHY_CAP9_NOMINAL_PKT_PADDING_16US,
+ IEEE80211_HE_PHY_CAP9_NOMINAL_PKT_PADDING_MASK);
+ }
+
+ if (band == NL80211_BAND_6GHZ) {
+ u16 cap = IEEE80211_HE_6GHZ_CAP_TX_ANTPAT_CONS |
+ IEEE80211_HE_6GHZ_CAP_RX_ANTPAT_CONS;
+
+ cap |= u16_encode_bits(IEEE80211_HT_MPDU_DENSITY_2,
+ IEEE80211_HE_6GHZ_CAP_MIN_MPDU_START) |
+ u16_encode_bits(IEEE80211_VHT_MAX_AMPDU_1024K,
+ IEEE80211_HE_6GHZ_CAP_MAX_AMPDU_LEN_EXP) |
+ u16_encode_bits(IEEE80211_VHT_CAP_MAX_MPDU_LENGTH_11454,
+ IEEE80211_HE_6GHZ_CAP_MAX_MPDU_LEN);
+
+ data[idx].he_6ghz_capa.capa = cpu_to_le16(cap);
+ }
+
+ idx++;
+ }
+
+ return idx;
+}
+
+void mt7915_set_stream_he_caps(struct mt7915_phy *phy)
+{
+ struct ieee80211_sband_iftype_data *data;
+ struct ieee80211_supported_band *band;
+ int n;
+
+ if (phy->mt76->cap.has_2ghz) {
+ data = phy->iftype[NL80211_BAND_2GHZ];
+ n = mt7915_init_he_caps(phy, NL80211_BAND_2GHZ, data);
+
+ band = &phy->mt76->sband_2g.sband;
+ band->iftype_data = data;
+ band->n_iftype_data = n;
+ }
+
+ if (phy->mt76->cap.has_5ghz) {
+ data = phy->iftype[NL80211_BAND_5GHZ];
+ n = mt7915_init_he_caps(phy, NL80211_BAND_5GHZ, data);
+
+ band = &phy->mt76->sband_5g.sband;
+ band->iftype_data = data;
+ band->n_iftype_data = n;
+ }
+
+ if (phy->mt76->cap.has_6ghz) {
+ data = phy->iftype[NL80211_BAND_6GHZ];
+ n = mt7915_init_he_caps(phy, NL80211_BAND_6GHZ, data);
+
+ band = &phy->mt76->sband_6g.sband;
+ band->iftype_data = data;
+ band->n_iftype_data = n;
+ }
+}
+
+static void mt7915_unregister_ext_phy(struct mt7915_dev *dev)
+{
+ struct mt7915_phy *phy = mt7915_ext_phy(dev);
+ struct mt76_phy *mphy = dev->mt76.phys[MT_BAND1];
+
+ if (!phy)
+ return;
+
+ mt7915_unregister_thermal(phy);
+ mt76_unregister_phy(mphy);
+ ieee80211_free_hw(mphy->hw);
+}
+
+static void mt7915_stop_hardware(struct mt7915_dev *dev)
+{
+ mt7915_mcu_exit(dev);
+ mt76_connac2_tx_token_put(&dev->mt76);
+ mt7915_dma_cleanup(dev);
+ tasklet_disable(&dev->mt76.irq_tasklet);
+
+ if (is_mt798x(&dev->mt76))
+ mt7986_wmac_disable(dev);
+}
+
+int mt7915_register_device(struct mt7915_dev *dev)
+{
+ struct mt7915_phy *phy2;
+ int ret;
+
+ dev->phy.dev = dev;
+ dev->phy.mt76 = &dev->mt76.phy;
+ dev->mt76.phy.priv = &dev->phy;
+ INIT_WORK(&dev->rc_work, mt7915_mac_sta_rc_work);
+ INIT_DELAYED_WORK(&dev->mphy.mac_work, mt7915_mac_work);
+ INIT_LIST_HEAD(&dev->sta_rc_list);
+ INIT_LIST_HEAD(&dev->twt_list);
+
+ init_waitqueue_head(&dev->reset_wait);
+ INIT_WORK(&dev->reset_work, mt7915_mac_reset_work);
+ INIT_WORK(&dev->dump_work, mt7915_mac_dump_work);
+ mutex_init(&dev->dump_mutex);
+
+ dev->dbdc_support = mt7915_band_config(dev);
+
+ phy2 = mt7915_alloc_ext_phy(dev);
+ if (IS_ERR(phy2))
+ return PTR_ERR(phy2);
+
+ ret = mt7915_init_hardware(dev, phy2);
+ if (ret)
+ goto free_phy2;
+
+ mt7915_init_wiphy(&dev->phy);
+
+#ifdef CONFIG_NL80211_TESTMODE
+ dev->mt76.test_ops = &mt7915_testmode_ops;
+#endif
+
+ ret = mt76_register_device(&dev->mt76, true, mt76_rates,
+ ARRAY_SIZE(mt76_rates));
+ if (ret)
+ goto stop_hw;
+
+ ret = mt7915_thermal_init(&dev->phy);
+ if (ret)
+ goto unreg_dev;
+
+ ieee80211_queue_work(mt76_hw(dev), &dev->init_work);
+
+ if (phy2) {
+ ret = mt7915_register_ext_phy(dev, phy2);
+ if (ret)
+ goto unreg_thermal;
+ }
+
+ dev->recovery.hw_init_done = true;
+
+ ret = mt7915_init_debugfs(&dev->phy);
+ if (ret)
+ goto unreg_thermal;
+
+ ret = mt7915_coredump_register(dev);
+ if (ret)
+ goto unreg_thermal;
+
+ return 0;
+
+unreg_thermal:
+ mt7915_unregister_thermal(&dev->phy);
+unreg_dev:
+ mt76_unregister_device(&dev->mt76);
+stop_hw:
+ mt7915_stop_hardware(dev);
+free_phy2:
+ if (phy2)
+ ieee80211_free_hw(phy2->mt76->hw);
+ return ret;
+}
+
+void mt7915_unregister_device(struct mt7915_dev *dev)
+{
+ mt7915_unregister_ext_phy(dev);
+ mt7915_coredump_unregister(dev);
+ mt7915_unregister_thermal(&dev->phy);
+ mt76_unregister_device(&dev->mt76);
+ mt7915_stop_hardware(dev);
+
+ mt76_free_device(&dev->mt76);
+}
diff --git a/drivers/net/wireless/mediatek/mt76/mt7915/mac.c b/drivers/net/wireless/mediatek/mt76/mt7915/mac.c
new file mode 100644
index 0000000000..2222fb9aa1
--- /dev/null
+++ b/drivers/net/wireless/mediatek/mt76/mt7915/mac.c
@@ -0,0 +1,2410 @@
+// SPDX-License-Identifier: ISC
+/* Copyright (C) 2020 MediaTek Inc. */
+
+#include <linux/etherdevice.h>
+#include <linux/timekeeping.h>
+#include "coredump.h"
+#include "mt7915.h"
+#include "../dma.h"
+#include "mac.h"
+#include "mcu.h"
+
+#define to_rssi(field, rcpi) ((FIELD_GET(field, rcpi) - 220) / 2)
+
+static const struct mt7915_dfs_radar_spec etsi_radar_specs = {
+ .pulse_th = { 110, -10, -80, 40, 5200, 128, 5200 },
+ .radar_pattern = {
+ [5] = { 1, 0, 6, 32, 28, 0, 990, 5010, 17, 1, 1 },
+ [6] = { 1, 0, 9, 32, 28, 0, 615, 5010, 27, 1, 1 },
+ [7] = { 1, 0, 15, 32, 28, 0, 240, 445, 27, 1, 1 },
+ [8] = { 1, 0, 12, 32, 28, 0, 240, 510, 42, 1, 1 },
+ [9] = { 1, 1, 0, 0, 0, 0, 2490, 3343, 14, 0, 0, 12, 32, 28, { }, 126 },
+ [10] = { 1, 1, 0, 0, 0, 0, 2490, 3343, 14, 0, 0, 15, 32, 24, { }, 126 },
+ [11] = { 1, 1, 0, 0, 0, 0, 823, 2510, 14, 0, 0, 18, 32, 28, { }, 54 },
+ [12] = { 1, 1, 0, 0, 0, 0, 823, 2510, 14, 0, 0, 27, 32, 24, { }, 54 },
+ },
+};
+
+static const struct mt7915_dfs_radar_spec fcc_radar_specs = {
+ .pulse_th = { 110, -10, -80, 40, 5200, 128, 5200 },
+ .radar_pattern = {
+ [0] = { 1, 0, 8, 32, 28, 0, 508, 3076, 13, 1, 1 },
+ [1] = { 1, 0, 12, 32, 28, 0, 140, 240, 17, 1, 1 },
+ [2] = { 1, 0, 8, 32, 28, 0, 190, 510, 22, 1, 1 },
+ [3] = { 1, 0, 6, 32, 28, 0, 190, 510, 32, 1, 1 },
+ [4] = { 1, 0, 9, 255, 28, 0, 323, 343, 13, 1, 32 },
+ },
+};
+
+static const struct mt7915_dfs_radar_spec jp_radar_specs = {
+ .pulse_th = { 110, -10, -80, 40, 5200, 128, 5200 },
+ .radar_pattern = {
+ [0] = { 1, 0, 8, 32, 28, 0, 508, 3076, 13, 1, 1 },
+ [1] = { 1, 0, 12, 32, 28, 0, 140, 240, 17, 1, 1 },
+ [2] = { 1, 0, 8, 32, 28, 0, 190, 510, 22, 1, 1 },
+ [3] = { 1, 0, 6, 32, 28, 0, 190, 510, 32, 1, 1 },
+ [4] = { 1, 0, 9, 255, 28, 0, 323, 343, 13, 1, 32 },
+ [13] = { 1, 0, 7, 32, 28, 0, 3836, 3856, 14, 1, 1 },
+ [14] = { 1, 0, 6, 32, 28, 0, 615, 5010, 110, 1, 1 },
+ [15] = { 1, 1, 0, 0, 0, 0, 15, 5010, 110, 0, 0, 12, 32, 28 },
+ },
+};
+
+static struct mt76_wcid *mt7915_rx_get_wcid(struct mt7915_dev *dev,
+ u16 idx, bool unicast)
+{
+ struct mt7915_sta *sta;
+ struct mt76_wcid *wcid;
+
+ if (idx >= ARRAY_SIZE(dev->mt76.wcid))
+ return NULL;
+
+ wcid = rcu_dereference(dev->mt76.wcid[idx]);
+ if (unicast || !wcid)
+ return wcid;
+
+ if (!wcid->sta)
+ return NULL;
+
+ sta = container_of(wcid, struct mt7915_sta, wcid);
+ if (!sta->vif)
+ return NULL;
+
+ return &sta->vif->sta.wcid;
+}
+
+bool mt7915_mac_wtbl_update(struct mt7915_dev *dev, int idx, u32 mask)
+{
+ mt76_rmw(dev, MT_WTBL_UPDATE, MT_WTBL_UPDATE_WLAN_IDX,
+ FIELD_PREP(MT_WTBL_UPDATE_WLAN_IDX, idx) | mask);
+
+ return mt76_poll(dev, MT_WTBL_UPDATE, MT_WTBL_UPDATE_BUSY,
+ 0, 5000);
+}
+
+u32 mt7915_mac_wtbl_lmac_addr(struct mt7915_dev *dev, u16 wcid, u8 dw)
+{
+ mt76_wr(dev, MT_WTBLON_TOP_WDUCR,
+ FIELD_PREP(MT_WTBLON_TOP_WDUCR_GROUP, (wcid >> 7)));
+
+ return MT_WTBL_LMAC_OFFS(wcid, dw);
+}
+
+static void mt7915_mac_sta_poll(struct mt7915_dev *dev)
+{
+ static const u8 ac_to_tid[] = {
+ [IEEE80211_AC_BE] = 0,
+ [IEEE80211_AC_BK] = 1,
+ [IEEE80211_AC_VI] = 4,
+ [IEEE80211_AC_VO] = 6
+ };
+ struct ieee80211_sta *sta;
+ struct mt7915_sta *msta;
+ struct rate_info *rate;
+ u32 tx_time[IEEE80211_NUM_ACS], rx_time[IEEE80211_NUM_ACS];
+ LIST_HEAD(sta_poll_list);
+ int i;
+
+ spin_lock_bh(&dev->mt76.sta_poll_lock);
+ list_splice_init(&dev->mt76.sta_poll_list, &sta_poll_list);
+ spin_unlock_bh(&dev->mt76.sta_poll_lock);
+
+ rcu_read_lock();
+
+ while (true) {
+ bool clear = false;
+ u32 addr, val;
+ u16 idx;
+ s8 rssi[4];
+ u8 bw;
+
+ spin_lock_bh(&dev->mt76.sta_poll_lock);
+ if (list_empty(&sta_poll_list)) {
+ spin_unlock_bh(&dev->mt76.sta_poll_lock);
+ break;
+ }
+ msta = list_first_entry(&sta_poll_list,
+ struct mt7915_sta, wcid.poll_list);
+ list_del_init(&msta->wcid.poll_list);
+ spin_unlock_bh(&dev->mt76.sta_poll_lock);
+
+ idx = msta->wcid.idx;
+
+ /* refresh peer's airtime reporting */
+ addr = mt7915_mac_wtbl_lmac_addr(dev, idx, 20);
+
+ for (i = 0; i < IEEE80211_NUM_ACS; i++) {
+ u32 tx_last = msta->airtime_ac[i];
+ u32 rx_last = msta->airtime_ac[i + 4];
+
+ msta->airtime_ac[i] = mt76_rr(dev, addr);
+ msta->airtime_ac[i + 4] = mt76_rr(dev, addr + 4);
+
+ tx_time[i] = msta->airtime_ac[i] - tx_last;
+ rx_time[i] = msta->airtime_ac[i + 4] - rx_last;
+
+ if ((tx_last | rx_last) & BIT(30))
+ clear = true;
+
+ addr += 8;
+ }
+
+ if (clear) {
+ mt7915_mac_wtbl_update(dev, idx,
+ MT_WTBL_UPDATE_ADM_COUNT_CLEAR);
+ memset(msta->airtime_ac, 0, sizeof(msta->airtime_ac));
+ }
+
+ if (!msta->wcid.sta)
+ continue;
+
+ sta = container_of((void *)msta, struct ieee80211_sta,
+ drv_priv);
+ for (i = 0; i < IEEE80211_NUM_ACS; i++) {
+ u8 queue = mt76_connac_lmac_mapping(i);
+ u32 tx_cur = tx_time[queue];
+ u32 rx_cur = rx_time[queue];
+ u8 tid = ac_to_tid[i];
+
+ if (!tx_cur && !rx_cur)
+ continue;
+
+ ieee80211_sta_register_airtime(sta, tid, tx_cur,
+ rx_cur);
+ }
+
+ /*
+ * We don't support reading GI info from txs packets.
+ * For accurate tx status reporting and AQL improvement,
+ * we need to make sure the rate flags match, so poll the GI
+ * from the per-sta counters directly.
+ */
+ rate = &msta->wcid.rate;
+ addr = mt7915_mac_wtbl_lmac_addr(dev, idx, 7);
+ val = mt76_rr(dev, addr);
+
+ switch (rate->bw) {
+ case RATE_INFO_BW_160:
+ bw = IEEE80211_STA_RX_BW_160;
+ break;
+ case RATE_INFO_BW_80:
+ bw = IEEE80211_STA_RX_BW_80;
+ break;
+ case RATE_INFO_BW_40:
+ bw = IEEE80211_STA_RX_BW_40;
+ break;
+ default:
+ bw = IEEE80211_STA_RX_BW_20;
+ break;
+ }
+
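+ /* WTBL DW7 holds the GI setting of the current rate: HE rates use a
+ * 2-bit GI field per bandwidth starting at bit 24, while HT/VHT rates
+ * use a short-GI bit per bandwidth starting at bit 12.
+ */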
+ if (rate->flags & RATE_INFO_FLAGS_HE_MCS) {
+ u8 offs = 24 + 2 * bw;
+
+ rate->he_gi = (val & (0x3 << offs)) >> offs;
+ } else if (rate->flags &
+ (RATE_INFO_FLAGS_VHT_MCS | RATE_INFO_FLAGS_MCS)) {
+ if (val & BIT(12 + bw))
+ rate->flags |= RATE_INFO_FLAGS_SHORT_GI;
+ else
+ rate->flags &= ~RATE_INFO_FLAGS_SHORT_GI;
+ }
+
+ /* get signal strength of resp frames (CTS/BA/ACK) */
+ addr = mt7915_mac_wtbl_lmac_addr(dev, idx, 30);
+ val = mt76_rr(dev, addr);
+
+ rssi[0] = to_rssi(GENMASK(7, 0), val);
+ rssi[1] = to_rssi(GENMASK(15, 8), val);
+ rssi[2] = to_rssi(GENMASK(23, 16), val);
+ rssi[3] = to_rssi(GENMASK(31, 24), val);
+
+ msta->ack_signal =
+ mt76_rx_signal(msta->vif->phy->mt76->antenna_mask, rssi);
+
+ ewma_avg_signal_add(&msta->avg_ack_signal, -msta->ack_signal);
+ }
+
+ rcu_read_unlock();
+}
+
+void mt7915_mac_enable_rtscts(struct mt7915_dev *dev,
+ struct ieee80211_vif *vif, bool enable)
+{
+ struct mt7915_vif *mvif = (struct mt7915_vif *)vif->drv_priv;
+ u32 addr;
+
+ addr = mt7915_mac_wtbl_lmac_addr(dev, mvif->sta.wcid.idx, 5);
+ if (enable)
+ mt76_set(dev, addr, BIT(5));
+ else
+ mt76_clear(dev, addr, BIT(5));
+}
+
+static void
+mt7915_wed_check_ppe(struct mt7915_dev *dev, struct mt76_queue *q,
+ struct mt7915_sta *msta, struct sk_buff *skb,
+ u32 info)
+{
+ struct ieee80211_vif *vif;
+ struct wireless_dev *wdev;
+
+ if (!msta || !msta->vif)
+ return;
+
+ if (!mt76_queue_is_wed_rx(q))
+ return;
+
+ if (!(info & MT_DMA_INFO_PPE_VLD))
+ return;
+
+ vif = container_of((void *)msta->vif, struct ieee80211_vif,
+ drv_priv);
+ wdev = ieee80211_vif_to_wdev(vif);
+ skb->dev = wdev->netdev;
+
+ mtk_wed_device_ppe_check(&dev->mt76.mmio.wed, skb,
+ FIELD_GET(MT_DMA_PPE_CPU_REASON, info),
+ FIELD_GET(MT_DMA_PPE_ENTRY, info));
+}
+
+static int
+mt7915_mac_fill_rx(struct mt7915_dev *dev, struct sk_buff *skb,
+ enum mt76_rxq_id q, u32 *info)
+{
+ struct mt76_rx_status *status = (struct mt76_rx_status *)skb->cb;
+ struct mt76_phy *mphy = &dev->mt76.phy;
+ struct mt7915_phy *phy = &dev->phy;
+ struct ieee80211_supported_band *sband;
+ __le32 *rxd = (__le32 *)skb->data;
+ __le32 *rxv = NULL;
+ u32 rxd0 = le32_to_cpu(rxd[0]);
+ u32 rxd1 = le32_to_cpu(rxd[1]);
+ u32 rxd2 = le32_to_cpu(rxd[2]);
+ u32 rxd3 = le32_to_cpu(rxd[3]);
+ u32 rxd4 = le32_to_cpu(rxd[4]);
+ u32 csum_mask = MT_RXD0_NORMAL_IP_SUM | MT_RXD0_NORMAL_UDP_TCP_SUM;
+ bool unicast, insert_ccmp_hdr = false;
+ u8 remove_pad, amsdu_info;
+ u8 mode = 0, qos_ctl = 0;
+ struct mt7915_sta *msta = NULL;
+ u32 csum_status = *(u32 *)skb->cb;
+ bool hdr_trans;
+ u16 hdr_gap;
+ u16 seq_ctrl = 0;
+ __le16 fc = 0;
+ int idx;
+
+ memset(status, 0, sizeof(*status));
+
+ if ((rxd1 & MT_RXD1_NORMAL_BAND_IDX) && !phy->mt76->band_idx) {
+ mphy = dev->mt76.phys[MT_BAND1];
+ if (!mphy)
+ return -EINVAL;
+
+ phy = mphy->priv;
+ status->phy_idx = 1;
+ }
+
+ if (!test_bit(MT76_STATE_RUNNING, &mphy->state))
+ return -EINVAL;
+
+ if (rxd2 & MT_RXD2_NORMAL_AMSDU_ERR)
+ return -EINVAL;
+
+ hdr_trans = rxd2 & MT_RXD2_NORMAL_HDR_TRANS;
+ if (hdr_trans && (rxd1 & MT_RXD1_NORMAL_CM))
+ return -EINVAL;
+
+ /* ICV error or CCMP/BIP/WPI MIC error */
+ if (rxd1 & MT_RXD1_NORMAL_ICV_ERR)
+ status->flag |= RX_FLAG_ONLY_MONITOR;
+
+ unicast = FIELD_GET(MT_RXD3_NORMAL_ADDR_TYPE, rxd3) == MT_RXD3_NORMAL_U2M;
+ idx = FIELD_GET(MT_RXD1_NORMAL_WLAN_IDX, rxd1);
+ status->wcid = mt7915_rx_get_wcid(dev, idx, unicast);
+
+ if (status->wcid) {
+ msta = container_of(status->wcid, struct mt7915_sta, wcid);
+ spin_lock_bh(&dev->mt76.sta_poll_lock);
+ if (list_empty(&msta->wcid.poll_list))
+ list_add_tail(&msta->wcid.poll_list,
+ &dev->mt76.sta_poll_list);
+ spin_unlock_bh(&dev->mt76.sta_poll_lock);
+ }
+
+ status->freq = mphy->chandef.chan->center_freq;
+ status->band = mphy->chandef.chan->band;
+ if (status->band == NL80211_BAND_5GHZ)
+ sband = &mphy->sband_5g.sband;
+ else if (status->band == NL80211_BAND_6GHZ)
+ sband = &mphy->sband_6g.sband;
+ else
+ sband = &mphy->sband_2g.sband;
+
+ if (!sband->channels)
+ return -EINVAL;
+
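+ /* Mark the checksum as verified only when the RXD flags both the IP
+ * and TCP/UDP checksums as good and bits 0/2/3 of the per-skb DMA
+ * status word are clear.
+ */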
+ if ((rxd0 & csum_mask) == csum_mask &&
+ !(csum_status & (BIT(0) | BIT(2) | BIT(3))))
+ skb->ip_summed = CHECKSUM_UNNECESSARY;
+
+ if (rxd1 & MT_RXD1_NORMAL_FCS_ERR)
+ status->flag |= RX_FLAG_FAILED_FCS_CRC;
+
+ if (rxd1 & MT_RXD1_NORMAL_TKIP_MIC_ERR)
+ status->flag |= RX_FLAG_MMIC_ERROR;
+
+ if (FIELD_GET(MT_RXD1_NORMAL_SEC_MODE, rxd1) != 0 &&
+ !(rxd1 & (MT_RXD1_NORMAL_CLM | MT_RXD1_NORMAL_CM))) {
+ status->flag |= RX_FLAG_DECRYPTED;
+ status->flag |= RX_FLAG_IV_STRIPPED;
+ status->flag |= RX_FLAG_MMIC_STRIPPED | RX_FLAG_MIC_STRIPPED;
+ }
+
+ remove_pad = FIELD_GET(MT_RXD2_NORMAL_HDR_OFFSET, rxd2);
+
+ if (rxd2 & MT_RXD2_NORMAL_MAX_LEN_ERROR)
+ return -EINVAL;
+
+ rxd += 6;
+ if (rxd1 & MT_RXD1_NORMAL_GROUP_4) {
+ u32 v0 = le32_to_cpu(rxd[0]);
+ u32 v2 = le32_to_cpu(rxd[2]);
+
+ fc = cpu_to_le16(FIELD_GET(MT_RXD6_FRAME_CONTROL, v0));
+ qos_ctl = FIELD_GET(MT_RXD8_QOS_CTL, v2);
+ seq_ctrl = FIELD_GET(MT_RXD8_SEQ_CTRL, v2);
+
+ rxd += 4;
+ if ((u8 *)rxd - skb->data >= skb->len)
+ return -EINVAL;
+ }
+
+ if (rxd1 & MT_RXD1_NORMAL_GROUP_1) {
+ u8 *data = (u8 *)rxd;
+
+ if (status->flag & RX_FLAG_DECRYPTED) {
+ switch (FIELD_GET(MT_RXD1_NORMAL_SEC_MODE, rxd1)) {
+ case MT_CIPHER_AES_CCMP:
+ case MT_CIPHER_CCMP_CCX:
+ case MT_CIPHER_CCMP_256:
+ insert_ccmp_hdr =
+ FIELD_GET(MT_RXD2_NORMAL_FRAG, rxd2);
+ fallthrough;
+ case MT_CIPHER_TKIP:
+ case MT_CIPHER_TKIP_NO_MIC:
+ case MT_CIPHER_GCMP:
+ case MT_CIPHER_GCMP_256:
+ status->iv[0] = data[5];
+ status->iv[1] = data[4];
+ status->iv[2] = data[3];
+ status->iv[3] = data[2];
+ status->iv[4] = data[1];
+ status->iv[5] = data[0];
+ break;
+ default:
+ break;
+ }
+ }
+ rxd += 4;
+ if ((u8 *)rxd - skb->data >= skb->len)
+ return -EINVAL;
+ }
+
+ if (rxd1 & MT_RXD1_NORMAL_GROUP_2) {
+ status->timestamp = le32_to_cpu(rxd[0]);
+ status->flag |= RX_FLAG_MACTIME_START;
+
+ if (!(rxd2 & MT_RXD2_NORMAL_NON_AMPDU)) {
+ status->flag |= RX_FLAG_AMPDU_DETAILS;
+
+ /* all subframes of an A-MPDU have the same timestamp */
+ if (phy->rx_ampdu_ts != status->timestamp) {
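+ /* bump the A-MPDU reference, skipping 0 on wraparound */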
+ if (!++phy->ampdu_ref)
+ phy->ampdu_ref++;
+ }
+ phy->rx_ampdu_ts = status->timestamp;
+
+ status->ampdu_ref = phy->ampdu_ref;
+ }
+
+ rxd += 2;
+ if ((u8 *)rxd - skb->data >= skb->len)
+ return -EINVAL;
+ }
+
+ /* RXD Group 3 - P-RXV */
+ if (rxd1 & MT_RXD1_NORMAL_GROUP_3) {
+ u32 v0, v1;
+ int ret;
+
+ rxv = rxd;
+ rxd += 2;
+ if ((u8 *)rxd - skb->data >= skb->len)
+ return -EINVAL;
+
+ v0 = le32_to_cpu(rxv[0]);
+ v1 = le32_to_cpu(rxv[1]);
+
+ if (v0 & MT_PRXV_HT_AD_CODE)
+ status->enc_flags |= RX_ENC_FLAG_LDPC;
+
+ status->chains = mphy->antenna_mask;
+ status->chain_signal[0] = to_rssi(MT_PRXV_RCPI0, v1);
+ status->chain_signal[1] = to_rssi(MT_PRXV_RCPI1, v1);
+ status->chain_signal[2] = to_rssi(MT_PRXV_RCPI2, v1);
+ status->chain_signal[3] = to_rssi(MT_PRXV_RCPI3, v1);
+
+ /* RXD Group 5 - C-RXV */
+ if (rxd1 & MT_RXD1_NORMAL_GROUP_5) {
+ rxd += 18;
+ if ((u8 *)rxd - skb->data >= skb->len)
+ return -EINVAL;
+ }
+
+ if (!is_mt7915(&dev->mt76) || (rxd1 & MT_RXD1_NORMAL_GROUP_5)) {
+ ret = mt76_connac2_mac_fill_rx_rate(&dev->mt76, status,
+ sband, rxv, &mode);
+ if (ret < 0)
+ return ret;
+ }
+ }
+
+ amsdu_info = FIELD_GET(MT_RXD4_NORMAL_PAYLOAD_FORMAT, rxd4);
+ status->amsdu = !!amsdu_info;
+ if (status->amsdu) {
+ status->first_amsdu = amsdu_info == MT_RXD4_FIRST_AMSDU_FRAME;
+ status->last_amsdu = amsdu_info == MT_RXD4_LAST_AMSDU_FRAME;
+ }
+
+ hdr_gap = (u8 *)rxd - skb->data + 2 * remove_pad;
+ if (hdr_trans && ieee80211_has_morefrags(fc)) {
+ struct ieee80211_vif *vif;
+ int err;
+
+ if (!msta || !msta->vif)
+ return -EINVAL;
+
+ vif = container_of((void *)msta->vif, struct ieee80211_vif,
+ drv_priv);
+ err = mt76_connac2_reverse_frag0_hdr_trans(vif, skb, hdr_gap);
+ if (err)
+ return err;
+
+ hdr_trans = false;
+ } else {
+ int pad_start = 0;
+
+ skb_pull(skb, hdr_gap);
+ if (!hdr_trans && status->amsdu) {
+ pad_start = ieee80211_get_hdrlen_from_skb(skb);
+ } else if (hdr_trans && (rxd2 & MT_RXD2_NORMAL_HDR_TRANS_ERROR)) {
+ /*
+ * When header translation failure is indicated,
+ * the hardware will insert an extra 2-byte field
+ * containing the data length after the protocol
+ * type field. This happens either when the LLC-SNAP
+ * pattern did not match, or if a VLAN header was
+ * detected.
+ */
+ pad_start = 12;
+ if (get_unaligned_be16(skb->data + pad_start) == ETH_P_8021Q)
+ pad_start += 4;
+ else
+ pad_start = 0;
+ }
+
+ if (pad_start) {
+ memmove(skb->data + 2, skb->data, pad_start);
+ skb_pull(skb, 2);
+ }
+ }
+
+ if (!hdr_trans) {
+ struct ieee80211_hdr *hdr;
+
+ if (insert_ccmp_hdr) {
+ u8 key_id = FIELD_GET(MT_RXD1_NORMAL_KEY_ID, rxd1);
+
+ mt76_insert_ccmp_hdr(skb, key_id);
+ }
+
+ hdr = mt76_skb_get_hdr(skb);
+ fc = hdr->frame_control;
+ if (ieee80211_is_data_qos(fc)) {
+ seq_ctrl = le16_to_cpu(hdr->seq_ctrl);
+ qos_ctl = *ieee80211_get_qos_ctl(hdr);
+ }
+ } else {
+ status->flag |= RX_FLAG_8023;
+ mt7915_wed_check_ppe(dev, &dev->mt76.q_rx[q], msta, skb,
+ *info);
+ }
+
+ if (rxv && mode >= MT_PHY_TYPE_HE_SU && !(status->flag & RX_FLAG_8023))
+ mt76_connac2_mac_decode_he_radiotap(&dev->mt76, skb, rxv, mode);
+
+ if (!status->wcid || !ieee80211_is_data_qos(fc))
+ return 0;
+
+ status->aggr = unicast &&
+ !ieee80211_is_qos_nullfunc(fc);
+ status->qos_ctl = qos_ctl;
+ status->seqno = IEEE80211_SEQ_TO_SN(seq_ctrl);
+
+ return 0;
+}
+
+static void
+mt7915_mac_fill_rx_vector(struct mt7915_dev *dev, struct sk_buff *skb)
+{
+#ifdef CONFIG_NL80211_TESTMODE
+ struct mt7915_phy *phy = &dev->phy;
+ __le32 *rxd = (__le32 *)skb->data;
+ __le32 *rxv_hdr = rxd + 2;
+ __le32 *rxv = rxd + 4;
+ u32 rcpi, ib_rssi, wb_rssi, v20, v21;
+ u8 band_idx;
+ s32 foe;
+ u8 snr;
+ int i;
+
+ band_idx = le32_get_bits(rxv_hdr[1], MT_RXV_HDR_BAND_IDX);
+ if (band_idx && !phy->mt76->band_idx) {
+ phy = mt7915_ext_phy(dev);
+ if (!phy)
+ goto out;
+ }
+
+ rcpi = le32_to_cpu(rxv[6]);
+ ib_rssi = le32_to_cpu(rxv[7]);
+ wb_rssi = le32_to_cpu(rxv[8]) >> 5;
+
+ for (i = 0; i < 4; i++, rcpi >>= 8, ib_rssi >>= 8, wb_rssi >>= 9) {
+ if (i == 3)
+ wb_rssi = le32_to_cpu(rxv[9]);
+
+ phy->test.last_rcpi[i] = rcpi & 0xff;
+ phy->test.last_ib_rssi[i] = ib_rssi & 0xff;
+ phy->test.last_wb_rssi[i] = wb_rssi & 0xff;
+ }
+
+ v20 = le32_to_cpu(rxv[20]);
+ v21 = le32_to_cpu(rxv[21]);
+
+ foe = FIELD_GET(MT_CRXV_FOE_LO, v20) |
+ (FIELD_GET(MT_CRXV_FOE_HI, v21) << MT_CRXV_FOE_SHIFT);
+
+ snr = FIELD_GET(MT_CRXV_SNR, v20) - 16;
+
+ phy->test.last_freq_offset = foe;
+ phy->test.last_snr = snr;
+out:
+#endif
+ dev_kfree_skb(skb);
+}
+
+static void
+mt7915_mac_write_txwi_tm(struct mt7915_phy *phy, __le32 *txwi,
+ struct sk_buff *skb)
+{
+#ifdef CONFIG_NL80211_TESTMODE
+ struct mt76_testmode_data *td = &phy->mt76->test;
+ const struct ieee80211_rate *r;
+ u8 bw, mode, nss = td->tx_rate_nss;
+ u8 rate_idx = td->tx_rate_idx;
+ u16 rateval = 0;
+ u32 val;
+ bool cck = false;
+ int band;
+
+ if (skb != phy->mt76->test.tx_skb)
+ return;
+
+ switch (td->tx_rate_mode) {
+ case MT76_TM_TX_MODE_HT:
+ nss = 1 + (rate_idx >> 3);
+ mode = MT_PHY_TYPE_HT;
+ break;
+ case MT76_TM_TX_MODE_VHT:
+ mode = MT_PHY_TYPE_VHT;
+ break;
+ case MT76_TM_TX_MODE_HE_SU:
+ mode = MT_PHY_TYPE_HE_SU;
+ break;
+ case MT76_TM_TX_MODE_HE_EXT_SU:
+ mode = MT_PHY_TYPE_HE_EXT_SU;
+ break;
+ case MT76_TM_TX_MODE_HE_TB:
+ mode = MT_PHY_TYPE_HE_TB;
+ break;
+ case MT76_TM_TX_MODE_HE_MU:
+ mode = MT_PHY_TYPE_HE_MU;
+ break;
+ case MT76_TM_TX_MODE_CCK:
+ cck = true;
+ fallthrough;
+ case MT76_TM_TX_MODE_OFDM:
+ band = phy->mt76->chandef.chan->band;
+ if (band == NL80211_BAND_2GHZ && !cck)
+ rate_idx += 4;
+
+ r = &phy->mt76->hw->wiphy->bands[band]->bitrates[rate_idx];
+ val = cck ? r->hw_value_short : r->hw_value;
+
+ mode = val >> 8;
+ rate_idx = val & 0xff;
+ break;
+ default:
+ mode = MT_PHY_TYPE_OFDM;
+ break;
+ }
+
+ switch (phy->mt76->chandef.width) {
+ case NL80211_CHAN_WIDTH_40:
+ bw = 1;
+ break;
+ case NL80211_CHAN_WIDTH_80:
+ bw = 2;
+ break;
+ case NL80211_CHAN_WIDTH_80P80:
+ case NL80211_CHAN_WIDTH_160:
+ bw = 3;
+ break;
+ default:
+ bw = 0;
+ break;
+ }
+
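+ /* a 1-NSS STBC rate occupies two space-time streams, so bump the
+ * NSS field of the fixed rate accordingly
+ */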
+ if (td->tx_rate_stbc && nss == 1) {
+ nss++;
+ rateval |= MT_TX_RATE_STBC;
+ }
+
+ rateval |= FIELD_PREP(MT_TX_RATE_IDX, rate_idx) |
+ FIELD_PREP(MT_TX_RATE_MODE, mode) |
+ FIELD_PREP(MT_TX_RATE_NSS, nss - 1);
+
+ txwi[2] |= cpu_to_le32(MT_TXD2_FIX_RATE);
+
+ le32p_replace_bits(&txwi[3], 1, MT_TXD3_REM_TX_COUNT);
+ if (td->tx_rate_mode < MT76_TM_TX_MODE_HT)
+ txwi[3] |= cpu_to_le32(MT_TXD3_BA_DISABLE);
+
+ val = MT_TXD6_FIXED_BW |
+ FIELD_PREP(MT_TXD6_BW, bw) |
+ FIELD_PREP(MT_TXD6_TX_RATE, rateval) |
+ FIELD_PREP(MT_TXD6_SGI, td->tx_rate_sgi);
+
+ /* for HE_SU/HE_EXT_SU PPDU
+ * - 1x, 2x, 4x LTF + 0.8us GI
+ * - 2x LTF + 1.6us GI, 4x LTF + 3.2us GI
+ * for HE_MU PPDU
+ * - 2x, 4x LTF + 0.8us GI
+ * - 2x LTF + 1.6us GI, 4x LTF + 3.2us GI
+ * for HE_TB PPDU
+ * - 1x, 2x LTF + 1.6us GI
+ * - 4x LTF + 3.2us GI
+ */
+ if (mode >= MT_PHY_TYPE_HE_SU)
+ val |= FIELD_PREP(MT_TXD6_HELTF, td->tx_ltf);
+
+ if (td->tx_rate_ldpc || (bw > 0 && mode >= MT_PHY_TYPE_HE_SU))
+ val |= MT_TXD6_LDPC;
+
+ txwi[3] &= ~cpu_to_le32(MT_TXD3_SN_VALID);
+ txwi[6] |= cpu_to_le32(val);
+ txwi[7] |= cpu_to_le32(FIELD_PREP(MT_TXD7_SPE_IDX,
+ phy->test.spe_idx));
+#endif
+}
+
+void mt7915_mac_write_txwi(struct mt76_dev *dev, __le32 *txwi,
+ struct sk_buff *skb, struct mt76_wcid *wcid, int pid,
+ struct ieee80211_key_conf *key,
+ enum mt76_txq_id qid, u32 changed)
+{
+ struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
+ u8 phy_idx = (info->hw_queue & MT_TX_HW_QUEUE_PHY) >> 2;
+ struct mt76_phy *mphy = &dev->phy;
+
+ if (phy_idx && dev->phys[MT_BAND1])
+ mphy = dev->phys[MT_BAND1];
+
+ mt76_connac2_mac_write_txwi(dev, txwi, skb, wcid, key, pid, qid, changed);
+
+ if (mt76_testmode_enabled(mphy))
+ mt7915_mac_write_txwi_tm(mphy->priv, txwi, skb);
+}
+
+int mt7915_tx_prepare_skb(struct mt76_dev *mdev, void *txwi_ptr,
+ enum mt76_txq_id qid, struct mt76_wcid *wcid,
+ struct ieee80211_sta *sta,
+ struct mt76_tx_info *tx_info)
+{
+ struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)tx_info->skb->data;
+ struct mt7915_dev *dev = container_of(mdev, struct mt7915_dev, mt76);
+ struct ieee80211_tx_info *info = IEEE80211_SKB_CB(tx_info->skb);
+ struct ieee80211_key_conf *key = info->control.hw_key;
+ struct ieee80211_vif *vif = info->control.vif;
+ struct mt76_connac_fw_txp *txp;
+ struct mt76_txwi_cache *t;
+ int id, i, nbuf = tx_info->nbuf - 1;
+ u8 *txwi = (u8 *)txwi_ptr;
+ int pid;
+
+ if (unlikely(tx_info->skb->len <= ETH_HLEN))
+ return -EINVAL;
+
+ if (!wcid)
+ wcid = &dev->mt76.global_wcid;
+
+ if (sta) {
+ struct mt7915_sta *msta;
+
+ msta = (struct mt7915_sta *)sta->drv_priv;
+
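+ /* request a hardware tx status report at most every 250 ms
+ * per station
+ */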
+ if (time_after(jiffies, msta->jiffies + HZ / 4)) {
+ info->flags |= IEEE80211_TX_CTL_REQ_TX_STATUS;
+ msta->jiffies = jiffies;
+ }
+ }
+
+ t = (struct mt76_txwi_cache *)(txwi + mdev->drv->txwi_size);
+ t->skb = tx_info->skb;
+
+ id = mt76_token_consume(mdev, &t);
+ if (id < 0)
+ return id;
+
+ pid = mt76_tx_status_skb_add(mdev, wcid, tx_info->skb);
+ mt7915_mac_write_txwi(mdev, txwi_ptr, tx_info->skb, wcid, pid, key,
+ qid, 0);
+
+ txp = (struct mt76_connac_fw_txp *)(txwi + MT_TXD_SIZE);
+ for (i = 0; i < nbuf; i++) {
+ txp->buf[i] = cpu_to_le32(tx_info->buf[i + 1].addr);
+ txp->len[i] = cpu_to_le16(tx_info->buf[i + 1].len);
+ }
+ txp->nbuf = nbuf;
+
+ txp->flags = cpu_to_le16(MT_CT_INFO_APPLY_TXD | MT_CT_INFO_FROM_HOST);
+
+ if (!key)
+ txp->flags |= cpu_to_le16(MT_CT_INFO_NONE_CIPHER_FRAME);
+
+ if (!(info->flags & IEEE80211_TX_CTL_HW_80211_ENCAP) &&
+ ieee80211_is_mgmt(hdr->frame_control))
+ txp->flags |= cpu_to_le16(MT_CT_INFO_MGMT_FRAME);
+
+ if (vif) {
+ struct mt7915_vif *mvif = (struct mt7915_vif *)vif->drv_priv;
+
+ txp->bss_idx = mvif->mt76.idx;
+ }
+
+ txp->token = cpu_to_le16(id);
+ if (test_bit(MT_WCID_FLAG_4ADDR, &wcid->flags))
+ txp->rept_wds_wcid = cpu_to_le16(wcid->idx);
+ else
+ txp->rept_wds_wcid = cpu_to_le16(0x3ff);
+ tx_info->skb = NULL;
+
+ /* pass partial skb header to fw */
+ tx_info->buf[1].len = MT_CT_PARSE_LEN;
+ tx_info->buf[1].skip_unmap = true;
+ tx_info->nbuf = MT_CT_DMA_BUF_NUM;
+
+ return 0;
+}
+
+u32 mt7915_wed_init_buf(void *ptr, dma_addr_t phys, int token_id)
+{
+ struct mt76_connac_fw_txp *txp = ptr + MT_TXD_SIZE;
+ __le32 *txwi = ptr;
+ u32 val;
+
+ memset(ptr, 0, MT_TXD_SIZE + sizeof(*txp));
+
+ val = FIELD_PREP(MT_TXD0_TX_BYTES, MT_TXD_SIZE) |
+ FIELD_PREP(MT_TXD0_PKT_FMT, MT_TX_TYPE_CT);
+ txwi[0] = cpu_to_le32(val);
+
+ val = MT_TXD1_LONG_FORMAT |
+ FIELD_PREP(MT_TXD1_HDR_FORMAT, MT_HDR_FORMAT_802_3);
+ txwi[1] = cpu_to_le32(val);
+
+ txp->token = cpu_to_le16(token_id);
+ txp->nbuf = 1;
+ txp->buf[0] = cpu_to_le32(phys + MT_TXD_SIZE + sizeof(*txp));
+
+ return MT_TXD_SIZE + sizeof(*txp);
+}
+
+static void
+mt7915_mac_tx_free_prepare(struct mt7915_dev *dev)
+{
+ struct mt76_dev *mdev = &dev->mt76;
+ struct mt76_phy *mphy_ext = mdev->phys[MT_BAND1];
+
+ /* clean DMA queues and unmap buffers first */
+ mt76_queue_tx_cleanup(dev, dev->mphy.q_tx[MT_TXQ_PSD], false);
+ mt76_queue_tx_cleanup(dev, dev->mphy.q_tx[MT_TXQ_BE], false);
+ if (mphy_ext) {
+ mt76_queue_tx_cleanup(dev, mphy_ext->q_tx[MT_TXQ_PSD], false);
+ mt76_queue_tx_cleanup(dev, mphy_ext->q_tx[MT_TXQ_BE], false);
+ }
+}
+
+static void
+mt7915_mac_tx_free_done(struct mt7915_dev *dev,
+ struct list_head *free_list, bool wake)
+{
+ struct sk_buff *skb, *tmp;
+
+ mt7915_mac_sta_poll(dev);
+
+ if (wake)
+ mt76_set_tx_blocked(&dev->mt76, false);
+
+ mt76_worker_schedule(&dev->mt76.tx_worker);
+
+ list_for_each_entry_safe(skb, tmp, free_list, list) {
+ skb_list_del_init(skb);
+ napi_consume_skb(skb, 1);
+ }
+}
+
+static void
+mt7915_mac_tx_free(struct mt7915_dev *dev, void *data, int len)
+{
+ struct mt76_connac_tx_free *free = data;
+ __le32 *tx_info = (__le32 *)(data + sizeof(*free));
+ struct mt76_dev *mdev = &dev->mt76;
+ struct mt76_txwi_cache *txwi;
+ struct ieee80211_sta *sta = NULL;
+ struct mt76_wcid *wcid = NULL;
+ LIST_HEAD(free_list);
+ void *end = data + len;
+ bool v3, wake = false;
+ u16 total, count = 0;
+ u32 txd = le32_to_cpu(free->txd);
+ __le32 *cur_info;
+
+ mt7915_mac_tx_free_prepare(dev);
+
+ total = le16_get_bits(free->ctrl, MT_TX_FREE_MSDU_CNT);
+ v3 = (FIELD_GET(MT_TX_FREE_VER, txd) == 0x4);
+
+ for (cur_info = tx_info; count < total; cur_info++) {
+ u32 msdu, info;
+ u8 i;
+
+ if (WARN_ON_ONCE((void *)cur_info >= end))
+ return;
+
+ /*
+ * 1'b1: new wcid pair.
+ * 1'b0: msdu_id with the same 'wcid pair' as above.
+ */
+ info = le32_to_cpu(*cur_info);
+ if (info & MT_TX_FREE_PAIR) {
+ struct mt7915_sta *msta;
+ u16 idx;
+
+ idx = FIELD_GET(MT_TX_FREE_WLAN_ID, info);
+ wcid = rcu_dereference(dev->mt76.wcid[idx]);
+ sta = wcid_to_sta(wcid);
+ if (!sta)
+ continue;
+
+ msta = container_of(wcid, struct mt7915_sta, wcid);
+ spin_lock_bh(&mdev->sta_poll_lock);
+ if (list_empty(&msta->wcid.poll_list))
+ list_add_tail(&msta->wcid.poll_list,
+ &mdev->sta_poll_list);
+ spin_unlock_bh(&mdev->sta_poll_lock);
+ continue;
+ }
+
+ if (!mtk_wed_device_active(&mdev->mmio.wed) && wcid) {
+ u32 tx_retries = 0, tx_failed = 0;
+
+ if (v3 && (info & MT_TX_FREE_MPDU_HEADER_V3)) {
+ tx_retries =
+ FIELD_GET(MT_TX_FREE_COUNT_V3, info) - 1;
+ tx_failed = tx_retries +
+ !!FIELD_GET(MT_TX_FREE_STAT_V3, info);
+ } else if (!v3 && (info & MT_TX_FREE_MPDU_HEADER)) {
+ tx_retries =
+ FIELD_GET(MT_TX_FREE_COUNT, info) - 1;
+ tx_failed = tx_retries +
+ !!FIELD_GET(MT_TX_FREE_STAT, info);
+ }
+ wcid->stats.tx_retries += tx_retries;
+ wcid->stats.tx_failed += tx_failed;
+ }
+
+ if (v3 && (info & MT_TX_FREE_MPDU_HEADER_V3))
+ continue;
+
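+ /* In the v3 format each word carries up to two 15-bit msdu ids;
+ * an all-ones id marks an unused slot.
+ */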
+ for (i = 0; i < 1 + v3; i++) {
+ if (v3) {
+ msdu = (info >> (15 * i)) & MT_TX_FREE_MSDU_ID_V3;
+ if (msdu == MT_TX_FREE_MSDU_ID_V3)
+ continue;
+ } else {
+ msdu = FIELD_GET(MT_TX_FREE_MSDU_ID, info);
+ }
+ count++;
+ txwi = mt76_token_release(mdev, msdu, &wake);
+ if (!txwi)
+ continue;
+
+ mt76_connac2_txwi_free(mdev, txwi, sta, &free_list);
+ }
+ }
+
+ mt7915_mac_tx_free_done(dev, &free_list, wake);
+}
+
+static void
+mt7915_mac_tx_free_v0(struct mt7915_dev *dev, void *data, int len)
+{
+ struct mt76_connac_tx_free *free = data;
+ __le16 *info = (__le16 *)(data + sizeof(*free));
+ struct mt76_dev *mdev = &dev->mt76;
+ void *end = data + len;
+ LIST_HEAD(free_list);
+ bool wake = false;
+ u8 i, count;
+
+ mt7915_mac_tx_free_prepare(dev);
+
+ count = FIELD_GET(MT_TX_FREE_MSDU_CNT_V0, le16_to_cpu(free->ctrl));
+ if (WARN_ON_ONCE((void *)&info[count] > end))
+ return;
+
+ for (i = 0; i < count; i++) {
+ struct mt76_txwi_cache *txwi;
+ u16 msdu = le16_to_cpu(info[i]);
+
+ txwi = mt76_token_release(mdev, msdu, &wake);
+ if (!txwi)
+ continue;
+
+ mt76_connac2_txwi_free(mdev, txwi, NULL, &free_list);
+ }
+
+ mt7915_mac_tx_free_done(dev, &free_list, wake);
+}
+
+static void mt7915_mac_add_txs(struct mt7915_dev *dev, void *data)
+{
+ struct mt7915_sta *msta = NULL;
+ struct mt76_wcid *wcid;
+ __le32 *txs_data = data;
+ u16 wcidx;
+ u8 pid;
+
+ wcidx = le32_get_bits(txs_data[2], MT_TXS2_WCID);
+ pid = le32_get_bits(txs_data[3], MT_TXS3_PID);
+
+ if (pid < MT_PACKET_ID_WED)
+ return;
+
+ if (wcidx >= mt7915_wtbl_size(dev))
+ return;
+
+ rcu_read_lock();
+
+ wcid = rcu_dereference(dev->mt76.wcid[wcidx]);
+ if (!wcid)
+ goto out;
+
+ msta = container_of(wcid, struct mt7915_sta, wcid);
+
+ if (pid == MT_PACKET_ID_WED)
+ mt76_connac2_mac_fill_txs(&dev->mt76, wcid, txs_data);
+ else
+ mt76_connac2_mac_add_txs_skb(&dev->mt76, wcid, pid, txs_data);
+
+ if (!wcid->sta)
+ goto out;
+
+ spin_lock_bh(&dev->mt76.sta_poll_lock);
+ if (list_empty(&msta->wcid.poll_list))
+ list_add_tail(&msta->wcid.poll_list, &dev->mt76.sta_poll_list);
+ spin_unlock_bh(&dev->mt76.sta_poll_lock);
+
+out:
+ rcu_read_unlock();
+}
+
+bool mt7915_rx_check(struct mt76_dev *mdev, void *data, int len)
+{
+ struct mt7915_dev *dev = container_of(mdev, struct mt7915_dev, mt76);
+ __le32 *rxd = (__le32 *)data;
+ __le32 *end = (__le32 *)&rxd[len / 4];
+ enum rx_pkt_type type;
+
+ type = le32_get_bits(rxd[0], MT_RXD0_PKT_TYPE);
+
+ switch (type) {
+ case PKT_TYPE_TXRX_NOTIFY:
+ mt7915_mac_tx_free(dev, data, len);
+ return false;
+ case PKT_TYPE_TXRX_NOTIFY_V0:
+ mt7915_mac_tx_free_v0(dev, data, len);
+ return false;
+ case PKT_TYPE_TXS:
+ for (rxd += 2; rxd + 8 <= end; rxd += 8)
+ mt7915_mac_add_txs(dev, rxd);
+ return false;
+ case PKT_TYPE_RX_FW_MONITOR:
+ mt7915_debugfs_rx_fw_monitor(dev, data, len);
+ return false;
+ default:
+ return true;
+ }
+}
+
+void mt7915_queue_rx_skb(struct mt76_dev *mdev, enum mt76_rxq_id q,
+ struct sk_buff *skb, u32 *info)
+{
+ struct mt7915_dev *dev = container_of(mdev, struct mt7915_dev, mt76);
+ __le32 *rxd = (__le32 *)skb->data;
+ __le32 *end = (__le32 *)&skb->data[skb->len];
+ enum rx_pkt_type type;
+
+ type = le32_get_bits(rxd[0], MT_RXD0_PKT_TYPE);
+
+ switch (type) {
+ case PKT_TYPE_TXRX_NOTIFY:
+ mt7915_mac_tx_free(dev, skb->data, skb->len);
+ napi_consume_skb(skb, 1);
+ break;
+ case PKT_TYPE_TXRX_NOTIFY_V0:
+ mt7915_mac_tx_free_v0(dev, skb->data, skb->len);
+ napi_consume_skb(skb, 1);
+ break;
+ case PKT_TYPE_RX_EVENT:
+ mt7915_mcu_rx_event(dev, skb);
+ break;
+ case PKT_TYPE_TXRXV:
+ mt7915_mac_fill_rx_vector(dev, skb);
+ break;
+ case PKT_TYPE_TXS:
+ for (rxd += 2; rxd + 8 <= end; rxd += 8)
+ mt7915_mac_add_txs(dev, rxd);
+ dev_kfree_skb(skb);
+ break;
+ case PKT_TYPE_RX_FW_MONITOR:
+ mt7915_debugfs_rx_fw_monitor(dev, skb->data, skb->len);
+ dev_kfree_skb(skb);
+ break;
+ case PKT_TYPE_NORMAL:
+ if (!mt7915_mac_fill_rx(dev, skb, q, info)) {
+ mt76_rx(&dev->mt76, q, skb);
+ return;
+ }
+ fallthrough;
+ default:
+ dev_kfree_skb(skb);
+ break;
+ }
+}
+
+void mt7915_mac_cca_stats_reset(struct mt7915_phy *phy)
+{
+ struct mt7915_dev *dev = phy->dev;
+ u32 reg = MT_WF_PHY_RX_CTRL1(phy->mt76->band_idx);
+
+ mt76_clear(dev, reg, MT_WF_PHY_RX_CTRL1_STSCNT_EN);
+ mt76_set(dev, reg, BIT(11) | BIT(9));
+}
+
+void mt7915_mac_reset_counters(struct mt7915_phy *phy)
+{
+ struct mt7915_dev *dev = phy->dev;
+ int i;
+
+ for (i = 0; i < 4; i++) {
+ mt76_rr(dev, MT_TX_AGG_CNT(phy->mt76->band_idx, i));
+ mt76_rr(dev, MT_TX_AGG_CNT2(phy->mt76->band_idx, i));
+ }
+
+ phy->mt76->survey_time = ktime_get_boottime();
+ memset(phy->mt76->aggr_stats, 0, sizeof(phy->mt76->aggr_stats));
+
+ /* reset airtime counters */
+ mt76_set(dev, MT_WF_RMAC_MIB_AIRTIME0(phy->mt76->band_idx),
+ MT_WF_RMAC_MIB_RXTIME_CLR);
+
+ mt7915_mcu_get_chan_mib_info(phy, true);
+}
+
+void mt7915_mac_set_timing(struct mt7915_phy *phy)
+{
+ s16 coverage_class = phy->coverage_class;
+ struct mt7915_dev *dev = phy->dev;
+ struct mt7915_phy *ext_phy = mt7915_ext_phy(dev);
+ u32 val, reg_offset;
+ u32 cck = FIELD_PREP(MT_TIMEOUT_VAL_PLCP, 231) |
+ FIELD_PREP(MT_TIMEOUT_VAL_CCA, 48);
+ u32 ofdm = FIELD_PREP(MT_TIMEOUT_VAL_PLCP, 60) |
+ FIELD_PREP(MT_TIMEOUT_VAL_CCA, 28);
+ u8 band = phy->mt76->band_idx;
+ int eifs_ofdm = 360, sifs = 10, offset;
+ bool a_band = !(phy->mt76->chandef.chan->band == NL80211_BAND_2GHZ);
+
+ if (!test_bit(MT76_STATE_RUNNING, &phy->mt76->state))
+ return;
+
+ if (ext_phy)
+ coverage_class = max_t(s16, dev->phy.coverage_class,
+ ext_phy->coverage_class);
+
+ mt76_set(dev, MT_ARB_SCR(band),
+ MT_ARB_SCR_TX_DISABLE | MT_ARB_SCR_RX_DISABLE);
+ udelay(1);
+
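+ /* extend the PLCP and CCA timeouts by 3 units per coverage class step */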
+ offset = 3 * coverage_class;
+ reg_offset = FIELD_PREP(MT_TIMEOUT_VAL_PLCP, offset) |
+ FIELD_PREP(MT_TIMEOUT_VAL_CCA, offset);
+
+ if (!is_mt7915(&dev->mt76)) {
+ if (!a_band) {
+ mt76_wr(dev, MT_TMAC_ICR1(band),
+ FIELD_PREP(MT_IFS_EIFS_CCK, 314));
+ eifs_ofdm = 78;
+ } else {
+ eifs_ofdm = 84;
+ }
+ } else if (a_band) {
+ sifs = 16;
+ }
+
+ mt76_wr(dev, MT_TMAC_CDTR(band), cck + reg_offset);
+ mt76_wr(dev, MT_TMAC_ODTR(band), ofdm + reg_offset);
+ mt76_wr(dev, MT_TMAC_ICR0(band),
+ FIELD_PREP(MT_IFS_EIFS_OFDM, eifs_ofdm) |
+ FIELD_PREP(MT_IFS_RIFS, 2) |
+ FIELD_PREP(MT_IFS_SIFS, sifs) |
+ FIELD_PREP(MT_IFS_SLOT, phy->slottime));
+
+ if (phy->slottime < 20 || a_band)
+ val = MT7915_CFEND_RATE_DEFAULT;
+ else
+ val = MT7915_CFEND_RATE_11B;
+
+ mt76_rmw_field(dev, MT_AGG_ACR0(band), MT_AGG_ACR_CFEND_RATE, val);
+ mt76_clear(dev, MT_ARB_SCR(band),
+ MT_ARB_SCR_TX_DISABLE | MT_ARB_SCR_RX_DISABLE);
+}
+
+void mt7915_mac_enable_nf(struct mt7915_dev *dev, bool band)
+{
+ u32 reg;
+
+ reg = is_mt7915(&dev->mt76) ? MT_WF_PHY_RXTD12(band) :
+ MT_WF_PHY_RXTD12_MT7916(band);
+ mt76_set(dev, reg,
+ MT_WF_PHY_RXTD12_IRPI_SW_CLR_ONLY |
+ MT_WF_PHY_RXTD12_IRPI_SW_CLR);
+
+ reg = is_mt7915(&dev->mt76) ? MT_WF_PHY_RX_CTRL1(band) :
+ MT_WF_PHY_RX_CTRL1_MT7916(band);
+ mt76_set(dev, reg, FIELD_PREP(MT_WF_PHY_RX_CTRL1_IPI_EN, 0x5));
+}
+
+static u8
+mt7915_phy_get_nf(struct mt7915_phy *phy, int idx)
+{
+ static const u8 nf_power[] = { 92, 89, 86, 83, 80, 75, 70, 65, 60, 55, 52 };
+ struct mt7915_dev *dev = phy->dev;
+ u32 val, sum = 0, n = 0;
+ int nss, i;
+
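+ /* The IRPI registers hold per-level sample counts; return the
+ * count-weighted average of the corresponding noise power values.
+ */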
+ for (nss = 0; nss < hweight8(phy->mt76->chainmask); nss++) {
+ u32 reg = is_mt7915(&dev->mt76) ?
+ MT_WF_IRPI_NSS(0, nss + (idx << dev->dbdc_support)) :
+ MT_WF_IRPI_NSS_MT7916(idx, nss);
+
+ for (i = 0; i < ARRAY_SIZE(nf_power); i++, reg += 4) {
+ val = mt76_rr(dev, reg);
+ sum += val * nf_power[i];
+ n += val;
+ }
+ }
+
+ if (!n)
+ return 0;
+
+ return sum / n;
+}
+
+void mt7915_update_channel(struct mt76_phy *mphy)
+{
+ struct mt7915_phy *phy = (struct mt7915_phy *)mphy->priv;
+ struct mt76_channel_state *state = mphy->chan_state;
+ int nf;
+
+ mt7915_mcu_get_chan_mib_info(phy, false);
+
+ nf = mt7915_phy_get_nf(phy, phy->mt76->band_idx);
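+ /* phy->noise keeps the averaged noise floor scaled by 16: seed it
+ * from the first sample, then update it as an EWMA with 1/16 weight
+ */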
+ if (!phy->noise)
+ phy->noise = nf << 4;
+ else if (nf)
+ phy->noise += nf - (phy->noise >> 4);
+
+ state->noise = -(phy->noise >> 4);
+}
+
+static bool
+mt7915_wait_reset_state(struct mt7915_dev *dev, u32 state)
+{
+ bool ret;
+
+ ret = wait_event_timeout(dev->reset_wait,
+ (READ_ONCE(dev->recovery.state) & state),
+ MT7915_RESET_TIMEOUT);
+
+ WARN(!ret, "Timeout waiting for MCU reset state %x\n", state);
+ return ret;
+}
+
+static void
+mt7915_update_vif_beacon(void *priv, u8 *mac, struct ieee80211_vif *vif)
+{
+ struct ieee80211_hw *hw = priv;
+
+ switch (vif->type) {
+ case NL80211_IFTYPE_MESH_POINT:
+ case NL80211_IFTYPE_ADHOC:
+ case NL80211_IFTYPE_AP:
+ mt7915_mcu_add_beacon(hw, vif, vif->bss_conf.enable_beacon,
+ BSS_CHANGED_BEACON_ENABLED);
+ break;
+ default:
+ break;
+ }
+}
+
+static void
+mt7915_update_beacons(struct mt7915_dev *dev)
+{
+ struct mt76_phy *mphy_ext = dev->mt76.phys[MT_BAND1];
+
+ ieee80211_iterate_active_interfaces(dev->mt76.hw,
+ IEEE80211_IFACE_ITER_RESUME_ALL,
+ mt7915_update_vif_beacon, dev->mt76.hw);
+
+ if (!mphy_ext)
+ return;
+
+ ieee80211_iterate_active_interfaces(mphy_ext->hw,
+ IEEE80211_IFACE_ITER_RESUME_ALL,
+ mt7915_update_vif_beacon, mphy_ext->hw);
+}
+
+static int
+mt7915_mac_restart(struct mt7915_dev *dev)
+{
+ struct mt7915_phy *phy2;
+ struct mt76_phy *ext_phy;
+ struct mt76_dev *mdev = &dev->mt76;
+ int i, ret;
+
+ ext_phy = dev->mt76.phys[MT_BAND1];
+ phy2 = ext_phy ? ext_phy->priv : NULL;
+
+ if (dev->hif2) {
+ mt76_wr(dev, MT_INT1_MASK_CSR, 0x0);
+ mt76_wr(dev, MT_INT1_SOURCE_CSR, ~0);
+ }
+
+ if (dev_is_pci(mdev->dev)) {
+ mt76_wr(dev, MT_PCIE_MAC_INT_ENABLE, 0x0);
+ if (dev->hif2) {
+ if (is_mt7915(mdev))
+ mt76_wr(dev, MT_PCIE1_MAC_INT_ENABLE, 0x0);
+ else
+ mt76_wr(dev, MT_PCIE1_MAC_INT_ENABLE_MT7916, 0x0);
+ }
+ }
+
+ set_bit(MT76_RESET, &dev->mphy.state);
+ set_bit(MT76_MCU_RESET, &dev->mphy.state);
+ wake_up(&dev->mt76.mcu.wait);
+ if (ext_phy) {
+ set_bit(MT76_RESET, &ext_phy->state);
+ set_bit(MT76_MCU_RESET, &ext_phy->state);
+ }
+
+ /* lock/unlock all queues to ensure that no tx is pending */
+ mt76_txq_schedule_all(&dev->mphy);
+ if (ext_phy)
+ mt76_txq_schedule_all(ext_phy);
+
+ /* disable all tx/rx napi */
+ mt76_worker_disable(&dev->mt76.tx_worker);
+ mt76_for_each_q_rx(mdev, i) {
+ if (mdev->q_rx[i].ndesc)
+ napi_disable(&dev->mt76.napi[i]);
+ }
+ napi_disable(&dev->mt76.tx_napi);
+
+ /* token reinit */
+ mt76_connac2_tx_token_put(&dev->mt76);
+ idr_init(&dev->mt76.token);
+
+ mt7915_dma_reset(dev, true);
+
+ local_bh_disable();
+ mt76_for_each_q_rx(mdev, i) {
+ if (mdev->q_rx[i].ndesc) {
+ napi_enable(&dev->mt76.napi[i]);
+ napi_schedule(&dev->mt76.napi[i]);
+ }
+ }
+ local_bh_enable();
+ clear_bit(MT76_MCU_RESET, &dev->mphy.state);
+ clear_bit(MT76_STATE_MCU_RUNNING, &dev->mphy.state);
+
+ mt76_wr(dev, MT_INT_MASK_CSR, dev->mt76.mmio.irqmask);
+ mt76_wr(dev, MT_INT_SOURCE_CSR, ~0);
+
+ if (dev->hif2) {
+ mt76_wr(dev, MT_INT1_MASK_CSR, dev->mt76.mmio.irqmask);
+ mt76_wr(dev, MT_INT1_SOURCE_CSR, ~0);
+ }
+ if (dev_is_pci(mdev->dev)) {
+ mt76_wr(dev, MT_PCIE_MAC_INT_ENABLE, 0xff);
+ if (dev->hif2) {
+ if (is_mt7915(mdev))
+ mt76_wr(dev, MT_PCIE1_MAC_INT_ENABLE, 0xff);
+ else
+ mt76_wr(dev, MT_PCIE1_MAC_INT_ENABLE_MT7916, 0xff);
+ }
+ }
+
+ /* load firmware */
+ ret = mt7915_mcu_init_firmware(dev);
+ if (ret)
+ goto out;
+
+ /* set the necessary init items */
+ ret = mt7915_mcu_set_eeprom(dev);
+ if (ret)
+ goto out;
+
+ mt7915_mac_init(dev);
+ mt7915_init_txpower(dev, &dev->mphy.sband_2g.sband);
+ mt7915_init_txpower(dev, &dev->mphy.sband_5g.sband);
+ ret = mt7915_txbf_init(dev);
+
+ if (test_bit(MT76_STATE_RUNNING, &dev->mphy.state)) {
+ ret = mt7915_run(dev->mphy.hw);
+ if (ret)
+ goto out;
+ }
+
+ if (ext_phy && test_bit(MT76_STATE_RUNNING, &ext_phy->state)) {
+ ret = mt7915_run(ext_phy->hw);
+ if (ret)
+ goto out;
+ }
+
+out:
+ /* reset done */
+ clear_bit(MT76_RESET, &dev->mphy.state);
+ if (phy2)
+ clear_bit(MT76_RESET, &phy2->mt76->state);
+
+ local_bh_disable();
+ napi_enable(&dev->mt76.tx_napi);
+ napi_schedule(&dev->mt76.tx_napi);
+ local_bh_enable();
+
+ mt76_worker_enable(&dev->mt76.tx_worker);
+
+ return ret;
+}
+
+static void
+mt7915_mac_full_reset(struct mt7915_dev *dev)
+{
+ struct mt76_phy *ext_phy;
+ int i;
+
+ ext_phy = dev->mt76.phys[MT_BAND1];
+
+ dev->recovery.hw_full_reset = true;
+
+ wake_up(&dev->mt76.mcu.wait);
+ ieee80211_stop_queues(mt76_hw(dev));
+ if (ext_phy)
+ ieee80211_stop_queues(ext_phy->hw);
+
+ cancel_delayed_work_sync(&dev->mphy.mac_work);
+ if (ext_phy)
+ cancel_delayed_work_sync(&ext_phy->mac_work);
+
+ mutex_lock(&dev->mt76.mutex);
+ for (i = 0; i < 10; i++) {
+ if (!mt7915_mac_restart(dev))
+ break;
+ }
+ mutex_unlock(&dev->mt76.mutex);
+
+ if (i == 10)
+ dev_err(dev->mt76.dev, "chip full reset failed\n");
+
+ ieee80211_restart_hw(mt76_hw(dev));
+ if (ext_phy)
+ ieee80211_restart_hw(ext_phy->hw);
+
+ ieee80211_wake_queues(mt76_hw(dev));
+ if (ext_phy)
+ ieee80211_wake_queues(ext_phy->hw);
+
+ dev->recovery.hw_full_reset = false;
+ ieee80211_queue_delayed_work(mt76_hw(dev), &dev->mphy.mac_work,
+ MT7915_WATCHDOG_TIME);
+ if (ext_phy)
+ ieee80211_queue_delayed_work(ext_phy->hw,
+ &ext_phy->mac_work,
+ MT7915_WATCHDOG_TIME);
+}
+
+/* system error recovery */
+void mt7915_mac_reset_work(struct work_struct *work)
+{
+ struct mt7915_phy *phy2;
+ struct mt76_phy *ext_phy;
+ struct mt7915_dev *dev;
+ int i;
+
+ dev = container_of(work, struct mt7915_dev, reset_work);
+ ext_phy = dev->mt76.phys[MT_BAND1];
+ phy2 = ext_phy ? ext_phy->priv : NULL;
+
+ /* chip full reset */
+ if (dev->recovery.restart) {
+ /* disable WA/WM WDT */
+ mt76_clear(dev, MT_WFDMA0_MCU_HOST_INT_ENA,
+ MT_MCU_CMD_WDT_MASK);
+
+ if (READ_ONCE(dev->recovery.state) & MT_MCU_CMD_WA_WDT)
+ dev->recovery.wa_reset_count++;
+ else
+ dev->recovery.wm_reset_count++;
+
+ mt7915_mac_full_reset(dev);
+
+ /* enable mcu irq */
+ mt7915_irq_enable(dev, MT_INT_MCU_CMD);
+ mt7915_irq_disable(dev, 0);
+
+ /* enable WA/WM WDT */
+ mt76_set(dev, MT_WFDMA0_MCU_HOST_INT_ENA, MT_MCU_CMD_WDT_MASK);
+
+ dev->recovery.state = MT_MCU_CMD_NORMAL_STATE;
+ dev->recovery.restart = false;
+ return;
+ }
+
+ /* chip partial reset */
+ if (!(READ_ONCE(dev->recovery.state) & MT_MCU_CMD_STOP_DMA))
+ return;
+
+ if (mtk_wed_device_active(&dev->mt76.mmio.wed)) {
+ mtk_wed_device_stop(&dev->mt76.mmio.wed);
+ if (!is_mt798x(&dev->mt76))
+ mt76_wr(dev, MT_INT_WED_MASK_CSR, 0);
+ }
+
+ ieee80211_stop_queues(mt76_hw(dev));
+ if (ext_phy)
+ ieee80211_stop_queues(ext_phy->hw);
+
+ set_bit(MT76_RESET, &dev->mphy.state);
+ set_bit(MT76_MCU_RESET, &dev->mphy.state);
+ wake_up(&dev->mt76.mcu.wait);
+ cancel_delayed_work_sync(&dev->mphy.mac_work);
+ if (phy2) {
+ set_bit(MT76_RESET, &phy2->mt76->state);
+ cancel_delayed_work_sync(&phy2->mt76->mac_work);
+ }
+ mt76_worker_disable(&dev->mt76.tx_worker);
+ mt76_for_each_q_rx(&dev->mt76, i)
+ napi_disable(&dev->mt76.napi[i]);
+ napi_disable(&dev->mt76.tx_napi);
+
+ mutex_lock(&dev->mt76.mutex);
+
+ mt76_wr(dev, MT_MCU_INT_EVENT, MT_MCU_INT_EVENT_DMA_STOPPED);
+
+ if (mt7915_wait_reset_state(dev, MT_MCU_CMD_RESET_DONE)) {
+ mt7915_dma_reset(dev, false);
+
+ mt76_connac2_tx_token_put(&dev->mt76);
+ idr_init(&dev->mt76.token);
+
+ mt76_wr(dev, MT_MCU_INT_EVENT, MT_MCU_INT_EVENT_DMA_INIT);
+ mt7915_wait_reset_state(dev, MT_MCU_CMD_RECOVERY_DONE);
+ }
+
+ mt76_wr(dev, MT_MCU_INT_EVENT, MT_MCU_INT_EVENT_RESET_DONE);
+ mt7915_wait_reset_state(dev, MT_MCU_CMD_NORMAL_STATE);
+
+ /* enable DMA Tx/Rx and interrupt */
+ mt7915_dma_start(dev, false, false);
+
+ clear_bit(MT76_MCU_RESET, &dev->mphy.state);
+ clear_bit(MT76_RESET, &dev->mphy.state);
+ if (phy2)
+ clear_bit(MT76_RESET, &phy2->mt76->state);
+
+ local_bh_disable();
+ mt76_for_each_q_rx(&dev->mt76, i) {
+ napi_enable(&dev->mt76.napi[i]);
+ napi_schedule(&dev->mt76.napi[i]);
+ }
+ local_bh_enable();
+
+ tasklet_schedule(&dev->mt76.irq_tasklet);
+
+ mt76_worker_enable(&dev->mt76.tx_worker);
+
+ local_bh_disable();
+ napi_enable(&dev->mt76.tx_napi);
+ napi_schedule(&dev->mt76.tx_napi);
+ local_bh_enable();
+
+ ieee80211_wake_queues(mt76_hw(dev));
+ if (ext_phy)
+ ieee80211_wake_queues(ext_phy->hw);
+
+ mutex_unlock(&dev->mt76.mutex);
+
+ mt7915_update_beacons(dev);
+
+ ieee80211_queue_delayed_work(mt76_hw(dev), &dev->mphy.mac_work,
+ MT7915_WATCHDOG_TIME);
+ if (phy2)
+ ieee80211_queue_delayed_work(ext_phy->hw,
+ &phy2->mt76->mac_work,
+ MT7915_WATCHDOG_TIME);
+}
+
+/* firmware coredump */
+void mt7915_mac_dump_work(struct work_struct *work)
+{
+ const struct mt7915_mem_region *mem_region;
+ struct mt7915_crash_data *crash_data;
+ struct mt7915_dev *dev;
+ struct mt7915_mem_hdr *hdr;
+ size_t buf_len;
+ int i;
+ u32 num;
+ u8 *buf;
+
+ dev = container_of(work, struct mt7915_dev, dump_work);
+
+ mutex_lock(&dev->dump_mutex);
+
+ crash_data = mt7915_coredump_new(dev);
+ if (!crash_data) {
+ mutex_unlock(&dev->dump_mutex);
+ goto skip_coredump;
+ }
+
+ mem_region = mt7915_coredump_get_mem_layout(dev, &num);
+ if (!mem_region || !crash_data->memdump_buf_len) {
+ mutex_unlock(&dev->dump_mutex);
+ goto skip_memdump;
+ }
+
+ buf = crash_data->memdump_buf;
+ buf_len = crash_data->memdump_buf_len;
+
+ /* dumping memory content... */
+ memset(buf, 0, buf_len);
+ for (i = 0; i < num; i++) {
+ if (mem_region->len > buf_len) {
+ dev_warn(dev->mt76.dev, "%s len %lu is too large\n",
+ mem_region->name,
+ (unsigned long)mem_region->len);
+ break;
+ }
+
+ /* reserve space for the header */
+ hdr = (void *)buf;
+ buf += sizeof(*hdr);
+ buf_len -= sizeof(*hdr);
+
+ mt7915_memcpy_fromio(dev, buf, mem_region->start,
+ mem_region->len);
+
+ hdr->start = mem_region->start;
+ hdr->len = mem_region->len;
+
+ if (!mem_region->len)
+ /* note: the header remains, just with zero length */
+ break;
+
+ buf += mem_region->len;
+ buf_len -= mem_region->len;
+
+ mem_region++;
+ }
+
+ mutex_unlock(&dev->dump_mutex);
+
+skip_memdump:
+ mt7915_coredump_submit(dev);
+skip_coredump:
+ queue_work(dev->mt76.wq, &dev->reset_work);
+}
+
+void mt7915_reset(struct mt7915_dev *dev)
+{
+ if (!dev->recovery.hw_init_done)
+ return;
+
+ if (dev->recovery.hw_full_reset)
+ return;
+
+ /* wm/wa exception: do full recovery */
+ if (READ_ONCE(dev->recovery.state) & MT_MCU_CMD_WDT_MASK) {
+ dev->recovery.restart = true;
+ dev_info(dev->mt76.dev,
+ "%s indicated firmware crash, attempting recovery\n",
+ wiphy_name(dev->mt76.hw->wiphy));
+
+ mt7915_irq_disable(dev, MT_INT_MCU_CMD);
+ queue_work(dev->mt76.wq, &dev->dump_work);
+ return;
+ }
+
+ queue_work(dev->mt76.wq, &dev->reset_work);
+ wake_up(&dev->reset_wait);
+}
+
+void mt7915_mac_update_stats(struct mt7915_phy *phy)
+{
+ struct mt76_mib_stats *mib = &phy->mib;
+ struct mt7915_dev *dev = phy->dev;
+ int i, aggr0 = 0, aggr1, cnt;
+ u8 band = phy->mt76->band_idx;
+ u32 val;
+
+ cnt = mt76_rr(dev, MT_MIB_SDR3(band));
+ mib->fcs_err_cnt += is_mt7915(&dev->mt76) ?
+ FIELD_GET(MT_MIB_SDR3_FCS_ERR_MASK, cnt) :
+ FIELD_GET(MT_MIB_SDR3_FCS_ERR_MASK_MT7916, cnt);
+
+ cnt = mt76_rr(dev, MT_MIB_SDR4(band));
+ mib->rx_fifo_full_cnt += FIELD_GET(MT_MIB_SDR4_RX_FIFO_FULL_MASK, cnt);
+
+ cnt = mt76_rr(dev, MT_MIB_SDR5(band));
+ mib->rx_mpdu_cnt += cnt;
+
+ cnt = mt76_rr(dev, MT_MIB_SDR6(band));
+ mib->channel_idle_cnt += FIELD_GET(MT_MIB_SDR6_CHANNEL_IDL_CNT_MASK, cnt);
+
+ cnt = mt76_rr(dev, MT_MIB_SDR7(band));
+ mib->rx_vector_mismatch_cnt +=
+ FIELD_GET(MT_MIB_SDR7_RX_VECTOR_MISMATCH_CNT_MASK, cnt);
+
+ cnt = mt76_rr(dev, MT_MIB_SDR8(band));
+ mib->rx_delimiter_fail_cnt +=
+ FIELD_GET(MT_MIB_SDR8_RX_DELIMITER_FAIL_CNT_MASK, cnt);
+
+ cnt = mt76_rr(dev, MT_MIB_SDR10(band));
+ mib->rx_mrdy_cnt += is_mt7915(&dev->mt76) ?
+ FIELD_GET(MT_MIB_SDR10_MRDY_COUNT_MASK, cnt) :
+ FIELD_GET(MT_MIB_SDR10_MRDY_COUNT_MASK_MT7916, cnt);
+
+ cnt = mt76_rr(dev, MT_MIB_SDR11(band));
+ mib->rx_len_mismatch_cnt +=
+ FIELD_GET(MT_MIB_SDR11_RX_LEN_MISMATCH_CNT_MASK, cnt);
+
+ cnt = mt76_rr(dev, MT_MIB_SDR12(band));
+ mib->tx_ampdu_cnt += cnt;
+
+ cnt = mt76_rr(dev, MT_MIB_SDR13(band));
+ mib->tx_stop_q_empty_cnt +=
+ FIELD_GET(MT_MIB_SDR13_TX_STOP_Q_EMPTY_CNT_MASK, cnt);
+
+ cnt = mt76_rr(dev, MT_MIB_SDR14(band));
+ mib->tx_mpdu_attempts_cnt += is_mt7915(&dev->mt76) ?
+ FIELD_GET(MT_MIB_SDR14_TX_MPDU_ATTEMPTS_CNT_MASK, cnt) :
+ FIELD_GET(MT_MIB_SDR14_TX_MPDU_ATTEMPTS_CNT_MASK_MT7916, cnt);
+
+ cnt = mt76_rr(dev, MT_MIB_SDR15(band));
+ mib->tx_mpdu_success_cnt += is_mt7915(&dev->mt76) ?
+ FIELD_GET(MT_MIB_SDR15_TX_MPDU_SUCCESS_CNT_MASK, cnt) :
+ FIELD_GET(MT_MIB_SDR15_TX_MPDU_SUCCESS_CNT_MASK_MT7916, cnt);
+
+ cnt = mt76_rr(dev, MT_MIB_SDR16(band));
+ mib->primary_cca_busy_time +=
+ FIELD_GET(MT_MIB_SDR16_PRIMARY_CCA_BUSY_TIME_MASK, cnt);
+
+ cnt = mt76_rr(dev, MT_MIB_SDR17(band));
+ mib->secondary_cca_busy_time +=
+ FIELD_GET(MT_MIB_SDR17_SECONDARY_CCA_BUSY_TIME_MASK, cnt);
+
+ cnt = mt76_rr(dev, MT_MIB_SDR18(band));
+ mib->primary_energy_detect_time +=
+ FIELD_GET(MT_MIB_SDR18_PRIMARY_ENERGY_DETECT_TIME_MASK, cnt);
+
+ cnt = mt76_rr(dev, MT_MIB_SDR19(band));
+ mib->cck_mdrdy_time += FIELD_GET(MT_MIB_SDR19_CCK_MDRDY_TIME_MASK, cnt);
+
+ cnt = mt76_rr(dev, MT_MIB_SDR20(band));
+ mib->ofdm_mdrdy_time +=
+ FIELD_GET(MT_MIB_SDR20_OFDM_VHT_MDRDY_TIME_MASK, cnt);
+
+ cnt = mt76_rr(dev, MT_MIB_SDR21(band));
+ mib->green_mdrdy_time +=
+ FIELD_GET(MT_MIB_SDR21_GREEN_MDRDY_TIME_MASK, cnt);
+
+ cnt = mt76_rr(dev, MT_MIB_SDR22(band));
+ mib->rx_ampdu_cnt += cnt;
+
+ cnt = mt76_rr(dev, MT_MIB_SDR23(band));
+ mib->rx_ampdu_bytes_cnt += cnt;
+
+ cnt = mt76_rr(dev, MT_MIB_SDR24(band));
+ mib->rx_ampdu_valid_subframe_cnt += is_mt7915(&dev->mt76) ?
+ FIELD_GET(MT_MIB_SDR24_RX_AMPDU_SF_CNT_MASK, cnt) :
+ FIELD_GET(MT_MIB_SDR24_RX_AMPDU_SF_CNT_MASK_MT7916, cnt);
+
+ cnt = mt76_rr(dev, MT_MIB_SDR25(band));
+ mib->rx_ampdu_valid_subframe_bytes_cnt += cnt;
+
+ cnt = mt76_rr(dev, MT_MIB_SDR27(band));
+ mib->tx_rwp_fail_cnt +=
+ FIELD_GET(MT_MIB_SDR27_TX_RWP_FAIL_CNT_MASK, cnt);
+
+ cnt = mt76_rr(dev, MT_MIB_SDR28(band));
+ mib->tx_rwp_need_cnt +=
+ FIELD_GET(MT_MIB_SDR28_TX_RWP_NEED_CNT_MASK, cnt);
+
+ cnt = mt76_rr(dev, MT_MIB_SDR29(band));
+ mib->rx_pfdrop_cnt += is_mt7915(&dev->mt76) ?
+ FIELD_GET(MT_MIB_SDR29_RX_PFDROP_CNT_MASK, cnt) :
+ FIELD_GET(MT_MIB_SDR29_RX_PFDROP_CNT_MASK_MT7916, cnt);
+
+ cnt = mt76_rr(dev, MT_MIB_SDRVEC(band));
+ mib->rx_vec_queue_overflow_drop_cnt += is_mt7915(&dev->mt76) ?
+ FIELD_GET(MT_MIB_SDR30_RX_VEC_QUEUE_OVERFLOW_DROP_CNT_MASK, cnt) :
+ FIELD_GET(MT_MIB_SDR30_RX_VEC_QUEUE_OVERFLOW_DROP_CNT_MASK_MT7916, cnt);
+
+ cnt = mt76_rr(dev, MT_MIB_SDR31(band));
+ mib->rx_ba_cnt += cnt;
+
+ cnt = mt76_rr(dev, MT_MIB_SDRMUBF(band));
+ mib->tx_bf_cnt += FIELD_GET(MT_MIB_MU_BF_TX_CNT, cnt);
+
+ cnt = mt76_rr(dev, MT_MIB_DR8(band));
+ mib->tx_mu_mpdu_cnt += cnt;
+
+ cnt = mt76_rr(dev, MT_MIB_DR9(band));
+ mib->tx_mu_acked_mpdu_cnt += cnt;
+
+ cnt = mt76_rr(dev, MT_MIB_DR11(band));
+ mib->tx_su_acked_mpdu_cnt += cnt;
+
+ cnt = mt76_rr(dev, MT_ETBF_PAR_RPT0(band));
+ mib->tx_bf_rx_fb_bw = FIELD_GET(MT_ETBF_PAR_RPT0_FB_BW, cnt);
+ mib->tx_bf_rx_fb_nc_cnt += FIELD_GET(MT_ETBF_PAR_RPT0_FB_NC, cnt);
+ mib->tx_bf_rx_fb_nr_cnt += FIELD_GET(MT_ETBF_PAR_RPT0_FB_NR, cnt);
+
+ for (i = 0; i < ARRAY_SIZE(mib->tx_amsdu); i++) {
+ cnt = mt76_rr(dev, MT_PLE_AMSDU_PACK_MSDU_CNT(i));
+ mib->tx_amsdu[i] += cnt;
+ mib->tx_amsdu_cnt += cnt;
+ }
+
+ if (is_mt7915(&dev->mt76)) {
+ for (i = 0, aggr1 = aggr0 + 8; i < 4; i++) {
+ val = mt76_rr(dev, MT_MIB_MB_SDR1(band, (i << 4)));
+ mib->ba_miss_cnt +=
+ FIELD_GET(MT_MIB_BA_MISS_COUNT_MASK, val);
+ mib->ack_fail_cnt +=
+ FIELD_GET(MT_MIB_ACK_FAIL_COUNT_MASK, val);
+
+ val = mt76_rr(dev, MT_MIB_MB_SDR0(band, (i << 4)));
+ mib->rts_cnt += FIELD_GET(MT_MIB_RTS_COUNT_MASK, val);
+ mib->rts_retries_cnt +=
+ FIELD_GET(MT_MIB_RTS_RETRIES_COUNT_MASK, val);
+
+ val = mt76_rr(dev, MT_TX_AGG_CNT(band, i));
+ phy->mt76->aggr_stats[aggr0++] += val & 0xffff;
+ phy->mt76->aggr_stats[aggr0++] += val >> 16;
+
+ val = mt76_rr(dev, MT_TX_AGG_CNT2(band, i));
+ phy->mt76->aggr_stats[aggr1++] += val & 0xffff;
+ phy->mt76->aggr_stats[aggr1++] += val >> 16;
+ }
+
+ cnt = mt76_rr(dev, MT_MIB_SDR32(band));
+ mib->tx_pkt_ebf_cnt += FIELD_GET(MT_MIB_SDR32_TX_PKT_EBF_CNT, cnt);
+
+ cnt = mt76_rr(dev, MT_MIB_SDR33(band));
+ mib->tx_pkt_ibf_cnt += FIELD_GET(MT_MIB_SDR33_TX_PKT_IBF_CNT, cnt);
+
+ cnt = mt76_rr(dev, MT_ETBF_TX_APP_CNT(band));
+ mib->tx_bf_ibf_ppdu_cnt += FIELD_GET(MT_ETBF_TX_IBF_CNT, cnt);
+ mib->tx_bf_ebf_ppdu_cnt += FIELD_GET(MT_ETBF_TX_EBF_CNT, cnt);
+
+ cnt = mt76_rr(dev, MT_ETBF_TX_NDP_BFRP(band));
+ mib->tx_bf_fb_cpl_cnt += FIELD_GET(MT_ETBF_TX_FB_CPL, cnt);
+ mib->tx_bf_fb_trig_cnt += FIELD_GET(MT_ETBF_TX_FB_TRI, cnt);
+
+ cnt = mt76_rr(dev, MT_ETBF_RX_FB_CNT(band));
+ mib->tx_bf_rx_fb_all_cnt += FIELD_GET(MT_ETBF_RX_FB_ALL, cnt);
+ mib->tx_bf_rx_fb_he_cnt += FIELD_GET(MT_ETBF_RX_FB_HE, cnt);
+ mib->tx_bf_rx_fb_vht_cnt += FIELD_GET(MT_ETBF_RX_FB_VHT, cnt);
+ mib->tx_bf_rx_fb_ht_cnt += FIELD_GET(MT_ETBF_RX_FB_HT, cnt);
+ } else {
+ for (i = 0; i < 2; i++) {
+ /* rts count */
+ val = mt76_rr(dev, MT_MIB_MB_SDR0(band, (i << 2)));
+ mib->rts_cnt += FIELD_GET(GENMASK(15, 0), val);
+ mib->rts_cnt += FIELD_GET(GENMASK(31, 16), val);
+
+ /* rts retry count */
+ val = mt76_rr(dev, MT_MIB_MB_SDR1(band, (i << 2)));
+ mib->rts_retries_cnt += FIELD_GET(GENMASK(15, 0), val);
+ mib->rts_retries_cnt += FIELD_GET(GENMASK(31, 16), val);
+
+ /* ba miss count */
+ val = mt76_rr(dev, MT_MIB_MB_SDR2(band, (i << 2)));
+ mib->ba_miss_cnt += FIELD_GET(GENMASK(15, 0), val);
+ mib->ba_miss_cnt += FIELD_GET(GENMASK(31, 16), val);
+
+ /* ack fail count */
+ val = mt76_rr(dev, MT_MIB_MB_BFTF(band, (i << 2)));
+ mib->ack_fail_cnt += FIELD_GET(GENMASK(15, 0), val);
+ mib->ack_fail_cnt += FIELD_GET(GENMASK(31, 16), val);
+ }
+
+ for (i = 0; i < 8; i++) {
+ val = mt76_rr(dev, MT_TX_AGG_CNT(band, i));
+ phy->mt76->aggr_stats[aggr0++] += FIELD_GET(GENMASK(15, 0), val);
+ phy->mt76->aggr_stats[aggr0++] += FIELD_GET(GENMASK(31, 16), val);
+ }
+
+ cnt = mt76_rr(dev, MT_MIB_SDR32(band));
+ mib->tx_pkt_ibf_cnt += FIELD_GET(MT_MIB_SDR32_TX_PKT_IBF_CNT, cnt);
+ mib->tx_bf_ibf_ppdu_cnt += FIELD_GET(MT_MIB_SDR32_TX_PKT_IBF_CNT, cnt);
+ mib->tx_pkt_ebf_cnt += FIELD_GET(MT_MIB_SDR32_TX_PKT_EBF_CNT, cnt);
+ mib->tx_bf_ebf_ppdu_cnt += FIELD_GET(MT_MIB_SDR32_TX_PKT_EBF_CNT, cnt);
+
+ cnt = mt76_rr(dev, MT_MIB_BFCR7(band));
+ mib->tx_bf_fb_cpl_cnt += FIELD_GET(MT_MIB_BFCR7_BFEE_TX_FB_CPL, cnt);
+
+ cnt = mt76_rr(dev, MT_MIB_BFCR2(band));
+ mib->tx_bf_fb_trig_cnt += FIELD_GET(MT_MIB_BFCR2_BFEE_TX_FB_TRIG, cnt);
+
+ cnt = mt76_rr(dev, MT_MIB_BFCR0(band));
+ mib->tx_bf_rx_fb_vht_cnt += FIELD_GET(MT_MIB_BFCR0_RX_FB_VHT, cnt);
+ mib->tx_bf_rx_fb_all_cnt += FIELD_GET(MT_MIB_BFCR0_RX_FB_VHT, cnt);
+ mib->tx_bf_rx_fb_ht_cnt += FIELD_GET(MT_MIB_BFCR0_RX_FB_HT, cnt);
+ mib->tx_bf_rx_fb_all_cnt += FIELD_GET(MT_MIB_BFCR0_RX_FB_HT, cnt);
+
+ cnt = mt76_rr(dev, MT_MIB_BFCR1(band));
+ mib->tx_bf_rx_fb_he_cnt += FIELD_GET(MT_MIB_BFCR1_RX_FB_HE, cnt);
+ mib->tx_bf_rx_fb_all_cnt += FIELD_GET(MT_MIB_BFCR1_RX_FB_HE, cnt);
+ }
+}
+
+static void mt7915_mac_severe_check(struct mt7915_phy *phy)
+{
+ struct mt7915_dev *dev = phy->dev;
+ u32 trb;
+
+ if (!phy->omac_mask)
+ return;
+
+	/* In rare cases the TRB pointers can get out of sync and cause RMAC
+	 * to stop receiving, so check the status periodically and trigger a
+	 * minimal recovery of the TRB hardware if required.
+	 */
+ trb = mt76_rr(dev, MT_TRB_RXPSR0(phy->mt76->band_idx));
+
+ if ((FIELD_GET(MT_TRB_RXPSR0_RX_RMAC_PTR, trb) !=
+ FIELD_GET(MT_TRB_RXPSR0_RX_WTBL_PTR, trb)) &&
+ (FIELD_GET(MT_TRB_RXPSR0_RX_RMAC_PTR, phy->trb_ts) !=
+ FIELD_GET(MT_TRB_RXPSR0_RX_WTBL_PTR, phy->trb_ts)) &&
+ trb == phy->trb_ts)
+ mt7915_mcu_set_ser(dev, SER_RECOVER, SER_SET_RECOVER_L3_RX_ABORT,
+ phy->mt76->band_idx);
+
+ phy->trb_ts = trb;
+}
+
+void mt7915_mac_sta_rc_work(struct work_struct *work)
+{
+ struct mt7915_dev *dev = container_of(work, struct mt7915_dev, rc_work);
+ struct ieee80211_sta *sta;
+ struct ieee80211_vif *vif;
+ struct mt7915_sta *msta;
+ u32 changed;
+ LIST_HEAD(list);
+
+ spin_lock_bh(&dev->mt76.sta_poll_lock);
+ list_splice_init(&dev->sta_rc_list, &list);
+
+ while (!list_empty(&list)) {
+ msta = list_first_entry(&list, struct mt7915_sta, rc_list);
+ list_del_init(&msta->rc_list);
+ changed = msta->changed;
+ msta->changed = 0;
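+		/* drop the lock while issuing MCU commands below, which may sleep */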
+ spin_unlock_bh(&dev->mt76.sta_poll_lock);
+
+ sta = container_of((void *)msta, struct ieee80211_sta, drv_priv);
+ vif = container_of((void *)msta->vif, struct ieee80211_vif, drv_priv);
+
+ if (changed & (IEEE80211_RC_SUPP_RATES_CHANGED |
+ IEEE80211_RC_NSS_CHANGED |
+ IEEE80211_RC_BW_CHANGED))
+ mt7915_mcu_add_rate_ctrl(dev, vif, sta, true);
+
+ if (changed & IEEE80211_RC_SMPS_CHANGED)
+ mt7915_mcu_add_smps(dev, vif, sta);
+
+ spin_lock_bh(&dev->mt76.sta_poll_lock);
+ }
+
+ spin_unlock_bh(&dev->mt76.sta_poll_lock);
+}
+
+void mt7915_mac_work(struct work_struct *work)
+{
+ struct mt7915_phy *phy;
+ struct mt76_phy *mphy;
+
+ mphy = (struct mt76_phy *)container_of(work, struct mt76_phy,
+ mac_work.work);
+ phy = mphy->priv;
+
+ mutex_lock(&mphy->dev->mutex);
+
+ mt76_update_survey(mphy);
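+	/* run the heavier stats update and recovery checks every fifth tick */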
+ if (++mphy->mac_work_count == 5) {
+ mphy->mac_work_count = 0;
+
+ mt7915_mac_update_stats(phy);
+ mt7915_mac_severe_check(phy);
+
+ if (phy->dev->muru_debug)
+ mt7915_mcu_muru_debug_get(phy);
+ }
+
+ mutex_unlock(&mphy->dev->mutex);
+
+ mt76_tx_status_check(mphy->dev, false);
+
+ ieee80211_queue_delayed_work(mphy->hw, &mphy->mac_work,
+ MT7915_WATCHDOG_TIME);
+}
+
+static void mt7915_dfs_stop_radar_detector(struct mt7915_phy *phy)
+{
+ struct mt7915_dev *dev = phy->dev;
+
+ if (phy->rdd_state & BIT(0))
+ mt76_connac_mcu_rdd_cmd(&dev->mt76, RDD_STOP, 0,
+ MT_RX_SEL0, 0);
+ if (phy->rdd_state & BIT(1))
+ mt76_connac_mcu_rdd_cmd(&dev->mt76, RDD_STOP, 1,
+ MT_RX_SEL0, 0);
+}
+
+static int mt7915_dfs_start_rdd(struct mt7915_dev *dev, int chain)
+{
+ int err, region;
+
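+	/* map the cfg80211 DFS region to the region value used by the RDD command */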
+ switch (dev->mt76.region) {
+ case NL80211_DFS_ETSI:
+ region = 0;
+ break;
+ case NL80211_DFS_JP:
+ region = 2;
+ break;
+ case NL80211_DFS_FCC:
+ default:
+ region = 1;
+ break;
+ }
+
+ err = mt76_connac_mcu_rdd_cmd(&dev->mt76, RDD_START, chain,
+ MT_RX_SEL0, region);
+ if (err < 0)
+ return err;
+
+ if (is_mt7915(&dev->mt76)) {
+ err = mt76_connac_mcu_rdd_cmd(&dev->mt76, RDD_SET_WF_ANT, chain,
+ 0, dev->dbdc_support ? 2 : 0);
+ if (err < 0)
+ return err;
+ }
+
+ return mt76_connac_mcu_rdd_cmd(&dev->mt76, RDD_DET_MODE, chain,
+ MT_RX_SEL0, 1);
+}
+
+static int mt7915_dfs_start_radar_detector(struct mt7915_phy *phy)
+{
+ struct cfg80211_chan_def *chandef = &phy->mt76->chandef;
+ struct mt7915_dev *dev = phy->dev;
+ int err;
+
+ /* start CAC */
+ err = mt76_connac_mcu_rdd_cmd(&dev->mt76, RDD_CAC_START,
+ phy->mt76->band_idx, MT_RX_SEL0, 0);
+ if (err < 0)
+ return err;
+
+ err = mt7915_dfs_start_rdd(dev, phy->mt76->band_idx);
+ if (err < 0)
+ return err;
+
+ phy->rdd_state |= BIT(phy->mt76->band_idx);
+
+ if (!is_mt7915(&dev->mt76))
+ return 0;
+
+ if (chandef->width == NL80211_CHAN_WIDTH_160 ||
+ chandef->width == NL80211_CHAN_WIDTH_80P80) {
+ err = mt7915_dfs_start_rdd(dev, 1);
+ if (err < 0)
+ return err;
+
+ phy->rdd_state |= BIT(1);
+ }
+
+ return 0;
+}
+
+static int
+mt7915_dfs_init_radar_specs(struct mt7915_phy *phy)
+{
+ const struct mt7915_dfs_radar_spec *radar_specs;
+ struct mt7915_dev *dev = phy->dev;
+ int err, i;
+
+ switch (dev->mt76.region) {
+ case NL80211_DFS_FCC:
+ radar_specs = &fcc_radar_specs;
+ err = mt7915_mcu_set_fcc5_lpn(dev, 8);
+ if (err < 0)
+ return err;
+ break;
+ case NL80211_DFS_ETSI:
+ radar_specs = &etsi_radar_specs;
+ break;
+ case NL80211_DFS_JP:
+ radar_specs = &jp_radar_specs;
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ for (i = 0; i < ARRAY_SIZE(radar_specs->radar_pattern); i++) {
+ err = mt7915_mcu_set_radar_th(dev, i,
+ &radar_specs->radar_pattern[i]);
+ if (err < 0)
+ return err;
+ }
+
+ return mt7915_mcu_set_pulse_th(dev, &radar_specs->pulse_th);
+}
+
+int mt7915_dfs_init_radar_detector(struct mt7915_phy *phy)
+{
+ struct mt7915_dev *dev = phy->dev;
+ enum mt76_dfs_state dfs_state, prev_state;
+ int err;
+
+ prev_state = phy->mt76->dfs_state;
+ dfs_state = mt76_phy_dfs_state(phy->mt76);
+
+ if (prev_state == dfs_state)
+ return 0;
+
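+	/* coming from an unknown state: stop any stale radar detector first */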
+ if (prev_state == MT_DFS_STATE_UNKNOWN)
+ mt7915_dfs_stop_radar_detector(phy);
+
+ if (dfs_state == MT_DFS_STATE_DISABLED)
+ goto stop;
+
+ if (prev_state <= MT_DFS_STATE_DISABLED) {
+ err = mt7915_dfs_init_radar_specs(phy);
+ if (err < 0)
+ return err;
+
+ err = mt7915_dfs_start_radar_detector(phy);
+ if (err < 0)
+ return err;
+
+ phy->mt76->dfs_state = MT_DFS_STATE_CAC;
+ }
+
+ if (dfs_state == MT_DFS_STATE_CAC)
+ return 0;
+
+ err = mt76_connac_mcu_rdd_cmd(&dev->mt76, RDD_CAC_END,
+ phy->mt76->band_idx, MT_RX_SEL0, 0);
+ if (err < 0) {
+ phy->mt76->dfs_state = MT_DFS_STATE_UNKNOWN;
+ return err;
+ }
+
+ phy->mt76->dfs_state = MT_DFS_STATE_ACTIVE;
+ return 0;
+
+stop:
+ err = mt76_connac_mcu_rdd_cmd(&dev->mt76, RDD_NORMAL_START,
+ phy->mt76->band_idx, MT_RX_SEL0, 0);
+ if (err < 0)
+ return err;
+
+ if (is_mt7915(&dev->mt76)) {
+ err = mt76_connac_mcu_rdd_cmd(&dev->mt76, RDD_SET_WF_ANT,
+ phy->mt76->band_idx, 0,
+ dev->dbdc_support ? 2 : 0);
+ if (err < 0)
+ return err;
+ }
+
+ mt7915_dfs_stop_radar_detector(phy);
+ phy->mt76->dfs_state = MT_DFS_STATE_DISABLED;
+
+ return 0;
+}
+
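+/* convert a TWT duration from units of 256us to microseconds */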
+static int
+mt7915_mac_twt_duration_align(int duration)
+{
+ return duration << 8;
+}
+
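+/* insert @flow into the TWT schedule list, ordered by start TSF, and
+ * return the start TSF assigned to the new flow
+ */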
+static u64
+mt7915_mac_twt_sched_list_add(struct mt7915_dev *dev,
+ struct mt7915_twt_flow *flow)
+{
+ struct mt7915_twt_flow *iter, *iter_next;
+ u32 duration = flow->duration << 8;
+ u64 start_tsf;
+
+ iter = list_first_entry_or_null(&dev->twt_list,
+ struct mt7915_twt_flow, list);
+ if (!iter || !iter->sched || iter->start_tsf > duration) {
+ /* add flow as first entry in the list */
+ list_add(&flow->list, &dev->twt_list);
+ return 0;
+ }
+
+ list_for_each_entry_safe(iter, iter_next, &dev->twt_list, list) {
+ start_tsf = iter->start_tsf +
+ mt7915_mac_twt_duration_align(iter->duration);
+ if (list_is_last(&iter->list, &dev->twt_list))
+ break;
+
+ if (!iter_next->sched ||
+ iter_next->start_tsf > start_tsf + duration) {
+ list_add(&flow->list, &iter->list);
+ goto out;
+ }
+ }
+
+ /* add flow as last entry in the list */
+ list_add_tail(&flow->list, &dev->twt_list);
+out:
+ return start_tsf;
+}
+
+static int mt7915_mac_check_twt_req(struct ieee80211_twt_setup *twt)
+{
+ struct ieee80211_twt_params *twt_agrt;
+ u64 interval, duration;
+ u16 mantissa;
+ u8 exp;
+
+ /* only individual agreement supported */
+ if (twt->control & IEEE80211_TWT_CONTROL_NEG_TYPE_BROADCAST)
+ return -EOPNOTSUPP;
+
+ /* only 256us unit supported */
+ if (twt->control & IEEE80211_TWT_CONTROL_WAKE_DUR_UNIT)
+ return -EOPNOTSUPP;
+
+ twt_agrt = (struct ieee80211_twt_params *)twt->params;
+
+ /* explicit agreement not supported */
+ if (!(twt_agrt->req_type & cpu_to_le16(IEEE80211_TWT_REQTYPE_IMPLICIT)))
+ return -EOPNOTSUPP;
+
+ exp = FIELD_GET(IEEE80211_TWT_REQTYPE_WAKE_INT_EXP,
+ le16_to_cpu(twt_agrt->req_type));
+ mantissa = le16_to_cpu(twt_agrt->mantissa);
+ duration = twt_agrt->min_twt_dur << 8;
+
+ interval = (u64)mantissa << exp;
+ if (interval < duration)
+ return -EOPNOTSUPP;
+
+ return 0;
+}
+
+static bool
+mt7915_mac_twt_param_equal(struct mt7915_sta *msta,
+ struct ieee80211_twt_params *twt_agrt)
+{
+ u16 type = le16_to_cpu(twt_agrt->req_type);
+ u8 exp;
+ int i;
+
+ exp = FIELD_GET(IEEE80211_TWT_REQTYPE_WAKE_INT_EXP, type);
+ for (i = 0; i < MT7915_MAX_STA_TWT_AGRT; i++) {
+ struct mt7915_twt_flow *f;
+
+ if (!(msta->twt.flowid_mask & BIT(i)))
+ continue;
+
+ f = &msta->twt.flow[i];
+ if (f->duration == twt_agrt->min_twt_dur &&
+ f->mantissa == twt_agrt->mantissa &&
+ f->exp == exp &&
+ f->protection == !!(type & IEEE80211_TWT_REQTYPE_PROTECTION) &&
+ f->flowtype == !!(type & IEEE80211_TWT_REQTYPE_FLOWTYPE) &&
+ f->trigger == !!(type & IEEE80211_TWT_REQTYPE_TRIGGER))
+ return true;
+ }
+
+ return false;
+}
+
+void mt7915_mac_add_twt_setup(struct ieee80211_hw *hw,
+ struct ieee80211_sta *sta,
+ struct ieee80211_twt_setup *twt)
+{
+ enum ieee80211_twt_setup_cmd setup_cmd = TWT_SETUP_CMD_REJECT;
+ struct mt7915_sta *msta = (struct mt7915_sta *)sta->drv_priv;
+ struct ieee80211_twt_params *twt_agrt = (void *)twt->params;
+ u16 req_type = le16_to_cpu(twt_agrt->req_type);
+ enum ieee80211_twt_setup_cmd sta_setup_cmd;
+ struct mt7915_dev *dev = mt7915_hw_dev(hw);
+ struct mt7915_twt_flow *flow;
+ int flowid, table_id;
+ u8 exp;
+
+ if (mt7915_mac_check_twt_req(twt))
+ goto out;
+
+ mutex_lock(&dev->mt76.mutex);
+
+ if (dev->twt.n_agrt == MT7915_MAX_TWT_AGRT)
+ goto unlock;
+
+ if (hweight8(msta->twt.flowid_mask) == ARRAY_SIZE(msta->twt.flow))
+ goto unlock;
+
+ if (twt_agrt->min_twt_dur < MT7915_MIN_TWT_DUR) {
+ setup_cmd = TWT_SETUP_CMD_DICTATE;
+ twt_agrt->min_twt_dur = MT7915_MIN_TWT_DUR;
+ goto unlock;
+ }
+
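+	/* pick the first unused flow id and report it back in the request */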
+ flowid = ffs(~msta->twt.flowid_mask) - 1;
+ twt_agrt->req_type &= ~cpu_to_le16(IEEE80211_TWT_REQTYPE_FLOWID);
+ twt_agrt->req_type |= le16_encode_bits(flowid,
+ IEEE80211_TWT_REQTYPE_FLOWID);
+
+ table_id = ffs(~dev->twt.table_mask) - 1;
+ exp = FIELD_GET(IEEE80211_TWT_REQTYPE_WAKE_INT_EXP, req_type);
+ sta_setup_cmd = FIELD_GET(IEEE80211_TWT_REQTYPE_SETUP_CMD, req_type);
+
+ if (mt7915_mac_twt_param_equal(msta, twt_agrt))
+ goto unlock;
+
+ flow = &msta->twt.flow[flowid];
+ memset(flow, 0, sizeof(*flow));
+ INIT_LIST_HEAD(&flow->list);
+ flow->wcid = msta->wcid.idx;
+ flow->table_id = table_id;
+ flow->id = flowid;
+ flow->duration = twt_agrt->min_twt_dur;
+ flow->mantissa = twt_agrt->mantissa;
+ flow->exp = exp;
+ flow->protection = !!(req_type & IEEE80211_TWT_REQTYPE_PROTECTION);
+ flow->flowtype = !!(req_type & IEEE80211_TWT_REQTYPE_FLOWTYPE);
+ flow->trigger = !!(req_type & IEEE80211_TWT_REQTYPE_TRIGGER);
+
+ if (sta_setup_cmd == TWT_SETUP_CMD_REQUEST ||
+ sta_setup_cmd == TWT_SETUP_CMD_SUGGEST) {
+ u64 interval = (u64)le16_to_cpu(twt_agrt->mantissa) << exp;
+ u64 flow_tsf, curr_tsf;
+ u32 rem;
+
+ flow->sched = true;
+ flow->start_tsf = mt7915_mac_twt_sched_list_add(dev, flow);
+ curr_tsf = __mt7915_get_tsf(hw, msta->vif);
+ div_u64_rem(curr_tsf - flow->start_tsf, interval, &rem);
+ flow_tsf = curr_tsf + interval - rem;
+ twt_agrt->twt = cpu_to_le64(flow_tsf);
+ } else {
+ list_add_tail(&flow->list, &dev->twt_list);
+ }
+ flow->tsf = le64_to_cpu(twt_agrt->twt);
+
+ if (mt7915_mcu_twt_agrt_update(dev, msta->vif, flow, MCU_TWT_AGRT_ADD))
+ goto unlock;
+
+ setup_cmd = TWT_SETUP_CMD_ACCEPT;
+ dev->twt.table_mask |= BIT(table_id);
+ msta->twt.flowid_mask |= BIT(flowid);
+ dev->twt.n_agrt++;
+
+unlock:
+ mutex_unlock(&dev->mt76.mutex);
+out:
+ twt_agrt->req_type &= ~cpu_to_le16(IEEE80211_TWT_REQTYPE_SETUP_CMD);
+ twt_agrt->req_type |=
+ le16_encode_bits(setup_cmd, IEEE80211_TWT_REQTYPE_SETUP_CMD);
+ twt->control = (twt->control & IEEE80211_TWT_CONTROL_WAKE_DUR_UNIT) |
+ (twt->control & IEEE80211_TWT_CONTROL_RX_DISABLED);
+}
+
+void mt7915_mac_twt_teardown_flow(struct mt7915_dev *dev,
+ struct mt7915_sta *msta,
+ u8 flowid)
+{
+ struct mt7915_twt_flow *flow;
+
+ lockdep_assert_held(&dev->mt76.mutex);
+
+ if (flowid >= ARRAY_SIZE(msta->twt.flow))
+ return;
+
+ if (!(msta->twt.flowid_mask & BIT(flowid)))
+ return;
+
+ flow = &msta->twt.flow[flowid];
+ if (mt7915_mcu_twt_agrt_update(dev, msta->vif, flow,
+ MCU_TWT_AGRT_DELETE))
+ return;
+
+ list_del_init(&flow->list);
+ msta->twt.flowid_mask &= ~BIT(flowid);
+ dev->twt.table_mask &= ~BIT(flow->table_id);
+ dev->twt.n_agrt--;
+}
diff --git a/drivers/net/wireless/mediatek/mt76/mt7915/mac.h b/drivers/net/wireless/mediatek/mt76/mt7915/mac.h
new file mode 100644
index 0000000000..448b1b3801
--- /dev/null
+++ b/drivers/net/wireless/mediatek/mt76/mt7915/mac.h
@@ -0,0 +1,75 @@
+/* SPDX-License-Identifier: ISC */
+/* Copyright (C) 2020 MediaTek Inc. */
+
+#ifndef __MT7915_MAC_H
+#define __MT7915_MAC_H
+
+#include "../mt76_connac2_mac.h"
+
+#define MT_TX_FREE_VER GENMASK(18, 16)
+#define MT_TX_FREE_MSDU_CNT_V0 GENMASK(6, 0)
+/* 0: success, others: dropped */
+#define MT_TX_FREE_COUNT GENMASK(12, 0)
+#define MT_TX_FREE_COUNT_V3 GENMASK(27, 24)
+#define MT_TX_FREE_STAT GENMASK(14, 13)
+#define MT_TX_FREE_STAT_V3 GENMASK(29, 28)
+#define MT_TX_FREE_MPDU_HEADER BIT(15)
+#define MT_TX_FREE_MPDU_HEADER_V3 BIT(30)
+#define MT_TX_FREE_MSDU_ID_V3 GENMASK(14, 0)
+
+#define MT_TXS5_F0_FINAL_MPDU BIT(31)
+#define MT_TXS5_F0_QOS BIT(30)
+#define MT_TXS5_F0_TX_COUNT GENMASK(29, 25)
+#define MT_TXS5_F0_FRONT_TIME GENMASK(24, 0)
+#define MT_TXS5_F1_MPDU_TX_COUNT GENMASK(31, 24)
+#define MT_TXS5_F1_MPDU_TX_BYTES GENMASK(23, 0)
+
+#define MT_TXS6_F0_NOISE_3 GENMASK(31, 24)
+#define MT_TXS6_F0_NOISE_2 GENMASK(23, 16)
+#define MT_TXS6_F0_NOISE_1 GENMASK(15, 8)
+#define MT_TXS6_F0_NOISE_0 GENMASK(7, 0)
+#define MT_TXS6_F1_MPDU_FAIL_COUNT GENMASK(31, 24)
+#define MT_TXS6_F1_MPDU_FAIL_BYTES GENMASK(23, 0)
+
+#define MT_TXS7_F0_RCPI_3 GENMASK(31, 24)
+#define MT_TXS7_F0_RCPI_2 GENMASK(23, 16)
+#define MT_TXS7_F0_RCPI_1 GENMASK(15, 8)
+#define MT_TXS7_F0_RCPI_0 GENMASK(7, 0)
+#define MT_TXS7_F1_MPDU_RETRY_COUNT GENMASK(31, 24)
+#define MT_TXS7_F1_MPDU_RETRY_BYTES GENMASK(23, 0)
+
+struct mt7915_dfs_pulse {
+ u32 max_width; /* us */
+ int max_pwr; /* dbm */
+ int min_pwr; /* dbm */
+ u32 min_stgr_pri; /* us */
+ u32 max_stgr_pri; /* us */
+ u32 min_cr_pri; /* us */
+ u32 max_cr_pri; /* us */
+};
+
+struct mt7915_dfs_pattern {
+ u8 enb;
+ u8 stgr;
+ u8 min_crpn;
+ u8 max_crpn;
+ u8 min_crpr;
+ u8 min_pw;
+ u32 min_pri;
+ u32 max_pri;
+ u8 max_pw;
+ u8 min_crbn;
+ u8 max_crbn;
+ u8 min_stgpn;
+ u8 max_stgpn;
+ u8 min_stgpr;
+ u8 rsv[2];
+ u32 min_stgpr_diff;
+} __packed;
+
+struct mt7915_dfs_radar_spec {
+ struct mt7915_dfs_pulse pulse_th;
+ struct mt7915_dfs_pattern radar_pattern[16];
+};
+
+#endif
diff --git a/drivers/net/wireless/mediatek/mt76/mt7915/main.c b/drivers/net/wireless/mediatek/mt76/mt7915/main.c
new file mode 100644
index 0000000000..3196f56cdf
--- /dev/null
+++ b/drivers/net/wireless/mediatek/mt76/mt7915/main.c
@@ -0,0 +1,1700 @@
+// SPDX-License-Identifier: ISC
+/* Copyright (C) 2020 MediaTek Inc. */
+
+#include <linux/etherdevice.h>
+#include <linux/platform_device.h>
+#include <linux/pci.h>
+#include <linux/module.h>
+#include "mt7915.h"
+#include "mcu.h"
+
+static bool mt7915_dev_running(struct mt7915_dev *dev)
+{
+ struct mt7915_phy *phy;
+
+ if (test_bit(MT76_STATE_RUNNING, &dev->mphy.state))
+ return true;
+
+ phy = mt7915_ext_phy(dev);
+
+ return phy && test_bit(MT76_STATE_RUNNING, &phy->mt76->state);
+}
+
+int mt7915_run(struct ieee80211_hw *hw)
+{
+ struct mt7915_dev *dev = mt7915_hw_dev(hw);
+ struct mt7915_phy *phy = mt7915_hw_phy(hw);
+ bool running;
+ int ret;
+
+ running = mt7915_dev_running(dev);
+
+ if (!running) {
+ ret = mt76_connac_mcu_set_pm(&dev->mt76,
+ dev->phy.mt76->band_idx, 0);
+ if (ret)
+ goto out;
+
+ ret = mt7915_mcu_set_mac(dev, dev->phy.mt76->band_idx,
+ true, true);
+ if (ret)
+ goto out;
+
+ mt7915_mac_enable_nf(dev, dev->phy.mt76->band_idx);
+ }
+
+ if (phy != &dev->phy) {
+ ret = mt76_connac_mcu_set_pm(&dev->mt76,
+ phy->mt76->band_idx, 0);
+ if (ret)
+ goto out;
+
+ ret = mt7915_mcu_set_mac(dev, phy->mt76->band_idx,
+ true, true);
+ if (ret)
+ goto out;
+
+ mt7915_mac_enable_nf(dev, phy->mt76->band_idx);
+ }
+
+ ret = mt7915_mcu_set_thermal_throttling(phy,
+ MT7915_THERMAL_THROTTLE_MAX);
+
+ if (ret)
+ goto out;
+
+ ret = mt7915_mcu_set_thermal_protect(phy);
+
+ if (ret)
+ goto out;
+
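+	/* 0x92b = 2347 bytes, the default RTS threshold */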
+ ret = mt76_connac_mcu_set_rts_thresh(&dev->mt76, 0x92b,
+ phy->mt76->band_idx);
+ if (ret)
+ goto out;
+
+ ret = mt7915_mcu_set_sku_en(phy, true);
+ if (ret)
+ goto out;
+
+ ret = mt7915_mcu_set_chan_info(phy, MCU_EXT_CMD(SET_RX_PATH));
+ if (ret)
+ goto out;
+
+ set_bit(MT76_STATE_RUNNING, &phy->mt76->state);
+
+ if (!mt76_testmode_enabled(phy->mt76))
+ ieee80211_queue_delayed_work(hw, &phy->mt76->mac_work,
+ MT7915_WATCHDOG_TIME);
+
+ if (!running)
+ mt7915_mac_reset_counters(phy);
+
+out:
+ return ret;
+}
+
+static int mt7915_start(struct ieee80211_hw *hw)
+{
+ struct mt7915_dev *dev = mt7915_hw_dev(hw);
+ int ret;
+
+ flush_work(&dev->init_work);
+
+ mutex_lock(&dev->mt76.mutex);
+ ret = mt7915_run(hw);
+ mutex_unlock(&dev->mt76.mutex);
+
+ return ret;
+}
+
+static void mt7915_stop(struct ieee80211_hw *hw)
+{
+ struct mt7915_dev *dev = mt7915_hw_dev(hw);
+ struct mt7915_phy *phy = mt7915_hw_phy(hw);
+
+ cancel_delayed_work_sync(&phy->mt76->mac_work);
+
+ mutex_lock(&dev->mt76.mutex);
+
+ mt76_testmode_reset(phy->mt76, true);
+
+ clear_bit(MT76_STATE_RUNNING, &phy->mt76->state);
+
+ if (phy != &dev->phy) {
+ mt76_connac_mcu_set_pm(&dev->mt76, phy->mt76->band_idx, 1);
+ mt7915_mcu_set_mac(dev, phy->mt76->band_idx, false, false);
+ }
+
+ if (!mt7915_dev_running(dev)) {
+ mt76_connac_mcu_set_pm(&dev->mt76, dev->phy.mt76->band_idx, 1);
+ mt7915_mcu_set_mac(dev, dev->phy.mt76->band_idx, false, false);
+ }
+
+ mutex_unlock(&dev->mt76.mutex);
+}
+
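+/* return the 1-based index of the first free bit in [start, end], or 0 if none */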
+static inline int get_free_idx(u32 mask, u8 start, u8 end)
+{
+ return ffs(~mask & GENMASK(end, start));
+}
+
+static int get_omac_idx(enum nl80211_iftype type, u64 mask)
+{
+ int i;
+
+ switch (type) {
+ case NL80211_IFTYPE_MESH_POINT:
+ case NL80211_IFTYPE_ADHOC:
+ case NL80211_IFTYPE_STATION:
+ /* prefer hw bssid slot 1-3 */
+ i = get_free_idx(mask, HW_BSSID_1, HW_BSSID_3);
+ if (i)
+ return i - 1;
+
+ if (type != NL80211_IFTYPE_STATION)
+ break;
+
+ i = get_free_idx(mask, EXT_BSSID_1, EXT_BSSID_MAX);
+ if (i)
+ return i - 1;
+
+ if (~mask & BIT(HW_BSSID_0))
+ return HW_BSSID_0;
+
+ break;
+ case NL80211_IFTYPE_MONITOR:
+ case NL80211_IFTYPE_AP:
+ /* ap uses hw bssid 0 and ext bssid */
+ if (~mask & BIT(HW_BSSID_0))
+ return HW_BSSID_0;
+
+ i = get_free_idx(mask, EXT_BSSID_1, EXT_BSSID_MAX);
+ if (i)
+ return i - 1;
+
+ break;
+ default:
+ WARN_ON(1);
+ break;
+ }
+
+ return -1;
+}
+
+static void mt7915_init_bitrate_mask(struct ieee80211_vif *vif)
+{
+ struct mt7915_vif *mvif = (struct mt7915_vif *)vif->drv_priv;
+ int i;
+
+ for (i = 0; i < ARRAY_SIZE(mvif->bitrate_mask.control); i++) {
+ mvif->bitrate_mask.control[i].gi = NL80211_TXRATE_DEFAULT_GI;
+ mvif->bitrate_mask.control[i].he_gi = 0xff;
+ mvif->bitrate_mask.control[i].he_ltf = 0xff;
+ mvif->bitrate_mask.control[i].legacy = GENMASK(31, 0);
+ memset(mvif->bitrate_mask.control[i].ht_mcs, 0xff,
+ sizeof(mvif->bitrate_mask.control[i].ht_mcs));
+ memset(mvif->bitrate_mask.control[i].vht_mcs, 0xff,
+ sizeof(mvif->bitrate_mask.control[i].vht_mcs));
+ memset(mvif->bitrate_mask.control[i].he_mcs, 0xff,
+ sizeof(mvif->bitrate_mask.control[i].he_mcs));
+ }
+}
+
+static int mt7915_add_interface(struct ieee80211_hw *hw,
+ struct ieee80211_vif *vif)
+{
+ struct mt7915_vif *mvif = (struct mt7915_vif *)vif->drv_priv;
+ struct mt7915_dev *dev = mt7915_hw_dev(hw);
+ struct mt7915_phy *phy = mt7915_hw_phy(hw);
+ struct mt76_txq *mtxq;
+ bool ext_phy = phy != &dev->phy;
+ int idx, ret = 0;
+
+ mutex_lock(&dev->mt76.mutex);
+
+ mt76_testmode_reset(phy->mt76, true);
+
+ if (vif->type == NL80211_IFTYPE_MONITOR &&
+ is_zero_ether_addr(vif->addr))
+ phy->monitor_vif = vif;
+
+ mvif->mt76.idx = __ffs64(~dev->mt76.vif_mask);
+ if (mvif->mt76.idx >= (MT7915_MAX_INTERFACES << dev->dbdc_support)) {
+ ret = -ENOSPC;
+ goto out;
+ }
+
+ idx = get_omac_idx(vif->type, phy->omac_mask);
+ if (idx < 0) {
+ ret = -ENOSPC;
+ goto out;
+ }
+ mvif->mt76.omac_idx = idx;
+ mvif->phy = phy;
+ mvif->mt76.band_idx = phy->mt76->band_idx;
+
+ mvif->mt76.wmm_idx = vif->type != NL80211_IFTYPE_AP;
+ if (ext_phy)
+ mvif->mt76.wmm_idx += 2;
+
+ ret = mt7915_mcu_add_dev_info(phy, vif, true);
+ if (ret)
+ goto out;
+
+ dev->mt76.vif_mask |= BIT_ULL(mvif->mt76.idx);
+ phy->omac_mask |= BIT_ULL(mvif->mt76.omac_idx);
+
+ idx = MT7915_WTBL_RESERVED - mvif->mt76.idx;
+
+ INIT_LIST_HEAD(&mvif->sta.rc_list);
+ INIT_LIST_HEAD(&mvif->sta.wcid.poll_list);
+ mvif->sta.wcid.idx = idx;
+ mvif->sta.wcid.phy_idx = ext_phy;
+ mvif->sta.wcid.hw_key_idx = -1;
+ mvif->sta.wcid.tx_info |= MT_WCID_TX_INFO_SET;
+ mt76_packet_id_init(&mvif->sta.wcid);
+
+ mt7915_mac_wtbl_update(dev, idx,
+ MT_WTBL_UPDATE_ADM_COUNT_CLEAR);
+
+ if (vif->txq) {
+ mtxq = (struct mt76_txq *)vif->txq->drv_priv;
+ mtxq->wcid = idx;
+ }
+
+ if (vif->type != NL80211_IFTYPE_AP &&
+ (!mvif->mt76.omac_idx || mvif->mt76.omac_idx > 3))
+ vif->offload_flags = 0;
+ vif->offload_flags |= IEEE80211_OFFLOAD_ENCAP_4ADDR;
+
+ mt7915_init_bitrate_mask(vif);
+ memset(&mvif->cap, -1, sizeof(mvif->cap));
+
+ mt7915_mcu_add_bss_info(phy, vif, true);
+ mt7915_mcu_add_sta(dev, vif, NULL, true);
+ rcu_assign_pointer(dev->mt76.wcid[idx], &mvif->sta.wcid);
+
+out:
+ mutex_unlock(&dev->mt76.mutex);
+
+ return ret;
+}
+
+static void mt7915_remove_interface(struct ieee80211_hw *hw,
+ struct ieee80211_vif *vif)
+{
+ struct mt7915_vif *mvif = (struct mt7915_vif *)vif->drv_priv;
+ struct mt7915_sta *msta = &mvif->sta;
+ struct mt7915_dev *dev = mt7915_hw_dev(hw);
+ struct mt7915_phy *phy = mt7915_hw_phy(hw);
+ int idx = msta->wcid.idx;
+
+ mt7915_mcu_add_bss_info(phy, vif, false);
+ mt7915_mcu_add_sta(dev, vif, NULL, false);
+
+ mutex_lock(&dev->mt76.mutex);
+ mt76_testmode_reset(phy->mt76, true);
+ mutex_unlock(&dev->mt76.mutex);
+
+ if (vif == phy->monitor_vif)
+ phy->monitor_vif = NULL;
+
+ mt7915_mcu_add_dev_info(phy, vif, false);
+
+ rcu_assign_pointer(dev->mt76.wcid[idx], NULL);
+
+ mutex_lock(&dev->mt76.mutex);
+ dev->mt76.vif_mask &= ~BIT_ULL(mvif->mt76.idx);
+ phy->omac_mask &= ~BIT_ULL(mvif->mt76.omac_idx);
+ mutex_unlock(&dev->mt76.mutex);
+
+ spin_lock_bh(&dev->mt76.sta_poll_lock);
+ if (!list_empty(&msta->wcid.poll_list))
+ list_del_init(&msta->wcid.poll_list);
+ spin_unlock_bh(&dev->mt76.sta_poll_lock);
+
+ mt76_packet_id_flush(&dev->mt76, &msta->wcid);
+}
+
+int mt7915_set_channel(struct mt7915_phy *phy)
+{
+ struct mt7915_dev *dev = phy->dev;
+ int ret;
+
+ cancel_delayed_work_sync(&phy->mt76->mac_work);
+
+ mutex_lock(&dev->mt76.mutex);
+ set_bit(MT76_RESET, &phy->mt76->state);
+
+ mt76_set_channel(phy->mt76);
+
+ if (dev->flash_mode) {
+ ret = mt7915_mcu_apply_tx_dpd(phy);
+ if (ret)
+ goto out;
+ }
+
+ ret = mt7915_mcu_set_chan_info(phy, MCU_EXT_CMD(CHANNEL_SWITCH));
+ if (ret)
+ goto out;
+
+ mt7915_mac_set_timing(phy);
+ ret = mt7915_dfs_init_radar_detector(phy);
+ mt7915_mac_cca_stats_reset(phy);
+
+ mt7915_mac_reset_counters(phy);
+ phy->noise = 0;
+
+out:
+ clear_bit(MT76_RESET, &phy->mt76->state);
+ mutex_unlock(&dev->mt76.mutex);
+
+ mt76_txq_schedule_all(phy->mt76);
+
+ if (!mt76_testmode_enabled(phy->mt76))
+ ieee80211_queue_delayed_work(phy->mt76->hw,
+ &phy->mt76->mac_work,
+ MT7915_WATCHDOG_TIME);
+
+ return ret;
+}
+
+static int mt7915_set_key(struct ieee80211_hw *hw, enum set_key_cmd cmd,
+ struct ieee80211_vif *vif, struct ieee80211_sta *sta,
+ struct ieee80211_key_conf *key)
+{
+ struct mt7915_dev *dev = mt7915_hw_dev(hw);
+ struct mt7915_phy *phy = mt7915_hw_phy(hw);
+ struct mt7915_vif *mvif = (struct mt7915_vif *)vif->drv_priv;
+ struct mt7915_sta *msta = sta ? (struct mt7915_sta *)sta->drv_priv :
+ &mvif->sta;
+ struct mt76_wcid *wcid = &msta->wcid;
+ u8 *wcid_keyidx = &wcid->hw_key_idx;
+ int idx = key->keyidx;
+ int err = 0;
+
+	/* The hardware does not support per-STA RX GTK; fall back
+	 * to software mode for these.
+	 */
+ if ((vif->type == NL80211_IFTYPE_ADHOC ||
+ vif->type == NL80211_IFTYPE_MESH_POINT) &&
+ (key->cipher == WLAN_CIPHER_SUITE_TKIP ||
+ key->cipher == WLAN_CIPHER_SUITE_CCMP) &&
+ !(key->flags & IEEE80211_KEY_FLAG_PAIRWISE))
+ return -EOPNOTSUPP;
+
+ /* fall back to sw encryption for unsupported ciphers */
+ switch (key->cipher) {
+ case WLAN_CIPHER_SUITE_AES_CMAC:
+ wcid_keyidx = &wcid->hw_key_idx2;
+ key->flags |= IEEE80211_KEY_FLAG_GENERATE_MMIE;
+ break;
+ case WLAN_CIPHER_SUITE_TKIP:
+ case WLAN_CIPHER_SUITE_CCMP:
+ case WLAN_CIPHER_SUITE_CCMP_256:
+ case WLAN_CIPHER_SUITE_GCMP:
+ case WLAN_CIPHER_SUITE_GCMP_256:
+ case WLAN_CIPHER_SUITE_SMS4:
+ break;
+ case WLAN_CIPHER_SUITE_WEP40:
+ case WLAN_CIPHER_SUITE_WEP104:
+ default:
+ return -EOPNOTSUPP;
+ }
+
+ mutex_lock(&dev->mt76.mutex);
+
+ if (cmd == SET_KEY && !sta && !mvif->mt76.cipher) {
+ mvif->mt76.cipher = mt76_connac_mcu_get_cipher(key->cipher);
+ mt7915_mcu_add_bss_info(phy, vif, true);
+ }
+
+ if (cmd == SET_KEY) {
+ *wcid_keyidx = idx;
+ } else {
+ if (idx == *wcid_keyidx)
+ *wcid_keyidx = -1;
+ goto out;
+ }
+
+ mt76_wcid_key_setup(&dev->mt76, wcid, key);
+ err = mt76_connac_mcu_add_key(&dev->mt76, vif, &msta->bip,
+ key, MCU_EXT_CMD(STA_REC_UPDATE),
+ &msta->wcid, cmd);
+out:
+ mutex_unlock(&dev->mt76.mutex);
+
+ return err;
+}
+
+static int mt7915_set_sar_specs(struct ieee80211_hw *hw,
+ const struct cfg80211_sar_specs *sar)
+{
+ struct mt7915_phy *phy = mt7915_hw_phy(hw);
+ struct mt7915_dev *dev = mt7915_hw_dev(hw);
+ int err = -EINVAL;
+
+ mutex_lock(&dev->mt76.mutex);
+ if (!cfg80211_chandef_valid(&phy->mt76->chandef))
+ goto out;
+
+ err = mt76_init_sar_power(hw, sar);
+ if (err)
+ goto out;
+
+ err = mt7915_mcu_set_txpower_sku(phy);
+out:
+ mutex_unlock(&dev->mt76.mutex);
+
+ return err;
+}
+
+static int mt7915_config(struct ieee80211_hw *hw, u32 changed)
+{
+ struct mt7915_dev *dev = mt7915_hw_dev(hw);
+ struct mt7915_phy *phy = mt7915_hw_phy(hw);
+ int ret;
+
+ if (changed & IEEE80211_CONF_CHANGE_CHANNEL) {
+#ifdef CONFIG_NL80211_TESTMODE
+ if (phy->mt76->test.state != MT76_TM_STATE_OFF) {
+ mutex_lock(&dev->mt76.mutex);
+ mt76_testmode_reset(phy->mt76, false);
+ mutex_unlock(&dev->mt76.mutex);
+ }
+#endif
+ ieee80211_stop_queues(hw);
+ ret = mt7915_set_channel(phy);
+ if (ret)
+ return ret;
+ ieee80211_wake_queues(hw);
+ }
+
+ if (changed & (IEEE80211_CONF_CHANGE_POWER |
+ IEEE80211_CONF_CHANGE_CHANNEL)) {
+ ret = mt7915_mcu_set_txpower_sku(phy);
+ if (ret)
+ return ret;
+ }
+
+ mutex_lock(&dev->mt76.mutex);
+
+ if (changed & IEEE80211_CONF_CHANGE_MONITOR) {
+ bool enabled = !!(hw->conf.flags & IEEE80211_CONF_MONITOR);
+ bool band = phy->mt76->band_idx;
+
+ if (!enabled)
+ phy->rxfilter |= MT_WF_RFCR_DROP_OTHER_UC;
+ else
+ phy->rxfilter &= ~MT_WF_RFCR_DROP_OTHER_UC;
+
+ mt76_rmw_field(dev, MT_DMA_DCR0(band), MT_DMA_DCR0_RXD_G5_EN,
+ enabled);
+ mt76_testmode_reset(phy->mt76, true);
+ mt76_wr(dev, MT_WF_RFCR(band), phy->rxfilter);
+ }
+
+ mutex_unlock(&dev->mt76.mutex);
+
+ return 0;
+}
+
+static int
+mt7915_conf_tx(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
+ unsigned int link_id, u16 queue,
+ const struct ieee80211_tx_queue_params *params)
+{
+ struct mt7915_vif *mvif = (struct mt7915_vif *)vif->drv_priv;
+
+ /* no need to update right away, we'll get BSS_CHANGED_QOS */
+ queue = mt76_connac_lmac_mapping(queue);
+ mvif->queue_params[queue] = *params;
+
+ return 0;
+}
+
+static void mt7915_configure_filter(struct ieee80211_hw *hw,
+ unsigned int changed_flags,
+ unsigned int *total_flags,
+ u64 multicast)
+{
+ struct mt7915_dev *dev = mt7915_hw_dev(hw);
+ struct mt7915_phy *phy = mt7915_hw_phy(hw);
+ bool band = phy->mt76->band_idx;
+ u32 ctl_flags = MT_WF_RFCR1_DROP_ACK |
+ MT_WF_RFCR1_DROP_BF_POLL |
+ MT_WF_RFCR1_DROP_BA |
+ MT_WF_RFCR1_DROP_CFEND |
+ MT_WF_RFCR1_DROP_CFACK;
+ u32 flags = 0;
+
+#define MT76_FILTER(_flag, _hw) do { \
+ flags |= *total_flags & FIF_##_flag; \
+ phy->rxfilter &= ~(_hw); \
+ phy->rxfilter |= !(flags & FIF_##_flag) * (_hw); \
+ } while (0)
+
+ mutex_lock(&dev->mt76.mutex);
+
+ phy->rxfilter &= ~(MT_WF_RFCR_DROP_OTHER_BSS |
+ MT_WF_RFCR_DROP_OTHER_BEACON |
+ MT_WF_RFCR_DROP_FRAME_REPORT |
+ MT_WF_RFCR_DROP_PROBEREQ |
+ MT_WF_RFCR_DROP_MCAST_FILTERED |
+ MT_WF_RFCR_DROP_MCAST |
+ MT_WF_RFCR_DROP_BCAST |
+ MT_WF_RFCR_DROP_DUPLICATE |
+ MT_WF_RFCR_DROP_A2_BSSID |
+ MT_WF_RFCR_DROP_UNWANTED_CTL |
+ MT_WF_RFCR_DROP_STBC_MULTI);
+
+ MT76_FILTER(OTHER_BSS, MT_WF_RFCR_DROP_OTHER_TIM |
+ MT_WF_RFCR_DROP_A3_MAC |
+ MT_WF_RFCR_DROP_A3_BSSID);
+
+ MT76_FILTER(FCSFAIL, MT_WF_RFCR_DROP_FCSFAIL);
+
+ MT76_FILTER(CONTROL, MT_WF_RFCR_DROP_CTS |
+ MT_WF_RFCR_DROP_RTS |
+ MT_WF_RFCR_DROP_CTL_RSV |
+ MT_WF_RFCR_DROP_NDPA);
+
+ *total_flags = flags;
+ mt76_wr(dev, MT_WF_RFCR(band), phy->rxfilter);
+
+ if (*total_flags & FIF_CONTROL)
+ mt76_clear(dev, MT_WF_RFCR1(band), ctl_flags);
+ else
+ mt76_set(dev, MT_WF_RFCR1(band), ctl_flags);
+
+ mutex_unlock(&dev->mt76.mutex);
+}
+
+static void
+mt7915_update_bss_color(struct ieee80211_hw *hw,
+ struct ieee80211_vif *vif,
+ struct cfg80211_he_bss_color *bss_color)
+{
+ struct mt7915_dev *dev = mt7915_hw_dev(hw);
+
+ switch (vif->type) {
+ case NL80211_IFTYPE_AP: {
+ struct mt7915_vif *mvif = (struct mt7915_vif *)vif->drv_priv;
+
+ if (mvif->mt76.omac_idx > HW_BSSID_MAX)
+ return;
+ fallthrough;
+ }
+ case NL80211_IFTYPE_STATION:
+ mt7915_mcu_update_bss_color(dev, vif, bss_color);
+ break;
+ default:
+ break;
+ }
+}
+
+static void mt7915_bss_info_changed(struct ieee80211_hw *hw,
+ struct ieee80211_vif *vif,
+ struct ieee80211_bss_conf *info,
+ u64 changed)
+{
+ struct mt7915_phy *phy = mt7915_hw_phy(hw);
+ struct mt7915_dev *dev = mt7915_hw_dev(hw);
+ int set_bss_info = -1, set_sta = -1;
+
+ mutex_lock(&dev->mt76.mutex);
+
+	/*
+	 * In station mode the BSSID is used to map the wlan entry to a peer,
+	 * and the peer then references bss_info_rfch to set its bandwidth cap.
+	 */
+ if (changed & BSS_CHANGED_BSSID &&
+ vif->type == NL80211_IFTYPE_STATION)
+ set_bss_info = set_sta = !is_zero_ether_addr(info->bssid);
+ if (changed & BSS_CHANGED_ASSOC)
+ set_bss_info = vif->cfg.assoc;
+ if (changed & BSS_CHANGED_BEACON_ENABLED &&
+ vif->type != NL80211_IFTYPE_AP)
+ set_bss_info = set_sta = info->enable_beacon;
+
+ if (set_bss_info == 1)
+ mt7915_mcu_add_bss_info(phy, vif, true);
+ if (set_sta == 1)
+ mt7915_mcu_add_sta(dev, vif, NULL, true);
+
+ if (changed & BSS_CHANGED_ERP_CTS_PROT)
+ mt7915_mac_enable_rtscts(dev, vif, info->use_cts_prot);
+
+ if (changed & BSS_CHANGED_ERP_SLOT) {
+ int slottime = info->use_short_slot ? 9 : 20;
+
+ if (slottime != phy->slottime) {
+ phy->slottime = slottime;
+ mt7915_mac_set_timing(phy);
+ }
+ }
+
+	/* make sure txcmd_mode is only enabled after bss_info has been set */
+ if (changed & (BSS_CHANGED_QOS | BSS_CHANGED_BEACON_ENABLED))
+ mt7915_mcu_set_tx(dev, vif);
+
+ if (changed & BSS_CHANGED_HE_OBSS_PD)
+ mt7915_mcu_add_obss_spr(phy, vif, &info->he_obss_pd);
+
+ if (changed & BSS_CHANGED_HE_BSS_COLOR)
+ mt7915_update_bss_color(hw, vif, &info->he_bss_color);
+
+ if (changed & (BSS_CHANGED_BEACON |
+ BSS_CHANGED_BEACON_ENABLED))
+ mt7915_mcu_add_beacon(hw, vif, info->enable_beacon, changed);
+
+ if (changed & (BSS_CHANGED_UNSOL_BCAST_PROBE_RESP |
+ BSS_CHANGED_FILS_DISCOVERY))
+ mt7915_mcu_add_inband_discov(dev, vif, changed);
+
+ if (set_bss_info == 0)
+ mt7915_mcu_add_bss_info(phy, vif, false);
+ if (set_sta == 0)
+ mt7915_mcu_add_sta(dev, vif, NULL, false);
+
+ mutex_unlock(&dev->mt76.mutex);
+}
+
+static void
+mt7915_vif_check_caps(struct mt7915_phy *phy, struct ieee80211_vif *vif)
+{
+ struct mt7915_vif *mvif = (struct mt7915_vif *)vif->drv_priv;
+ struct mt7915_vif_cap *vc = &mvif->cap;
+
+ vc->ht_ldpc = vif->bss_conf.ht_ldpc;
+ vc->vht_ldpc = vif->bss_conf.vht_ldpc;
+ vc->vht_su_ebfer = vif->bss_conf.vht_su_beamformer;
+ vc->vht_su_ebfee = vif->bss_conf.vht_su_beamformee;
+ vc->vht_mu_ebfer = vif->bss_conf.vht_mu_beamformer;
+ vc->vht_mu_ebfee = vif->bss_conf.vht_mu_beamformee;
+ vc->he_ldpc = vif->bss_conf.he_ldpc;
+ vc->he_su_ebfer = vif->bss_conf.he_su_beamformer;
+ vc->he_su_ebfee = vif->bss_conf.he_su_beamformee;
+ vc->he_mu_ebfer = vif->bss_conf.he_mu_beamformer;
+}
+
+static int
+mt7915_start_ap(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
+ struct ieee80211_bss_conf *link_conf)
+{
+ struct mt7915_phy *phy = mt7915_hw_phy(hw);
+ struct mt7915_dev *dev = mt7915_hw_dev(hw);
+ int err;
+
+ mutex_lock(&dev->mt76.mutex);
+
+ mt7915_vif_check_caps(phy, vif);
+
+ err = mt7915_mcu_add_bss_info(phy, vif, true);
+ if (err)
+ goto out;
+ err = mt7915_mcu_add_sta(dev, vif, NULL, true);
+out:
+ mutex_unlock(&dev->mt76.mutex);
+
+ return err;
+}
+
+static void
+mt7915_stop_ap(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
+ struct ieee80211_bss_conf *link_conf)
+{
+ struct mt7915_dev *dev = mt7915_hw_dev(hw);
+
+ mutex_lock(&dev->mt76.mutex);
+ mt7915_mcu_add_sta(dev, vif, NULL, false);
+ mutex_unlock(&dev->mt76.mutex);
+}
+
+static void
+mt7915_channel_switch_beacon(struct ieee80211_hw *hw,
+ struct ieee80211_vif *vif,
+ struct cfg80211_chan_def *chandef)
+{
+ struct mt7915_dev *dev = mt7915_hw_dev(hw);
+
+ mutex_lock(&dev->mt76.mutex);
+ mt7915_mcu_add_beacon(hw, vif, true, BSS_CHANGED_BEACON);
+ mutex_unlock(&dev->mt76.mutex);
+}
+
+int mt7915_mac_sta_add(struct mt76_dev *mdev, struct ieee80211_vif *vif,
+ struct ieee80211_sta *sta)
+{
+ struct mt7915_dev *dev = container_of(mdev, struct mt7915_dev, mt76);
+ struct mt7915_sta *msta = (struct mt7915_sta *)sta->drv_priv;
+ struct mt7915_vif *mvif = (struct mt7915_vif *)vif->drv_priv;
+ bool ext_phy = mvif->phy != &dev->phy;
+ int ret, idx;
+
+ idx = mt76_wcid_alloc(dev->mt76.wcid_mask, MT7915_WTBL_STA);
+ if (idx < 0)
+ return -ENOSPC;
+
+ INIT_LIST_HEAD(&msta->rc_list);
+ INIT_LIST_HEAD(&msta->wcid.poll_list);
+ msta->vif = mvif;
+ msta->wcid.sta = 1;
+ msta->wcid.idx = idx;
+ msta->wcid.phy_idx = ext_phy;
+ msta->wcid.tx_info |= MT_WCID_TX_INFO_SET;
+ msta->jiffies = jiffies;
+
+ ewma_avg_signal_init(&msta->avg_ack_signal);
+
+ mt7915_mac_wtbl_update(dev, idx,
+ MT_WTBL_UPDATE_ADM_COUNT_CLEAR);
+
+ ret = mt7915_mcu_add_sta(dev, vif, sta, true);
+ if (ret)
+ return ret;
+
+ return mt7915_mcu_add_rate_ctrl(dev, vif, sta, false);
+}
+
+void mt7915_mac_sta_remove(struct mt76_dev *mdev, struct ieee80211_vif *vif,
+ struct ieee80211_sta *sta)
+{
+ struct mt7915_dev *dev = container_of(mdev, struct mt7915_dev, mt76);
+ struct mt7915_sta *msta = (struct mt7915_sta *)sta->drv_priv;
+ int i;
+
+ mt7915_mcu_add_sta(dev, vif, sta, false);
+
+ mt7915_mac_wtbl_update(dev, msta->wcid.idx,
+ MT_WTBL_UPDATE_ADM_COUNT_CLEAR);
+
+ for (i = 0; i < ARRAY_SIZE(msta->twt.flow); i++)
+ mt7915_mac_twt_teardown_flow(dev, msta, i);
+
+ spin_lock_bh(&mdev->sta_poll_lock);
+ if (!list_empty(&msta->wcid.poll_list))
+ list_del_init(&msta->wcid.poll_list);
+ if (!list_empty(&msta->rc_list))
+ list_del_init(&msta->rc_list);
+ spin_unlock_bh(&mdev->sta_poll_lock);
+}
+
+static void mt7915_tx(struct ieee80211_hw *hw,
+ struct ieee80211_tx_control *control,
+ struct sk_buff *skb)
+{
+ struct mt7915_dev *dev = mt7915_hw_dev(hw);
+ struct mt76_phy *mphy = hw->priv;
+ struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
+ struct ieee80211_vif *vif = info->control.vif;
+ struct mt76_wcid *wcid = &dev->mt76.global_wcid;
+
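+	/* use the station's wcid if known, otherwise the vif's, else the global one */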
+ if (control->sta) {
+ struct mt7915_sta *sta;
+
+ sta = (struct mt7915_sta *)control->sta->drv_priv;
+ wcid = &sta->wcid;
+ }
+
+ if (vif && !control->sta) {
+ struct mt7915_vif *mvif;
+
+ mvif = (struct mt7915_vif *)vif->drv_priv;
+ wcid = &mvif->sta.wcid;
+ }
+
+ mt76_tx(mphy, control->sta, wcid, skb);
+}
+
+static int mt7915_set_rts_threshold(struct ieee80211_hw *hw, u32 val)
+{
+ struct mt7915_dev *dev = mt7915_hw_dev(hw);
+ struct mt7915_phy *phy = mt7915_hw_phy(hw);
+ int ret;
+
+ mutex_lock(&dev->mt76.mutex);
+ ret = mt76_connac_mcu_set_rts_thresh(&dev->mt76, val,
+ phy->mt76->band_idx);
+ mutex_unlock(&dev->mt76.mutex);
+
+ return ret;
+}
+
+static int
+mt7915_ampdu_action(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
+ struct ieee80211_ampdu_params *params)
+{
+ enum ieee80211_ampdu_mlme_action action = params->action;
+ struct mt7915_dev *dev = mt7915_hw_dev(hw);
+ struct ieee80211_sta *sta = params->sta;
+ struct ieee80211_txq *txq = sta->txq[params->tid];
+ struct mt7915_sta *msta = (struct mt7915_sta *)sta->drv_priv;
+ u16 tid = params->tid;
+ u16 ssn = params->ssn;
+ struct mt76_txq *mtxq;
+ int ret = 0;
+
+ if (!txq)
+ return -EINVAL;
+
+ mtxq = (struct mt76_txq *)txq->drv_priv;
+
+ mutex_lock(&dev->mt76.mutex);
+ switch (action) {
+ case IEEE80211_AMPDU_RX_START:
+ mt76_rx_aggr_start(&dev->mt76, &msta->wcid, tid, ssn,
+ params->buf_size);
+ ret = mt7915_mcu_add_rx_ba(dev, params, true);
+ break;
+ case IEEE80211_AMPDU_RX_STOP:
+ mt76_rx_aggr_stop(&dev->mt76, &msta->wcid, tid);
+ ret = mt7915_mcu_add_rx_ba(dev, params, false);
+ break;
+ case IEEE80211_AMPDU_TX_OPERATIONAL:
+ mtxq->aggr = true;
+ mtxq->send_bar = false;
+ ret = mt7915_mcu_add_tx_ba(dev, params, true);
+ break;
+ case IEEE80211_AMPDU_TX_STOP_FLUSH:
+ case IEEE80211_AMPDU_TX_STOP_FLUSH_CONT:
+ mtxq->aggr = false;
+ clear_bit(tid, &msta->wcid.ampdu_state);
+ ret = mt7915_mcu_add_tx_ba(dev, params, false);
+ break;
+ case IEEE80211_AMPDU_TX_START:
+ set_bit(tid, &msta->wcid.ampdu_state);
+ ret = IEEE80211_AMPDU_TX_START_IMMEDIATE;
+ break;
+ case IEEE80211_AMPDU_TX_STOP_CONT:
+ mtxq->aggr = false;
+ clear_bit(tid, &msta->wcid.ampdu_state);
+ ret = mt7915_mcu_add_tx_ba(dev, params, false);
+ ieee80211_stop_tx_ba_cb_irqsafe(vif, sta->addr, tid);
+ break;
+ }
+ mutex_unlock(&dev->mt76.mutex);
+
+ return ret;
+}
+
+static int
+mt7915_sta_add(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
+ struct ieee80211_sta *sta)
+{
+ return mt76_sta_state(hw, vif, sta, IEEE80211_STA_NOTEXIST,
+ IEEE80211_STA_NONE);
+}
+
+static int
+mt7915_sta_remove(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
+ struct ieee80211_sta *sta)
+{
+ return mt76_sta_state(hw, vif, sta, IEEE80211_STA_NONE,
+ IEEE80211_STA_NOTEXIST);
+}
+
+static int
+mt7915_get_stats(struct ieee80211_hw *hw,
+ struct ieee80211_low_level_stats *stats)
+{
+ struct mt7915_phy *phy = mt7915_hw_phy(hw);
+ struct mt7915_dev *dev = mt7915_hw_dev(hw);
+ struct mt76_mib_stats *mib = &phy->mib;
+
+ mutex_lock(&dev->mt76.mutex);
+
+ stats->dot11RTSSuccessCount = mib->rts_cnt;
+ stats->dot11RTSFailureCount = mib->rts_retries_cnt;
+ stats->dot11FCSErrorCount = mib->fcs_err_cnt;
+ stats->dot11ACKFailureCount = mib->ack_fail_cnt;
+
+ mutex_unlock(&dev->mt76.mutex);
+
+ return 0;
+}
+
+u64 __mt7915_get_tsf(struct ieee80211_hw *hw, struct mt7915_vif *mvif)
+{
+ struct mt7915_dev *dev = mt7915_hw_dev(hw);
+ struct mt7915_phy *phy = mt7915_hw_phy(hw);
+ bool band = phy->mt76->band_idx;
+ union {
+ u64 t64;
+ u32 t32[2];
+ } tsf;
+ u16 n;
+
+ lockdep_assert_held(&dev->mt76.mutex);
+
+ n = mvif->mt76.omac_idx > HW_BSSID_MAX ? HW_BSSID_0
+ : mvif->mt76.omac_idx;
+ /* TSF software read */
+ if (is_mt7915(&dev->mt76))
+ mt76_rmw(dev, MT_LPON_TCR(band, n), MT_LPON_TCR_SW_MODE,
+ MT_LPON_TCR_SW_READ);
+ else
+ mt76_rmw(dev, MT_LPON_TCR_MT7916(band, n), MT_LPON_TCR_SW_MODE,
+ MT_LPON_TCR_SW_READ);
+ tsf.t32[0] = mt76_rr(dev, MT_LPON_UTTR0(band));
+ tsf.t32[1] = mt76_rr(dev, MT_LPON_UTTR1(band));
+
+ return tsf.t64;
+}
+
+static u64
+mt7915_get_tsf(struct ieee80211_hw *hw, struct ieee80211_vif *vif)
+{
+ struct mt7915_vif *mvif = (struct mt7915_vif *)vif->drv_priv;
+ struct mt7915_dev *dev = mt7915_hw_dev(hw);
+ u64 ret;
+
+ mutex_lock(&dev->mt76.mutex);
+ ret = __mt7915_get_tsf(hw, mvif);
+ mutex_unlock(&dev->mt76.mutex);
+
+ return ret;
+}
+
+static void
+mt7915_set_tsf(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
+ u64 timestamp)
+{
+ struct mt7915_vif *mvif = (struct mt7915_vif *)vif->drv_priv;
+ struct mt7915_dev *dev = mt7915_hw_dev(hw);
+ struct mt7915_phy *phy = mt7915_hw_phy(hw);
+ bool band = phy->mt76->band_idx;
+ union {
+ u64 t64;
+ u32 t32[2];
+ } tsf = { .t64 = timestamp, };
+ u16 n;
+
+ mutex_lock(&dev->mt76.mutex);
+
+ n = mvif->mt76.omac_idx > HW_BSSID_MAX ? HW_BSSID_0
+ : mvif->mt76.omac_idx;
+ mt76_wr(dev, MT_LPON_UTTR0(band), tsf.t32[0]);
+ mt76_wr(dev, MT_LPON_UTTR1(band), tsf.t32[1]);
+ /* TSF software overwrite */
+ if (is_mt7915(&dev->mt76))
+ mt76_rmw(dev, MT_LPON_TCR(band, n), MT_LPON_TCR_SW_MODE,
+ MT_LPON_TCR_SW_WRITE);
+ else
+ mt76_rmw(dev, MT_LPON_TCR_MT7916(band, n), MT_LPON_TCR_SW_MODE,
+ MT_LPON_TCR_SW_WRITE);
+
+ mutex_unlock(&dev->mt76.mutex);
+}
+
+static void
+mt7915_offset_tsf(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
+ s64 timestamp)
+{
+ struct mt7915_vif *mvif = (struct mt7915_vif *)vif->drv_priv;
+ struct mt7915_dev *dev = mt7915_hw_dev(hw);
+ struct mt7915_phy *phy = mt7915_hw_phy(hw);
+ bool band = phy->mt76->band_idx;
+ union {
+ u64 t64;
+ u32 t32[2];
+ } tsf = { .t64 = timestamp, };
+ u16 n;
+
+ mutex_lock(&dev->mt76.mutex);
+
+ n = mvif->mt76.omac_idx > HW_BSSID_MAX ? HW_BSSID_0
+ : mvif->mt76.omac_idx;
+ mt76_wr(dev, MT_LPON_UTTR0(band), tsf.t32[0]);
+ mt76_wr(dev, MT_LPON_UTTR1(band), tsf.t32[1]);
+	/* TSF software adjust */
+ if (is_mt7915(&dev->mt76))
+ mt76_rmw(dev, MT_LPON_TCR(band, n), MT_LPON_TCR_SW_MODE,
+ MT_LPON_TCR_SW_ADJUST);
+ else
+ mt76_rmw(dev, MT_LPON_TCR_MT7916(band, n), MT_LPON_TCR_SW_MODE,
+ MT_LPON_TCR_SW_ADJUST);
+
+ mutex_unlock(&dev->mt76.mutex);
+}
+
+static void
+mt7915_set_coverage_class(struct ieee80211_hw *hw, s16 coverage_class)
+{
+ struct mt7915_phy *phy = mt7915_hw_phy(hw);
+ struct mt7915_dev *dev = phy->dev;
+
+ mutex_lock(&dev->mt76.mutex);
+ phy->coverage_class = max_t(s16, coverage_class, 0);
+ mt7915_mac_set_timing(phy);
+ mutex_unlock(&dev->mt76.mutex);
+}
+
+static int
+mt7915_set_antenna(struct ieee80211_hw *hw, u32 tx_ant, u32 rx_ant)
+{
+ struct mt7915_dev *dev = mt7915_hw_dev(hw);
+ struct mt7915_phy *phy = mt7915_hw_phy(hw);
+ int max_nss = hweight8(hw->wiphy->available_antennas_tx);
+ u8 chainshift = dev->chainshift;
+ u8 band = phy->mt76->band_idx;
+
+ if (!tx_ant || tx_ant != rx_ant || ffs(tx_ant) > max_nss)
+ return -EINVAL;
+
+ mutex_lock(&dev->mt76.mutex);
+
+ phy->mt76->antenna_mask = tx_ant;
+
+ /* handle a variant of mt7916/mt7981 which has 3T3R but nss2 on 5 GHz band */
+ if ((is_mt7916(&dev->mt76) || is_mt7981(&dev->mt76)) &&
+ band && hweight8(tx_ant) == max_nss)
+ phy->mt76->chainmask = (dev->chainmask >> chainshift) << chainshift;
+ else
+ phy->mt76->chainmask = tx_ant << (chainshift * band);
+
+ mt76_set_stream_caps(phy->mt76, true);
+ mt7915_set_stream_vht_txbf_caps(phy);
+ mt7915_set_stream_he_caps(phy);
+
+ mutex_unlock(&dev->mt76.mutex);
+
+ return 0;
+}
+
+static void mt7915_sta_statistics(struct ieee80211_hw *hw,
+ struct ieee80211_vif *vif,
+ struct ieee80211_sta *sta,
+ struct station_info *sinfo)
+{
+ struct mt7915_phy *phy = mt7915_hw_phy(hw);
+ struct mt7915_sta *msta = (struct mt7915_sta *)sta->drv_priv;
+ struct rate_info *txrate = &msta->wcid.rate;
+ struct rate_info rxrate = {};
+
+ if (is_mt7915(&phy->dev->mt76) &&
+ !mt7915_mcu_get_rx_rate(phy, vif, sta, &rxrate)) {
+ sinfo->rxrate = rxrate;
+ sinfo->filled |= BIT_ULL(NL80211_STA_INFO_RX_BITRATE);
+ }
+
+ if (txrate->legacy || txrate->flags) {
+ if (txrate->legacy) {
+ sinfo->txrate.legacy = txrate->legacy;
+ } else {
+ sinfo->txrate.mcs = txrate->mcs;
+ sinfo->txrate.nss = txrate->nss;
+ sinfo->txrate.bw = txrate->bw;
+ sinfo->txrate.he_gi = txrate->he_gi;
+ sinfo->txrate.he_dcm = txrate->he_dcm;
+ sinfo->txrate.he_ru_alloc = txrate->he_ru_alloc;
+ }
+ sinfo->txrate.flags = txrate->flags;
+ sinfo->filled |= BIT_ULL(NL80211_STA_INFO_TX_BITRATE);
+ }
+
+	/* Offloaded flows bypass the networking stack, so the driver counts
+	 * and reports sta statistics via NL80211_STA_INFO when WED is active.
+	 */
+ if (mtk_wed_device_active(&phy->dev->mt76.mmio.wed)) {
+ sinfo->tx_bytes = msta->wcid.stats.tx_bytes;
+ sinfo->filled |= BIT_ULL(NL80211_STA_INFO_TX_BYTES64);
+
+ if (!mt7915_mcu_wed_wa_tx_stats(phy->dev, msta->wcid.idx)) {
+ sinfo->tx_packets = msta->wcid.stats.tx_packets;
+ sinfo->filled |= BIT_ULL(NL80211_STA_INFO_TX_PACKETS);
+ }
+
+ if (mtk_wed_get_rx_capa(&phy->dev->mt76.mmio.wed)) {
+ sinfo->rx_bytes = msta->wcid.stats.rx_bytes;
+ sinfo->filled |= BIT_ULL(NL80211_STA_INFO_RX_BYTES64);
+
+ sinfo->rx_packets = msta->wcid.stats.rx_packets;
+ sinfo->filled |= BIT_ULL(NL80211_STA_INFO_RX_PACKETS);
+ }
+ }
+
+ sinfo->tx_failed = msta->wcid.stats.tx_failed;
+ sinfo->filled |= BIT_ULL(NL80211_STA_INFO_TX_FAILED);
+
+ sinfo->tx_retries = msta->wcid.stats.tx_retries;
+ sinfo->filled |= BIT_ULL(NL80211_STA_INFO_TX_RETRIES);
+
+ sinfo->ack_signal = (s8)msta->ack_signal;
+ sinfo->filled |= BIT_ULL(NL80211_STA_INFO_ACK_SIGNAL);
+
+ sinfo->avg_ack_signal = -(s8)ewma_avg_signal_read(&msta->avg_ack_signal);
+ sinfo->filled |= BIT_ULL(NL80211_STA_INFO_ACK_SIGNAL_AVG);
+}
+
+static void mt7915_sta_rc_work(void *data, struct ieee80211_sta *sta)
+{
+ struct mt7915_sta *msta = (struct mt7915_sta *)sta->drv_priv;
+ struct mt7915_dev *dev = msta->vif->phy->dev;
+ u32 *changed = data;
+
+ spin_lock_bh(&dev->mt76.sta_poll_lock);
+ msta->changed |= *changed;
+ if (list_empty(&msta->rc_list))
+ list_add_tail(&msta->rc_list, &dev->sta_rc_list);
+ spin_unlock_bh(&dev->mt76.sta_poll_lock);
+}
+
+static void mt7915_sta_rc_update(struct ieee80211_hw *hw,
+ struct ieee80211_vif *vif,
+ struct ieee80211_sta *sta,
+ u32 changed)
+{
+ struct mt7915_phy *phy = mt7915_hw_phy(hw);
+ struct mt7915_dev *dev = phy->dev;
+
+ mt7915_sta_rc_work(&changed, sta);
+ ieee80211_queue_work(hw, &dev->rc_work);
+}
+
+static int
+mt7915_set_bitrate_mask(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
+ const struct cfg80211_bitrate_mask *mask)
+{
+ struct mt7915_vif *mvif = (struct mt7915_vif *)vif->drv_priv;
+ struct mt7915_phy *phy = mt7915_hw_phy(hw);
+ struct mt7915_dev *dev = phy->dev;
+ u32 changed = IEEE80211_RC_SUPP_RATES_CHANGED;
+
+ mvif->bitrate_mask = *mask;
+
+	/* If multiple rates across different preambles are given, we can
+	 * reconfigure this info for all peers via the sta_rec command, with
+	 * the exceptions below.
+	 * - single rate: if a rate is passed along with different preambles,
+	 *   we select the highest one as the fixed rate, i.e. VHT MCS for VHT peers.
+	 * - multiple rates: if the set is not in range format, i.e. 0-{7,8,9}
+	 *   for VHT, then setting multiple MCS values (e.g. MCS 4,5,6) is not
+	 *   supported.
+	 */
+ ieee80211_iterate_stations_atomic(hw, mt7915_sta_rc_work, &changed);
+ ieee80211_queue_work(hw, &dev->rc_work);
+
+ return 0;
+}
+
+static void mt7915_sta_set_4addr(struct ieee80211_hw *hw,
+ struct ieee80211_vif *vif,
+ struct ieee80211_sta *sta,
+ bool enabled)
+{
+ struct mt7915_dev *dev = mt7915_hw_dev(hw);
+ struct mt7915_sta *msta = (struct mt7915_sta *)sta->drv_priv;
+
+ if (enabled)
+ set_bit(MT_WCID_FLAG_4ADDR, &msta->wcid.flags);
+ else
+ clear_bit(MT_WCID_FLAG_4ADDR, &msta->wcid.flags);
+
+ mt76_connac_mcu_wtbl_update_hdr_trans(&dev->mt76, vif, sta);
+}
+
+static void mt7915_sta_set_decap_offload(struct ieee80211_hw *hw,
+ struct ieee80211_vif *vif,
+ struct ieee80211_sta *sta,
+ bool enabled)
+{
+ struct mt7915_dev *dev = mt7915_hw_dev(hw);
+ struct mt7915_sta *msta = (struct mt7915_sta *)sta->drv_priv;
+
+ if (enabled)
+ set_bit(MT_WCID_FLAG_HDR_TRANS, &msta->wcid.flags);
+ else
+ clear_bit(MT_WCID_FLAG_HDR_TRANS, &msta->wcid.flags);
+
+ mt76_connac_mcu_wtbl_update_hdr_trans(&dev->mt76, vif, sta);
+}
+
+static int mt7915_sta_set_txpwr(struct ieee80211_hw *hw,
+ struct ieee80211_vif *vif,
+ struct ieee80211_sta *sta)
+{
+ struct mt7915_phy *phy = mt7915_hw_phy(hw);
+ struct mt7915_dev *dev = mt7915_hw_dev(hw);
+ s16 txpower = sta->deflink.txpwr.power;
+ int ret;
+
+ if (sta->deflink.txpwr.type == NL80211_TX_POWER_AUTOMATIC)
+ txpower = 0;
+
+ mutex_lock(&dev->mt76.mutex);
+
+	/* NOTE: temporarily use 0 as the minimum limit, which is a
+	 * global setting applied to all stations.
+	 */
+ ret = mt7915_mcu_set_txpower_frame_min(phy, 0);
+ if (ret)
+ goto out;
+
+	/* This only applies to data frames while pushing traffic,
+	 * whereas management frames and other packets that use a
+	 * fixed rate can be configured via the TxD.
+	 */
+ ret = mt7915_mcu_set_txpower_frame(phy, vif, sta, txpower);
+
+out:
+ mutex_unlock(&dev->mt76.mutex);
+
+ return ret;
+}
+
+static const char mt7915_gstrings_stats[][ETH_GSTRING_LEN] = {
+ "tx_ampdu_cnt",
+ "tx_stop_q_empty_cnt",
+ "tx_mpdu_attempts",
+ "tx_mpdu_success",
+ "tx_rwp_fail_cnt",
+ "tx_rwp_need_cnt",
+ "tx_pkt_ebf_cnt",
+ "tx_pkt_ibf_cnt",
+ "tx_ampdu_len:0-1",
+ "tx_ampdu_len:2-10",
+ "tx_ampdu_len:11-19",
+ "tx_ampdu_len:20-28",
+ "tx_ampdu_len:29-37",
+ "tx_ampdu_len:38-46",
+ "tx_ampdu_len:47-55",
+ "tx_ampdu_len:56-79",
+ "tx_ampdu_len:80-103",
+ "tx_ampdu_len:104-127",
+ "tx_ampdu_len:128-151",
+ "tx_ampdu_len:152-175",
+ "tx_ampdu_len:176-199",
+ "tx_ampdu_len:200-223",
+ "tx_ampdu_len:224-247",
+ "ba_miss_count",
+ "tx_beamformer_ppdu_iBF",
+ "tx_beamformer_ppdu_eBF",
+ "tx_beamformer_rx_feedback_all",
+ "tx_beamformer_rx_feedback_he",
+ "tx_beamformer_rx_feedback_vht",
+ "tx_beamformer_rx_feedback_ht",
+ "tx_beamformer_rx_feedback_bw", /* zero based idx: 20, 40, 80, 160 */
+ "tx_beamformer_rx_feedback_nc",
+ "tx_beamformer_rx_feedback_nr",
+ "tx_beamformee_ok_feedback_pkts",
+ "tx_beamformee_feedback_trig",
+ "tx_mu_beamforming",
+ "tx_mu_mpdu",
+ "tx_mu_successful_mpdu",
+ "tx_su_successful_mpdu",
+ "tx_msdu_pack_1",
+ "tx_msdu_pack_2",
+ "tx_msdu_pack_3",
+ "tx_msdu_pack_4",
+ "tx_msdu_pack_5",
+ "tx_msdu_pack_6",
+ "tx_msdu_pack_7",
+ "tx_msdu_pack_8",
+
+ /* rx counters */
+ "rx_fifo_full_cnt",
+ "rx_mpdu_cnt",
+ "channel_idle_cnt",
+ "primary_cca_busy_time",
+ "secondary_cca_busy_time",
+ "primary_energy_detect_time",
+ "cck_mdrdy_time",
+ "ofdm_mdrdy_time",
+ "green_mdrdy_time",
+ "rx_vector_mismatch_cnt",
+ "rx_delimiter_fail_cnt",
+ "rx_mrdy_cnt",
+ "rx_len_mismatch_cnt",
+ "rx_ampdu_cnt",
+ "rx_ampdu_bytes_cnt",
+ "rx_ampdu_valid_subframe_cnt",
+ "rx_ampdu_valid_subframe_b_cnt",
+ "rx_pfdrop_cnt",
+ "rx_vec_queue_overflow_drop_cnt",
+ "rx_ba_cnt",
+
+ /* muru mu-mimo and ofdma related stats */
+ "dl_cck_cnt",
+ "dl_ofdm_cnt",
+ "dl_htmix_cnt",
+ "dl_htgf_cnt",
+ "dl_vht_su_cnt",
+ "dl_vht_2mu_cnt",
+ "dl_vht_3mu_cnt",
+ "dl_vht_4mu_cnt",
+ "dl_he_su_cnt",
+ "dl_he_ext_su_cnt",
+ "dl_he_2ru_cnt",
+ "dl_he_2mu_cnt",
+ "dl_he_3ru_cnt",
+ "dl_he_3mu_cnt",
+ "dl_he_4ru_cnt",
+ "dl_he_4mu_cnt",
+ "dl_he_5to8ru_cnt",
+ "dl_he_9to16ru_cnt",
+ "dl_he_gtr16ru_cnt",
+
+ "ul_hetrig_su_cnt",
+ "ul_hetrig_2ru_cnt",
+ "ul_hetrig_3ru_cnt",
+ "ul_hetrig_4ru_cnt",
+ "ul_hetrig_5to8ru_cnt",
+ "ul_hetrig_9to16ru_cnt",
+ "ul_hetrig_gtr16ru_cnt",
+ "ul_hetrig_2mu_cnt",
+ "ul_hetrig_3mu_cnt",
+ "ul_hetrig_4mu_cnt",
+
+ /* per vif counters */
+ "v_tx_mode_cck",
+ "v_tx_mode_ofdm",
+ "v_tx_mode_ht",
+ "v_tx_mode_ht_gf",
+ "v_tx_mode_vht",
+ "v_tx_mode_he_su",
+ "v_tx_mode_he_ext_su",
+ "v_tx_mode_he_tb",
+ "v_tx_mode_he_mu",
+ "v_tx_bw_20",
+ "v_tx_bw_40",
+ "v_tx_bw_80",
+ "v_tx_bw_160",
+ "v_tx_mcs_0",
+ "v_tx_mcs_1",
+ "v_tx_mcs_2",
+ "v_tx_mcs_3",
+ "v_tx_mcs_4",
+ "v_tx_mcs_5",
+ "v_tx_mcs_6",
+ "v_tx_mcs_7",
+ "v_tx_mcs_8",
+ "v_tx_mcs_9",
+ "v_tx_mcs_10",
+ "v_tx_mcs_11",
+ "v_tx_nss_1",
+ "v_tx_nss_2",
+ "v_tx_nss_3",
+ "v_tx_nss_4",
+};
+
+#define MT7915_SSTATS_LEN ARRAY_SIZE(mt7915_gstrings_stats)
+
+/* Ethtool related API */
+static
+void mt7915_get_et_strings(struct ieee80211_hw *hw,
+ struct ieee80211_vif *vif,
+ u32 sset, u8 *data)
+{
+ if (sset != ETH_SS_STATS)
+ return;
+
+ memcpy(data, mt7915_gstrings_stats, sizeof(mt7915_gstrings_stats));
+ data += sizeof(mt7915_gstrings_stats);
+ page_pool_ethtool_stats_get_strings(data);
+}
+
+static
+int mt7915_get_et_sset_count(struct ieee80211_hw *hw,
+ struct ieee80211_vif *vif, int sset)
+{
+ if (sset != ETH_SS_STATS)
+ return 0;
+
+ return MT7915_SSTATS_LEN + page_pool_ethtool_stats_get_count();
+}
+
+static void mt7915_ethtool_worker(void *wi_data, struct ieee80211_sta *sta)
+{
+ struct mt76_ethtool_worker_info *wi = wi_data;
+ struct mt7915_sta *msta = (struct mt7915_sta *)sta->drv_priv;
+
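+	/* only account stations that belong to the vif being queried */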
+ if (msta->vif->mt76.idx != wi->idx)
+ return;
+
+ mt76_ethtool_worker(wi, &msta->wcid.stats, false);
+}
+
+static
+void mt7915_get_et_stats(struct ieee80211_hw *hw,
+ struct ieee80211_vif *vif,
+ struct ethtool_stats *stats, u64 *data)
+{
+ struct mt7915_dev *dev = mt7915_hw_dev(hw);
+ struct mt7915_phy *phy = mt7915_hw_phy(hw);
+ struct mt7915_vif *mvif = (struct mt7915_vif *)vif->drv_priv;
+ struct mt76_mib_stats *mib = &phy->mib;
+ struct mt76_ethtool_worker_info wi = {
+ .data = data,
+ .idx = mvif->mt76.idx,
+ };
+ /* See mt7915_ampdu_stat_read_phy, etc */
+ int i, ei = 0, stats_size;
+
+ mutex_lock(&dev->mt76.mutex);
+
+ mt7915_mac_update_stats(phy);
+
+ data[ei++] = mib->tx_ampdu_cnt;
+ data[ei++] = mib->tx_stop_q_empty_cnt;
+ data[ei++] = mib->tx_mpdu_attempts_cnt;
+ data[ei++] = mib->tx_mpdu_success_cnt;
+ data[ei++] = mib->tx_rwp_fail_cnt;
+ data[ei++] = mib->tx_rwp_need_cnt;
+ data[ei++] = mib->tx_pkt_ebf_cnt;
+ data[ei++] = mib->tx_pkt_ibf_cnt;
+
+ /* Tx ampdu stat */
+ for (i = 0; i < 15 /*ARRAY_SIZE(bound)*/; i++)
+ data[ei++] = phy->mt76->aggr_stats[i];
+
+ data[ei++] = phy->mib.ba_miss_cnt;
+
+ /* Tx Beamformer monitor */
+ data[ei++] = mib->tx_bf_ibf_ppdu_cnt;
+ data[ei++] = mib->tx_bf_ebf_ppdu_cnt;
+
+ /* Tx Beamformer Rx feedback monitor */
+ data[ei++] = mib->tx_bf_rx_fb_all_cnt;
+ data[ei++] = mib->tx_bf_rx_fb_he_cnt;
+ data[ei++] = mib->tx_bf_rx_fb_vht_cnt;
+ data[ei++] = mib->tx_bf_rx_fb_ht_cnt;
+
+ data[ei++] = mib->tx_bf_rx_fb_bw;
+ data[ei++] = mib->tx_bf_rx_fb_nc_cnt;
+ data[ei++] = mib->tx_bf_rx_fb_nr_cnt;
+
+ /* Tx Beamformee Rx NDPA & Tx feedback report */
+ data[ei++] = mib->tx_bf_fb_cpl_cnt;
+ data[ei++] = mib->tx_bf_fb_trig_cnt;
+
+ /* Tx SU & MU counters */
+ data[ei++] = mib->tx_bf_cnt;
+ data[ei++] = mib->tx_mu_mpdu_cnt;
+ data[ei++] = mib->tx_mu_acked_mpdu_cnt;
+ data[ei++] = mib->tx_su_acked_mpdu_cnt;
+
+ /* Tx amsdu info (pack-count histogram) */
+ for (i = 0; i < ARRAY_SIZE(mib->tx_amsdu); i++)
+ data[ei++] = mib->tx_amsdu[i];
+
+ /* rx counters */
+ data[ei++] = mib->rx_fifo_full_cnt;
+ data[ei++] = mib->rx_mpdu_cnt;
+ data[ei++] = mib->channel_idle_cnt;
+ data[ei++] = mib->primary_cca_busy_time;
+ data[ei++] = mib->secondary_cca_busy_time;
+ data[ei++] = mib->primary_energy_detect_time;
+ data[ei++] = mib->cck_mdrdy_time;
+ data[ei++] = mib->ofdm_mdrdy_time;
+ data[ei++] = mib->green_mdrdy_time;
+ data[ei++] = mib->rx_vector_mismatch_cnt;
+ data[ei++] = mib->rx_delimiter_fail_cnt;
+ data[ei++] = mib->rx_mrdy_cnt;
+ data[ei++] = mib->rx_len_mismatch_cnt;
+ data[ei++] = mib->rx_ampdu_cnt;
+ data[ei++] = mib->rx_ampdu_bytes_cnt;
+ data[ei++] = mib->rx_ampdu_valid_subframe_cnt;
+ data[ei++] = mib->rx_ampdu_valid_subframe_bytes_cnt;
+ data[ei++] = mib->rx_pfdrop_cnt;
+ data[ei++] = mib->rx_vec_queue_overflow_drop_cnt;
+ data[ei++] = mib->rx_ba_cnt;
+
+ data[ei++] = mib->dl_cck_cnt;
+ data[ei++] = mib->dl_ofdm_cnt;
+ data[ei++] = mib->dl_htmix_cnt;
+ data[ei++] = mib->dl_htgf_cnt;
+ data[ei++] = mib->dl_vht_su_cnt;
+ data[ei++] = mib->dl_vht_2mu_cnt;
+ data[ei++] = mib->dl_vht_3mu_cnt;
+ data[ei++] = mib->dl_vht_4mu_cnt;
+ data[ei++] = mib->dl_he_su_cnt;
+ data[ei++] = mib->dl_he_ext_su_cnt;
+ data[ei++] = mib->dl_he_2ru_cnt;
+ data[ei++] = mib->dl_he_2mu_cnt;
+ data[ei++] = mib->dl_he_3ru_cnt;
+ data[ei++] = mib->dl_he_3mu_cnt;
+ data[ei++] = mib->dl_he_4ru_cnt;
+ data[ei++] = mib->dl_he_4mu_cnt;
+ data[ei++] = mib->dl_he_5to8ru_cnt;
+ data[ei++] = mib->dl_he_9to16ru_cnt;
+ data[ei++] = mib->dl_he_gtr16ru_cnt;
+
+ data[ei++] = mib->ul_hetrig_su_cnt;
+ data[ei++] = mib->ul_hetrig_2ru_cnt;
+ data[ei++] = mib->ul_hetrig_3ru_cnt;
+ data[ei++] = mib->ul_hetrig_4ru_cnt;
+ data[ei++] = mib->ul_hetrig_5to8ru_cnt;
+ data[ei++] = mib->ul_hetrig_9to16ru_cnt;
+ data[ei++] = mib->ul_hetrig_gtr16ru_cnt;
+ data[ei++] = mib->ul_hetrig_2mu_cnt;
+ data[ei++] = mib->ul_hetrig_3mu_cnt;
+ data[ei++] = mib->ul_hetrig_4mu_cnt;
+
+ /* Add values for all stations owned by this vif */
+ wi.initial_stat_idx = ei;
+ ieee80211_iterate_stations_atomic(hw, mt7915_ethtool_worker, &wi);
+
+ mutex_unlock(&dev->mt76.mutex);
+
+ if (wi.sta_count == 0)
+ return;
+
+ ei += wi.worker_stat_count;
+
+ mt76_ethtool_page_pool_stats(&dev->mt76, &data[ei], &ei);
+
+ stats_size = MT7915_SSTATS_LEN + page_pool_ethtool_stats_get_count();
+ if (ei != stats_size)
+ dev_err(dev->mt76.dev, "ei: %d size: %d", ei, stats_size);
+}
+
+static void
+mt7915_twt_teardown_request(struct ieee80211_hw *hw,
+ struct ieee80211_sta *sta,
+ u8 flowid)
+{
+ struct mt7915_sta *msta = (struct mt7915_sta *)sta->drv_priv;
+ struct mt7915_dev *dev = mt7915_hw_dev(hw);
+
+ mutex_lock(&dev->mt76.mutex);
+ mt7915_mac_twt_teardown_flow(dev, msta, flowid);
+ mutex_unlock(&dev->mt76.mutex);
+}
+
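+/* Configure the dedicated background radar detection (rdd2) chain: a NULL or
+ * non-radar chandef disables it, otherwise it is armed on the requested
+ * channel unless another phy already owns it.
+ */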
+static int
+mt7915_set_radar_background(struct ieee80211_hw *hw,
+ struct cfg80211_chan_def *chandef)
+{
+ struct mt7915_phy *phy = mt7915_hw_phy(hw);
+ struct mt7915_dev *dev = phy->dev;
+ int ret = -EINVAL;
+ bool running;
+
+ mutex_lock(&dev->mt76.mutex);
+
+ if (dev->mt76.region == NL80211_DFS_UNSET)
+ goto out;
+
+ if (dev->rdd2_phy && dev->rdd2_phy != phy) {
+ /* rdd2 is already locked */
+ ret = -EBUSY;
+ goto out;
+ }
+
+ /* rdd2 already configured on a radar channel */
+ running = dev->rdd2_phy &&
+ cfg80211_chandef_valid(&dev->rdd2_chandef) &&
+ !!(dev->rdd2_chandef.chan->flags & IEEE80211_CHAN_RADAR);
+
+ if (!chandef || running ||
+ !(chandef->chan->flags & IEEE80211_CHAN_RADAR)) {
+ ret = mt7915_mcu_rdd_background_enable(phy, NULL);
+ if (ret)
+ goto out;
+
+ if (!running)
+ goto update_phy;
+ }
+
+ ret = mt7915_mcu_rdd_background_enable(phy, chandef);
+ if (ret)
+ goto out;
+
+update_phy:
+ dev->rdd2_phy = chandef ? phy : NULL;
+ if (chandef)
+ dev->rdd2_chandef = *chandef;
+out:
+ mutex_unlock(&dev->mt76.mutex);
+
+ return ret;
+}
+
+#ifdef CONFIG_NET_MEDIATEK_SOC_WED
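+/* Describe the hardware forwarding path for WED (Wireless Ethernet Dispatch)
+ * so offloaded flows for this station can be steered directly to the WDMA
+ * ring instead of passing through the host.
+ */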
+static int
+mt7915_net_fill_forward_path(struct ieee80211_hw *hw,
+ struct ieee80211_vif *vif,
+ struct ieee80211_sta *sta,
+ struct net_device_path_ctx *ctx,
+ struct net_device_path *path)
+{
+ struct mt7915_vif *mvif = (struct mt7915_vif *)vif->drv_priv;
+ struct mt7915_sta *msta = (struct mt7915_sta *)sta->drv_priv;
+ struct mt7915_dev *dev = mt7915_hw_dev(hw);
+ struct mt7915_phy *phy = mt7915_hw_phy(hw);
+ struct mtk_wed_device *wed = &dev->mt76.mmio.wed;
+
+ if (!mtk_wed_device_active(wed))
+ return -ENODEV;
+
+ if (msta->wcid.idx > 0xff)
+ return -EIO;
+
+ path->type = DEV_PATH_MTK_WDMA;
+ path->dev = ctx->dev;
+ path->mtk_wdma.wdma_idx = wed->wdma_idx;
+ path->mtk_wdma.bss = mvif->mt76.idx;
+ path->mtk_wdma.wcid = is_mt7915(&dev->mt76) ? msta->wcid.idx : 0x3ff;
+ path->mtk_wdma.queue = phy != &dev->phy;
+
+ ctx->dev = NULL;
+
+ return 0;
+}
+#endif
+
+const struct ieee80211_ops mt7915_ops = {
+ .tx = mt7915_tx,
+ .start = mt7915_start,
+ .stop = mt7915_stop,
+ .add_interface = mt7915_add_interface,
+ .remove_interface = mt7915_remove_interface,
+ .config = mt7915_config,
+ .conf_tx = mt7915_conf_tx,
+ .configure_filter = mt7915_configure_filter,
+ .bss_info_changed = mt7915_bss_info_changed,
+ .start_ap = mt7915_start_ap,
+ .stop_ap = mt7915_stop_ap,
+ .sta_add = mt7915_sta_add,
+ .sta_remove = mt7915_sta_remove,
+ .sta_pre_rcu_remove = mt76_sta_pre_rcu_remove,
+ .sta_rc_update = mt7915_sta_rc_update,
+ .set_key = mt7915_set_key,
+ .ampdu_action = mt7915_ampdu_action,
+ .set_rts_threshold = mt7915_set_rts_threshold,
+ .wake_tx_queue = mt76_wake_tx_queue,
+ .sw_scan_start = mt76_sw_scan,
+ .sw_scan_complete = mt76_sw_scan_complete,
+ .release_buffered_frames = mt76_release_buffered_frames,
+ .get_txpower = mt76_get_txpower,
+ .set_sar_specs = mt7915_set_sar_specs,
+ .channel_switch_beacon = mt7915_channel_switch_beacon,
+ .get_stats = mt7915_get_stats,
+ .get_et_sset_count = mt7915_get_et_sset_count,
+ .get_et_stats = mt7915_get_et_stats,
+ .get_et_strings = mt7915_get_et_strings,
+ .get_tsf = mt7915_get_tsf,
+ .set_tsf = mt7915_set_tsf,
+ .offset_tsf = mt7915_offset_tsf,
+ .get_survey = mt76_get_survey,
+ .get_antenna = mt76_get_antenna,
+ .set_antenna = mt7915_set_antenna,
+ .set_bitrate_mask = mt7915_set_bitrate_mask,
+ .set_coverage_class = mt7915_set_coverage_class,
+ .sta_statistics = mt7915_sta_statistics,
+ .sta_set_txpwr = mt7915_sta_set_txpwr,
+ .sta_set_4addr = mt7915_sta_set_4addr,
+ .sta_set_decap_offload = mt7915_sta_set_decap_offload,
+ .add_twt_setup = mt7915_mac_add_twt_setup,
+ .twt_teardown_request = mt7915_twt_teardown_request,
+ CFG80211_TESTMODE_CMD(mt76_testmode_cmd)
+ CFG80211_TESTMODE_DUMP(mt76_testmode_dump)
+#ifdef CONFIG_MAC80211_DEBUGFS
+ .sta_add_debugfs = mt7915_sta_add_debugfs,
+#endif
+ .set_radar_background = mt7915_set_radar_background,
+#ifdef CONFIG_NET_MEDIATEK_SOC_WED
+ .net_fill_forward_path = mt7915_net_fill_forward_path,
+#endif
+};
diff --git a/drivers/net/wireless/mediatek/mt76/mt7915/mcu.c b/drivers/net/wireless/mediatek/mt76/mt7915/mcu.c
new file mode 100644
index 0000000000..5d8e985cd7
--- /dev/null
+++ b/drivers/net/wireless/mediatek/mt76/mt7915/mcu.c
@@ -0,0 +1,3879 @@
+// SPDX-License-Identifier: ISC
+/* Copyright (C) 2020 MediaTek Inc. */
+
+#include <linux/fs.h>
+#include "mt7915.h"
+#include "mcu.h"
+#include "mac.h"
+#include "eeprom.h"
+
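+/* Pick the firmware image name from the chip ID; for MT7986 the name also
+ * depends on the attached adie, see fw_name_var() below.
+ */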
+#define fw_name(_dev, name, ...) ({ \
+ char *_fw; \
+ switch (mt76_chip(&(_dev)->mt76)) { \
+ case 0x7915: \
+ _fw = MT7915_##name; \
+ break; \
+ case 0x7981: \
+ _fw = MT7981_##name; \
+ break; \
+ case 0x7986: \
+ _fw = MT7986_##name##__VA_ARGS__; \
+ break; \
+ default: \
+ _fw = MT7916_##name; \
+ break; \
+ } \
+ _fw; \
+})
+
+#define fw_name_var(_dev, name) (mt7915_check_adie(dev, false) ? \
+ fw_name(_dev, name) : \
+ fw_name(_dev, name, _MT7975))
+
+#define MCU_PATCH_ADDRESS 0x200000
+
+#define HE_PHY(p, c) u8_get_bits(c, IEEE80211_HE_PHY_##p)
+#define HE_MAC(m, c) u8_get_bits(c, IEEE80211_HE_MAC_##m)
+
+static bool sr_scene_detect = true;
+module_param(sr_scene_detect, bool, 0644);
+MODULE_PARM_DESC(sr_scene_detect, "Enable firmware scene detection algorithm");
+
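+/* Derive the peer's highest supported spatial stream index (0-based) from
+ * the 2-bit per-stream entries of a VHT/HE rx MCS map.
+ */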
+static u8
+mt7915_mcu_get_sta_nss(u16 mcs_map)
+{
+ u8 nss;
+
+ for (nss = 8; nss > 0; nss--) {
+ u8 nss_mcs = (mcs_map >> (2 * (nss - 1))) & 3;
+
+ if (nss_mcs != IEEE80211_VHT_MCS_NOT_SUPPORTED)
+ break;
+ }
+
+ return nss - 1;
+}
+
+static void
+mt7915_mcu_set_sta_he_mcs(struct ieee80211_sta *sta, __le16 *he_mcs,
+ u16 mcs_map)
+{
+ struct mt7915_sta *msta = (struct mt7915_sta *)sta->drv_priv;
+ struct mt7915_dev *dev = msta->vif->phy->dev;
+ enum nl80211_band band = msta->vif->phy->mt76->chandef.chan->band;
+ const u16 *mask = msta->vif->bitrate_mask.control[band].he_mcs;
+ int nss, max_nss = sta->deflink.rx_nss > 3 ? 4 : sta->deflink.rx_nss;
+
+ for (nss = 0; nss < max_nss; nss++) {
+ int mcs;
+
+ switch ((mcs_map >> (2 * nss)) & 0x3) {
+ case IEEE80211_HE_MCS_SUPPORT_0_11:
+ mcs = GENMASK(11, 0);
+ break;
+ case IEEE80211_HE_MCS_SUPPORT_0_9:
+ mcs = GENMASK(9, 0);
+ break;
+ case IEEE80211_HE_MCS_SUPPORT_0_7:
+ mcs = GENMASK(7, 0);
+ break;
+ default:
+ mcs = 0;
+ }
+
+ mcs = mcs ? fls(mcs & mask[nss]) - 1 : -1;
+
+ switch (mcs) {
+ case 0 ... 7:
+ mcs = IEEE80211_HE_MCS_SUPPORT_0_7;
+ break;
+ case 8 ... 9:
+ mcs = IEEE80211_HE_MCS_SUPPORT_0_9;
+ break;
+ case 10 ... 11:
+ mcs = IEEE80211_HE_MCS_SUPPORT_0_11;
+ break;
+ default:
+ mcs = IEEE80211_HE_MCS_NOT_SUPPORTED;
+ break;
+ }
+ mcs_map &= ~(0x3 << (nss * 2));
+ mcs_map |= mcs << (nss * 2);
+
+ /* only support 2ss on 160MHz for mt7915 */
+ if (is_mt7915(&dev->mt76) && nss > 1 &&
+ sta->deflink.bandwidth == IEEE80211_STA_RX_BW_160)
+ break;
+ }
+
+ *he_mcs = cpu_to_le16(mcs_map);
+}
+
+static void
+mt7915_mcu_set_sta_vht_mcs(struct ieee80211_sta *sta, __le16 *vht_mcs,
+ const u16 *mask)
+{
+ struct mt7915_sta *msta = (struct mt7915_sta *)sta->drv_priv;
+ struct mt7915_dev *dev = msta->vif->phy->dev;
+ u16 mcs_map = le16_to_cpu(sta->deflink.vht_cap.vht_mcs.rx_mcs_map);
+ int nss, max_nss = sta->deflink.rx_nss > 3 ? 4 : sta->deflink.rx_nss;
+ u16 mcs;
+
+ for (nss = 0; nss < max_nss; nss++, mcs_map >>= 2) {
+ switch (mcs_map & 0x3) {
+ case IEEE80211_VHT_MCS_SUPPORT_0_9:
+ mcs = GENMASK(9, 0);
+ break;
+ case IEEE80211_VHT_MCS_SUPPORT_0_8:
+ mcs = GENMASK(8, 0);
+ break;
+ case IEEE80211_VHT_MCS_SUPPORT_0_7:
+ mcs = GENMASK(7, 0);
+ break;
+ default:
+ mcs = 0;
+ }
+
+ vht_mcs[nss] = cpu_to_le16(mcs & mask[nss]);
+
+ /* only support 2ss on 160MHz for mt7915 */
+ if (is_mt7915(&dev->mt76) && nss > 1 &&
+ sta->deflink.bandwidth == IEEE80211_STA_RX_BW_160)
+ break;
+ }
+}
+
+static void
+mt7915_mcu_set_sta_ht_mcs(struct ieee80211_sta *sta, u8 *ht_mcs,
+ const u8 *mask)
+{
+ int nss, max_nss = sta->deflink.rx_nss > 3 ? 4 : sta->deflink.rx_nss;
+
+ for (nss = 0; nss < max_nss; nss++)
+ ht_mcs[nss] = sta->deflink.ht_cap.mcs.rx_mask[nss] & mask[nss];
+}
+
+static int
+mt7915_mcu_parse_response(struct mt76_dev *mdev, int cmd,
+ struct sk_buff *skb, int seq)
+{
+ struct mt76_connac2_mcu_rxd *rxd;
+ int ret = 0;
+
+ if (!skb) {
+ dev_err(mdev->dev, "Message %08x (seq %d) timeout\n",
+ cmd, seq);
+ return -ETIMEDOUT;
+ }
+
+ rxd = (struct mt76_connac2_mcu_rxd *)skb->data;
+ if (seq != rxd->seq &&
+ !(rxd->eid == MCU_CMD_EXT_CID &&
+ rxd->ext_eid == MCU_EXT_EVENT_WA_TX_STAT))
+ return -EAGAIN;
+
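+ /* PATCH_SEM_CONTROL and THERMAL_CTRL replies carry their status in
+ * the payload; strip the rxd header and hand the value back.
+ */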
+ if (cmd == MCU_CMD(PATCH_SEM_CONTROL)) {
+ skb_pull(skb, sizeof(*rxd) - 4);
+ ret = *skb->data;
+ } else if (cmd == MCU_EXT_CMD(THERMAL_CTRL)) {
+ skb_pull(skb, sizeof(*rxd) + 4);
+ ret = le32_to_cpu(*(__le32 *)skb->data);
+ } else {
+ skb_pull(skb, sizeof(struct mt76_connac2_mcu_rxd));
+ }
+
+ return ret;
+}
+
+static int
+mt7915_mcu_send_message(struct mt76_dev *mdev, struct sk_buff *skb,
+ int cmd, int *wait_seq)
+{
+ struct mt7915_dev *dev = container_of(mdev, struct mt7915_dev, mt76);
+ enum mt76_mcuq_id qid;
+ int ret;
+
+ ret = mt76_connac2_mcu_fill_message(mdev, skb, cmd, wait_seq);
+ if (ret)
+ return ret;
+
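+ /* route firmware scatter chunks to the download queue; everything
+ * else goes to the WA queue once the MCU is running, or to the WM
+ * queue before that
+ */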
+ if (cmd == MCU_CMD(FW_SCATTER))
+ qid = MT_MCUQ_FWDL;
+ else if (test_bit(MT76_STATE_MCU_RUNNING, &dev->mphy.state))
+ qid = MT_MCUQ_WA;
+ else
+ qid = MT_MCUQ_WM;
+
+ return mt76_tx_queue_skb_raw(dev, mdev->q_mcu[qid], skb, 0);
+}
+
+int mt7915_mcu_wa_cmd(struct mt7915_dev *dev, int cmd, u32 a1, u32 a2, u32 a3)
+{
+ struct {
+ __le32 args[3];
+ } req = {
+ .args = {
+ cpu_to_le32(a1),
+ cpu_to_le32(a2),
+ cpu_to_le32(a3),
+ },
+ };
+
+ return mt76_mcu_send_msg(&dev->mt76, cmd, &req, sizeof(req), false);
+}
+
+static void
+mt7915_mcu_csa_finish(void *priv, u8 *mac, struct ieee80211_vif *vif)
+{
+ if (vif->bss_conf.csa_active)
+ ieee80211_csa_finish(vif);
+}
+
+static void
+mt7915_mcu_rx_csa_notify(struct mt7915_dev *dev, struct sk_buff *skb)
+{
+ struct mt76_phy *mphy = &dev->mt76.phy;
+ struct mt7915_mcu_csa_notify *c;
+
+ c = (struct mt7915_mcu_csa_notify *)skb->data;
+
+ if (c->band_idx > MT_BAND1)
+ return;
+
+ if ((c->band_idx && !dev->phy.mt76->band_idx) &&
+ dev->mt76.phys[MT_BAND1])
+ mphy = dev->mt76.phys[MT_BAND1];
+
+ ieee80211_iterate_active_interfaces_atomic(mphy->hw,
+ IEEE80211_IFACE_ITER_RESUME_ALL,
+ mt7915_mcu_csa_finish, mphy->hw);
+}
+
+static void
+mt7915_mcu_rx_thermal_notify(struct mt7915_dev *dev, struct sk_buff *skb)
+{
+ struct mt76_phy *mphy = &dev->mt76.phy;
+ struct mt7915_mcu_thermal_notify *t;
+ struct mt7915_phy *phy;
+
+ t = (struct mt7915_mcu_thermal_notify *)skb->data;
+ if (t->ctrl.ctrl_id != THERMAL_PROTECT_ENABLE)
+ return;
+
+ if (t->ctrl.band_idx > MT_BAND1)
+ return;
+
+ if ((t->ctrl.band_idx && !dev->phy.mt76->band_idx) &&
+ dev->mt76.phys[MT_BAND1])
+ mphy = dev->mt76.phys[MT_BAND1];
+
+ phy = (struct mt7915_phy *)mphy->priv;
+ phy->throttle_state = t->ctrl.duty.duty_cycle;
+}
+
+static void
+mt7915_mcu_rx_radar_detected(struct mt7915_dev *dev, struct sk_buff *skb)
+{
+ struct mt76_phy *mphy = &dev->mt76.phy;
+ struct mt7915_mcu_rdd_report *r;
+
+ r = (struct mt7915_mcu_rdd_report *)skb->data;
+
+ if (r->band_idx > MT_RX_SEL2)
+ return;
+
+ if ((r->band_idx && !dev->phy.mt76->band_idx) &&
+ dev->mt76.phys[MT_BAND1])
+ mphy = dev->mt76.phys[MT_BAND1];
+
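+ /* MT_RX_SEL2 reports come from the dedicated background radar
+ * (rdd2) chain; anything else is a detection on the operating
+ * channel.
+ */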
+ if (r->band_idx == MT_RX_SEL2)
+ cfg80211_background_radar_event(mphy->hw->wiphy,
+ &dev->rdd2_chandef,
+ GFP_ATOMIC);
+ else
+ ieee80211_radar_detected(mphy->hw);
+ dev->hw_pattern++;
+}
+
+static void
+mt7915_mcu_rx_log_message(struct mt7915_dev *dev, struct sk_buff *skb)
+{
+ struct mt76_connac2_mcu_rxd *rxd;
+ int len = skb->len - sizeof(*rxd);
+ const char *data, *type;
+
+ rxd = (struct mt76_connac2_mcu_rxd *)skb->data;
+ data = (char *)&rxd[1];
+
+ switch (rxd->s2d_index) {
+ case 0:
+ if (mt7915_debugfs_rx_log(dev, data, len))
+ return;
+
+ type = "WM";
+ break;
+ case 2:
+ type = "WA";
+ break;
+ default:
+ type = "unknown";
+ break;
+ }
+
+ wiphy_info(mt76_hw(dev)->wiphy, "%s: %.*s", type, len, data);
+}
+
+static void
+mt7915_mcu_cca_finish(void *priv, u8 *mac, struct ieee80211_vif *vif)
+{
+ if (!vif->bss_conf.color_change_active)
+ return;
+
+ ieee80211_color_change_finish(vif);
+}
+
+static void
+mt7915_mcu_rx_bcc_notify(struct mt7915_dev *dev, struct sk_buff *skb)
+{
+ struct mt76_phy *mphy = &dev->mt76.phy;
+ struct mt7915_mcu_bcc_notify *b;
+
+ b = (struct mt7915_mcu_bcc_notify *)skb->data;
+
+ if (b->band_idx > MT_BAND1)
+ return;
+
+ if ((b->band_idx && !dev->phy.mt76->band_idx) &&
+ dev->mt76.phys[MT_BAND1])
+ mphy = dev->mt76.phys[MT_BAND1];
+
+ ieee80211_iterate_active_interfaces_atomic(mphy->hw,
+ IEEE80211_IFACE_ITER_RESUME_ALL,
+ mt7915_mcu_cca_finish, mphy->hw);
+}
+
+static void
+mt7915_mcu_rx_ext_event(struct mt7915_dev *dev, struct sk_buff *skb)
+{
+ struct mt76_connac2_mcu_rxd *rxd;
+
+ rxd = (struct mt76_connac2_mcu_rxd *)skb->data;
+ switch (rxd->ext_eid) {
+ case MCU_EXT_EVENT_THERMAL_PROTECT:
+ mt7915_mcu_rx_thermal_notify(dev, skb);
+ break;
+ case MCU_EXT_EVENT_RDD_REPORT:
+ mt7915_mcu_rx_radar_detected(dev, skb);
+ break;
+ case MCU_EXT_EVENT_CSA_NOTIFY:
+ mt7915_mcu_rx_csa_notify(dev, skb);
+ break;
+ case MCU_EXT_EVENT_FW_LOG_2_HOST:
+ mt7915_mcu_rx_log_message(dev, skb);
+ break;
+ case MCU_EXT_EVENT_BCC_NOTIFY:
+ mt7915_mcu_rx_bcc_notify(dev, skb);
+ break;
+ default:
+ break;
+ }
+}
+
+static void
+mt7915_mcu_rx_unsolicited_event(struct mt7915_dev *dev, struct sk_buff *skb)
+{
+ struct mt76_connac2_mcu_rxd *rxd;
+
+ rxd = (struct mt76_connac2_mcu_rxd *)skb->data;
+ switch (rxd->eid) {
+ case MCU_EVENT_EXT:
+ mt7915_mcu_rx_ext_event(dev, skb);
+ break;
+ default:
+ break;
+ }
+ dev_kfree_skb(skb);
+}
+
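+/* Dispatch MCU RX events: unsolicited notifications (thermal, firmware log,
+ * assert dump, PS sync, BCC, or anything without a sequence number) are
+ * handled locally, while everything else is treated as a command response;
+ * WA tx-stat events always take the response path.
+ */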
+void mt7915_mcu_rx_event(struct mt7915_dev *dev, struct sk_buff *skb)
+{
+ struct mt76_connac2_mcu_rxd *rxd;
+
+ rxd = (struct mt76_connac2_mcu_rxd *)skb->data;
+ if ((rxd->ext_eid == MCU_EXT_EVENT_THERMAL_PROTECT ||
+ rxd->ext_eid == MCU_EXT_EVENT_FW_LOG_2_HOST ||
+ rxd->ext_eid == MCU_EXT_EVENT_ASSERT_DUMP ||
+ rxd->ext_eid == MCU_EXT_EVENT_PS_SYNC ||
+ rxd->ext_eid == MCU_EXT_EVENT_BCC_NOTIFY ||
+ !rxd->seq) &&
+ !(rxd->eid == MCU_CMD_EXT_CID &&
+ rxd->ext_eid == MCU_EXT_EVENT_WA_TX_STAT))
+ mt7915_mcu_rx_unsolicited_event(dev, skb);
+ else
+ mt76_mcu_rx_event(&dev->mt76, skb);
+}
+
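+/* Append a sub-TLV to the request skb and account for it in the parent TLV
+ * by incrementing its sub-TLV counter and total length.
+ */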
+static struct tlv *
+mt7915_mcu_add_nested_subtlv(struct sk_buff *skb, int sub_tag, int sub_len,
+ __le16 *sub_ntlv, __le16 *len)
+{
+ struct tlv *ptlv, tlv = {
+ .tag = cpu_to_le16(sub_tag),
+ .len = cpu_to_le16(sub_len),
+ };
+
+ ptlv = skb_put(skb, sub_len);
+ memcpy(ptlv, &tlv, sizeof(tlv));
+
+ le16_add_cpu(sub_ntlv, 1);
+ le16_add_cpu(len, sub_len);
+
+ return ptlv;
+}
+
+/** bss info **/
+struct mt7915_he_obss_narrow_bw_ru_data {
+ bool tolerated;
+};
+
+static void mt7915_check_he_obss_narrow_bw_ru_iter(struct wiphy *wiphy,
+ struct cfg80211_bss *bss,
+ void *_data)
+{
+ struct mt7915_he_obss_narrow_bw_ru_data *data = _data;
+ const struct element *elem;
+
+ rcu_read_lock();
+ elem = ieee80211_bss_get_elem(bss, WLAN_EID_EXT_CAPABILITY);
+
+ if (!elem || elem->datalen <= 10 ||
+ !(elem->data[10] &
+ WLAN_EXT_CAPA10_OBSS_NARROW_BW_RU_TOLERANCE_SUPPORT))
+ data->tolerated = false;
+
+ rcu_read_unlock();
+}
+
+static bool mt7915_check_he_obss_narrow_bw_ru(struct ieee80211_hw *hw,
+ struct ieee80211_vif *vif)
+{
+ struct mt7915_he_obss_narrow_bw_ru_data iter_data = {
+ .tolerated = true,
+ };
+
+ if (!(vif->bss_conf.chandef.chan->flags & IEEE80211_CHAN_RADAR))
+ return false;
+
+ cfg80211_bss_iter(hw->wiphy, &vif->bss_conf.chandef,
+ mt7915_check_he_obss_narrow_bw_ru_iter,
+ &iter_data);
+
+ /*
+ * Return true if at least one AP on the radar channel cannot
+ * tolerate 26-tone RU UL OFDMA transmissions using HE TB PPDU.
+ */
+ return !iter_data.tolerated;
+}
+
+static void
+mt7915_mcu_bss_rfch_tlv(struct sk_buff *skb, struct ieee80211_vif *vif,
+ struct mt7915_phy *phy)
+{
+ struct cfg80211_chan_def *chandef = &phy->mt76->chandef;
+ struct bss_info_rf_ch *ch;
+ struct tlv *tlv;
+ int freq1 = chandef->center_freq1;
+
+ tlv = mt76_connac_mcu_add_tlv(skb, BSS_INFO_RF_CH, sizeof(*ch));
+
+ ch = (struct bss_info_rf_ch *)tlv;
+ ch->pri_ch = chandef->chan->hw_value;
+ ch->center_ch0 = ieee80211_frequency_to_channel(freq1);
+ ch->bw = mt76_connac_chan_bw(chandef);
+
+ if (chandef->width == NL80211_CHAN_WIDTH_80P80) {
+ int freq2 = chandef->center_freq2;
+
+ ch->center_ch1 = ieee80211_frequency_to_channel(freq2);
+ }
+
+ if (vif->bss_conf.he_support && vif->type == NL80211_IFTYPE_STATION) {
+ struct mt76_phy *mphy = phy->mt76;
+
+ ch->he_ru26_block =
+ mt7915_check_he_obss_narrow_bw_ru(mphy->hw, vif);
+ ch->he_all_disable = false;
+ } else {
+ ch->he_all_disable = true;
+ }
+}
+
+static void
+mt7915_mcu_bss_ra_tlv(struct sk_buff *skb, struct ieee80211_vif *vif,
+ struct mt7915_phy *phy)
+{
+ int max_nss = hweight8(phy->mt76->antenna_mask);
+ struct bss_info_ra *ra;
+ struct tlv *tlv;
+
+ tlv = mt76_connac_mcu_add_tlv(skb, BSS_INFO_RA, sizeof(*ra));
+
+ ra = (struct bss_info_ra *)tlv;
+ ra->op_mode = vif->type == NL80211_IFTYPE_AP;
+ ra->adhoc_en = vif->type == NL80211_IFTYPE_ADHOC;
+ ra->short_preamble = true;
+ ra->tx_streams = max_nss;
+ ra->rx_streams = max_nss;
+ ra->algo = 4;
+ ra->train_up_rule = 2;
+ ra->train_up_high_thres = 110;
+ ra->train_up_rule_rssi = -70;
+ ra->low_traffic_thres = 2;
+ ra->phy_cap = cpu_to_le32(0xfdf);
+ ra->interval = cpu_to_le32(500);
+ ra->fast_interval = cpu_to_le32(100);
+}
+
+static void
+mt7915_mcu_bss_he_tlv(struct sk_buff *skb, struct ieee80211_vif *vif,
+ struct mt7915_phy *phy)
+{
+#define DEFAULT_HE_PE_DURATION 4
+#define DEFAULT_HE_DURATION_RTS_THRES 1023
+ const struct ieee80211_sta_he_cap *cap;
+ struct bss_info_he *he;
+ struct tlv *tlv;
+
+ cap = mt76_connac_get_he_phy_cap(phy->mt76, vif);
+
+ tlv = mt76_connac_mcu_add_tlv(skb, BSS_INFO_HE_BASIC, sizeof(*he));
+
+ he = (struct bss_info_he *)tlv;
+ he->he_pe_duration = vif->bss_conf.htc_trig_based_pkt_ext;
+ if (!he->he_pe_duration)
+ he->he_pe_duration = DEFAULT_HE_PE_DURATION;
+
+ he->he_rts_thres = cpu_to_le16(vif->bss_conf.frame_time_rts_th);
+ if (!he->he_rts_thres)
+ he->he_rts_thres = cpu_to_le16(DEFAULT_HE_DURATION_RTS_THRES);
+
+ he->max_nss_mcs[CMD_HE_MCS_BW80] = cap->he_mcs_nss_supp.tx_mcs_80;
+ he->max_nss_mcs[CMD_HE_MCS_BW160] = cap->he_mcs_nss_supp.tx_mcs_160;
+ he->max_nss_mcs[CMD_HE_MCS_BW8080] = cap->he_mcs_nss_supp.tx_mcs_80p80;
+}
+
+static void
+mt7915_mcu_bss_hw_amsdu_tlv(struct sk_buff *skb)
+{
+#define TXD_CMP_MAP1 GENMASK(15, 0)
+#define TXD_CMP_MAP2 (GENMASK(31, 0) & ~BIT(23))
+ struct bss_info_hw_amsdu *amsdu;
+ struct tlv *tlv;
+
+ tlv = mt76_connac_mcu_add_tlv(skb, BSS_INFO_HW_AMSDU, sizeof(*amsdu));
+
+ amsdu = (struct bss_info_hw_amsdu *)tlv;
+ amsdu->cmp_bitmap_0 = cpu_to_le32(TXD_CMP_MAP1);
+ amsdu->cmp_bitmap_1 = cpu_to_le32(TXD_CMP_MAP2);
+ amsdu->trig_thres = cpu_to_le16(2);
+ amsdu->enable = true;
+}
+
+static void
+mt7915_mcu_bss_bmc_tlv(struct sk_buff *skb, struct mt7915_phy *phy)
+{
+ struct bss_info_bmc_rate *bmc;
+ struct cfg80211_chan_def *chandef = &phy->mt76->chandef;
+ enum nl80211_band band = chandef->chan->band;
+ struct tlv *tlv;
+
+ tlv = mt76_connac_mcu_add_tlv(skb, BSS_INFO_BMC_RATE, sizeof(*bmc));
+
+ bmc = (struct bss_info_bmc_rate *)tlv;
+ if (band == NL80211_BAND_2GHZ) {
+ bmc->short_preamble = true;
+ } else {
+ bmc->bc_trans = cpu_to_le16(0x2000);
+ bmc->mc_trans = cpu_to_le16(0x2080);
+ }
+}
+
+static int
+mt7915_mcu_muar_config(struct mt7915_phy *phy, struct ieee80211_vif *vif,
+ bool bssid, bool enable)
+{
+ struct mt7915_dev *dev = phy->dev;
+ struct mt7915_vif *mvif = (struct mt7915_vif *)vif->drv_priv;
+ u32 idx = mvif->mt76.omac_idx - REPEATER_BSSID_START;
+ u32 mask = phy->omac_mask >> 32 & ~BIT(idx);
+ const u8 *addr = vif->addr;
+ struct {
+ u8 mode;
+ u8 force_clear;
+ u8 clear_bitmap[8];
+ u8 entry_count;
+ u8 write;
+ u8 band;
+
+ u8 index;
+ u8 bssid;
+ u8 addr[ETH_ALEN];
+ } __packed req = {
+ .mode = !!mask || enable,
+ .entry_count = 1,
+ .write = 1,
+ .band = phy->mt76->band_idx,
+ .index = idx * 2 + bssid,
+ };
+
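+ /* each repeater omac index occupies two MUAR entries: the own MAC
+ * address at the even slot and the BSSID at the following odd slot
+ */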
+ if (bssid)
+ addr = vif->bss_conf.bssid;
+
+ if (enable)
+ ether_addr_copy(req.addr, addr);
+
+ return mt76_mcu_send_msg(&dev->mt76, MCU_EXT_CMD(MUAR_UPDATE), &req,
+ sizeof(req), true);
+}
+
+int mt7915_mcu_add_bss_info(struct mt7915_phy *phy,
+ struct ieee80211_vif *vif, int enable)
+{
+ struct mt7915_vif *mvif = (struct mt7915_vif *)vif->drv_priv;
+ struct mt7915_dev *dev = phy->dev;
+ struct sk_buff *skb;
+
+ if (mvif->mt76.omac_idx >= REPEATER_BSSID_START) {
+ mt7915_mcu_muar_config(phy, vif, false, enable);
+ mt7915_mcu_muar_config(phy, vif, true, enable);
+ }
+
+ skb = __mt76_connac_mcu_alloc_sta_req(&dev->mt76, &mvif->mt76, NULL,
+ MT7915_BSS_UPDATE_MAX_SIZE);
+ if (IS_ERR(skb))
+ return PTR_ERR(skb);
+
+ /* bss_omac must be first */
+ if (enable)
+ mt76_connac_mcu_bss_omac_tlv(skb, vif);
+
+ mt76_connac_mcu_bss_basic_tlv(skb, vif, NULL, phy->mt76,
+ mvif->sta.wcid.idx, enable);
+
+ if (vif->type == NL80211_IFTYPE_MONITOR)
+ goto out;
+
+ if (enable) {
+ mt7915_mcu_bss_rfch_tlv(skb, vif, phy);
+ mt7915_mcu_bss_bmc_tlv(skb, phy);
+ mt7915_mcu_bss_ra_tlv(skb, vif, phy);
+ mt7915_mcu_bss_hw_amsdu_tlv(skb);
+
+ if (vif->bss_conf.he_support)
+ mt7915_mcu_bss_he_tlv(skb, vif, phy);
+
+ if (mvif->mt76.omac_idx >= EXT_BSSID_START &&
+ mvif->mt76.omac_idx < REPEATER_BSSID_START)
+ mt76_connac_mcu_bss_ext_tlv(skb, &mvif->mt76);
+ }
+out:
+ return mt76_mcu_skb_send_msg(&dev->mt76, skb,
+ MCU_EXT_CMD(BSS_INFO_UPDATE), true);
+}
+
+/** starec & wtbl **/
+int mt7915_mcu_add_tx_ba(struct mt7915_dev *dev,
+ struct ieee80211_ampdu_params *params,
+ bool enable)
+{
+ struct mt7915_sta *msta = (struct mt7915_sta *)params->sta->drv_priv;
+ struct mt7915_vif *mvif = msta->vif;
+
+ if (enable && !params->amsdu)
+ msta->wcid.amsdu = false;
+
+ return mt76_connac_mcu_sta_ba(&dev->mt76, &mvif->mt76, params,
+ MCU_EXT_CMD(STA_REC_UPDATE),
+ enable, true);
+}
+
+int mt7915_mcu_add_rx_ba(struct mt7915_dev *dev,
+ struct ieee80211_ampdu_params *params,
+ bool enable)
+{
+ struct mt7915_sta *msta = (struct mt7915_sta *)params->sta->drv_priv;
+ struct mt7915_vif *mvif = msta->vif;
+
+ return mt76_connac_mcu_sta_ba(&dev->mt76, &mvif->mt76, params,
+ MCU_EXT_CMD(STA_REC_UPDATE),
+ enable, false);
+}
+
+static void
+mt7915_mcu_sta_he_tlv(struct sk_buff *skb, struct ieee80211_sta *sta,
+ struct ieee80211_vif *vif)
+{
+ struct mt7915_vif *mvif = (struct mt7915_vif *)vif->drv_priv;
+ struct ieee80211_he_cap_elem *elem = &sta->deflink.he_cap.he_cap_elem;
+ struct ieee80211_he_mcs_nss_supp mcs_map;
+ struct sta_rec_he *he;
+ struct tlv *tlv;
+ u32 cap = 0;
+
+ if (!sta->deflink.he_cap.has_he)
+ return;
+
+ tlv = mt76_connac_mcu_add_tlv(skb, STA_REC_HE, sizeof(*he));
+
+ he = (struct sta_rec_he *)tlv;
+
+ if (elem->mac_cap_info[0] & IEEE80211_HE_MAC_CAP0_HTC_HE)
+ cap |= STA_REC_HE_CAP_HTC;
+
+ if (elem->mac_cap_info[2] & IEEE80211_HE_MAC_CAP2_BSR)
+ cap |= STA_REC_HE_CAP_BSR;
+
+ if (elem->mac_cap_info[3] & IEEE80211_HE_MAC_CAP3_OMI_CONTROL)
+ cap |= STA_REC_HE_CAP_OM;
+
+ if (elem->mac_cap_info[4] & IEEE80211_HE_MAC_CAP4_AMSDU_IN_AMPDU)
+ cap |= STA_REC_HE_CAP_AMSDU_IN_AMPDU;
+
+ if (elem->mac_cap_info[4] & IEEE80211_HE_MAC_CAP4_BQR)
+ cap |= STA_REC_HE_CAP_BQR;
+
+ if (elem->phy_cap_info[0] &
+ (IEEE80211_HE_PHY_CAP0_CHANNEL_WIDTH_SET_RU_MAPPING_IN_2G |
+ IEEE80211_HE_PHY_CAP0_CHANNEL_WIDTH_SET_RU_MAPPING_IN_5G))
+ cap |= STA_REC_HE_CAP_BW20_RU242_SUPPORT;
+
+ if (mvif->cap.he_ldpc &&
+ (elem->phy_cap_info[1] &
+ IEEE80211_HE_PHY_CAP1_LDPC_CODING_IN_PAYLOAD))
+ cap |= STA_REC_HE_CAP_LDPC;
+
+ if (elem->phy_cap_info[1] &
+ IEEE80211_HE_PHY_CAP1_HE_LTF_AND_GI_FOR_HE_PPDUS_0_8US)
+ cap |= STA_REC_HE_CAP_SU_PPDU_1LTF_8US_GI;
+
+ if (elem->phy_cap_info[2] &
+ IEEE80211_HE_PHY_CAP2_NDP_4x_LTF_AND_3_2US)
+ cap |= STA_REC_HE_CAP_NDP_4LTF_3DOT2MS_GI;
+
+ if (elem->phy_cap_info[2] &
+ IEEE80211_HE_PHY_CAP2_STBC_TX_UNDER_80MHZ)
+ cap |= STA_REC_HE_CAP_LE_EQ_80M_TX_STBC;
+
+ if (elem->phy_cap_info[2] &
+ IEEE80211_HE_PHY_CAP2_STBC_RX_UNDER_80MHZ)
+ cap |= STA_REC_HE_CAP_LE_EQ_80M_RX_STBC;
+
+ if (elem->phy_cap_info[6] &
+ IEEE80211_HE_PHY_CAP6_TRIG_CQI_FB)
+ cap |= STA_REC_HE_CAP_TRIG_CQI_FK;
+
+ if (elem->phy_cap_info[6] &
+ IEEE80211_HE_PHY_CAP6_PARTIAL_BW_EXT_RANGE)
+ cap |= STA_REC_HE_CAP_PARTIAL_BW_EXT_RANGE;
+
+ if (elem->phy_cap_info[7] &
+ IEEE80211_HE_PHY_CAP7_HE_SU_MU_PPDU_4XLTF_AND_08_US_GI)
+ cap |= STA_REC_HE_CAP_SU_MU_PPDU_4LTF_8US_GI;
+
+ if (elem->phy_cap_info[7] &
+ IEEE80211_HE_PHY_CAP7_STBC_TX_ABOVE_80MHZ)
+ cap |= STA_REC_HE_CAP_GT_80M_TX_STBC;
+
+ if (elem->phy_cap_info[7] &
+ IEEE80211_HE_PHY_CAP7_STBC_RX_ABOVE_80MHZ)
+ cap |= STA_REC_HE_CAP_GT_80M_RX_STBC;
+
+ if (elem->phy_cap_info[8] &
+ IEEE80211_HE_PHY_CAP8_HE_ER_SU_PPDU_4XLTF_AND_08_US_GI)
+ cap |= STA_REC_HE_CAP_ER_SU_PPDU_4LTF_8US_GI;
+
+ if (elem->phy_cap_info[8] &
+ IEEE80211_HE_PHY_CAP8_HE_ER_SU_1XLTF_AND_08_US_GI)
+ cap |= STA_REC_HE_CAP_ER_SU_PPDU_1LTF_8US_GI;
+
+ if (elem->phy_cap_info[9] &
+ IEEE80211_HE_PHY_CAP9_TX_1024_QAM_LESS_THAN_242_TONE_RU)
+ cap |= STA_REC_HE_CAP_TX_1024QAM_UNDER_RU242;
+
+ if (elem->phy_cap_info[9] &
+ IEEE80211_HE_PHY_CAP9_RX_1024_QAM_LESS_THAN_242_TONE_RU)
+ cap |= STA_REC_HE_CAP_RX_1024QAM_UNDER_RU242;
+
+ he->he_cap = cpu_to_le32(cap);
+
+ mcs_map = sta->deflink.he_cap.he_mcs_nss_supp;
+ switch (sta->deflink.bandwidth) {
+ case IEEE80211_STA_RX_BW_160:
+ if (elem->phy_cap_info[0] &
+ IEEE80211_HE_PHY_CAP0_CHANNEL_WIDTH_SET_80PLUS80_MHZ_IN_5G)
+ mt7915_mcu_set_sta_he_mcs(sta,
+ &he->max_nss_mcs[CMD_HE_MCS_BW8080],
+ le16_to_cpu(mcs_map.rx_mcs_80p80));
+
+ mt7915_mcu_set_sta_he_mcs(sta,
+ &he->max_nss_mcs[CMD_HE_MCS_BW160],
+ le16_to_cpu(mcs_map.rx_mcs_160));
+ fallthrough;
+ default:
+ mt7915_mcu_set_sta_he_mcs(sta,
+ &he->max_nss_mcs[CMD_HE_MCS_BW80],
+ le16_to_cpu(mcs_map.rx_mcs_80));
+ break;
+ }
+
+ he->t_frame_dur =
+ HE_MAC(CAP1_TF_MAC_PAD_DUR_MASK, elem->mac_cap_info[1]);
+ he->max_ampdu_exp =
+ HE_MAC(CAP3_MAX_AMPDU_LEN_EXP_MASK, elem->mac_cap_info[3]);
+
+ he->bw_set =
+ HE_PHY(CAP0_CHANNEL_WIDTH_SET_MASK, elem->phy_cap_info[0]);
+ he->device_class =
+ HE_PHY(CAP1_DEVICE_CLASS_A, elem->phy_cap_info[1]);
+ he->punc_pream_rx =
+ HE_PHY(CAP1_PREAMBLE_PUNC_RX_MASK, elem->phy_cap_info[1]);
+
+ he->dcm_tx_mode =
+ HE_PHY(CAP3_DCM_MAX_CONST_TX_MASK, elem->phy_cap_info[3]);
+ he->dcm_tx_max_nss =
+ HE_PHY(CAP3_DCM_MAX_TX_NSS_2, elem->phy_cap_info[3]);
+ he->dcm_rx_mode =
+ HE_PHY(CAP3_DCM_MAX_CONST_RX_MASK, elem->phy_cap_info[3]);
+ he->dcm_rx_max_nss =
+ HE_PHY(CAP3_DCM_MAX_RX_NSS_2, elem->phy_cap_info[3]);
+ he->dcm_rx_max_nss =
+ HE_PHY(CAP8_DCM_MAX_RU_MASK, elem->phy_cap_info[8]);
+
+ he->pkt_ext = 2;
+}
+
+static void
+mt7915_mcu_sta_muru_tlv(struct mt7915_dev *dev, struct sk_buff *skb,
+ struct ieee80211_sta *sta, struct ieee80211_vif *vif)
+{
+ struct mt7915_vif *mvif = (struct mt7915_vif *)vif->drv_priv;
+ struct ieee80211_he_cap_elem *elem = &sta->deflink.he_cap.he_cap_elem;
+ struct sta_rec_muru *muru;
+ struct tlv *tlv;
+
+ if (vif->type != NL80211_IFTYPE_STATION &&
+ vif->type != NL80211_IFTYPE_AP)
+ return;
+
+ tlv = mt76_connac_mcu_add_tlv(skb, STA_REC_MURU, sizeof(*muru));
+
+ muru = (struct sta_rec_muru *)tlv;
+
+ muru->cfg.mimo_dl_en = mvif->cap.he_mu_ebfer ||
+ mvif->cap.vht_mu_ebfer ||
+ mvif->cap.vht_mu_ebfee;
+ if (!is_mt7915(&dev->mt76))
+ muru->cfg.mimo_ul_en = true;
+ muru->cfg.ofdma_dl_en = true;
+
+ if (sta->deflink.vht_cap.vht_supported)
+ muru->mimo_dl.vht_mu_bfee =
+ !!(sta->deflink.vht_cap.cap & IEEE80211_VHT_CAP_MU_BEAMFORMEE_CAPABLE);
+
+ if (!sta->deflink.he_cap.has_he)
+ return;
+
+ muru->mimo_dl.partial_bw_dl_mimo =
+ HE_PHY(CAP6_PARTIAL_BANDWIDTH_DL_MUMIMO, elem->phy_cap_info[6]);
+
+ muru->mimo_ul.full_ul_mimo =
+ HE_PHY(CAP2_UL_MU_FULL_MU_MIMO, elem->phy_cap_info[2]);
+ muru->mimo_ul.partial_ul_mimo =
+ HE_PHY(CAP2_UL_MU_PARTIAL_MU_MIMO, elem->phy_cap_info[2]);
+
+ muru->ofdma_dl.punc_pream_rx =
+ HE_PHY(CAP1_PREAMBLE_PUNC_RX_MASK, elem->phy_cap_info[1]);
+ muru->ofdma_dl.he_20m_in_40m_2g =
+ HE_PHY(CAP8_20MHZ_IN_40MHZ_HE_PPDU_IN_2G, elem->phy_cap_info[8]);
+ muru->ofdma_dl.he_20m_in_160m =
+ HE_PHY(CAP8_20MHZ_IN_160MHZ_HE_PPDU, elem->phy_cap_info[8]);
+ muru->ofdma_dl.he_80m_in_160m =
+ HE_PHY(CAP8_80MHZ_IN_160MHZ_HE_PPDU, elem->phy_cap_info[8]);
+
+ muru->ofdma_ul.t_frame_dur =
+ HE_MAC(CAP1_TF_MAC_PAD_DUR_MASK, elem->mac_cap_info[1]);
+ muru->ofdma_ul.mu_cascading =
+ HE_MAC(CAP2_MU_CASCADING, elem->mac_cap_info[2]);
+ muru->ofdma_ul.uo_ra =
+ HE_MAC(CAP3_OFDMA_RA, elem->mac_cap_info[3]);
+}
+
+static void
+mt7915_mcu_sta_ht_tlv(struct sk_buff *skb, struct ieee80211_sta *sta)
+{
+ struct sta_rec_ht *ht;
+ struct tlv *tlv;
+
+ if (!sta->deflink.ht_cap.ht_supported)
+ return;
+
+ tlv = mt76_connac_mcu_add_tlv(skb, STA_REC_HT, sizeof(*ht));
+
+ ht = (struct sta_rec_ht *)tlv;
+ ht->ht_cap = cpu_to_le16(sta->deflink.ht_cap.cap);
+}
+
+static void
+mt7915_mcu_sta_vht_tlv(struct sk_buff *skb, struct ieee80211_sta *sta)
+{
+ struct sta_rec_vht *vht;
+ struct tlv *tlv;
+
+ if (!sta->deflink.vht_cap.vht_supported)
+ return;
+
+ tlv = mt76_connac_mcu_add_tlv(skb, STA_REC_VHT, sizeof(*vht));
+
+ vht = (struct sta_rec_vht *)tlv;
+ vht->vht_cap = cpu_to_le32(sta->deflink.vht_cap.cap);
+ vht->vht_rx_mcs_map = sta->deflink.vht_cap.vht_mcs.rx_mcs_map;
+ vht->vht_tx_mcs_map = sta->deflink.vht_cap.vht_mcs.tx_mcs_map;
+}
+
+static void
+mt7915_mcu_sta_amsdu_tlv(struct mt7915_dev *dev, struct sk_buff *skb,
+ struct ieee80211_vif *vif, struct ieee80211_sta *sta)
+{
+ struct mt7915_sta *msta = (struct mt7915_sta *)sta->drv_priv;
+ struct sta_rec_amsdu *amsdu;
+ struct tlv *tlv;
+
+ if (vif->type != NL80211_IFTYPE_STATION &&
+ vif->type != NL80211_IFTYPE_AP)
+ return;
+
+ if (!sta->deflink.agg.max_amsdu_len)
+ return;
+
+ tlv = mt76_connac_mcu_add_tlv(skb, STA_REC_HW_AMSDU, sizeof(*amsdu));
+ amsdu = (struct sta_rec_amsdu *)tlv;
+ amsdu->max_amsdu_num = 8;
+ amsdu->amsdu_en = true;
+ msta->wcid.amsdu = true;
+
+ switch (sta->deflink.agg.max_amsdu_len) {
+ case IEEE80211_MAX_MPDU_LEN_VHT_11454:
+ if (!is_mt7915(&dev->mt76)) {
+ amsdu->max_mpdu_size =
+ IEEE80211_VHT_CAP_MAX_MPDU_LENGTH_11454;
+ return;
+ }
+ fallthrough;
+ case IEEE80211_MAX_MPDU_LEN_HT_7935:
+ case IEEE80211_MAX_MPDU_LEN_VHT_7991:
+ amsdu->max_mpdu_size = IEEE80211_VHT_CAP_MAX_MPDU_LENGTH_7991;
+ return;
+ default:
+ amsdu->max_mpdu_size = IEEE80211_VHT_CAP_MAX_MPDU_LENGTH_3895;
+ return;
+ }
+}
+
+static int
+mt7915_mcu_sta_wtbl_tlv(struct mt7915_dev *dev, struct sk_buff *skb,
+ struct ieee80211_vif *vif, struct ieee80211_sta *sta)
+{
+ struct mt7915_vif *mvif = (struct mt7915_vif *)vif->drv_priv;
+ struct mt7915_sta *msta;
+ struct wtbl_req_hdr *wtbl_hdr;
+ struct mt76_wcid *wcid;
+ struct tlv *tlv;
+
+ msta = sta ? (struct mt7915_sta *)sta->drv_priv : &mvif->sta;
+ wcid = sta ? &msta->wcid : NULL;
+
+ tlv = mt76_connac_mcu_add_tlv(skb, STA_REC_WTBL, sizeof(struct tlv));
+ wtbl_hdr = mt76_connac_mcu_alloc_wtbl_req(&dev->mt76, &msta->wcid,
+ WTBL_RESET_AND_SET, tlv,
+ &skb);
+ if (IS_ERR(wtbl_hdr))
+ return PTR_ERR(wtbl_hdr);
+
+ mt76_connac_mcu_wtbl_generic_tlv(&dev->mt76, skb, vif, sta, tlv,
+ wtbl_hdr);
+ mt76_connac_mcu_wtbl_hdr_trans_tlv(skb, vif, wcid, tlv, wtbl_hdr);
+ if (sta)
+ mt76_connac_mcu_wtbl_ht_tlv(&dev->mt76, skb, sta, tlv,
+ wtbl_hdr, mvif->cap.ht_ldpc,
+ mvif->cap.vht_ldpc);
+
+ return 0;
+}
+
+static inline bool
+mt7915_is_ebf_supported(struct mt7915_phy *phy, struct ieee80211_vif *vif,
+ struct ieee80211_sta *sta, bool bfee)
+{
+ struct mt7915_vif *mvif = (struct mt7915_vif *)vif->drv_priv;
+ int sts = hweight16(phy->mt76->chainmask);
+
+ if (vif->type != NL80211_IFTYPE_STATION &&
+ vif->type != NL80211_IFTYPE_AP)
+ return false;
+
+ if (!bfee && sts < 2)
+ return false;
+
+ if (sta->deflink.he_cap.has_he) {
+ struct ieee80211_he_cap_elem *pe = &sta->deflink.he_cap.he_cap_elem;
+
+ if (bfee)
+ return mvif->cap.he_su_ebfee &&
+ HE_PHY(CAP3_SU_BEAMFORMER, pe->phy_cap_info[3]);
+ else
+ return mvif->cap.he_su_ebfer &&
+ HE_PHY(CAP4_SU_BEAMFORMEE, pe->phy_cap_info[4]);
+ }
+
+ if (sta->deflink.vht_cap.vht_supported) {
+ u32 cap = sta->deflink.vht_cap.cap;
+
+ if (bfee)
+ return mvif->cap.vht_su_ebfee &&
+ (cap & IEEE80211_VHT_CAP_SU_BEAMFORMER_CAPABLE);
+ else
+ return mvif->cap.vht_su_ebfer &&
+ (cap & IEEE80211_VHT_CAP_SU_BEAMFORMEE_CAPABLE);
+ }
+
+ return false;
+}
+
+static void
+mt7915_mcu_sta_sounding_rate(struct sta_rec_bf *bf)
+{
+ bf->sounding_phy = MT_PHY_TYPE_OFDM;
+ bf->ndp_rate = 0; /* mcs0 */
+ bf->ndpa_rate = MT7915_CFEND_RATE_DEFAULT; /* ofdm 24m */
+ bf->rept_poll_rate = MT7915_CFEND_RATE_DEFAULT; /* ofdm 24m */
+}
+
+static void
+mt7915_mcu_sta_bfer_ht(struct ieee80211_sta *sta, struct mt7915_phy *phy,
+ struct sta_rec_bf *bf)
+{
+ struct ieee80211_mcs_info *mcs = &sta->deflink.ht_cap.mcs;
+ u8 n = 0;
+
+ bf->tx_mode = MT_PHY_TYPE_HT;
+
+ if ((mcs->tx_params & IEEE80211_HT_MCS_TX_RX_DIFF) &&
+ (mcs->tx_params & IEEE80211_HT_MCS_TX_DEFINED))
+ n = FIELD_GET(IEEE80211_HT_MCS_TX_MAX_STREAMS_MASK,
+ mcs->tx_params);
+ else if (mcs->rx_mask[3])
+ n = 3;
+ else if (mcs->rx_mask[2])
+ n = 2;
+ else if (mcs->rx_mask[1])
+ n = 1;
+
+ bf->nrow = hweight8(phy->mt76->chainmask) - 1;
+ bf->ncol = min_t(u8, bf->nrow, n);
+ bf->ibf_ncol = n;
+}
+
+static void
+mt7915_mcu_sta_bfer_vht(struct ieee80211_sta *sta, struct mt7915_phy *phy,
+ struct sta_rec_bf *bf, bool explicit)
+{
+ struct ieee80211_sta_vht_cap *pc = &sta->deflink.vht_cap;
+ struct ieee80211_sta_vht_cap *vc = &phy->mt76->sband_5g.sband.vht_cap;
+ u16 mcs_map = le16_to_cpu(pc->vht_mcs.rx_mcs_map);
+ u8 nss_mcs = mt7915_mcu_get_sta_nss(mcs_map);
+ u8 tx_ant = hweight8(phy->mt76->chainmask) - 1;
+
+ bf->tx_mode = MT_PHY_TYPE_VHT;
+
+ if (explicit) {
+ u8 sts, snd_dim;
+
+ mt7915_mcu_sta_sounding_rate(bf);
+
+ sts = FIELD_GET(IEEE80211_VHT_CAP_BEAMFORMEE_STS_MASK,
+ pc->cap);
+ snd_dim = FIELD_GET(IEEE80211_VHT_CAP_SOUNDING_DIMENSIONS_MASK,
+ vc->cap);
+ bf->nrow = min_t(u8, min_t(u8, snd_dim, sts), tx_ant);
+ bf->ncol = min_t(u8, nss_mcs, bf->nrow);
+ bf->ibf_ncol = bf->ncol;
+
+ if (sta->deflink.bandwidth == IEEE80211_STA_RX_BW_160)
+ bf->nrow = 1;
+ } else {
+ bf->nrow = tx_ant;
+ bf->ncol = min_t(u8, nss_mcs, bf->nrow);
+ bf->ibf_ncol = nss_mcs;
+
+ if (sta->deflink.bandwidth == IEEE80211_STA_RX_BW_160)
+ bf->ibf_nrow = 1;
+ }
+}
+
+static void
+mt7915_mcu_sta_bfer_he(struct ieee80211_sta *sta, struct ieee80211_vif *vif,
+ struct mt7915_phy *phy, struct sta_rec_bf *bf)
+{
+ struct ieee80211_sta_he_cap *pc = &sta->deflink.he_cap;
+ struct ieee80211_he_cap_elem *pe = &pc->he_cap_elem;
+ const struct ieee80211_sta_he_cap *vc =
+ mt76_connac_get_he_phy_cap(phy->mt76, vif);
+ const struct ieee80211_he_cap_elem *ve = &vc->he_cap_elem;
+ u16 mcs_map = le16_to_cpu(pc->he_mcs_nss_supp.rx_mcs_80);
+ u8 nss_mcs = mt7915_mcu_get_sta_nss(mcs_map);
+ u8 snd_dim, sts;
+
+ bf->tx_mode = MT_PHY_TYPE_HE_SU;
+
+ mt7915_mcu_sta_sounding_rate(bf);
+
+ bf->trigger_su = HE_PHY(CAP6_TRIG_SU_BEAMFORMING_FB,
+ pe->phy_cap_info[6]);
+ bf->trigger_mu = HE_PHY(CAP6_TRIG_MU_BEAMFORMING_PARTIAL_BW_FB,
+ pe->phy_cap_info[6]);
+ snd_dim = HE_PHY(CAP5_BEAMFORMEE_NUM_SND_DIM_UNDER_80MHZ_MASK,
+ ve->phy_cap_info[5]);
+ sts = HE_PHY(CAP4_BEAMFORMEE_MAX_STS_UNDER_80MHZ_MASK,
+ pe->phy_cap_info[4]);
+ bf->nrow = min_t(u8, snd_dim, sts);
+ bf->ncol = min_t(u8, nss_mcs, bf->nrow);
+ bf->ibf_ncol = bf->ncol;
+
+ if (sta->deflink.bandwidth != IEEE80211_STA_RX_BW_160)
+ return;
+
+ /* handle the 160MHz and 80+80MHz cases */
+ if (pe->phy_cap_info[0] &
+ IEEE80211_HE_PHY_CAP0_CHANNEL_WIDTH_SET_160MHZ_IN_5G) {
+ mcs_map = le16_to_cpu(pc->he_mcs_nss_supp.rx_mcs_160);
+ nss_mcs = mt7915_mcu_get_sta_nss(mcs_map);
+
+ bf->ncol_gt_bw80 = nss_mcs;
+ }
+
+ if (pe->phy_cap_info[0] &
+ IEEE80211_HE_PHY_CAP0_CHANNEL_WIDTH_SET_80PLUS80_MHZ_IN_5G) {
+ mcs_map = le16_to_cpu(pc->he_mcs_nss_supp.rx_mcs_80p80);
+ nss_mcs = mt7915_mcu_get_sta_nss(mcs_map);
+
+ if (bf->ncol_gt_bw80)
+ bf->ncol_gt_bw80 = min_t(u8, bf->ncol_gt_bw80, nss_mcs);
+ else
+ bf->ncol_gt_bw80 = nss_mcs;
+ }
+
+ snd_dim = HE_PHY(CAP5_BEAMFORMEE_NUM_SND_DIM_ABOVE_80MHZ_MASK,
+ ve->phy_cap_info[5]);
+ sts = HE_PHY(CAP4_BEAMFORMEE_MAX_STS_ABOVE_80MHZ_MASK,
+ pe->phy_cap_info[4]);
+
+ bf->nrow_gt_bw80 = min_t(int, snd_dim, sts);
+}
+
+static void
+mt7915_mcu_sta_bfer_tlv(struct mt7915_dev *dev, struct sk_buff *skb,
+ struct ieee80211_vif *vif, struct ieee80211_sta *sta)
+{
+ struct mt7915_vif *mvif = (struct mt7915_vif *)vif->drv_priv;
+ struct mt7915_phy *phy = mvif->phy;
+ int tx_ant = hweight8(phy->mt76->chainmask) - 1;
+ struct sta_rec_bf *bf;
+ struct tlv *tlv;
+ const u8 matrix[4][4] = {
+ {0, 0, 0, 0},
+ {1, 1, 0, 0}, /* 2x1, 2x2, 2x3, 2x4 */
+ {2, 4, 4, 0}, /* 3x1, 3x2, 3x3, 3x4 */
+ {3, 5, 6, 0} /* 4x1, 4x2, 4x3, 4x4 */
+ };
+ bool ebf;
+
+ if (!(sta->deflink.ht_cap.ht_supported || sta->deflink.he_cap.has_he))
+ return;
+
+ ebf = mt7915_is_ebf_supported(phy, vif, sta, false);
+ if (!ebf && !dev->ibf)
+ return;
+
+ tlv = mt76_connac_mcu_add_tlv(skb, STA_REC_BF, sizeof(*bf));
+ bf = (struct sta_rec_bf *)tlv;
+
+ /* he: eBF only, in accordance with spec
+ * vht: support eBF and iBF
+ * ht: iBF only, since mac80211 lacks eBF support
+ */
+ if (sta->deflink.he_cap.has_he && ebf)
+ mt7915_mcu_sta_bfer_he(sta, vif, phy, bf);
+ else if (sta->deflink.vht_cap.vht_supported)
+ mt7915_mcu_sta_bfer_vht(sta, phy, bf, ebf);
+ else if (sta->deflink.ht_cap.ht_supported)
+ mt7915_mcu_sta_bfer_ht(sta, phy, bf);
+ else
+ return;
+
+ bf->bf_cap = ebf ? ebf : dev->ibf << 1;
+ bf->bw = sta->deflink.bandwidth;
+ bf->ibf_dbw = sta->deflink.bandwidth;
+ bf->ibf_nrow = tx_ant;
+
+ if (!ebf && sta->deflink.bandwidth <= IEEE80211_STA_RX_BW_40 && !bf->ncol)
+ bf->ibf_timeout = 0x48;
+ else
+ bf->ibf_timeout = 0x18;
+
+ if (ebf && bf->nrow != tx_ant)
+ bf->mem_20m = matrix[tx_ant][bf->ncol];
+ else
+ bf->mem_20m = matrix[bf->nrow][bf->ncol];
+
+ switch (sta->deflink.bandwidth) {
+ case IEEE80211_STA_RX_BW_160:
+ case IEEE80211_STA_RX_BW_80:
+ bf->mem_total = bf->mem_20m * 2;
+ break;
+ case IEEE80211_STA_RX_BW_40:
+ bf->mem_total = bf->mem_20m;
+ break;
+ case IEEE80211_STA_RX_BW_20:
+ default:
+ break;
+ }
+}
+
+static void
+mt7915_mcu_sta_bfee_tlv(struct mt7915_dev *dev, struct sk_buff *skb,
+ struct ieee80211_vif *vif, struct ieee80211_sta *sta)
+{
+ struct mt7915_vif *mvif = (struct mt7915_vif *)vif->drv_priv;
+ struct mt7915_phy *phy = mvif->phy;
+ int tx_ant = hweight8(phy->mt76->chainmask) - 1;
+ struct sta_rec_bfee *bfee;
+ struct tlv *tlv;
+ u8 nrow = 0;
+
+ if (!(sta->deflink.vht_cap.vht_supported || sta->deflink.he_cap.has_he))
+ return;
+
+ if (!mt7915_is_ebf_supported(phy, vif, sta, true))
+ return;
+
+ tlv = mt76_connac_mcu_add_tlv(skb, STA_REC_BFEE, sizeof(*bfee));
+ bfee = (struct sta_rec_bfee *)tlv;
+
+ if (sta->deflink.he_cap.has_he) {
+ struct ieee80211_he_cap_elem *pe = &sta->deflink.he_cap.he_cap_elem;
+
+ nrow = HE_PHY(CAP5_BEAMFORMEE_NUM_SND_DIM_UNDER_80MHZ_MASK,
+ pe->phy_cap_info[5]);
+ } else if (sta->deflink.vht_cap.vht_supported) {
+ struct ieee80211_sta_vht_cap *pc = &sta->deflink.vht_cap;
+
+ nrow = FIELD_GET(IEEE80211_VHT_CAP_SOUNDING_DIMENSIONS_MASK,
+ pc->cap);
+ }
+
+ /* reply with identity matrix to avoid 2x2 BF negative gain */
+ bfee->fb_identity_matrix = (nrow == 1 && tx_ant == 2);
+}
+
+static enum mcu_mmps_mode
+mt7915_mcu_get_mmps_mode(enum ieee80211_smps_mode smps)
+{
+ switch (smps) {
+ case IEEE80211_SMPS_OFF:
+ return MCU_MMPS_DISABLE;
+ case IEEE80211_SMPS_STATIC:
+ return MCU_MMPS_STATIC;
+ case IEEE80211_SMPS_DYNAMIC:
+ return MCU_MMPS_DYNAMIC;
+ default:
+ return MCU_MMPS_DISABLE;
+ }
+}
+
+int mt7915_mcu_set_fixed_rate_ctrl(struct mt7915_dev *dev,
+ struct ieee80211_vif *vif,
+ struct ieee80211_sta *sta,
+ void *data, u32 field)
+{
+ struct mt7915_vif *mvif = (struct mt7915_vif *)vif->drv_priv;
+ struct mt7915_sta *msta = (struct mt7915_sta *)sta->drv_priv;
+ struct sta_phy *phy = data;
+ struct sta_rec_ra_fixed *ra;
+ struct sk_buff *skb;
+ struct tlv *tlv;
+
+ skb = mt76_connac_mcu_alloc_sta_req(&dev->mt76, &mvif->mt76,
+ &msta->wcid);
+ if (IS_ERR(skb))
+ return PTR_ERR(skb);
+
+ tlv = mt76_connac_mcu_add_tlv(skb, STA_REC_RA_UPDATE, sizeof(*ra));
+ ra = (struct sta_rec_ra_fixed *)tlv;
+
+ switch (field) {
+ case RATE_PARAM_AUTO:
+ break;
+ case RATE_PARAM_FIXED:
+ case RATE_PARAM_FIXED_MCS:
+ case RATE_PARAM_FIXED_GI:
+ case RATE_PARAM_FIXED_HE_LTF:
+ if (phy)
+ ra->phy = *phy;
+ break;
+ case RATE_PARAM_MMPS_UPDATE:
+ ra->mmps_mode = mt7915_mcu_get_mmps_mode(sta->deflink.smps_mode);
+ break;
+ case RATE_PARAM_SPE_UPDATE:
+ ra->spe_idx = *(u8 *)data;
+ break;
+ default:
+ break;
+ }
+ ra->field = cpu_to_le32(field);
+
+ return mt76_mcu_skb_send_msg(&dev->mt76, skb,
+ MCU_EXT_CMD(STA_REC_UPDATE), true);
+}
+
+int mt7915_mcu_add_smps(struct mt7915_dev *dev, struct ieee80211_vif *vif,
+ struct ieee80211_sta *sta)
+{
+ struct mt7915_vif *mvif = (struct mt7915_vif *)vif->drv_priv;
+ struct mt7915_sta *msta = (struct mt7915_sta *)sta->drv_priv;
+ struct wtbl_req_hdr *wtbl_hdr;
+ struct tlv *sta_wtbl;
+ struct sk_buff *skb;
+ int ret;
+
+ skb = mt76_connac_mcu_alloc_sta_req(&dev->mt76, &mvif->mt76,
+ &msta->wcid);
+ if (IS_ERR(skb))
+ return PTR_ERR(skb);
+
+ sta_wtbl = mt76_connac_mcu_add_tlv(skb, STA_REC_WTBL,
+ sizeof(struct tlv));
+ wtbl_hdr = mt76_connac_mcu_alloc_wtbl_req(&dev->mt76, &msta->wcid,
+ WTBL_SET, sta_wtbl, &skb);
+ if (IS_ERR(wtbl_hdr))
+ return PTR_ERR(wtbl_hdr);
+
+ mt76_connac_mcu_wtbl_smps_tlv(skb, sta, sta_wtbl, wtbl_hdr);
+
+ ret = mt76_mcu_skb_send_msg(&dev->mt76, skb,
+ MCU_EXT_CMD(STA_REC_UPDATE), true);
+ if (ret)
+ return ret;
+
+ return mt7915_mcu_set_fixed_rate_ctrl(dev, vif, sta, NULL,
+ RATE_PARAM_MMPS_UPDATE);
+}
+
+static int
+mt7915_mcu_set_spe_idx(struct mt7915_dev *dev, struct ieee80211_vif *vif,
+ struct ieee80211_sta *sta)
+{
+ struct mt7915_vif *mvif = (struct mt7915_vif *)vif->drv_priv;
+ struct mt76_phy *mphy = mvif->phy->mt76;
+ u8 spe_idx = mt76_connac_spe_idx(mphy->antenna_mask);
+
+ return mt7915_mcu_set_fixed_rate_ctrl(dev, vif, sta, &spe_idx,
+ RATE_PARAM_SPE_UPDATE);
+}
+
+static int
+mt7915_mcu_add_rate_ctrl_fixed(struct mt7915_dev *dev,
+ struct ieee80211_vif *vif,
+ struct ieee80211_sta *sta)
+{
+ struct mt7915_vif *mvif = (struct mt7915_vif *)vif->drv_priv;
+ struct cfg80211_chan_def *chandef = &mvif->phy->mt76->chandef;
+ struct cfg80211_bitrate_mask *mask = &mvif->bitrate_mask;
+ enum nl80211_band band = chandef->chan->band;
+ struct sta_phy phy = {};
+ int ret, nrates = 0;
+
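+/* Scan the configured bitrate mask: collect the (HE) GI / HE-LTF selection
+ * per bandwidth, count the enabled MCS rates across all streams and record
+ * an MCS index from the mask (offset by 8 per stream for HT).
+ */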
+#define __sta_phy_bitrate_mask_check(_mcs, _gi, _ht, _he) \
+ do { \
+ u8 i, gi = mask->control[band]._gi; \
+ gi = (_he) ? gi : gi == NL80211_TXRATE_FORCE_SGI; \
+ for (i = 0; i <= sta->deflink.bandwidth; i++) { \
+ phy.sgi |= gi << (i << (_he)); \
+ phy.he_ltf |= mask->control[band].he_ltf << (i << (_he));\
+ } \
+ for (i = 0; i < ARRAY_SIZE(mask->control[band]._mcs); i++) { \
+ if (!mask->control[band]._mcs[i]) \
+ continue; \
+ nrates += hweight16(mask->control[band]._mcs[i]); \
+ phy.mcs = ffs(mask->control[band]._mcs[i]) - 1; \
+ if (_ht) \
+ phy.mcs += 8 * i; \
+ } \
+ } while (0)
+
+ if (sta->deflink.he_cap.has_he) {
+ __sta_phy_bitrate_mask_check(he_mcs, he_gi, 0, 1);
+ } else if (sta->deflink.vht_cap.vht_supported) {
+ __sta_phy_bitrate_mask_check(vht_mcs, gi, 0, 0);
+ } else if (sta->deflink.ht_cap.ht_supported) {
+ __sta_phy_bitrate_mask_check(ht_mcs, gi, 1, 0);
+ } else {
+ nrates = hweight32(mask->control[band].legacy);
+ phy.mcs = ffs(mask->control[band].legacy) - 1;
+ }
+#undef __sta_phy_bitrate_mask_check
+
+ /* fall back to auto rate control */
+ if (mask->control[band].gi == NL80211_TXRATE_DEFAULT_GI &&
+ mask->control[band].he_gi == GENMASK(7, 0) &&
+ mask->control[band].he_ltf == GENMASK(7, 0) &&
+ nrates != 1)
+ return 0;
+
+ /* fixed single rate */
+ if (nrates == 1) {
+ ret = mt7915_mcu_set_fixed_rate_ctrl(dev, vif, sta, &phy,
+ RATE_PARAM_FIXED_MCS);
+ if (ret)
+ return ret;
+ }
+
+ /* fixed GI */
+ if (mask->control[band].gi != NL80211_TXRATE_DEFAULT_GI ||
+ mask->control[band].he_gi != GENMASK(7, 0)) {
+ struct mt7915_sta *msta = (struct mt7915_sta *)sta->drv_priv;
+ u32 addr;
+
+ /* the firmware only updates TXCMD and doesn't touch the WTBL,
+ * so the driver updates it here to reflect the actual tx rate
+ * the hardware sends out.
+ */
+ addr = mt7915_mac_wtbl_lmac_addr(dev, msta->wcid.idx, 7);
+ if (sta->deflink.he_cap.has_he)
+ mt76_rmw_field(dev, addr, GENMASK(31, 24), phy.sgi);
+ else
+ mt76_rmw_field(dev, addr, GENMASK(15, 12), phy.sgi);
+
+ ret = mt7915_mcu_set_fixed_rate_ctrl(dev, vif, sta, &phy,
+ RATE_PARAM_FIXED_GI);
+ if (ret)
+ return ret;
+ }
+
+ /* fixed HE_LTF */
+ if (mask->control[band].he_ltf != GENMASK(7, 0)) {
+ ret = mt7915_mcu_set_fixed_rate_ctrl(dev, vif, sta, &phy,
+ RATE_PARAM_FIXED_HE_LTF);
+ if (ret)
+ return ret;
+ }
+
+ return mt7915_mcu_set_spe_idx(dev, vif, sta);
+}
+
+static void
+mt7915_mcu_sta_rate_ctrl_tlv(struct sk_buff *skb, struct mt7915_dev *dev,
+ struct ieee80211_vif *vif, struct ieee80211_sta *sta)
+{
+ struct mt7915_vif *mvif = (struct mt7915_vif *)vif->drv_priv;
+ struct mt76_phy *mphy = mvif->phy->mt76;
+ struct cfg80211_chan_def *chandef = &mphy->chandef;
+ struct cfg80211_bitrate_mask *mask = &mvif->bitrate_mask;
+ enum nl80211_band band = chandef->chan->band;
+ struct sta_rec_ra *ra;
+ struct tlv *tlv;
+ u32 supp_rate = sta->deflink.supp_rates[band];
+ u32 cap = sta->wme ? STA_CAP_WMM : 0;
+
+ tlv = mt76_connac_mcu_add_tlv(skb, STA_REC_RA, sizeof(*ra));
+ ra = (struct sta_rec_ra *)tlv;
+
+ ra->valid = true;
+ ra->auto_rate = true;
+ ra->phy_mode = mt76_connac_get_phy_mode(mphy, vif, band, sta);
+ ra->channel = chandef->chan->hw_value;
+ ra->bw = sta->deflink.bandwidth;
+ ra->phy.bw = sta->deflink.bandwidth;
+ ra->mmps_mode = mt7915_mcu_get_mmps_mode(sta->deflink.smps_mode);
+
+ if (supp_rate) {
+ supp_rate &= mask->control[band].legacy;
+ ra->rate_len = hweight32(supp_rate);
+
+ if (band == NL80211_BAND_2GHZ) {
+ ra->supp_mode = MODE_CCK;
+ ra->supp_cck_rate = supp_rate & GENMASK(3, 0);
+
+ if (ra->rate_len > 4) {
+ ra->supp_mode |= MODE_OFDM;
+ ra->supp_ofdm_rate = supp_rate >> 4;
+ }
+ } else {
+ ra->supp_mode = MODE_OFDM;
+ ra->supp_ofdm_rate = supp_rate;
+ }
+ }
+
+ if (sta->deflink.ht_cap.ht_supported) {
+ ra->supp_mode |= MODE_HT;
+ ra->af = sta->deflink.ht_cap.ampdu_factor;
+ ra->ht_gf = !!(sta->deflink.ht_cap.cap & IEEE80211_HT_CAP_GRN_FLD);
+
+ cap |= STA_CAP_HT;
+ if (sta->deflink.ht_cap.cap & IEEE80211_HT_CAP_SGI_20)
+ cap |= STA_CAP_SGI_20;
+ if (sta->deflink.ht_cap.cap & IEEE80211_HT_CAP_SGI_40)
+ cap |= STA_CAP_SGI_40;
+ if (sta->deflink.ht_cap.cap & IEEE80211_HT_CAP_TX_STBC)
+ cap |= STA_CAP_TX_STBC;
+ if (sta->deflink.ht_cap.cap & IEEE80211_HT_CAP_RX_STBC)
+ cap |= STA_CAP_RX_STBC;
+ if (mvif->cap.ht_ldpc &&
+ (sta->deflink.ht_cap.cap & IEEE80211_HT_CAP_LDPC_CODING))
+ cap |= STA_CAP_LDPC;
+
+ mt7915_mcu_set_sta_ht_mcs(sta, ra->ht_mcs,
+ mask->control[band].ht_mcs);
+ ra->supp_ht_mcs = *(__le32 *)ra->ht_mcs;
+ }
+
+ if (sta->deflink.vht_cap.vht_supported) {
+ u8 af;
+
+ ra->supp_mode |= MODE_VHT;
+ af = FIELD_GET(IEEE80211_VHT_CAP_MAX_A_MPDU_LENGTH_EXPONENT_MASK,
+ sta->deflink.vht_cap.cap);
+ ra->af = max_t(u8, ra->af, af);
+
+ cap |= STA_CAP_VHT;
+ if (sta->deflink.vht_cap.cap & IEEE80211_VHT_CAP_SHORT_GI_80)
+ cap |= STA_CAP_VHT_SGI_80;
+ if (sta->deflink.vht_cap.cap & IEEE80211_VHT_CAP_SHORT_GI_160)
+ cap |= STA_CAP_VHT_SGI_160;
+ if (sta->deflink.vht_cap.cap & IEEE80211_VHT_CAP_TXSTBC)
+ cap |= STA_CAP_VHT_TX_STBC;
+ if (sta->deflink.vht_cap.cap & IEEE80211_VHT_CAP_RXSTBC_1)
+ cap |= STA_CAP_VHT_RX_STBC;
+ if (mvif->cap.vht_ldpc &&
+ (sta->deflink.vht_cap.cap & IEEE80211_VHT_CAP_RXLDPC))
+ cap |= STA_CAP_VHT_LDPC;
+
+ mt7915_mcu_set_sta_vht_mcs(sta, ra->supp_vht_mcs,
+ mask->control[band].vht_mcs);
+ }
+
+ if (sta->deflink.he_cap.has_he) {
+ ra->supp_mode |= MODE_HE;
+ cap |= STA_CAP_HE;
+
+ if (sta->deflink.he_6ghz_capa.capa)
+ ra->af = le16_get_bits(sta->deflink.he_6ghz_capa.capa,
+ IEEE80211_HE_6GHZ_CAP_MAX_AMPDU_LEN_EXP);
+ }
+
+ ra->sta_cap = cpu_to_le32(cap);
+}
+
+int mt7915_mcu_add_rate_ctrl(struct mt7915_dev *dev, struct ieee80211_vif *vif,
+ struct ieee80211_sta *sta, bool changed)
+{
+ struct mt7915_vif *mvif = (struct mt7915_vif *)vif->drv_priv;
+ struct mt7915_sta *msta = (struct mt7915_sta *)sta->drv_priv;
+ struct sk_buff *skb;
+ int ret;
+
+ skb = mt76_connac_mcu_alloc_sta_req(&dev->mt76, &mvif->mt76,
+ &msta->wcid);
+ if (IS_ERR(skb))
+ return PTR_ERR(skb);
+
+ /* the firmware rate control algorithm refers to sta_rec_he for HE
+ * control; once dev->rc_work changes the settings, the driver must
+ * also update sta_rec_he here.
+ */
+ if (changed)
+ mt7915_mcu_sta_he_tlv(skb, sta, vif);
+
+ /* sta_rec_ra accommodates BW, NSS and only the MCS range format,
+ * i.e. 0-{7,8,9} for VHT.
+ */
+ mt7915_mcu_sta_rate_ctrl_tlv(skb, dev, vif, sta);
+
+ ret = mt76_mcu_skb_send_msg(&dev->mt76, skb,
+ MCU_EXT_CMD(STA_REC_UPDATE), true);
+ if (ret)
+ return ret;
+
+ /* sta_rec_ra_fixed accommodates a single rate, (HE) GI and HE_LTF,
+ * and applies them as the peer's fixed rate parameters, overriding
+ * sta_rec_ra and the firmware rate control algorithm.
+ */
+ return mt7915_mcu_add_rate_ctrl_fixed(dev, vif, sta);
+}
+
+static int
+mt7915_mcu_add_group(struct mt7915_dev *dev, struct ieee80211_vif *vif,
+ struct ieee80211_sta *sta)
+{
+#define MT_STA_BSS_GROUP 1
+ struct mt7915_vif *mvif = (struct mt7915_vif *)vif->drv_priv;
+ struct mt7915_sta *msta;
+ struct {
+ __le32 action;
+ u8 wlan_idx_lo;
+ u8 status;
+ u8 wlan_idx_hi;
+ u8 rsv0[5];
+ __le32 val;
+ u8 rsv1[8];
+ } __packed req = {
+ .action = cpu_to_le32(MT_STA_BSS_GROUP),
+ .val = cpu_to_le32(mvif->mt76.idx % 16),
+ };
+
+ msta = sta ? (struct mt7915_sta *)sta->drv_priv : &mvif->sta;
+ req.wlan_idx_lo = to_wcid_lo(msta->wcid.idx);
+ req.wlan_idx_hi = to_wcid_hi(msta->wcid.idx);
+
+ return mt76_mcu_send_msg(&dev->mt76, MCU_EXT_CMD(SET_DRR_CTRL), &req,
+ sizeof(req), true);
+}
+
+int mt7915_mcu_add_sta(struct mt7915_dev *dev, struct ieee80211_vif *vif,
+ struct ieee80211_sta *sta, bool enable)
+{
+ struct mt7915_vif *mvif = (struct mt7915_vif *)vif->drv_priv;
+ struct mt7915_sta *msta;
+ struct sk_buff *skb;
+ int ret;
+
+ msta = sta ? (struct mt7915_sta *)sta->drv_priv : &mvif->sta;
+
+ skb = mt76_connac_mcu_alloc_sta_req(&dev->mt76, &mvif->mt76,
+ &msta->wcid);
+ if (IS_ERR(skb))
+ return PTR_ERR(skb);
+
+ /* starec basic */
+ mt76_connac_mcu_sta_basic_tlv(&dev->mt76, skb, vif, sta, enable,
+ !rcu_access_pointer(dev->mt76.wcid[msta->wcid.idx]));
+ if (!enable)
+ goto out;
+
+ /* tag order is in accordance with firmware dependency. */
+ if (sta) {
+ /* starec bfer */
+ mt7915_mcu_sta_bfer_tlv(dev, skb, vif, sta);
+ /* starec ht */
+ mt7915_mcu_sta_ht_tlv(skb, sta);
+ /* starec vht */
+ mt7915_mcu_sta_vht_tlv(skb, sta);
+ /* starec uapsd */
+ mt76_connac_mcu_sta_uapsd(skb, vif, sta);
+ }
+
+ ret = mt7915_mcu_sta_wtbl_tlv(dev, skb, vif, sta);
+ if (ret) {
+ dev_kfree_skb(skb);
+ return ret;
+ }
+
+ if (sta) {
+ /* starec amsdu */
+ mt7915_mcu_sta_amsdu_tlv(dev, skb, vif, sta);
+ /* starec he */
+ mt7915_mcu_sta_he_tlv(skb, sta, vif);
+ /* starec muru */
+ mt7915_mcu_sta_muru_tlv(dev, skb, sta, vif);
+ /* starec bfee */
+ mt7915_mcu_sta_bfee_tlv(dev, skb, vif, sta);
+ }
+
+ ret = mt7915_mcu_add_group(dev, vif, sta);
+ if (ret) {
+ dev_kfree_skb(skb);
+ return ret;
+ }
+out:
+ ret = mt76_connac_mcu_sta_wed_update(&dev->mt76, skb);
+ if (ret)
+ return ret;
+
+ return mt76_mcu_skb_send_msg(&dev->mt76, skb,
+ MCU_EXT_CMD(STA_REC_UPDATE), true);
+}
+
+int mt7915_mcu_wed_enable_rx_stats(struct mt7915_dev *dev)
+{
+#ifdef CONFIG_NET_MEDIATEK_SOC_WED
+ struct mtk_wed_device *wed = &dev->mt76.mmio.wed;
+ struct {
+ __le32 args[2];
+ } req = {
+ .args[0] = cpu_to_le32(1),
+ .args[1] = cpu_to_le32(6),
+ };
+
+ return mtk_wed_device_update_msg(wed, MTK_WED_WO_CMD_RXCNT_CTRL,
+ &req, sizeof(req));
+#else
+ return 0;
+#endif
+}
+
+int mt7915_mcu_add_dev_info(struct mt7915_phy *phy,
+ struct ieee80211_vif *vif, bool enable)
+{
+ struct mt7915_dev *dev = phy->dev;
+ struct mt7915_vif *mvif = (struct mt7915_vif *)vif->drv_priv;
+ struct {
+ struct req_hdr {
+ u8 omac_idx;
+ u8 band_idx;
+ __le16 tlv_num;
+ u8 is_tlv_append;
+ u8 rsv[3];
+ } __packed hdr;
+ struct req_tlv {
+ __le16 tag;
+ __le16 len;
+ u8 active;
+ u8 band_idx;
+ u8 omac_addr[ETH_ALEN];
+ } __packed tlv;
+ } data = {
+ .hdr = {
+ .omac_idx = mvif->mt76.omac_idx,
+ .band_idx = mvif->mt76.band_idx,
+ .tlv_num = cpu_to_le16(1),
+ .is_tlv_append = 1,
+ },
+ .tlv = {
+ .tag = cpu_to_le16(DEV_INFO_ACTIVE),
+ .len = cpu_to_le16(sizeof(struct req_tlv)),
+ .active = enable,
+ .band_idx = mvif->mt76.band_idx,
+ },
+ };
+
+ if (mvif->mt76.omac_idx >= REPEATER_BSSID_START)
+ return mt7915_mcu_muar_config(phy, vif, false, enable);
+
+ memcpy(data.tlv.omac_addr, vif->addr, ETH_ALEN);
+ return mt76_mcu_send_msg(&dev->mt76, MCU_EXT_CMD(DEV_INFO_UPDATE),
+ &data, sizeof(data), true);
+}
+
+static void
+mt7915_mcu_beacon_cntdwn(struct ieee80211_vif *vif, struct sk_buff *rskb,
+ struct sk_buff *skb, struct bss_info_bcn *bcn,
+ struct ieee80211_mutable_offsets *offs)
+{
+ struct bss_info_bcn_cntdwn *info;
+ struct tlv *tlv;
+ int sub_tag;
+
+ if (!offs->cntdwn_counter_offs[0])
+ return;
+
+ sub_tag = vif->bss_conf.csa_active ? BSS_INFO_BCN_CSA : BSS_INFO_BCN_BCC;
+ tlv = mt7915_mcu_add_nested_subtlv(rskb, sub_tag, sizeof(*info),
+ &bcn->sub_ntlv, &bcn->len);
+ info = (struct bss_info_bcn_cntdwn *)tlv;
+ info->cnt = skb->data[offs->cntdwn_counter_offs[0]];
+}
+
+static void
+mt7915_mcu_beacon_mbss(struct sk_buff *rskb, struct sk_buff *skb,
+ struct ieee80211_vif *vif, struct bss_info_bcn *bcn,
+ struct ieee80211_mutable_offsets *offs)
+{
+ struct bss_info_bcn_mbss *mbss;
+ const struct element *elem;
+ struct tlv *tlv;
+
+ if (!vif->bss_conf.bssid_indicator)
+ return;
+
+ tlv = mt7915_mcu_add_nested_subtlv(rskb, BSS_INFO_BCN_MBSSID,
+ sizeof(*mbss), &bcn->sub_ntlv,
+ &bcn->len);
+
+ mbss = (struct bss_info_bcn_mbss *)tlv;
+ mbss->offset[0] = cpu_to_le16(offs->tim_offset);
+ mbss->bitmap = cpu_to_le32(1);
+
+ for_each_element_id(elem, WLAN_EID_MULTIPLE_BSSID,
+ &skb->data[offs->mbssid_off],
+ skb->len - offs->mbssid_off) {
+ const struct element *sub_elem;
+
+ if (elem->datalen < 2)
+ continue;
+
+ for_each_element(sub_elem, elem->data + 1, elem->datalen - 1) {
+ const struct ieee80211_bssid_index *idx;
+ const u8 *idx_ie;
+
+ if (sub_elem->id || sub_elem->datalen < 4)
+ continue; /* not a valid BSS profile */
+
+ /* Find WLAN_EID_MULTI_BSSID_IDX
+ * in the merged nontransmitted profile
+ */
+ idx_ie = cfg80211_find_ie(WLAN_EID_MULTI_BSSID_IDX,
+ sub_elem->data,
+ sub_elem->datalen);
+ if (!idx_ie || idx_ie[1] < sizeof(*idx))
+ continue;
+
+ idx = (void *)(idx_ie + 2);
+ if (!idx->bssid_index || idx->bssid_index > 31)
+ continue;
+
+ mbss->offset[idx->bssid_index] =
+ cpu_to_le16(idx_ie - skb->data);
+ mbss->bitmap |= cpu_to_le32(BIT(idx->bssid_index));
+ }
+ }
+}
+
+static void
+mt7915_mcu_beacon_cont(struct mt7915_dev *dev, struct ieee80211_vif *vif,
+ struct sk_buff *rskb, struct sk_buff *skb,
+ struct bss_info_bcn *bcn,
+ struct ieee80211_mutable_offsets *offs)
+{
+ struct mt76_wcid *wcid = &dev->mt76.global_wcid;
+ struct bss_info_bcn_cont *cont;
+ struct tlv *tlv;
+ u8 *buf;
+ int len = sizeof(*cont) + MT_TXD_SIZE + skb->len;
+
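+ /* pad the sub-TLV length up to a 4-byte boundary */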
+ len = (len & 0x3) ? ((len | 0x3) + 1) : len;
+ tlv = mt7915_mcu_add_nested_subtlv(rskb, BSS_INFO_BCN_CONTENT,
+ len, &bcn->sub_ntlv, &bcn->len);
+
+ cont = (struct bss_info_bcn_cont *)tlv;
+ cont->pkt_len = cpu_to_le16(MT_TXD_SIZE + skb->len);
+ cont->tim_ofs = cpu_to_le16(offs->tim_offset);
+
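+ /* csa_ofs/bcc_ofs point at the start of the CSA / color change
+ * element; the countdown byte sits 4 / 3 bytes into it.
+ */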
+ if (offs->cntdwn_counter_offs[0]) {
+ u16 offset = offs->cntdwn_counter_offs[0];
+
+ if (vif->bss_conf.csa_active)
+ cont->csa_ofs = cpu_to_le16(offset - 4);
+ if (vif->bss_conf.color_change_active)
+ cont->bcc_ofs = cpu_to_le16(offset - 3);
+ }
+
+ buf = (u8 *)tlv + sizeof(*cont);
+ mt7915_mac_write_txwi(&dev->mt76, (__le32 *)buf, skb, wcid, 0, NULL,
+ 0, BSS_CHANGED_BEACON);
+ memcpy(buf + MT_TXD_SIZE, skb->data, skb->len);
+}
+
+int
+mt7915_mcu_add_inband_discov(struct mt7915_dev *dev, struct ieee80211_vif *vif,
+ u32 changed)
+{
+#define OFFLOAD_TX_MODE_SU BIT(0)
+#define OFFLOAD_TX_MODE_MU BIT(1)
+ struct ieee80211_hw *hw = mt76_hw(dev);
+ struct mt7915_phy *phy = mt7915_hw_phy(hw);
+ struct mt7915_vif *mvif = (struct mt7915_vif *)vif->drv_priv;
+ struct cfg80211_chan_def *chandef = &mvif->phy->mt76->chandef;
+ enum nl80211_band band = chandef->chan->band;
+ struct mt76_wcid *wcid = &dev->mt76.global_wcid;
+ struct bss_info_bcn *bcn;
+ struct bss_info_inband_discovery *discov;
+ struct ieee80211_tx_info *info;
+ struct sk_buff *rskb, *skb = NULL;
+ struct tlv *tlv, *sub_tlv;
+ bool ext_phy = phy != &dev->phy;
+ u8 *buf, interval;
+ int len;
+
+ if (vif->bss_conf.nontransmitted)
+ return 0;
+
+ rskb = __mt76_connac_mcu_alloc_sta_req(&dev->mt76, &mvif->mt76, NULL,
+ MT7915_MAX_BSS_OFFLOAD_SIZE);
+ if (IS_ERR(rskb))
+ return PTR_ERR(rskb);
+
+ tlv = mt76_connac_mcu_add_tlv(rskb, BSS_INFO_OFFLOAD, sizeof(*bcn));
+ bcn = (struct bss_info_bcn *)tlv;
+ bcn->enable = true;
+
+ if (changed & BSS_CHANGED_FILS_DISCOVERY &&
+ vif->bss_conf.fils_discovery.max_interval) {
+ interval = vif->bss_conf.fils_discovery.max_interval;
+ skb = ieee80211_get_fils_discovery_tmpl(hw, vif);
+ } else if (changed & BSS_CHANGED_UNSOL_BCAST_PROBE_RESP &&
+ vif->bss_conf.unsol_bcast_probe_resp_interval) {
+ interval = vif->bss_conf.unsol_bcast_probe_resp_interval;
+ skb = ieee80211_get_unsol_bcast_probe_resp_tmpl(hw, vif);
+ }
+
+ if (!skb) {
+ dev_kfree_skb(rskb);
+ return -EINVAL;
+ }
+
+ info = IEEE80211_SKB_CB(skb);
+ info->control.vif = vif;
+ info->band = band;
+ info->hw_queue |= FIELD_PREP(MT_TX_HW_QUEUE_PHY, ext_phy);
+
+ len = sizeof(*discov) + MT_TXD_SIZE + skb->len;
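+ /* pad the sub-TLV length up to a 4-byte boundary */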
+ len = (len & 0x3) ? ((len | 0x3) + 1) : len;
+
+ if (skb->len > MT7915_MAX_BEACON_SIZE) {
+ dev_err(dev->mt76.dev, "inband discovery size limit exceed\n");
+ dev_kfree_skb(rskb);
+ dev_kfree_skb(skb);
+ return -EINVAL;
+ }
+
+ sub_tlv = mt7915_mcu_add_nested_subtlv(rskb, BSS_INFO_BCN_DISCOV,
+ len, &bcn->sub_ntlv, &bcn->len);
+ discov = (struct bss_info_inband_discovery *)sub_tlv;
+ discov->tx_mode = OFFLOAD_TX_MODE_SU;
+ /* 0: UNSOL PROBE RESP, 1: FILS DISCOV */
+ discov->tx_type = !!(changed & BSS_CHANGED_FILS_DISCOVERY);
+ discov->tx_interval = interval;
+ discov->prob_rsp_len = cpu_to_le16(MT_TXD_SIZE + skb->len);
+ discov->enable = true;
+
+ buf = (u8 *)sub_tlv + sizeof(*discov);
+
+ mt7915_mac_write_txwi(&dev->mt76, (__le32 *)buf, skb, wcid, 0, NULL,
+ 0, changed);
+ memcpy(buf + MT_TXD_SIZE, skb->data, skb->len);
+
+ dev_kfree_skb(skb);
+
+ return mt76_mcu_skb_send_msg(&phy->dev->mt76, rskb,
+ MCU_EXT_CMD(BSS_INFO_UPDATE), true);
+}
+
+int mt7915_mcu_add_beacon(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
+ int en, u32 changed)
+{
+ struct mt7915_dev *dev = mt7915_hw_dev(hw);
+ struct mt7915_phy *phy = mt7915_hw_phy(hw);
+ struct mt7915_vif *mvif = (struct mt7915_vif *)vif->drv_priv;
+ struct ieee80211_mutable_offsets offs;
+ struct ieee80211_tx_info *info;
+ struct sk_buff *skb, *rskb;
+ struct tlv *tlv;
+ struct bss_info_bcn *bcn;
+ int len = MT7915_MAX_BSS_OFFLOAD_SIZE;
+ bool ext_phy = phy != &dev->phy;
+
+ if (vif->bss_conf.nontransmitted)
+ return 0;
+
+ rskb = __mt76_connac_mcu_alloc_sta_req(&dev->mt76, &mvif->mt76,
+ NULL, len);
+ if (IS_ERR(rskb))
+ return PTR_ERR(rskb);
+
+ tlv = mt76_connac_mcu_add_tlv(rskb, BSS_INFO_OFFLOAD, sizeof(*bcn));
+ bcn = (struct bss_info_bcn *)tlv;
+ bcn->enable = en;
+
+ if (!en)
+ goto out;
+
+ skb = ieee80211_beacon_get_template(hw, vif, &offs, 0);
+ if (!skb) {
+ dev_kfree_skb(rskb);
+ return -EINVAL;
+ }
+
+ if (skb->len > MT7915_MAX_BEACON_SIZE) {
+ dev_err(dev->mt76.dev, "Bcn size limit exceed\n");
+ dev_kfree_skb(rskb);
+ dev_kfree_skb(skb);
+ return -EINVAL;
+ }
+
+ info = IEEE80211_SKB_CB(skb);
+ info->hw_queue = FIELD_PREP(MT_TX_HW_QUEUE_PHY, ext_phy);
+
+ mt7915_mcu_beacon_cntdwn(vif, rskb, skb, bcn, &offs);
+ mt7915_mcu_beacon_mbss(rskb, skb, vif, bcn, &offs);
+ mt7915_mcu_beacon_cont(dev, vif, rskb, skb, bcn, &offs);
+ dev_kfree_skb(skb);
+
+out:
+ return mt76_mcu_skb_send_msg(&phy->dev->mt76, rskb,
+ MCU_EXT_CMD(BSS_INFO_UPDATE), true);
+}
+
+static int mt7915_driver_own(struct mt7915_dev *dev, u8 band)
+{
+ mt76_wr(dev, MT_TOP_LPCR_HOST_BAND(band), MT_TOP_LPCR_HOST_DRV_OWN);
+ if (!mt76_poll_msec(dev, MT_TOP_LPCR_HOST_BAND(band),
+ MT_TOP_LPCR_HOST_FW_OWN_STAT, 0, 500)) {
+ dev_err(dev->mt76.dev, "Timeout for driver own\n");
+ return -EIO;
+ }
+
+ /* clear irq once driver own succeeds */
+ mt76_wr(dev, MT_TOP_LPCR_HOST_BAND_IRQ_STAT(band),
+ MT_TOP_LPCR_HOST_BAND_STAT);
+
+ return 0;
+}
+
+static int
+mt7915_firmware_state(struct mt7915_dev *dev, bool wa)
+{
+ u32 state = FIELD_PREP(MT_TOP_MISC_FW_STATE,
+ wa ? FW_STATE_RDY : FW_STATE_FW_DOWNLOAD);
+
+ if (!mt76_poll_msec(dev, MT_TOP_MISC, MT_TOP_MISC_FW_STATE,
+ state, 1000)) {
+ dev_err(dev->mt76.dev, "Timeout for initializing firmware\n");
+ return -EIO;
+ }
+ return 0;
+}
+
+static int mt7915_load_firmware(struct mt7915_dev *dev)
+{
+ int ret;
+
+ /* make sure fw is in the download state */
+ if (mt7915_firmware_state(dev, false)) {
+ /* restart firmware once */
+ mt76_connac_mcu_restart(&dev->mt76);
+ ret = mt7915_firmware_state(dev, false);
+ if (ret) {
+ dev_err(dev->mt76.dev,
+ "Firmware is not ready for download\n");
+ return ret;
+ }
+ }
+
+ ret = mt76_connac2_load_patch(&dev->mt76, fw_name_var(dev, ROM_PATCH));
+ if (ret)
+ return ret;
+
+ ret = mt76_connac2_load_ram(&dev->mt76, fw_name_var(dev, FIRMWARE_WM),
+ fw_name(dev, FIRMWARE_WA));
+ if (ret)
+ return ret;
+
+ ret = mt7915_firmware_state(dev, true);
+ if (ret)
+ return ret;
+
+ mt76_queue_tx_cleanup(dev, dev->mt76.q_mcu[MT_MCUQ_FWDL], false);
+
+ dev_dbg(dev->mt76.dev, "Firmware init done\n");
+
+ return 0;
+}
+
+int mt7915_mcu_fw_log_2_host(struct mt7915_dev *dev, u8 type, u8 ctrl)
+{
+ struct {
+ u8 ctrl_val;
+ u8 pad[3];
+ } data = {
+ .ctrl_val = ctrl
+ };
+
+ if (type == MCU_FW_LOG_WA)
+ return mt76_mcu_send_msg(&dev->mt76, MCU_WA_EXT_CMD(FW_LOG_2_HOST),
+ &data, sizeof(data), true);
+
+ return mt76_mcu_send_msg(&dev->mt76, MCU_EXT_CMD(FW_LOG_2_HOST), &data,
+ sizeof(data), true);
+}
+
+int mt7915_mcu_fw_dbg_ctrl(struct mt7915_dev *dev, u32 module, u8 level)
+{
+ struct {
+ u8 ver;
+ u8 pad;
+ __le16 len;
+ u8 level;
+ u8 rsv[3];
+ __le32 module_idx;
+ } data = {
+ .module_idx = cpu_to_le32(module),
+ .level = level,
+ };
+
+ return mt76_mcu_send_msg(&dev->mt76, MCU_EXT_CMD(FW_DBG_CTRL), &data,
+ sizeof(data), false);
+}
+
+int mt7915_mcu_muru_debug_set(struct mt7915_dev *dev, bool enabled)
+{
+ struct {
+ __le32 cmd;
+ u8 enable;
+ } data = {
+ .cmd = cpu_to_le32(MURU_SET_TXC_TX_STATS_EN),
+ .enable = enabled,
+ };
+
+ return mt76_mcu_send_msg(&dev->mt76, MCU_EXT_CMD(MURU_CTRL), &data,
+ sizeof(data), false);
+}
+
+int mt7915_mcu_muru_debug_get(struct mt7915_phy *phy)
+{
+ struct mt7915_dev *dev = phy->dev;
+ struct sk_buff *skb;
+ struct mt7915_mcu_muru_stats *mu_stats;
+ int ret;
+
+ struct {
+ __le32 cmd;
+ u8 band_idx;
+ } req = {
+ .cmd = cpu_to_le32(MURU_GET_TXC_TX_STATS),
+ .band_idx = phy->mt76->band_idx,
+ };
+
+ ret = mt76_mcu_send_and_get_msg(&dev->mt76, MCU_EXT_CMD(MURU_CTRL),
+ &req, sizeof(req), true, &skb);
+ if (ret)
+ return ret;
+
+ mu_stats = (struct mt7915_mcu_muru_stats *)(skb->data);
+
+ /* accumulate stats, these are clear-on-read */
+#define __dl_u32(s) phy->mib.dl_##s += le32_to_cpu(mu_stats->dl.s)
+#define __ul_u32(s) phy->mib.ul_##s += le32_to_cpu(mu_stats->ul.s)
+ __dl_u32(cck_cnt);
+ __dl_u32(ofdm_cnt);
+ __dl_u32(htmix_cnt);
+ __dl_u32(htgf_cnt);
+ __dl_u32(vht_su_cnt);
+ __dl_u32(vht_2mu_cnt);
+ __dl_u32(vht_3mu_cnt);
+ __dl_u32(vht_4mu_cnt);
+ __dl_u32(he_su_cnt);
+ __dl_u32(he_2ru_cnt);
+ __dl_u32(he_2mu_cnt);
+ __dl_u32(he_3ru_cnt);
+ __dl_u32(he_3mu_cnt);
+ __dl_u32(he_4ru_cnt);
+ __dl_u32(he_4mu_cnt);
+ __dl_u32(he_5to8ru_cnt);
+ __dl_u32(he_9to16ru_cnt);
+ __dl_u32(he_gtr16ru_cnt);
+
+ __ul_u32(hetrig_su_cnt);
+ __ul_u32(hetrig_2ru_cnt);
+ __ul_u32(hetrig_3ru_cnt);
+ __ul_u32(hetrig_4ru_cnt);
+ __ul_u32(hetrig_5to8ru_cnt);
+ __ul_u32(hetrig_9to16ru_cnt);
+ __ul_u32(hetrig_gtr16ru_cnt);
+ __ul_u32(hetrig_2mu_cnt);
+ __ul_u32(hetrig_3mu_cnt);
+ __ul_u32(hetrig_4mu_cnt);
+#undef __dl_u32
+#undef __ul_u32
+
+ dev_kfree_skb(skb);
+
+ return 0;
+}
+
+static int mt7915_mcu_set_mwds(struct mt7915_dev *dev, bool enabled)
+{
+ struct {
+ u8 enable;
+ u8 _rsv[3];
+ } __packed req = {
+ .enable = enabled
+ };
+
+ return mt76_mcu_send_msg(&dev->mt76, MCU_WA_EXT_CMD(MWDS_SUPPORT), &req,
+ sizeof(req), false);
+}
+
+int mt7915_mcu_set_muru_ctrl(struct mt7915_dev *dev, u32 cmd, u32 val)
+{
+ struct {
+ __le32 cmd;
+ u8 val[4];
+ } __packed req = {
+ .cmd = cpu_to_le32(cmd),
+ };
+
+ put_unaligned_le32(val, req.val);
+
+ return mt76_mcu_send_msg(&dev->mt76, MCU_EXT_CMD(MURU_CTRL), &req,
+ sizeof(req), false);
+}
+
+static int
+mt7915_mcu_init_rx_airtime(struct mt7915_dev *dev)
+{
+#define RX_AIRTIME_FEATURE_CTRL 1
+#define RX_AIRTIME_BITWISE_CTRL 2
+#define RX_AIRTIME_CLEAR_EN 1
+ struct {
+ __le16 field;
+ __le16 sub_field;
+ __le32 set_status;
+ __le32 get_status;
+ u8 _rsv[12];
+
+ bool airtime_en;
+ bool mibtime_en;
+ bool earlyend_en;
+ u8 _rsv1[9];
+
+ bool airtime_clear;
+ bool mibtime_clear;
+ u8 _rsv2[98];
+ } __packed req = {
+ .field = cpu_to_le16(RX_AIRTIME_BITWISE_CTRL),
+ .sub_field = cpu_to_le16(RX_AIRTIME_CLEAR_EN),
+ .airtime_clear = true,
+ };
+ int ret;
+
+ ret = mt76_mcu_send_msg(&dev->mt76, MCU_EXT_CMD(RX_AIRTIME_CTRL), &req,
+ sizeof(req), true);
+ if (ret)
+ return ret;
+
+ req.field = cpu_to_le16(RX_AIRTIME_FEATURE_CTRL);
+ req.sub_field = cpu_to_le16(RX_AIRTIME_CLEAR_EN);
+ req.airtime_en = true;
+
+ return mt76_mcu_send_msg(&dev->mt76, MCU_EXT_CMD(RX_AIRTIME_CTRL), &req,
+ sizeof(req), true);
+}
+
+static int mt7915_red_set_watermark(struct mt7915_dev *dev)
+{
+#define RED_GLOBAL_TOKEN_WATERMARK 2
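+ /* high/low watermarks are offsets below the hw token pool size */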
+ struct {
+ __le32 args[3];
+ u8 cmd;
+ u8 version;
+ u8 __rsv1[4];
+ __le16 len;
+ __le16 high_mark;
+ __le16 low_mark;
+ u8 __rsv2[12];
+ } __packed req = {
+ .args[0] = cpu_to_le32(MCU_WA_PARAM_RED_SETTING),
+ .cmd = RED_GLOBAL_TOKEN_WATERMARK,
+ .len = cpu_to_le16(sizeof(req) - sizeof(req.args)),
+ .high_mark = cpu_to_le16(MT7915_HW_TOKEN_SIZE - 256),
+ .low_mark = cpu_to_le16(MT7915_HW_TOKEN_SIZE - 256 - 1536),
+ };
+
+ return mt76_mcu_send_msg(&dev->mt76, MCU_WA_PARAM_CMD(SET), &req,
+ sizeof(req), false);
+}
+
+static int mt7915_mcu_set_red(struct mt7915_dev *dev, bool enabled)
+{
+#define RED_DISABLE 0
+#define RED_BY_WA_ENABLE 2
+ int ret;
+ u32 red_type = enabled ? RED_BY_WA_ENABLE : RED_DISABLE;
+ __le32 req = cpu_to_le32(red_type);
+
+ if (enabled) {
+ ret = mt7915_red_set_watermark(dev);
+ if (ret < 0)
+ return ret;
+ }
+
+ ret = mt76_mcu_send_msg(&dev->mt76, MCU_EXT_CMD(RED_ENABLE), &req,
+ sizeof(req), false);
+ if (ret < 0)
+ return ret;
+
+ return mt7915_mcu_wa_cmd(dev, MCU_WA_PARAM_CMD(SET),
+ MCU_WA_PARAM_RED, enabled, 0);
+}
+
+int mt7915_mcu_init_firmware(struct mt7915_dev *dev)
+{
+ int ret;
+
+ /* force firmware operation mode into normal state,
+ * which should be set before the firmware download stage.
+ */
+ mt76_wr(dev, MT_SWDEF_MODE, MT_SWDEF_NORMAL_MODE);
+
+ ret = mt7915_driver_own(dev, 0);
+ if (ret)
+ return ret;
+ /* set driver own for band1 when a second hif exists */
+ if (dev->hif2) {
+ ret = mt7915_driver_own(dev, 1);
+ if (ret)
+ return ret;
+ }
+
+ ret = mt7915_load_firmware(dev);
+ if (ret)
+ return ret;
+
+ set_bit(MT76_STATE_MCU_RUNNING, &dev->mphy.state);
+ ret = mt7915_mcu_fw_log_2_host(dev, MCU_FW_LOG_WM, 0);
+ if (ret)
+ return ret;
+
+ ret = mt7915_mcu_fw_log_2_host(dev, MCU_FW_LOG_WA, 0);
+ if (ret)
+ return ret;
+
+ if ((mtk_wed_device_active(&dev->mt76.mmio.wed) &&
+ is_mt7915(&dev->mt76)) ||
+ !mtk_wed_get_rx_capa(&dev->mt76.mmio.wed))
+ mt7915_mcu_wa_cmd(dev, MCU_WA_PARAM_CMD(CAPABILITY), 0, 0, 0);
+
+ ret = mt7915_mcu_set_mwds(dev, 1);
+ if (ret)
+ return ret;
+
+ ret = mt7915_mcu_set_muru_ctrl(dev, MURU_SET_PLATFORM_TYPE,
+ MURU_PLATFORM_TYPE_PERF_LEVEL_2);
+ if (ret)
+ return ret;
+
+ ret = mt7915_mcu_init_rx_airtime(dev);
+ if (ret)
+ return ret;
+
+ return mt7915_mcu_set_red(dev, mtk_wed_device_active(&dev->mt76.mmio.wed));
+}
+
+int mt7915_mcu_init(struct mt7915_dev *dev)
+{
+ static const struct mt76_mcu_ops mt7915_mcu_ops = {
+ .headroom = sizeof(struct mt76_connac2_mcu_txd),
+ .mcu_skb_send_msg = mt7915_mcu_send_message,
+ .mcu_parse_response = mt7915_mcu_parse_response,
+ };
+
+ dev->mt76.mcu_ops = &mt7915_mcu_ops;
+
+ return mt7915_mcu_init_firmware(dev);
+}
+
+void mt7915_mcu_exit(struct mt7915_dev *dev)
+{
+ mt76_connac_mcu_restart(&dev->mt76);
+ if (mt7915_firmware_state(dev, false)) {
+ dev_err(dev->mt76.dev, "Failed to exit mcu\n");
+ goto out;
+ }
+
+ mt76_wr(dev, MT_TOP_LPCR_HOST_BAND(0), MT_TOP_LPCR_HOST_FW_OWN);
+ if (dev->hif2)
+ mt76_wr(dev, MT_TOP_LPCR_HOST_BAND(1),
+ MT_TOP_LPCR_HOST_FW_OWN);
+out:
+ skb_queue_purge(&dev->mt76.mcu.res_q);
+}
+
+static int
+mt7915_mcu_set_rx_hdr_trans_blacklist(struct mt7915_dev *dev, int band)
+{
+ struct {
+ u8 operation;
+ u8 count;
+ u8 _rsv[2];
+ u8 index;
+ u8 enable;
+ __le16 etype;
+ } req = {
+ .operation = 1,
+ .count = 1,
+ .enable = 1,
+ .etype = cpu_to_le16(ETH_P_PAE),
+ };
+
+ return mt76_mcu_send_msg(&dev->mt76, MCU_EXT_CMD(RX_HDR_TRANS),
+ &req, sizeof(req), false);
+}
+
+int mt7915_mcu_set_mac(struct mt7915_dev *dev, int band,
+ bool enable, bool hdr_trans)
+{
+ struct {
+ u8 operation;
+ u8 enable;
+ u8 check_bssid;
+ u8 insert_vlan;
+ u8 remove_vlan;
+ u8 tid;
+ u8 mode;
+ u8 rsv;
+ } __packed req_trans = {
+ .enable = hdr_trans,
+ };
+ struct {
+ u8 enable;
+ u8 band;
+ u8 rsv[2];
+ } __packed req_mac = {
+ .enable = enable,
+ .band = band,
+ };
+ int ret;
+
+ ret = mt76_mcu_send_msg(&dev->mt76, MCU_EXT_CMD(RX_HDR_TRANS),
+ &req_trans, sizeof(req_trans), false);
+ if (ret)
+ return ret;
+
+ if (hdr_trans)
+ mt7915_mcu_set_rx_hdr_trans_blacklist(dev, band);
+
+ return mt76_mcu_send_msg(&dev->mt76, MCU_EXT_CMD(MAC_INIT_CTRL),
+ &req_mac, sizeof(req_mac), true);
+}
+
+int mt7915_mcu_update_edca(struct mt7915_dev *dev, void *param)
+{
+ struct mt7915_mcu_tx *req = (struct mt7915_mcu_tx *)param;
+ u8 num = req->total;
+ size_t len = sizeof(*req) -
+ (IEEE80211_NUM_ACS - num) * sizeof(struct edca);
+
+ return mt76_mcu_send_msg(&dev->mt76, MCU_EXT_CMD(EDCA_UPDATE), req,
+ len, true);
+}
+
+int mt7915_mcu_set_tx(struct mt7915_dev *dev, struct ieee80211_vif *vif)
+{
+#define TX_CMD_MODE 1
+ struct mt7915_mcu_tx req = {
+ .valid = true,
+ .mode = TX_CMD_MODE,
+ .total = IEEE80211_NUM_ACS,
+ };
+ struct mt7915_vif *mvif = (struct mt7915_vif *)vif->drv_priv;
+ int ac;
+
+ for (ac = 0; ac < IEEE80211_NUM_ACS; ac++) {
+ struct ieee80211_tx_queue_params *q = &mvif->queue_params[ac];
+ struct edca *e = &req.edca[ac];
+
+ e->set = WMM_PARAM_SET;
+ e->queue = ac + mvif->mt76.wmm_idx * MT76_CONNAC_MAX_WMM_SETS;
+ e->aifs = q->aifs;
+ e->txop = cpu_to_le16(q->txop);
+
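+ /* mac80211 hands over CW as 2^n - 1; pass only the exponent n
+ * (fls), defaulting to 5/10, i.e. CWmin 31 and CWmax 1023.
+ */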
+ if (q->cw_min)
+ e->cw_min = fls(q->cw_min);
+ else
+ e->cw_min = 5;
+
+ if (q->cw_max)
+ e->cw_max = cpu_to_le16(fls(q->cw_max));
+ else
+ e->cw_max = cpu_to_le16(10);
+ }
+
+ return mt7915_mcu_update_edca(dev, &req);
+}
+
+int mt7915_mcu_set_fcc5_lpn(struct mt7915_dev *dev, int val)
+{
+ struct {
+ __le32 tag;
+ __le16 min_lpn;
+ u8 rsv[2];
+ } __packed req = {
+ .tag = cpu_to_le32(0x1),
+ .min_lpn = cpu_to_le16(val),
+ };
+
+ return mt76_mcu_send_msg(&dev->mt76, MCU_EXT_CMD(SET_RDD_TH), &req,
+ sizeof(req), true);
+}
+
+int mt7915_mcu_set_pulse_th(struct mt7915_dev *dev,
+ const struct mt7915_dfs_pulse *pulse)
+{
+ struct {
+ __le32 tag;
+
+ __le32 max_width; /* us */
+ __le32 max_pwr; /* dbm */
+ __le32 min_pwr; /* dbm */
+ __le32 min_stgr_pri; /* us */
+ __le32 max_stgr_pri; /* us */
+ __le32 min_cr_pri; /* us */
+ __le32 max_cr_pri; /* us */
+ } __packed req = {
+ .tag = cpu_to_le32(0x3),
+
+#define __req_field(field) .field = cpu_to_le32(pulse->field)
+ __req_field(max_width),
+ __req_field(max_pwr),
+ __req_field(min_pwr),
+ __req_field(min_stgr_pri),
+ __req_field(max_stgr_pri),
+ __req_field(min_cr_pri),
+ __req_field(max_cr_pri),
+#undef __req_field
+ };
+
+ return mt76_mcu_send_msg(&dev->mt76, MCU_EXT_CMD(SET_RDD_TH), &req,
+ sizeof(req), true);
+}
+
+int mt7915_mcu_set_radar_th(struct mt7915_dev *dev, int index,
+ const struct mt7915_dfs_pattern *pattern)
+{
+ struct {
+ __le32 tag;
+ __le16 radar_type;
+
+ u8 enb;
+ u8 stgr;
+ u8 min_crpn;
+ u8 max_crpn;
+ u8 min_crpr;
+ u8 min_pw;
+ __le32 min_pri;
+ __le32 max_pri;
+ u8 max_pw;
+ u8 min_crbn;
+ u8 max_crbn;
+ u8 min_stgpn;
+ u8 max_stgpn;
+ u8 min_stgpr;
+ u8 rsv[2];
+ __le32 min_stgpr_diff;
+ } __packed req = {
+ .tag = cpu_to_le32(0x2),
+ .radar_type = cpu_to_le16(index),
+
+#define __req_field_u8(field) .field = pattern->field
+#define __req_field_u32(field) .field = cpu_to_le32(pattern->field)
+ __req_field_u8(enb),
+ __req_field_u8(stgr),
+ __req_field_u8(min_crpn),
+ __req_field_u8(max_crpn),
+ __req_field_u8(min_crpr),
+ __req_field_u8(min_pw),
+ __req_field_u32(min_pri),
+ __req_field_u32(max_pri),
+ __req_field_u8(max_pw),
+ __req_field_u8(min_crbn),
+ __req_field_u8(max_crbn),
+ __req_field_u8(min_stgpn),
+ __req_field_u8(max_stgpn),
+ __req_field_u8(min_stgpr),
+ __req_field_u32(min_stgpr_diff),
+#undef __req_field_u8
+#undef __req_field_u32
+ };
+
+ return mt76_mcu_send_msg(&dev->mt76, MCU_EXT_CMD(SET_RDD_TH), &req,
+ sizeof(req), true);
+}
+
+static int
+mt7915_mcu_background_chain_ctrl(struct mt7915_phy *phy,
+ struct cfg80211_chan_def *chandef,
+ int cmd)
+{
+ struct mt7915_dev *dev = phy->dev;
+ struct mt76_phy *mphy = phy->mt76;
+ struct ieee80211_channel *chan = mphy->chandef.chan;
+ int freq = mphy->chandef.center_freq1;
+ struct mt7915_mcu_background_chain_ctrl req = {
+ .monitor_scan_type = 2, /* simple rx */
+ };
+
+ if (!chandef && cmd != CH_SWITCH_BACKGROUND_SCAN_STOP)
+ return -EINVAL;
+
+ if (!cfg80211_chandef_valid(&mphy->chandef))
+ return -EINVAL;
+
+ switch (cmd) {
+ case CH_SWITCH_BACKGROUND_SCAN_START: {
+ req.chan = chan->hw_value;
+ req.central_chan = ieee80211_frequency_to_channel(freq);
+ req.bw = mt76_connac_chan_bw(&mphy->chandef);
+ req.monitor_chan = chandef->chan->hw_value;
+ req.monitor_central_chan =
+ ieee80211_frequency_to_channel(chandef->center_freq1);
+ req.monitor_bw = mt76_connac_chan_bw(chandef);
+ req.band_idx = phy->mt76->band_idx;
+ req.scan_mode = 1;
+ break;
+ }
+ case CH_SWITCH_BACKGROUND_SCAN_RUNNING:
+ req.monitor_chan = chandef->chan->hw_value;
+ req.monitor_central_chan =
+ ieee80211_frequency_to_channel(chandef->center_freq1);
+ req.band_idx = phy->mt76->band_idx;
+ req.scan_mode = 2;
+ break;
+ case CH_SWITCH_BACKGROUND_SCAN_STOP:
+ req.chan = chan->hw_value;
+ req.central_chan = ieee80211_frequency_to_channel(freq);
+ req.bw = mt76_connac_chan_bw(&mphy->chandef);
+ req.tx_stream = hweight8(mphy->antenna_mask);
+ req.rx_stream = mphy->antenna_mask;
+ break;
+ default:
+ return -EINVAL;
+ }
+ req.band = chandef ? chandef->chan->band == NL80211_BAND_5GHZ : 1;
+
+ return mt76_mcu_send_msg(&dev->mt76, MCU_EXT_CMD(OFFCH_SCAN_CTRL),
+ &req, sizeof(req), false);
+}
+
+int mt7915_mcu_rdd_background_enable(struct mt7915_phy *phy,
+ struct cfg80211_chan_def *chandef)
+{
+ struct mt7915_dev *dev = phy->dev;
+ int err, region;
+
+ if (!chandef) { /* disable offchain */
+ err = mt76_connac_mcu_rdd_cmd(&dev->mt76, RDD_STOP, MT_RX_SEL2,
+ 0, 0);
+ if (err)
+ return err;
+
+ return mt7915_mcu_background_chain_ctrl(phy, NULL,
+ CH_SWITCH_BACKGROUND_SCAN_STOP);
+ }
+
+ err = mt7915_mcu_background_chain_ctrl(phy, chandef,
+ CH_SWITCH_BACKGROUND_SCAN_START);
+ if (err)
+ return err;
+
+ switch (dev->mt76.region) {
+ case NL80211_DFS_ETSI:
+ region = 0;
+ break;
+ case NL80211_DFS_JP:
+ region = 2;
+ break;
+ case NL80211_DFS_FCC:
+ default:
+ region = 1;
+ break;
+ }
+
+ return mt76_connac_mcu_rdd_cmd(&dev->mt76, RDD_START, MT_RX_SEL2,
+ 0, region);
+}
+
+int mt7915_mcu_set_chan_info(struct mt7915_phy *phy, int cmd)
+{
+ static const u8 ch_band[] = {
+ [NL80211_BAND_2GHZ] = 0,
+ [NL80211_BAND_5GHZ] = 1,
+ [NL80211_BAND_6GHZ] = 2,
+ };
+ struct mt7915_dev *dev = phy->dev;
+ struct cfg80211_chan_def *chandef = &phy->mt76->chandef;
+ int freq1 = chandef->center_freq1;
+ u8 band = phy->mt76->band_idx;
+ struct {
+ u8 control_ch;
+ u8 center_ch;
+ u8 bw;
+ u8 tx_path_num;
+ u8 rx_path; /* mask or num */
+ u8 switch_reason;
+ u8 band_idx;
+ u8 center_ch2; /* for 80+80 only */
+ __le16 cac_case;
+ u8 channel_band;
+ u8 rsv0;
+ __le32 outband_freq;
+ u8 txpower_drop;
+ u8 ap_bw;
+ u8 ap_center_ch;
+ u8 rsv1[57];
+ } __packed req = {
+ .control_ch = chandef->chan->hw_value,
+ .center_ch = ieee80211_frequency_to_channel(freq1),
+ .bw = mt76_connac_chan_bw(chandef),
+ .tx_path_num = hweight16(phy->mt76->chainmask),
+ .rx_path = phy->mt76->chainmask >> (dev->chainshift * band),
+ .band_idx = band,
+ .channel_band = ch_band[chandef->chan->band],
+ };
+
+#ifdef CONFIG_NL80211_TESTMODE
+ if (phy->mt76->test.tx_antenna_mask &&
+ mt76_testmode_enabled(phy->mt76)) {
+ req.tx_path_num = fls(phy->mt76->test.tx_antenna_mask);
+ req.rx_path = phy->mt76->test.tx_antenna_mask;
+ }
+#endif
+
+ if (mt76_connac_spe_idx(phy->mt76->antenna_mask))
+ req.tx_path_num = fls(phy->mt76->antenna_mask);
+
+ if (phy->mt76->hw->conf.flags & IEEE80211_CONF_MONITOR)
+ req.switch_reason = CH_SWITCH_NORMAL;
+ else if (phy->mt76->hw->conf.flags & IEEE80211_CONF_OFFCHANNEL ||
+ phy->mt76->hw->conf.flags & IEEE80211_CONF_IDLE)
+ req.switch_reason = CH_SWITCH_SCAN_BYPASS_DPD;
+ else if (!cfg80211_reg_can_beacon(phy->mt76->hw->wiphy, chandef,
+ NL80211_IFTYPE_AP))
+ req.switch_reason = CH_SWITCH_DFS;
+ else
+ req.switch_reason = CH_SWITCH_NORMAL;
+
+ if (cmd == MCU_EXT_CMD(CHANNEL_SWITCH))
+ req.rx_path = hweight8(req.rx_path);
+
+ if (chandef->width == NL80211_CHAN_WIDTH_80P80) {
+ int freq2 = chandef->center_freq2;
+
+ req.center_ch2 = ieee80211_frequency_to_channel(freq2);
+ }
+
+ return mt76_mcu_send_msg(&dev->mt76, cmd, &req, sizeof(req), true);
+}
+
+static int mt7915_mcu_set_eeprom_flash(struct mt7915_dev *dev)
+{
+#define MAX_PAGE_IDX_MASK GENMASK(7, 5)
+#define PAGE_IDX_MASK GENMASK(4, 2)
+#define PER_PAGE_SIZE 0x400
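+ /* push the EEPROM image to the firmware in 1KB (PER_PAGE_SIZE) pages */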
+ struct mt7915_mcu_eeprom req = { .buffer_mode = EE_MODE_BUFFER };
+ u16 eeprom_size = mt7915_eeprom_size(dev);
+ u8 total = DIV_ROUND_UP(eeprom_size, PER_PAGE_SIZE);
+ u8 *eep = (u8 *)dev->mt76.eeprom.data;
+ int eep_len;
+ int i;
+
+ for (i = 0; i < total; i++, eep += eep_len) {
+ struct sk_buff *skb;
+ int ret;
+
+ if (i == total - 1 && !!(eeprom_size % PER_PAGE_SIZE))
+ eep_len = eeprom_size % PER_PAGE_SIZE;
+ else
+ eep_len = PER_PAGE_SIZE;
+
+ skb = mt76_mcu_msg_alloc(&dev->mt76, NULL,
+ sizeof(req) + eep_len);
+ if (!skb)
+ return -ENOMEM;
+
+ req.format = FIELD_PREP(MAX_PAGE_IDX_MASK, total - 1) |
+ FIELD_PREP(PAGE_IDX_MASK, i) | EE_FORMAT_WHOLE;
+ req.len = cpu_to_le16(eep_len);
+
+ skb_put_data(skb, &req, sizeof(req));
+ skb_put_data(skb, eep, eep_len);
+
+ ret = mt76_mcu_skb_send_msg(&dev->mt76, skb,
+ MCU_EXT_CMD(EFUSE_BUFFER_MODE), true);
+ if (ret)
+ return ret;
+ }
+
+ return 0;
+}
+
+int mt7915_mcu_set_eeprom(struct mt7915_dev *dev)
+{
+ struct mt7915_mcu_eeprom req = {
+ .buffer_mode = EE_MODE_EFUSE,
+ .format = EE_FORMAT_WHOLE,
+ };
+
+ if (dev->flash_mode)
+ return mt7915_mcu_set_eeprom_flash(dev);
+
+ return mt76_mcu_send_msg(&dev->mt76, MCU_EXT_CMD(EFUSE_BUFFER_MODE),
+ &req, sizeof(req), true);
+}
+
+int mt7915_mcu_get_eeprom(struct mt7915_dev *dev, u32 offset)
+{
+ struct mt7915_mcu_eeprom_info req = {
+ .addr = cpu_to_le32(round_down(offset,
+ MT7915_EEPROM_BLOCK_SIZE)),
+ };
+ struct mt7915_mcu_eeprom_info *res;
+ struct sk_buff *skb;
+ int ret;
+ u8 *buf;
+
+ ret = mt76_mcu_send_and_get_msg(&dev->mt76,
+ MCU_EXT_QUERY(EFUSE_ACCESS),
+ &req, sizeof(req), true, &skb);
+ if (ret)
+ return ret;
+
+ res = (struct mt7915_mcu_eeprom_info *)skb->data;
+ buf = dev->mt76.eeprom.data + le32_to_cpu(res->addr);
+ memcpy(buf, res->data, MT7915_EEPROM_BLOCK_SIZE);
+ dev_kfree_skb(skb);
+
+ return 0;
+}
+
+int mt7915_mcu_get_eeprom_free_block(struct mt7915_dev *dev, u8 *block_num)
+{
+ struct {
+ u8 _rsv;
+ u8 version;
+ u8 die_idx;
+ u8 _rsv2;
+ } __packed req = {
+ .version = 1,
+ };
+ struct sk_buff *skb;
+ int ret;
+
+ ret = mt76_mcu_send_and_get_msg(&dev->mt76,
+ MCU_EXT_QUERY(EFUSE_FREE_BLOCK),
+ &req, sizeof(req), true, &skb);
+ if (ret)
+ return ret;
+
+ *block_num = *(u8 *)skb->data;
+ dev_kfree_skb(skb);
+
+ return 0;
+}
+
+static int mt7915_mcu_set_pre_cal(struct mt7915_dev *dev, u8 idx,
+ u8 *data, u32 len, int cmd)
+{
+ struct {
+ u8 dir;
+ u8 valid;
+ __le16 bitmap;
+ s8 precal;
+ u8 action;
+ u8 band;
+ u8 idx;
+ u8 rsv[4];
+ __le32 len;
+ } req = {};
+ struct sk_buff *skb;
+
+ skb = mt76_mcu_msg_alloc(&dev->mt76, NULL, sizeof(req) + len);
+ if (!skb)
+ return -ENOMEM;
+
+ req.idx = idx;
+ req.len = cpu_to_le32(len);
+ skb_put_data(skb, &req, sizeof(req));
+ skb_put_data(skb, data, len);
+
+ return mt76_mcu_skb_send_msg(&dev->mt76, skb, cmd, false);
+}
+
+int mt7915_mcu_apply_group_cal(struct mt7915_dev *dev)
+{
+ u8 idx = 0, *cal = dev->cal, *eep = dev->mt76.eeprom.data;
+ u32 total = MT_EE_CAL_GROUP_SIZE;
+
+ if (!(eep[MT_EE_DO_PRE_CAL] & MT_EE_WIFI_CAL_GROUP))
+ return 0;
+
+ /*
+ * Items: Rx DCOC, RSSI DCOC, Tx TSSI DCOC, Tx LPFG,
+ * Tx FDIQ, Tx DCIQ, Rx FDIQ, Rx FIIQ, ADCDCOC
+ */
+ while (total > 0) {
+ int ret, len;
+
+ len = min_t(u32, total, MT_EE_CAL_UNIT);
+
+ ret = mt7915_mcu_set_pre_cal(dev, idx, cal, len,
+ MCU_EXT_CMD(GROUP_PRE_CAL_INFO));
+ if (ret)
+ return ret;
+
+ total -= len;
+ cal += len;
+ idx++;
+ }
+
+ return 0;
+}
+
+static int mt7915_find_freq_idx(const u16 *freqs, int n_freqs, u16 cur)
+{
+ int i;
+
+ for (i = 0; i < n_freqs; i++)
+ if (cur == freqs[i])
+ return i;
+
+ return -1;
+}
+
+static int mt7915_dpd_freq_idx(u16 freq, u8 bw)
+{
+ static const u16 freq_list[] = {
+ 5180, 5200, 5220, 5240,
+ 5260, 5280, 5300, 5320,
+ 5500, 5520, 5540, 5560,
+ 5580, 5600, 5620, 5640,
+ 5660, 5680, 5700, 5745,
+ 5765, 5785, 5805, 5825
+ };
+ int offset_2g = ARRAY_SIZE(freq_list);
+ int idx;
+
+ if (freq < 4000) {
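+ /* 2.4 GHz channels map to three groups (ch 1-4, 5-9 and
+ * 10-14) appended after the 5 GHz entries.
+ */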
+ if (freq < 2432)
+ return offset_2g;
+ if (freq < 2457)
+ return offset_2g + 1;
+
+ return offset_2g + 2;
+ }
+
+ if (bw == NL80211_CHAN_WIDTH_80P80 || bw == NL80211_CHAN_WIDTH_160)
+ return -1;
+
+ if (bw != NL80211_CHAN_WIDTH_20) {
+ idx = mt7915_find_freq_idx(freq_list, ARRAY_SIZE(freq_list),
+ freq + 10);
+ if (idx >= 0)
+ return idx;
+
+ idx = mt7915_find_freq_idx(freq_list, ARRAY_SIZE(freq_list),
+ freq - 10);
+ if (idx >= 0)
+ return idx;
+ }
+
+ return mt7915_find_freq_idx(freq_list, ARRAY_SIZE(freq_list), freq);
+}
+
+int mt7915_mcu_apply_tx_dpd(struct mt7915_phy *phy)
+{
+ struct mt7915_dev *dev = phy->dev;
+ struct cfg80211_chan_def *chandef = &phy->mt76->chandef;
+ u16 total = 2, center_freq = chandef->center_freq1;
+ u8 *cal = dev->cal, *eep = dev->mt76.eeprom.data;
+ int idx;
+
+ if (!(eep[MT_EE_DO_PRE_CAL] & MT_EE_WIFI_CAL_DPD))
+ return 0;
+
+ idx = mt7915_dpd_freq_idx(center_freq, chandef->width);
+ if (idx < 0)
+ return -EINVAL;
+
+ /* Items: Tx DPD, Tx Flatness */
+ idx = idx * 2;
+ cal += MT_EE_CAL_GROUP_SIZE;
+
+ while (total--) {
+ int ret;
+
+ cal += (idx * MT_EE_CAL_UNIT);
+ ret = mt7915_mcu_set_pre_cal(dev, idx, cal, MT_EE_CAL_UNIT,
+ MCU_EXT_CMD(DPD_PRE_CAL_INFO));
+ if (ret)
+ return ret;
+
+ idx++;
+ }
+
+ return 0;
+}
+
+int mt7915_mcu_get_chan_mib_info(struct mt7915_phy *phy, bool chan_switch)
+{
+ struct mt76_channel_state *state = phy->mt76->chan_state;
+ struct mt76_channel_state *state_ts = &phy->state_ts;
+ struct mt7915_dev *dev = phy->dev;
+ struct mt7915_mcu_mib *res, req[5];
+ struct sk_buff *skb;
+ static const u32 *offs;
+ int i, ret, len, offs_cc;
+ u64 cc_tx;
+
+ /* strict order */
+ if (is_mt7915(&dev->mt76)) {
+ static const u32 chip_offs[] = {
+ MIB_NON_WIFI_TIME,
+ MIB_TX_TIME,
+ MIB_RX_TIME,
+ MIB_OBSS_AIRTIME,
+ MIB_TXOP_INIT_COUNT,
+ };
+ len = ARRAY_SIZE(chip_offs);
+ offs = chip_offs;
+ offs_cc = 20;
+ } else {
+ static const u32 chip_offs[] = {
+ MIB_NON_WIFI_TIME_V2,
+ MIB_TX_TIME_V2,
+ MIB_RX_TIME_V2,
+ MIB_OBSS_AIRTIME_V2
+ };
+ len = ARRAY_SIZE(chip_offs);
+ offs = chip_offs;
+ offs_cc = 0;
+ }
+
+ for (i = 0; i < len; i++) {
+ req[i].band = cpu_to_le32(phy->mt76->band_idx);
+ req[i].offs = cpu_to_le32(offs[i]);
+ }
+
+ ret = mt76_mcu_send_and_get_msg(&dev->mt76, MCU_EXT_CMD(GET_MIB_INFO),
+ req, len * sizeof(req[0]), true, &skb);
+ if (ret)
+ return ret;
+
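+ /* MIB entries start offs_cc bytes into the response: 20 on mt7915, 0 on later chips */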
+ res = (struct mt7915_mcu_mib *)(skb->data + offs_cc);
+
+#define __res_u64(s) le64_to_cpu(res[s].data)
+ /* subtract Tx backoff time from Tx duration */
+ cc_tx = is_mt7915(&dev->mt76) ? __res_u64(1) - __res_u64(4) : __res_u64(1);
+
+ if (chan_switch)
+ goto out;
+
+ state->cc_tx += cc_tx - state_ts->cc_tx;
+ state->cc_bss_rx += __res_u64(2) - state_ts->cc_bss_rx;
+ state->cc_rx += __res_u64(2) + __res_u64(3) - state_ts->cc_rx;
+ state->cc_busy += __res_u64(0) + cc_tx + __res_u64(2) + __res_u64(3) -
+ state_ts->cc_busy;
+
+out:
+ state_ts->cc_tx = cc_tx;
+ state_ts->cc_bss_rx = __res_u64(2);
+ state_ts->cc_rx = __res_u64(2) + __res_u64(3);
+ state_ts->cc_busy = __res_u64(0) + cc_tx + __res_u64(2) + __res_u64(3);
+#undef __res_u64
+
+ dev_kfree_skb(skb);
+
+ return 0;
+}
+
+int mt7915_mcu_get_temperature(struct mt7915_phy *phy)
+{
+ struct mt7915_dev *dev = phy->dev;
+ struct {
+ u8 ctrl_id;
+ u8 action;
+ u8 band_idx;
+ u8 rsv[5];
+ } req = {
+ .ctrl_id = THERMAL_SENSOR_TEMP_QUERY,
+ .band_idx = phy->mt76->band_idx,
+ };
+
+ return mt76_mcu_send_msg(&dev->mt76, MCU_EXT_CMD(THERMAL_CTRL), &req,
+ sizeof(req), true);
+}
+
+int mt7915_mcu_set_thermal_throttling(struct mt7915_phy *phy, u8 state)
+{
+ struct mt7915_dev *dev = phy->dev;
+ struct mt7915_mcu_thermal_ctrl req = {
+ .band_idx = phy->mt76->band_idx,
+ .ctrl_id = THERMAL_PROTECT_DUTY_CONFIG,
+ };
+ int level, ret;
+
+ /* set duty cycle and level */
+ for (level = 0; level < 4; level++) {
+ req.duty.duty_level = level;
+ req.duty.duty_cycle = state;
+ state /= 2;
+
+ ret = mt76_mcu_send_msg(&dev->mt76, MCU_EXT_CMD(THERMAL_PROT),
+ &req, sizeof(req), false);
+ if (ret)
+ return ret;
+ }
+ return 0;
+}
+
+int mt7915_mcu_set_thermal_protect(struct mt7915_phy *phy)
+{
+ struct mt7915_dev *dev = phy->dev;
+ struct {
+ struct mt7915_mcu_thermal_ctrl ctrl;
+
+ __le32 trigger_temp;
+ __le32 restore_temp;
+ __le16 sustain_time;
+ u8 rsv[2];
+ } __packed req = {
+ .ctrl = {
+ .band_idx = phy->mt76->band_idx,
+ .type.protect_type = 1,
+ .type.trigger_type = 1,
+ },
+ };
+ int ret;
+
+ req.ctrl.ctrl_id = THERMAL_PROTECT_DISABLE;
+ ret = mt76_mcu_send_msg(&dev->mt76, MCU_EXT_CMD(THERMAL_PROT),
+ &req, sizeof(req.ctrl), false);
+
+ if (ret)
+ return ret;
+
+ /* set high-temperature trigger threshold */
+ req.ctrl.ctrl_id = THERMAL_PROTECT_ENABLE;
+ /* back off the restore threshold by a ~10 degree safety margin */
+ req.restore_temp = cpu_to_le32(phy->throttle_temp[0] - 10);
+ req.trigger_temp = cpu_to_le32(phy->throttle_temp[1]);
+ req.sustain_time = cpu_to_le16(10);
+
+ return mt76_mcu_send_msg(&dev->mt76, MCU_EXT_CMD(THERMAL_PROT),
+ &req, sizeof(req), false);
+}
+
+int mt7915_mcu_set_txpower_frame_min(struct mt7915_phy *phy, s8 txpower)
+{
+ struct mt7915_dev *dev = phy->dev;
+ struct {
+ u8 format_id;
+ u8 rsv;
+ u8 band_idx;
+ s8 txpower_min;
+ } __packed req = {
+ .format_id = TX_POWER_LIMIT_FRAME_MIN,
+ .band_idx = phy->mt76->band_idx,
+ .txpower_min = txpower * 2, /* in 0.5 dB units */
+ };
+
+ return mt76_mcu_send_msg(&dev->mt76,
+ MCU_EXT_CMD(TX_POWER_FEATURE_CTRL), &req,
+ sizeof(req), true);
+}
+
+int mt7915_mcu_set_txpower_frame(struct mt7915_phy *phy,
+ struct ieee80211_vif *vif,
+ struct ieee80211_sta *sta, s8 txpower)
+{
+ struct mt7915_sta *msta = (struct mt7915_sta *)sta->drv_priv;
+ struct mt7915_dev *dev = phy->dev;
+ struct mt76_phy *mphy = phy->mt76;
+ struct {
+ u8 format_id;
+ u8 rsv[3];
+ u8 band_idx;
+ s8 txpower_max;
+ __le16 wcid;
+ s8 txpower_offs[48];
+ } __packed req = {
+ .format_id = TX_POWER_LIMIT_FRAME,
+ .band_idx = phy->mt76->band_idx,
+ .txpower_max = DIV_ROUND_UP(mphy->txpower_cur, 2),
+ .wcid = cpu_to_le16(msta->wcid.idx),
+ };
+ int ret;
+ s8 txpower_sku[MT7915_SKU_RATE_NUM];
+
+ ret = mt7915_mcu_get_txpower_sku(phy, txpower_sku, sizeof(txpower_sku));
+ if (ret)
+ return ret;
+
+ txpower = mt7915_get_power_bound(phy, txpower);
+ if (txpower > mphy->txpower_cur || txpower < 0)
+ return -EINVAL;
+
+ if (txpower) {
+ u32 offs, len, i;
+
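+ /* step through the per-rate SKU table to the block matching
+ * the highest mode (HT/VHT/HE) the station supports.
+ */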
+ if (sta->deflink.ht_cap.ht_supported) {
+ const u8 *sku_len = mt7915_sku_group_len;
+
+ offs = sku_len[SKU_CCK] + sku_len[SKU_OFDM];
+ len = sku_len[SKU_HT_BW20] + sku_len[SKU_HT_BW40];
+
+ if (sta->deflink.vht_cap.vht_supported) {
+ offs += len;
+ len = sku_len[SKU_VHT_BW20] * 4;
+
+ if (sta->deflink.he_cap.has_he) {
+ offs += len + sku_len[SKU_HE_RU26] * 3;
+ len = sku_len[SKU_HE_RU242] * 4;
+ }
+ }
+ } else {
+ return -EINVAL;
+ }
+
+ for (i = 0; i < len; i++, offs++)
+ req.txpower_offs[i] =
+ DIV_ROUND_UP(txpower - txpower_sku[offs], 2);
+ }
+
+ return mt76_mcu_send_msg(&dev->mt76,
+ MCU_EXT_CMD(TX_POWER_FEATURE_CTRL), &req,
+ sizeof(req), true);
+}
+
+int mt7915_mcu_set_txpower_sku(struct mt7915_phy *phy)
+{
+ struct mt7915_dev *dev = phy->dev;
+ struct mt76_phy *mphy = phy->mt76;
+ struct ieee80211_hw *hw = mphy->hw;
+ struct mt7915_mcu_txpower_sku req = {
+ .format_id = TX_POWER_LIMIT_TABLE,
+ .band_idx = phy->mt76->band_idx,
+ };
+ struct mt76_power_limits limits_array;
+ s8 *la = (s8 *)&limits_array;
+ int i, idx;
+ int tx_power;
+
+ tx_power = mt7915_get_power_bound(phy, hw->conf.power_level);
+ tx_power = mt76_get_rate_power_limits(mphy, mphy->chandef.chan,
+ &limits_array, tx_power);
+ mphy->txpower_cur = tx_power;
+
+ for (i = 0, idx = 0; i < ARRAY_SIZE(mt7915_sku_group_len); i++) {
+ u8 mcs_num, len = mt7915_sku_group_len[i];
+ int j;
+
+ if (i >= SKU_HT_BW20 && i <= SKU_VHT_BW160) {
+ mcs_num = 10;
+
+ if (i == SKU_HT_BW20 || i == SKU_VHT_BW20)
+ la = (s8 *)&limits_array + 12;
+ } else {
+ mcs_num = len;
+ }
+
+ for (j = 0; j < min_t(u8, mcs_num, len); j++)
+ req.txpower_sku[idx + j] = la[j];
+
+ la += mcs_num;
+ idx += len;
+ }
+
+ return mt76_mcu_send_msg(&dev->mt76,
+ MCU_EXT_CMD(TX_POWER_FEATURE_CTRL), &req,
+ sizeof(req), true);
+}
+
+int mt7915_mcu_get_txpower_sku(struct mt7915_phy *phy, s8 *txpower, int len)
+{
+#define RATE_POWER_INFO 2
+ struct mt7915_dev *dev = phy->dev;
+ struct {
+ u8 format_id;
+ u8 category;
+ u8 band_idx;
+ u8 _rsv;
+ } __packed req = {
+ .format_id = TX_POWER_LIMIT_INFO,
+ .category = RATE_POWER_INFO,
+ .band_idx = phy->mt76->band_idx,
+ };
+ s8 txpower_sku[MT7915_SKU_RATE_NUM][2];
+ struct sk_buff *skb;
+ int ret, i;
+
+ ret = mt76_mcu_send_and_get_msg(&dev->mt76,
+ MCU_EXT_CMD(TX_POWER_FEATURE_CTRL),
+ &req, sizeof(req), true, &skb);
+ if (ret)
+ return ret;
+
+ memcpy(txpower_sku, skb->data + 4, sizeof(txpower_sku));
+ for (i = 0; i < len; i++)
+ txpower[i] = txpower_sku[i][req.band_idx];
+
+ dev_kfree_skb(skb);
+
+ return 0;
+}
+
+int mt7915_mcu_set_test_param(struct mt7915_dev *dev, u8 param, bool test_mode,
+ u8 en)
+{
+ struct {
+ u8 test_mode_en;
+ u8 param_idx;
+ u8 _rsv[2];
+
+ u8 enable;
+ u8 _rsv2[3];
+
+ u8 pad[8];
+ } __packed req = {
+ .test_mode_en = test_mode,
+ .param_idx = param,
+ .enable = en,
+ };
+
+ return mt76_mcu_send_msg(&dev->mt76, MCU_EXT_CMD(ATE_CTRL), &req,
+ sizeof(req), false);
+}
+
+int mt7915_mcu_set_sku_en(struct mt7915_phy *phy, bool enable)
+{
+ struct mt7915_dev *dev = phy->dev;
+ struct mt7915_sku {
+ u8 format_id;
+ u8 sku_enable;
+ u8 band_idx;
+ u8 rsv;
+ } __packed req = {
+ .format_id = TX_POWER_LIMIT_ENABLE,
+ .band_idx = phy->mt76->band_idx,
+ .sku_enable = enable,
+ };
+
+ return mt76_mcu_send_msg(&dev->mt76,
+ MCU_EXT_CMD(TX_POWER_FEATURE_CTRL), &req,
+ sizeof(req), true);
+}
+
+int mt7915_mcu_set_ser(struct mt7915_dev *dev, u8 action, u8 set, u8 band)
+{
+ struct {
+ u8 action;
+ u8 set;
+ u8 band;
+ u8 rsv;
+ } req = {
+ .action = action,
+ .set = set,
+ .band = band,
+ };
+
+ return mt76_mcu_send_msg(&dev->mt76, MCU_EXT_CMD(SET_SER_TRIGGER),
+ &req, sizeof(req), false);
+}
+
+int mt7915_mcu_set_txbf(struct mt7915_dev *dev, u8 action)
+{
+ struct {
+ u8 action;
+ union {
+ struct {
+ u8 snd_mode;
+ u8 sta_num;
+ u8 rsv;
+ u8 wlan_idx[4];
+ __le32 snd_period; /* ms */
+ } __packed snd;
+ struct {
+ bool ebf;
+ bool ibf;
+ u8 rsv;
+ } __packed type;
+ struct {
+ u8 bf_num;
+ u8 bf_bitmap;
+ u8 bf_sel[8];
+ u8 rsv[5];
+ } __packed mod;
+ };
+ } __packed req = {
+ .action = action,
+ };
+
+#define MT_BF_PROCESSING 4
+ switch (action) {
+ case MT_BF_SOUNDING_ON:
+ req.snd.snd_mode = MT_BF_PROCESSING;
+ break;
+ case MT_BF_TYPE_UPDATE:
+ req.type.ebf = true;
+ req.type.ibf = dev->ibf;
+ break;
+ case MT_BF_MODULE_UPDATE:
+ req.mod.bf_num = 2;
+ req.mod.bf_bitmap = GENMASK(1, 0);
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ return mt76_mcu_send_msg(&dev->mt76, MCU_EXT_CMD(TXBF_ACTION), &req,
+ sizeof(req), true);
+}
+
+static int
+mt7915_mcu_enable_obss_spr(struct mt7915_phy *phy, u8 action, u8 val)
+{
+ struct mt7915_dev *dev = phy->dev;
+ struct mt7915_mcu_sr_ctrl req = {
+ .action = action,
+ .argnum = 1,
+ .band_idx = phy->mt76->band_idx,
+ .val = cpu_to_le32(val),
+ };
+
+ return mt76_mcu_send_msg(&dev->mt76, MCU_EXT_CMD(SET_SPR), &req,
+ sizeof(req), true);
+}
+
+static int
+mt7915_mcu_set_obss_spr_pd(struct mt7915_phy *phy,
+ struct ieee80211_he_obss_pd *he_obss_pd)
+{
+ struct mt7915_dev *dev = phy->dev;
+ struct {
+ struct mt7915_mcu_sr_ctrl ctrl;
+ struct {
+ u8 pd_th_non_srg;
+ u8 pd_th_srg;
+ u8 period_offs;
+ u8 rcpi_src;
+ __le16 obss_pd_min;
+ __le16 obss_pd_min_srg;
+ u8 resp_txpwr_mode;
+ u8 txpwr_restrict_mode;
+ u8 txpwr_ref;
+ u8 rsv[3];
+ } __packed param;
+ } __packed req = {
+ .ctrl = {
+ .action = SPR_SET_PARAM,
+ .argnum = 9,
+ .band_idx = phy->mt76->band_idx,
+ },
+ };
+ int ret;
+ u8 max_th = 82, non_srg_max_th = 62;
+
+ /* disable firmware dynamical PD asjustment */
+ ret = mt7915_mcu_enable_obss_spr(phy, SPR_ENABLE_DPD, false);
+ if (ret)
+ return ret;
+
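+ /* OBSS PD levels span -82..-62 dBm; the thresholds here are their positive magnitudes */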
+ if (he_obss_pd->sr_ctrl &
+ IEEE80211_HE_SPR_NON_SRG_OBSS_PD_SR_DISALLOWED)
+ req.param.pd_th_non_srg = max_th;
+ else if (he_obss_pd->sr_ctrl & IEEE80211_HE_SPR_NON_SRG_OFFSET_PRESENT)
+ req.param.pd_th_non_srg = max_th - he_obss_pd->non_srg_max_offset;
+ else
+ req.param.pd_th_non_srg = non_srg_max_th;
+
+ if (he_obss_pd->sr_ctrl & IEEE80211_HE_SPR_SRG_INFORMATION_PRESENT)
+ req.param.pd_th_srg = max_th - he_obss_pd->max_offset;
+
+ req.param.obss_pd_min = cpu_to_le16(82);
+ req.param.obss_pd_min_srg = cpu_to_le16(82);
+ req.param.txpwr_restrict_mode = 2;
+ req.param.txpwr_ref = 21;
+
+ return mt76_mcu_send_msg(&dev->mt76, MCU_EXT_CMD(SET_SPR), &req,
+ sizeof(req), true);
+}
+
+static int
+mt7915_mcu_set_obss_spr_siga(struct mt7915_phy *phy, struct ieee80211_vif *vif,
+ struct ieee80211_he_obss_pd *he_obss_pd)
+{
+ struct mt7915_vif *mvif = (struct mt7915_vif *)vif->drv_priv;
+ struct mt7915_dev *dev = phy->dev;
+ u8 omac = mvif->mt76.omac_idx;
+ struct {
+ struct mt7915_mcu_sr_ctrl ctrl;
+ struct {
+ u8 omac;
+ u8 rsv[3];
+ u8 flag[20];
+ } __packed siga;
+ } __packed req = {
+ .ctrl = {
+ .action = SPR_SET_SIGA,
+ .argnum = 1,
+ .band_idx = phy->mt76->band_idx,
+ },
+ .siga = {
+ .omac = omac > HW_BSSID_MAX ? omac - 12 : omac,
+ },
+ };
+ int ret;
+
+ if (he_obss_pd->sr_ctrl & IEEE80211_HE_SPR_HESIGA_SR_VAL15_ALLOWED)
+ req.siga.flag[req.siga.omac] = 0xf;
+ else
+ return 0;
+
+ /* switch to normal AP mode */
+ ret = mt7915_mcu_enable_obss_spr(phy, SPR_ENABLE_MODE, 0);
+ if (ret)
+ return ret;
+
+ return mt76_mcu_send_msg(&dev->mt76, MCU_EXT_CMD(SET_SPR), &req,
+ sizeof(req), true);
+}
+
+static int
+mt7915_mcu_set_obss_spr_bitmap(struct mt7915_phy *phy,
+ struct ieee80211_he_obss_pd *he_obss_pd)
+{
+ struct mt7915_dev *dev = phy->dev;
+ struct {
+ struct mt7915_mcu_sr_ctrl ctrl;
+ struct {
+ __le32 color_l[2];
+ __le32 color_h[2];
+ __le32 bssid_l[2];
+ __le32 bssid_h[2];
+ } __packed bitmap;
+ } __packed req = {
+ .ctrl = {
+ .action = SPR_SET_SRG_BITMAP,
+ .argnum = 4,
+ .band_idx = phy->mt76->band_idx,
+ },
+ };
+ u32 bitmap;
+
+ memcpy(&bitmap, he_obss_pd->bss_color_bitmap, sizeof(bitmap));
+ req.bitmap.color_l[req.ctrl.band_idx] = cpu_to_le32(bitmap);
+
+ memcpy(&bitmap, he_obss_pd->bss_color_bitmap + 4, sizeof(bitmap));
+ req.bitmap.color_h[req.ctrl.band_idx] = cpu_to_le32(bitmap);
+
+ memcpy(&bitmap, he_obss_pd->partial_bssid_bitmap, sizeof(bitmap));
+ req.bitmap.bssid_l[req.ctrl.band_idx] = cpu_to_le32(bitmap);
+
+ memcpy(&bitmap, he_obss_pd->partial_bssid_bitmap + 4, sizeof(bitmap));
+ req.bitmap.bssid_h[req.ctrl.band_idx] = cpu_to_le32(bitmap);
+
+ return mt76_mcu_send_msg(&dev->mt76, MCU_EXT_CMD(SET_SPR), &req,
+ sizeof(req), true);
+}
+
+int mt7915_mcu_add_obss_spr(struct mt7915_phy *phy, struct ieee80211_vif *vif,
+ struct ieee80211_he_obss_pd *he_obss_pd)
+{
+ int ret;
+
+ /* enable firmware scene detection algorithms */
+ ret = mt7915_mcu_enable_obss_spr(phy, SPR_ENABLE_SD, sr_scene_detect);
+ if (ret)
+ return ret;
+
+ /* firmware dynamically adjusts PD threshold so skip manual control */
+ if (sr_scene_detect && !he_obss_pd->enable)
+ return 0;
+
+ /* enable spatial reuse */
+ ret = mt7915_mcu_enable_obss_spr(phy, SPR_ENABLE, he_obss_pd->enable);
+ if (ret)
+ return ret;
+
+ if (sr_scene_detect || !he_obss_pd->enable)
+ return 0;
+
+ ret = mt7915_mcu_enable_obss_spr(phy, SPR_ENABLE_TX, true);
+ if (ret)
+ return ret;
+
+ /* set SRG/non-SRG OBSS PD threshold */
+ ret = mt7915_mcu_set_obss_spr_pd(phy, he_obss_pd);
+ if (ret)
+ return ret;
+
+ /* set SR prohibit */
+ ret = mt7915_mcu_set_obss_spr_siga(phy, vif, he_obss_pd);
+ if (ret)
+ return ret;
+
+ /* set SRG BSS color/BSSID bitmap */
+ return mt7915_mcu_set_obss_spr_bitmap(phy, he_obss_pd);
+}
+
+int mt7915_mcu_get_rx_rate(struct mt7915_phy *phy, struct ieee80211_vif *vif,
+ struct ieee80211_sta *sta, struct rate_info *rate)
+{
+ struct mt7915_vif *mvif = (struct mt7915_vif *)vif->drv_priv;
+ struct mt7915_sta *msta = (struct mt7915_sta *)sta->drv_priv;
+ struct mt7915_dev *dev = phy->dev;
+ struct mt76_phy *mphy = phy->mt76;
+ struct {
+ u8 category;
+ u8 band;
+ __le16 wcid;
+ } __packed req = {
+ .category = MCU_PHY_STATE_CONTENTION_RX_RATE,
+ .band = mvif->mt76.band_idx,
+ .wcid = cpu_to_le16(msta->wcid.idx),
+ };
+ struct ieee80211_supported_band *sband;
+ struct mt7915_mcu_phy_rx_info *res;
+ struct sk_buff *skb;
+ int ret;
+ bool cck = false;
+
+ ret = mt76_mcu_send_and_get_msg(&dev->mt76, MCU_EXT_CMD(PHY_STAT_INFO),
+ &req, sizeof(req), true, &skb);
+ if (ret)
+ return ret;
+
+ res = (struct mt7915_mcu_phy_rx_info *)skb->data;
+
+ rate->mcs = res->rate;
+ rate->nss = res->nsts + 1;
+
+ switch (res->mode) {
+ case MT_PHY_TYPE_CCK:
+ cck = true;
+ fallthrough;
+ case MT_PHY_TYPE_OFDM:
+ if (mphy->chandef.chan->band == NL80211_BAND_5GHZ)
+ sband = &mphy->sband_5g.sband;
+ else if (mphy->chandef.chan->band == NL80211_BAND_6GHZ)
+ sband = &mphy->sband_6g.sband;
+ else
+ sband = &mphy->sband_2g.sband;
+
+ rate->mcs = mt76_get_rate(&dev->mt76, sband, rate->mcs, cck);
+ rate->legacy = sband->bitrates[rate->mcs].bitrate;
+ break;
+ case MT_PHY_TYPE_HT:
+ case MT_PHY_TYPE_HT_GF:
+ if (rate->mcs > 31) {
+ ret = -EINVAL;
+ goto out;
+ }
+
+ rate->flags = RATE_INFO_FLAGS_MCS;
+ if (res->gi)
+ rate->flags |= RATE_INFO_FLAGS_SHORT_GI;
+ break;
+ case MT_PHY_TYPE_VHT:
+ if (rate->mcs > 9) {
+ ret = -EINVAL;
+ goto out;
+ }
+
+ rate->flags = RATE_INFO_FLAGS_VHT_MCS;
+ if (res->gi)
+ rate->flags |= RATE_INFO_FLAGS_SHORT_GI;
+ break;
+ case MT_PHY_TYPE_HE_SU:
+ case MT_PHY_TYPE_HE_EXT_SU:
+ case MT_PHY_TYPE_HE_TB:
+ case MT_PHY_TYPE_HE_MU:
+ if (res->gi > NL80211_RATE_INFO_HE_GI_3_2 || rate->mcs > 11) {
+ ret = -EINVAL;
+ goto out;
+ }
+ rate->he_gi = res->gi;
+ rate->flags = RATE_INFO_FLAGS_HE_MCS;
+ break;
+ default:
+ ret = -EINVAL;
+ goto out;
+ }
+
+ switch (res->bw) {
+ case IEEE80211_STA_RX_BW_160:
+ rate->bw = RATE_INFO_BW_160;
+ break;
+ case IEEE80211_STA_RX_BW_80:
+ rate->bw = RATE_INFO_BW_80;
+ break;
+ case IEEE80211_STA_RX_BW_40:
+ rate->bw = RATE_INFO_BW_40;
+ break;
+ default:
+ rate->bw = RATE_INFO_BW_20;
+ break;
+ }
+
+out:
+ dev_kfree_skb(skb);
+
+ return ret;
+}
+
+int mt7915_mcu_update_bss_color(struct mt7915_dev *dev, struct ieee80211_vif *vif,
+ struct cfg80211_he_bss_color *he_bss_color)
+{
+ int len = sizeof(struct sta_req_hdr) + sizeof(struct bss_info_color);
+ struct mt7915_vif *mvif = (struct mt7915_vif *)vif->drv_priv;
+ struct bss_info_color *bss_color;
+ struct sk_buff *skb;
+ struct tlv *tlv;
+
+ skb = __mt76_connac_mcu_alloc_sta_req(&dev->mt76, &mvif->mt76,
+ NULL, len);
+ if (IS_ERR(skb))
+ return PTR_ERR(skb);
+
+ tlv = mt76_connac_mcu_add_tlv(skb, BSS_INFO_BSS_COLOR,
+ sizeof(*bss_color));
+ bss_color = (struct bss_info_color *)tlv;
+ bss_color->disable = !he_bss_color->enabled;
+ bss_color->color = he_bss_color->color;
+
+ return mt76_mcu_skb_send_msg(&dev->mt76, skb,
+ MCU_EXT_CMD(BSS_INFO_UPDATE), true);
+}
+
+#define TWT_AGRT_TRIGGER BIT(0)
+#define TWT_AGRT_ANNOUNCE BIT(1)
+#define TWT_AGRT_PROTECT BIT(2)
+
+int mt7915_mcu_twt_agrt_update(struct mt7915_dev *dev,
+ struct mt7915_vif *mvif,
+ struct mt7915_twt_flow *flow,
+ int cmd)
+{
+ struct {
+ u8 tbl_idx;
+ u8 cmd;
+ u8 own_mac_idx;
+ u8 flowid; /* 0xff for group id */
+ __le16 peer_id; /* specify the peer_id (msb=0)
+ * or group_id (msb=1)
+ */
+ u8 duration; /* in units of 256 us */
+ u8 bss_idx;
+ __le64 start_tsf;
+ __le16 mantissa;
+ u8 exponent;
+ u8 is_ap;
+ u8 agrt_params;
+ u8 rsv[23];
+ } __packed req = {
+ .tbl_idx = flow->table_id,
+ .cmd = cmd,
+ .own_mac_idx = mvif->mt76.omac_idx,
+ .flowid = flow->id,
+ .peer_id = cpu_to_le16(flow->wcid),
+ .duration = flow->duration,
+ .bss_idx = mvif->mt76.idx,
+ .start_tsf = cpu_to_le64(flow->tsf),
+ .mantissa = flow->mantissa,
+ .exponent = flow->exp,
+ .is_ap = true,
+ };
+
+ if (flow->protection)
+ req.agrt_params |= TWT_AGRT_PROTECT;
+ if (!flow->flowtype)
+ req.agrt_params |= TWT_AGRT_ANNOUNCE;
+ if (flow->trigger)
+ req.agrt_params |= TWT_AGRT_TRIGGER;
+
+ return mt76_mcu_send_msg(&dev->mt76, MCU_EXT_CMD(TWT_AGRT_UPDATE),
+ &req, sizeof(req), true);
+}
+
+int mt7915_mcu_wed_wa_tx_stats(struct mt7915_dev *dev, u16 wlan_idx)
+{
+ struct {
+ __le32 cmd;
+ __le32 num;
+ __le32 __rsv;
+ __le16 wlan_idx;
+ } req = {
+ .cmd = cpu_to_le32(0x15),
+ .num = cpu_to_le32(1),
+ .wlan_idx = cpu_to_le16(wlan_idx),
+ };
+ struct mt7915_mcu_wa_tx_stat {
+ __le16 wlan_idx;
+ u8 __rsv[2];
+
+ /* tx_bytes is deprecated since WA byte counter uses u32,
+ * which easily leads to overflow.
+ */
+ __le32 tx_bytes;
+ __le32 tx_packets;
+ } *res;
+ struct mt76_wcid *wcid;
+ struct sk_buff *skb;
+ int ret;
+
+ ret = mt76_mcu_send_and_get_msg(&dev->mt76, MCU_WA_PARAM_CMD(QUERY),
+ &req, sizeof(req), true, &skb);
+ if (ret)
+ return ret;
+
+ if (!is_mt7915(&dev->mt76))
+ skb_pull(skb, 4);
+
+ res = (struct mt7915_mcu_wa_tx_stat *)skb->data;
+
+ if (le16_to_cpu(res->wlan_idx) != wlan_idx) {
+ ret = -EINVAL;
+ goto out;
+ }
+
+ rcu_read_lock();
+
+ wcid = rcu_dereference(dev->mt76.wcid[wlan_idx]);
+ if (wcid)
+ wcid->stats.tx_packets += le32_to_cpu(res->tx_packets);
+ else
+ ret = -EINVAL;
+
+ rcu_read_unlock();
+out:
+ dev_kfree_skb(skb);
+
+ return ret;
+}
+
+int mt7915_mcu_rf_regval(struct mt7915_dev *dev, u32 regidx, u32 *val, bool set)
+{
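+ /* regidx packs the RF index in bits 31-24 and the register offset in bits 23-0 */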
+ struct {
+ __le32 idx;
+ __le32 ofs;
+ __le32 data;
+ } __packed req = {
+ .idx = cpu_to_le32(u32_get_bits(regidx, GENMASK(31, 24))),
+ .ofs = cpu_to_le32(u32_get_bits(regidx, GENMASK(23, 0))),
+ .data = set ? cpu_to_le32(*val) : 0,
+ };
+ struct sk_buff *skb;
+ int ret;
+
+ if (set)
+ return mt76_mcu_send_msg(&dev->mt76, MCU_EXT_CMD(RF_REG_ACCESS),
+ &req, sizeof(req), false);
+
+ ret = mt76_mcu_send_and_get_msg(&dev->mt76, MCU_EXT_QUERY(RF_REG_ACCESS),
+ &req, sizeof(req), true, &skb);
+ if (ret)
+ return ret;
+
+ *val = le32_to_cpu(*(__le32 *)(skb->data + 8));
+ dev_kfree_skb(skb);
+
+ return 0;
+}
diff --git a/drivers/net/wireless/mediatek/mt76/mt7915/mcu.h b/drivers/net/wireless/mediatek/mt76/mt7915/mcu.h
new file mode 100644
index 0000000000..1592b5d675
--- /dev/null
+++ b/drivers/net/wireless/mediatek/mt76/mt7915/mcu.h
@@ -0,0 +1,530 @@
+/* SPDX-License-Identifier: ISC */
+/* Copyright (C) 2020 MediaTek Inc. */
+
+#ifndef __MT7915_MCU_H
+#define __MT7915_MCU_H
+
+#include "../mt76_connac_mcu.h"
+
+enum {
+ MCU_ATE_SET_TRX = 0x1,
+ MCU_ATE_SET_FREQ_OFFSET = 0xa,
+ MCU_ATE_SET_SLOT_TIME = 0x13,
+ MCU_ATE_CLEAN_TXQUEUE = 0x1c,
+};
+
+struct mt7915_mcu_thermal_ctrl {
+ u8 ctrl_id;
+ u8 band_idx;
+ union {
+ struct {
+ u8 protect_type; /* 1: duty admit, 2: radio off */
+ u8 trigger_type; /* 0: low, 1: high */
+ } __packed type;
+ struct {
+ u8 duty_level; /* level 0~3 */
+ u8 duty_cycle;
+ } __packed duty;
+ };
+} __packed;
+
+struct mt7915_mcu_thermal_notify {
+ struct mt76_connac2_mcu_rxd rxd;
+
+ struct mt7915_mcu_thermal_ctrl ctrl;
+ __le32 temperature;
+ u8 rsv[8];
+} __packed;
+
+struct mt7915_mcu_csa_notify {
+ struct mt76_connac2_mcu_rxd rxd;
+
+ u8 omac_idx;
+ u8 csa_count;
+ u8 band_idx;
+ u8 rsv;
+} __packed;
+
+struct mt7915_mcu_bcc_notify {
+ struct mt76_connac2_mcu_rxd rxd;
+
+ u8 band_idx;
+ u8 omac_idx;
+ u8 cca_count;
+ u8 rsv;
+} __packed;
+
+struct mt7915_mcu_rdd_report {
+ struct mt76_connac2_mcu_rxd rxd;
+
+ u8 band_idx;
+ u8 long_detected;
+ u8 constant_prf_detected;
+ u8 staggered_prf_detected;
+ u8 radar_type_idx;
+ u8 periodic_pulse_num;
+ u8 long_pulse_num;
+ u8 hw_pulse_num;
+
+ u8 out_lpn;
+ u8 out_spn;
+ u8 out_crpn;
+ u8 out_crpw;
+ u8 out_crbn;
+ u8 out_stgpn;
+ u8 out_stgpw;
+
+ u8 rsv;
+
+ __le32 out_pri_const;
+ __le32 out_pri_stg[3];
+
+ struct {
+ __le32 start;
+ __le16 pulse_width;
+ __le16 pulse_power;
+ u8 mdrdy_flag;
+ u8 rsv[3];
+ } long_pulse[32];
+
+ struct {
+ __le32 start;
+ __le16 pulse_width;
+ __le16 pulse_power;
+ u8 mdrdy_flag;
+ u8 rsv[3];
+ } periodic_pulse[32];
+
+ struct {
+ __le32 start;
+ __le16 pulse_width;
+ __le16 pulse_power;
+ u8 sc_pass;
+ u8 sw_reset;
+ u8 mdrdy_flag;
+ u8 tx_active;
+ } hw_pulse[32];
+} __packed;
+
+struct mt7915_mcu_background_chain_ctrl {
+ u8 chan; /* primary channel */
+ u8 central_chan; /* central channel */
+ u8 bw;
+ u8 tx_stream;
+ u8 rx_stream;
+
+ u8 monitor_chan; /* monitor channel */
+ u8 monitor_central_chan; /* monitor central channel */
+ u8 monitor_bw;
+ u8 monitor_tx_stream;
+ u8 monitor_rx_stream;
+
+ u8 scan_mode; /* 0: ScanStop
+ * 1: ScanStart
+ * 2: ScanRunning
+ */
+ u8 band_idx; /* DBDC */
+ u8 monitor_scan_type;
+ u8 band; /* 0: 2.4GHz, 1: 5GHz */
+ u8 rsv[2];
+} __packed;
+
+struct mt7915_mcu_sr_ctrl {
+ u8 action;
+ u8 argnum;
+ u8 band_idx;
+ u8 status;
+ u8 drop_ta_idx;
+ u8 sta_idx; /* 256 sta */
+ u8 rsv[2];
+ __le32 val;
+} __packed;
+
+struct mt7915_mcu_eeprom {
+ u8 buffer_mode;
+ u8 format;
+ __le16 len;
+} __packed;
+
+struct mt7915_mcu_eeprom_info {
+ __le32 addr;
+ __le32 valid;
+ u8 data[16];
+} __packed;
+
+struct mt7915_mcu_phy_rx_info {
+ u8 category;
+ u8 rate;
+ u8 mode;
+ u8 nsts;
+ u8 gi;
+ u8 coding;
+ u8 stbc;
+ u8 bw;
+};
+
+struct mt7915_mcu_mib {
+ __le32 band;
+ __le32 offs;
+ __le64 data;
+} __packed;
+
+enum mt7915_chan_mib_offs {
+ /* mt7915 */
+ MIB_TX_TIME = 81,
+ MIB_RX_TIME,
+ MIB_OBSS_AIRTIME = 86,
+ MIB_NON_WIFI_TIME,
+ MIB_TXOP_INIT_COUNT,
+
+ /* mt7916 */
+ MIB_TX_TIME_V2 = 6,
+ MIB_RX_TIME_V2 = 8,
+ MIB_OBSS_AIRTIME_V2 = 490,
+ MIB_NON_WIFI_TIME_V2
+};
+
+struct mt7915_mcu_txpower_sku {
+ u8 format_id;
+ u8 limit_type;
+ u8 band_idx;
+ s8 txpower_sku[MT7915_SKU_RATE_NUM];
+} __packed;
+
+struct edca {
+ u8 queue;
+ u8 set;
+ u8 aifs;
+ u8 cw_min;
+ __le16 cw_max;
+ __le16 txop;
+};
+
+struct mt7915_mcu_tx {
+ u8 total;
+ u8 action;
+ u8 valid;
+ u8 mode;
+
+ struct edca edca[IEEE80211_NUM_ACS];
+} __packed;
+
+struct mt7915_mcu_muru_stats {
+ __le32 event_id;
+ struct {
+ __le32 cck_cnt;
+ __le32 ofdm_cnt;
+ __le32 htmix_cnt;
+ __le32 htgf_cnt;
+ __le32 vht_su_cnt;
+ __le32 vht_2mu_cnt;
+ __le32 vht_3mu_cnt;
+ __le32 vht_4mu_cnt;
+ __le32 he_su_cnt;
+ __le32 he_ext_su_cnt;
+ __le32 he_2ru_cnt;
+ __le32 he_2mu_cnt;
+ __le32 he_3ru_cnt;
+ __le32 he_3mu_cnt;
+ __le32 he_4ru_cnt;
+ __le32 he_4mu_cnt;
+ __le32 he_5to8ru_cnt;
+ __le32 he_9to16ru_cnt;
+ __le32 he_gtr16ru_cnt;
+ } dl;
+
+ struct {
+ __le32 hetrig_su_cnt;
+ __le32 hetrig_2ru_cnt;
+ __le32 hetrig_3ru_cnt;
+ __le32 hetrig_4ru_cnt;
+ __le32 hetrig_5to8ru_cnt;
+ __le32 hetrig_9to16ru_cnt;
+ __le32 hetrig_gtr16ru_cnt;
+ __le32 hetrig_2mu_cnt;
+ __le32 hetrig_3mu_cnt;
+ __le32 hetrig_4mu_cnt;
+ } ul;
+};
+
+#define WMM_AIFS_SET BIT(0)
+#define WMM_CW_MIN_SET BIT(1)
+#define WMM_CW_MAX_SET BIT(2)
+#define WMM_TXOP_SET BIT(3)
+#define WMM_PARAM_SET GENMASK(3, 0)
+
+enum {
+ MCU_FW_LOG_WM,
+ MCU_FW_LOG_WA,
+ MCU_FW_LOG_TO_HOST,
+};
+
+enum {
+ MCU_TWT_AGRT_ADD,
+ MCU_TWT_AGRT_MODIFY,
+ MCU_TWT_AGRT_DELETE,
+ MCU_TWT_AGRT_TEARDOWN,
+ MCU_TWT_AGRT_GET_TSF,
+};
+
+enum {
+ MCU_WA_PARAM_CMD_QUERY,
+ MCU_WA_PARAM_CMD_SET,
+ MCU_WA_PARAM_CMD_CAPABILITY,
+ MCU_WA_PARAM_CMD_DEBUG,
+};
+
+enum {
+ MCU_WA_PARAM_PDMA_RX = 0x04,
+ MCU_WA_PARAM_CPU_UTIL = 0x0b,
+ MCU_WA_PARAM_RED = 0x0e,
+ MCU_WA_PARAM_RED_SETTING = 0x40,
+};
+
+enum mcu_mmps_mode {
+ MCU_MMPS_STATIC,
+ MCU_MMPS_DYNAMIC,
+ MCU_MMPS_RSV,
+ MCU_MMPS_DISABLE,
+};
+
+struct bss_info_bmc_rate {
+ __le16 tag;
+ __le16 len;
+ __le16 bc_trans;
+ __le16 mc_trans;
+ u8 short_preamble;
+ u8 rsv[7];
+} __packed;
+
+struct bss_info_ra {
+ __le16 tag;
+ __le16 len;
+ u8 op_mode;
+ u8 adhoc_en;
+ u8 short_preamble;
+ u8 tx_streams;
+ u8 rx_streams;
+ u8 algo;
+ u8 force_sgi;
+ u8 force_gf;
+ u8 ht_mode;
+ u8 has_20_sta; /* Check if any sta supports GF. */
+ u8 bss_width_trigger_events;
+ u8 vht_nss_cap;
+ u8 vht_bw_signal; /* not used */
+ u8 vht_force_sgi; /* not used */
+ u8 se_off;
+ u8 antenna_idx;
+ u8 train_up_rule;
+ u8 rsv[3];
+ unsigned short train_up_high_thres;
+ short train_up_rule_rssi;
+ unsigned short low_traffic_thres;
+ __le16 max_phyrate;
+ __le32 phy_cap;
+ __le32 interval;
+ __le32 fast_interval;
+} __packed;
+
+struct bss_info_hw_amsdu {
+ __le16 tag;
+ __le16 len;
+ __le32 cmp_bitmap_0;
+ __le32 cmp_bitmap_1;
+ __le16 trig_thres;
+ u8 enable;
+ u8 rsv;
+} __packed;
+
+struct bss_info_color {
+ __le16 tag;
+ __le16 len;
+ u8 disable;
+ u8 color;
+ u8 rsv[2];
+} __packed;
+
+struct bss_info_he {
+ __le16 tag;
+ __le16 len;
+ u8 he_pe_duration;
+ u8 vht_op_info_present;
+ __le16 he_rts_thres;
+ __le16 max_nss_mcs[CMD_HE_MCS_BW_NUM];
+ u8 rsv[6];
+} __packed;
+
+struct bss_info_bcn {
+ __le16 tag;
+ __le16 len;
+ u8 ver;
+ u8 enable;
+ __le16 sub_ntlv;
+} __packed __aligned(4);
+
+struct bss_info_bcn_cntdwn {
+ __le16 tag;
+ __le16 len;
+ u8 cnt;
+ u8 rsv[3];
+} __packed __aligned(4);
+
+struct bss_info_bcn_mbss {
+#define MAX_BEACON_NUM 32
+ __le16 tag;
+ __le16 len;
+ __le32 bitmap;
+ __le16 offset[MAX_BEACON_NUM];
+ u8 rsv[8];
+} __packed __aligned(4);
+
+struct bss_info_bcn_cont {
+ __le16 tag;
+ __le16 len;
+ __le16 tim_ofs;
+ __le16 csa_ofs;
+ __le16 bcc_ofs;
+ __le16 pkt_len;
+} __packed __aligned(4);
+
+struct bss_info_inband_discovery {
+ __le16 tag;
+ __le16 len;
+ u8 tx_type;
+ u8 tx_mode;
+ u8 tx_interval;
+ u8 enable;
+ __le16 rsv;
+ __le16 prob_rsp_len;
+} __packed __aligned(4);
+
+enum {
+ BSS_INFO_BCN_CSA,
+ BSS_INFO_BCN_BCC,
+ BSS_INFO_BCN_MBSSID,
+ BSS_INFO_BCN_CONTENT,
+ BSS_INFO_BCN_DISCOV,
+ BSS_INFO_BCN_MAX
+};
+
+enum {
+ RATE_PARAM_FIXED = 3,
+ RATE_PARAM_MMPS_UPDATE = 5,
+ RATE_PARAM_FIXED_HE_LTF = 7,
+ RATE_PARAM_FIXED_MCS,
+ RATE_PARAM_FIXED_GI = 11,
+ RATE_PARAM_AUTO = 20,
+ RATE_PARAM_SPE_UPDATE = 22,
+};
+
+#define RATE_CFG_MCS GENMASK(3, 0)
+#define RATE_CFG_NSS GENMASK(7, 4)
+#define RATE_CFG_GI GENMASK(11, 8)
+#define RATE_CFG_BW GENMASK(15, 12)
+#define RATE_CFG_STBC GENMASK(19, 16)
+#define RATE_CFG_LDPC GENMASK(23, 20)
+#define RATE_CFG_PHY_TYPE GENMASK(27, 24)
+#define RATE_CFG_HE_LTF GENMASK(31, 28)
+
+enum {
+ TX_POWER_LIMIT_ENABLE,
+ TX_POWER_LIMIT_TABLE = 0x4,
+ TX_POWER_LIMIT_INFO = 0x7,
+ TX_POWER_LIMIT_FRAME = 0x11,
+ TX_POWER_LIMIT_FRAME_MIN = 0x12,
+};
+
+enum {
+ SPR_ENABLE = 0x1,
+ SPR_ENABLE_SD = 0x3,
+ SPR_ENABLE_MODE = 0x5,
+ SPR_ENABLE_DPD = 0x23,
+ SPR_ENABLE_TX = 0x25,
+ SPR_SET_SRG_BITMAP = 0x80,
+ SPR_SET_PARAM = 0xc2,
+ SPR_SET_SIGA = 0xdc,
+};
+
+enum {
+ THERMAL_PROTECT_PARAMETER_CTRL,
+ THERMAL_PROTECT_BASIC_INFO,
+ THERMAL_PROTECT_ENABLE,
+ THERMAL_PROTECT_DISABLE,
+ THERMAL_PROTECT_DUTY_CONFIG,
+ THERMAL_PROTECT_MECH_INFO,
+ THERMAL_PROTECT_DUTY_INFO,
+ THERMAL_PROTECT_STATE_ACT,
+};
+
+enum {
+ MT_BF_SOUNDING_ON = 1,
+ MT_BF_TYPE_UPDATE = 20,
+ MT_BF_MODULE_UPDATE = 25
+};
+
+enum {
+ MURU_SET_ARB_OP_MODE = 14,
+ MURU_SET_PLATFORM_TYPE = 25,
+};
+
+enum {
+ MURU_PLATFORM_TYPE_PERF_LEVEL_1 = 1,
+ MURU_PLATFORM_TYPE_PERF_LEVEL_2,
+};
+
+/* tx cmd tx statistics */
+enum {
+ MURU_SET_TXC_TX_STATS_EN = 150,
+ MURU_GET_TXC_TX_STATS = 151,
+};
+
+enum {
+ SER_QUERY,
+ /* recovery */
+ SER_SET_RECOVER_L1,
+ SER_SET_RECOVER_L2,
+ SER_SET_RECOVER_L3_RX_ABORT,
+ SER_SET_RECOVER_L3_TX_ABORT,
+ SER_SET_RECOVER_L3_TX_DISABLE,
+ SER_SET_RECOVER_L3_BF,
+ SER_SET_RECOVER_FULL,
+ SER_SET_SYSTEM_ASSERT,
+ /* action */
+ SER_ENABLE = 2,
+ SER_RECOVER
+};
+
+#define MT7915_MAX_BEACON_SIZE 1308
+#define MT7915_BEACON_UPDATE_SIZE (sizeof(struct sta_req_hdr) + \
+ sizeof(struct bss_info_bcn) + \
+ sizeof(struct bss_info_bcn_cntdwn) + \
+ sizeof(struct bss_info_bcn_mbss) + \
+ MT_TXD_SIZE + \
+ sizeof(struct bss_info_bcn_cont))
+#define MT7915_MAX_BSS_OFFLOAD_SIZE (MT7915_MAX_BEACON_SIZE + \
+ MT7915_BEACON_UPDATE_SIZE)
+
+#define MT7915_BSS_UPDATE_MAX_SIZE (sizeof(struct sta_req_hdr) + \
+ sizeof(struct bss_info_omac) + \
+ sizeof(struct bss_info_basic) +\
+ sizeof(struct bss_info_rf_ch) +\
+ sizeof(struct bss_info_ra) + \
+ sizeof(struct bss_info_hw_amsdu) +\
+ sizeof(struct bss_info_he) + \
+ sizeof(struct bss_info_bmc_rate) +\
+ sizeof(struct bss_info_ext_bss))
+
+static inline s8
+mt7915_get_power_bound(struct mt7915_phy *phy, s8 txpower)
+{
+ struct mt76_phy *mphy = phy->mt76;
+ int n_chains = hweight8(mphy->antenna_mask);
+
+ txpower = mt76_get_sar_power(mphy, mphy->chandef.chan, txpower * 2);
+ txpower -= mt76_tx_power_nss_delta(n_chains);
+
+ return txpower;
+}
+
+#endif
diff --git a/drivers/net/wireless/mediatek/mt76/mt7915/mmio.c b/drivers/net/wireless/mediatek/mt76/mt7915/mmio.c
new file mode 100644
index 0000000000..f4ad7219f9
--- /dev/null
+++ b/drivers/net/wireless/mediatek/mt76/mt7915/mmio.c
@@ -0,0 +1,1067 @@
+// SPDX-License-Identifier: ISC
+/* Copyright (C) 2020 MediaTek Inc. */
+
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/platform_device.h>
+#include <linux/rtnetlink.h>
+#include <linux/pci.h>
+
+#include "mt7915.h"
+#include "mac.h"
+#include "mcu.h"
+#include "../trace.h"
+#include "../dma.h"
+
+static bool wed_enable;
+module_param(wed_enable, bool, 0644);
+MODULE_PARM_DESC(wed_enable, "Enable Wireless Ethernet Dispatch support");
+
+static const u32 mt7915_reg[] = {
+ [INT_SOURCE_CSR] = 0xd7010,
+ [INT_MASK_CSR] = 0xd7014,
+ [INT1_SOURCE_CSR] = 0xd7088,
+ [INT1_MASK_CSR] = 0xd708c,
+ [INT_MCU_CMD_SOURCE] = 0xd51f0,
+ [INT_MCU_CMD_EVENT] = 0x3108,
+ [WFDMA0_ADDR] = 0xd4000,
+ [WFDMA0_PCIE1_ADDR] = 0xd8000,
+ [WFDMA_EXT_CSR_ADDR] = 0xd7000,
+ [CBTOP1_PHY_END] = 0x77ffffff,
+ [INFRA_MCU_ADDR_END] = 0x7c3fffff,
+ [FW_ASSERT_STAT_ADDR] = 0x219848,
+ [FW_EXCEPT_TYPE_ADDR] = 0x21987c,
+ [FW_EXCEPT_COUNT_ADDR] = 0x219848,
+ [FW_CIRQ_COUNT_ADDR] = 0x216f94,
+ [FW_CIRQ_IDX_ADDR] = 0x216ef8,
+ [FW_CIRQ_LISR_ADDR] = 0x2170ac,
+ [FW_TASK_ID_ADDR] = 0x216f90,
+ [FW_TASK_IDX_ADDR] = 0x216f9c,
+ [FW_TASK_QID1_ADDR] = 0x219680,
+ [FW_TASK_QID2_ADDR] = 0x219760,
+ [FW_TASK_START_ADDR] = 0x219558,
+ [FW_TASK_END_ADDR] = 0x219554,
+ [FW_TASK_SIZE_ADDR] = 0x219560,
+ [FW_LAST_MSG_ID_ADDR] = 0x216f70,
+ [FW_EINT_INFO_ADDR] = 0x219818,
+ [FW_SCHED_INFO_ADDR] = 0x219828,
+ [SWDEF_BASE_ADDR] = 0x41f200,
+ [TXQ_WED_RING_BASE] = 0xd7300,
+ [RXQ_WED_RING_BASE] = 0xd7410,
+ [RXQ_WED_DATA_RING_BASE] = 0xd4500,
+};
+
+static const u32 mt7916_reg[] = {
+ [INT_SOURCE_CSR] = 0xd4200,
+ [INT_MASK_CSR] = 0xd4204,
+ [INT1_SOURCE_CSR] = 0xd8200,
+ [INT1_MASK_CSR] = 0xd8204,
+ [INT_MCU_CMD_SOURCE] = 0xd41f0,
+ [INT_MCU_CMD_EVENT] = 0x2108,
+ [WFDMA0_ADDR] = 0xd4000,
+ [WFDMA0_PCIE1_ADDR] = 0xd8000,
+ [WFDMA_EXT_CSR_ADDR] = 0xd7000,
+ [CBTOP1_PHY_END] = 0x7fffffff,
+ [INFRA_MCU_ADDR_END] = 0x7c085fff,
+ [FW_ASSERT_STAT_ADDR] = 0x02204c14,
+ [FW_EXCEPT_TYPE_ADDR] = 0x022051a4,
+ [FW_EXCEPT_COUNT_ADDR] = 0x022050bc,
+ [FW_CIRQ_COUNT_ADDR] = 0x022001ac,
+ [FW_CIRQ_IDX_ADDR] = 0x02204f84,
+ [FW_CIRQ_LISR_ADDR] = 0x022050d0,
+ [FW_TASK_ID_ADDR] = 0x0220406c,
+ [FW_TASK_IDX_ADDR] = 0x0220500c,
+ [FW_TASK_QID1_ADDR] = 0x022028c8,
+ [FW_TASK_QID2_ADDR] = 0x02202a38,
+ [FW_TASK_START_ADDR] = 0x0220286c,
+ [FW_TASK_END_ADDR] = 0x02202870,
+ [FW_TASK_SIZE_ADDR] = 0x02202878,
+ [FW_LAST_MSG_ID_ADDR] = 0x02204fe8,
+ [FW_EINT_INFO_ADDR] = 0x0220525c,
+ [FW_SCHED_INFO_ADDR] = 0x0220516c,
+ [SWDEF_BASE_ADDR] = 0x411400,
+ [TXQ_WED_RING_BASE] = 0xd7300,
+ [RXQ_WED_RING_BASE] = 0xd7410,
+ [RXQ_WED_DATA_RING_BASE] = 0xd4540,
+};
+
+static const u32 mt7986_reg[] = {
+ [INT_SOURCE_CSR] = 0x24200,
+ [INT_MASK_CSR] = 0x24204,
+ [INT1_SOURCE_CSR] = 0x28200,
+ [INT1_MASK_CSR] = 0x28204,
+ [INT_MCU_CMD_SOURCE] = 0x241f0,
+ [INT_MCU_CMD_EVENT] = 0x54000108,
+ [WFDMA0_ADDR] = 0x24000,
+ [WFDMA0_PCIE1_ADDR] = 0x28000,
+ [WFDMA_EXT_CSR_ADDR] = 0x27000,
+ [CBTOP1_PHY_END] = 0x7fffffff,
+ [INFRA_MCU_ADDR_END] = 0x7c085fff,
+ [FW_ASSERT_STAT_ADDR] = 0x02204b54,
+ [FW_EXCEPT_TYPE_ADDR] = 0x022050dc,
+ [FW_EXCEPT_COUNT_ADDR] = 0x02204ffc,
+ [FW_CIRQ_COUNT_ADDR] = 0x022001ac,
+ [FW_CIRQ_IDX_ADDR] = 0x02204ec4,
+ [FW_CIRQ_LISR_ADDR] = 0x02205010,
+ [FW_TASK_ID_ADDR] = 0x02204fac,
+ [FW_TASK_IDX_ADDR] = 0x02204f4c,
+ [FW_TASK_QID1_ADDR] = 0x02202814,
+ [FW_TASK_QID2_ADDR] = 0x02202984,
+ [FW_TASK_START_ADDR] = 0x022027b8,
+ [FW_TASK_END_ADDR] = 0x022027bc,
+ [FW_TASK_SIZE_ADDR] = 0x022027c4,
+ [FW_LAST_MSG_ID_ADDR] = 0x02204f28,
+ [FW_EINT_INFO_ADDR] = 0x02205194,
+ [FW_SCHED_INFO_ADDR] = 0x022051a4,
+ [SWDEF_BASE_ADDR] = 0x411400,
+ [TXQ_WED_RING_BASE] = 0x24420,
+ [RXQ_WED_RING_BASE] = 0x24520,
+ [RXQ_WED_DATA_RING_BASE] = 0x24540,
+};
+
+static const u32 mt7915_offs[] = {
+ [TMAC_CDTR] = 0x090,
+ [TMAC_ODTR] = 0x094,
+ [TMAC_ATCR] = 0x098,
+ [TMAC_TRCR0] = 0x09c,
+ [TMAC_ICR0] = 0x0a4,
+ [TMAC_ICR1] = 0x0b4,
+ [TMAC_CTCR0] = 0x0f4,
+ [TMAC_TFCR0] = 0x1e0,
+ [MDP_BNRCFR0] = 0x070,
+ [MDP_BNRCFR1] = 0x074,
+ [ARB_DRNGR0] = 0x194,
+ [ARB_SCR] = 0x080,
+ [RMAC_MIB_AIRTIME14] = 0x3b8,
+ [AGG_AWSCR0] = 0x05c,
+ [AGG_PCR0] = 0x06c,
+ [AGG_ACR0] = 0x084,
+ [AGG_ACR4] = 0x08c,
+ [AGG_MRCR] = 0x098,
+ [AGG_ATCR1] = 0x0f0,
+ [AGG_ATCR3] = 0x0f4,
+ [LPON_UTTR0] = 0x080,
+ [LPON_UTTR1] = 0x084,
+ [LPON_FRCR] = 0x314,
+ [MIB_SDR3] = 0x014,
+ [MIB_SDR4] = 0x018,
+ [MIB_SDR5] = 0x01c,
+ [MIB_SDR7] = 0x024,
+ [MIB_SDR8] = 0x028,
+ [MIB_SDR9] = 0x02c,
+ [MIB_SDR10] = 0x030,
+ [MIB_SDR11] = 0x034,
+ [MIB_SDR12] = 0x038,
+ [MIB_SDR13] = 0x03c,
+ [MIB_SDR14] = 0x040,
+ [MIB_SDR15] = 0x044,
+ [MIB_SDR16] = 0x048,
+ [MIB_SDR17] = 0x04c,
+ [MIB_SDR18] = 0x050,
+ [MIB_SDR19] = 0x054,
+ [MIB_SDR20] = 0x058,
+ [MIB_SDR21] = 0x05c,
+ [MIB_SDR22] = 0x060,
+ [MIB_SDR23] = 0x064,
+ [MIB_SDR24] = 0x068,
+ [MIB_SDR25] = 0x06c,
+ [MIB_SDR27] = 0x074,
+ [MIB_SDR28] = 0x078,
+ [MIB_SDR29] = 0x07c,
+ [MIB_SDRVEC] = 0x080,
+ [MIB_SDR31] = 0x084,
+ [MIB_SDR32] = 0x088,
+ [MIB_SDRMUBF] = 0x090,
+ [MIB_DR8] = 0x0c0,
+ [MIB_DR9] = 0x0c4,
+ [MIB_DR11] = 0x0cc,
+ [MIB_MB_SDR0] = 0x100,
+ [MIB_MB_SDR1] = 0x104,
+ [TX_AGG_CNT] = 0x0a8,
+ [TX_AGG_CNT2] = 0x164,
+ [MIB_ARNG] = 0x4b8,
+ [WTBLON_TOP_WDUCR] = 0x0,
+ [WTBL_UPDATE] = 0x030,
+ [PLE_FL_Q_EMPTY] = 0x0b0,
+ [PLE_FL_Q_CTRL] = 0x1b0,
+ [PLE_AC_QEMPTY] = 0x500,
+ [PLE_FREEPG_CNT] = 0x100,
+ [PLE_FREEPG_HEAD_TAIL] = 0x104,
+ [PLE_PG_HIF_GROUP] = 0x110,
+ [PLE_HIF_PG_INFO] = 0x114,
+ [AC_OFFSET] = 0x040,
+ [ETBF_PAR_RPT0] = 0x068,
+};
+
+static const u32 mt7916_offs[] = {
+ [TMAC_CDTR] = 0x0c8,
+ [TMAC_ODTR] = 0x0cc,
+ [TMAC_ATCR] = 0x00c,
+ [TMAC_TRCR0] = 0x010,
+ [TMAC_ICR0] = 0x014,
+ [TMAC_ICR1] = 0x018,
+ [TMAC_CTCR0] = 0x114,
+ [TMAC_TFCR0] = 0x0e4,
+ [MDP_BNRCFR0] = 0x090,
+ [MDP_BNRCFR1] = 0x094,
+ [ARB_DRNGR0] = 0x1e0,
+ [ARB_SCR] = 0x000,
+ [RMAC_MIB_AIRTIME14] = 0x0398,
+ [AGG_AWSCR0] = 0x030,
+ [AGG_PCR0] = 0x040,
+ [AGG_ACR0] = 0x054,
+ [AGG_ACR4] = 0x05c,
+ [AGG_MRCR] = 0x068,
+ [AGG_ATCR1] = 0x1a8,
+ [AGG_ATCR3] = 0x080,
+ [LPON_UTTR0] = 0x360,
+ [LPON_UTTR1] = 0x364,
+ [LPON_FRCR] = 0x37c,
+ [MIB_SDR3] = 0x698,
+ [MIB_SDR4] = 0x788,
+ [MIB_SDR5] = 0x780,
+ [MIB_SDR7] = 0x5a8,
+ [MIB_SDR8] = 0x78c,
+ [MIB_SDR9] = 0x024,
+ [MIB_SDR10] = 0x76c,
+ [MIB_SDR11] = 0x790,
+ [MIB_SDR12] = 0x558,
+ [MIB_SDR13] = 0x560,
+ [MIB_SDR14] = 0x564,
+ [MIB_SDR15] = 0x568,
+ [MIB_SDR16] = 0x7fc,
+ [MIB_SDR17] = 0x800,
+ [MIB_SDR18] = 0x030,
+ [MIB_SDR19] = 0x5ac,
+ [MIB_SDR20] = 0x5b0,
+ [MIB_SDR21] = 0x5b4,
+ [MIB_SDR22] = 0x770,
+ [MIB_SDR23] = 0x774,
+ [MIB_SDR24] = 0x778,
+ [MIB_SDR25] = 0x77c,
+ [MIB_SDR27] = 0x080,
+ [MIB_SDR28] = 0x084,
+ [MIB_SDR29] = 0x650,
+ [MIB_SDRVEC] = 0x5a8,
+ [MIB_SDR31] = 0x55c,
+ [MIB_SDR32] = 0x7a8,
+ [MIB_SDRMUBF] = 0x7ac,
+ [MIB_DR8] = 0x56c,
+ [MIB_DR9] = 0x570,
+ [MIB_DR11] = 0x574,
+ [MIB_MB_SDR0] = 0x688,
+ [MIB_MB_SDR1] = 0x690,
+ [TX_AGG_CNT] = 0x7dc,
+ [TX_AGG_CNT2] = 0x7ec,
+ [MIB_ARNG] = 0x0b0,
+ [WTBLON_TOP_WDUCR] = 0x200,
+ [WTBL_UPDATE] = 0x230,
+ [PLE_FL_Q_EMPTY] = 0x360,
+ [PLE_FL_Q_CTRL] = 0x3e0,
+ [PLE_AC_QEMPTY] = 0x600,
+ [PLE_FREEPG_CNT] = 0x380,
+ [PLE_FREEPG_HEAD_TAIL] = 0x384,
+ [PLE_PG_HIF_GROUP] = 0x00c,
+ [PLE_HIF_PG_INFO] = 0x388,
+ [AC_OFFSET] = 0x080,
+ [ETBF_PAR_RPT0] = 0x100,
+};
+
+static const struct mt76_connac_reg_map mt7915_reg_map[] = {
+ { 0x00400000, 0x80000, 0x10000 }, /* WF_MCU_SYSRAM */
+ { 0x00410000, 0x90000, 0x10000 }, /* WF_MCU_SYSRAM (configure regs) */
+ { 0x40000000, 0x70000, 0x10000 }, /* WF_UMAC_SYSRAM */
+ { 0x54000000, 0x02000, 0x01000 }, /* WFDMA PCIE0 MCU DMA0 */
+ { 0x55000000, 0x03000, 0x01000 }, /* WFDMA PCIE0 MCU DMA1 */
+ { 0x58000000, 0x06000, 0x01000 }, /* WFDMA PCIE1 MCU DMA0 (MEM_DMA) */
+ { 0x59000000, 0x07000, 0x01000 }, /* WFDMA PCIE1 MCU DMA1 */
+ { 0x7c000000, 0xf0000, 0x10000 }, /* CONN_INFRA */
+ { 0x7c020000, 0xd0000, 0x10000 }, /* CONN_INFRA, WFDMA */
+ { 0x80020000, 0xb0000, 0x10000 }, /* WF_TOP_MISC_OFF */
+ { 0x81020000, 0xc0000, 0x10000 }, /* WF_TOP_MISC_ON */
+ { 0x820c0000, 0x08000, 0x04000 }, /* WF_UMAC_TOP (PLE) */
+ { 0x820c8000, 0x0c000, 0x02000 }, /* WF_UMAC_TOP (PSE) */
+ { 0x820cc000, 0x0e000, 0x02000 }, /* WF_UMAC_TOP (PP) */
+ { 0x820ce000, 0x21c00, 0x00200 }, /* WF_LMAC_TOP (WF_SEC) */
+ { 0x820cf000, 0x22000, 0x01000 }, /* WF_LMAC_TOP (WF_PF) */
+ { 0x820d0000, 0x30000, 0x10000 }, /* WF_LMAC_TOP (WF_WTBLON) */
+ { 0x820e0000, 0x20000, 0x00400 }, /* WF_LMAC_TOP BN0 (WF_CFG) */
+ { 0x820e1000, 0x20400, 0x00200 }, /* WF_LMAC_TOP BN0 (WF_TRB) */
+ { 0x820e2000, 0x20800, 0x00400 }, /* WF_LMAC_TOP BN0 (WF_AGG) */
+ { 0x820e3000, 0x20c00, 0x00400 }, /* WF_LMAC_TOP BN0 (WF_ARB) */
+ { 0x820e4000, 0x21000, 0x00400 }, /* WF_LMAC_TOP BN0 (WF_TMAC) */
+ { 0x820e5000, 0x21400, 0x00800 }, /* WF_LMAC_TOP BN0 (WF_RMAC) */
+ { 0x820e7000, 0x21e00, 0x00200 }, /* WF_LMAC_TOP BN0 (WF_DMA) */
+ { 0x820e9000, 0x23400, 0x00200 }, /* WF_LMAC_TOP BN0 (WF_WTBLOFF) */
+ { 0x820ea000, 0x24000, 0x00200 }, /* WF_LMAC_TOP BN0 (WF_ETBF) */
+ { 0x820eb000, 0x24200, 0x00400 }, /* WF_LMAC_TOP BN0 (WF_LPON) */
+ { 0x820ec000, 0x24600, 0x00200 }, /* WF_LMAC_TOP BN0 (WF_INT) */
+ { 0x820ed000, 0x24800, 0x00800 }, /* WF_LMAC_TOP BN0 (WF_MIB) */
+ { 0x820f0000, 0xa0000, 0x00400 }, /* WF_LMAC_TOP BN1 (WF_CFG) */
+ { 0x820f1000, 0xa0600, 0x00200 }, /* WF_LMAC_TOP BN1 (WF_TRB) */
+ { 0x820f2000, 0xa0800, 0x00400 }, /* WF_LMAC_TOP BN1 (WF_AGG) */
+ { 0x820f3000, 0xa0c00, 0x00400 }, /* WF_LMAC_TOP BN1 (WF_ARB) */
+ { 0x820f4000, 0xa1000, 0x00400 }, /* WF_LMAC_TOP BN1 (WF_TMAC) */
+ { 0x820f5000, 0xa1400, 0x00800 }, /* WF_LMAC_TOP BN1 (WF_RMAC) */
+ { 0x820f7000, 0xa1e00, 0x00200 }, /* WF_LMAC_TOP BN1 (WF_DMA) */
+ { 0x820f9000, 0xa3400, 0x00200 }, /* WF_LMAC_TOP BN1 (WF_WTBLOFF) */
+ { 0x820fa000, 0xa4000, 0x00200 }, /* WF_LMAC_TOP BN1 (WF_ETBF) */
+ { 0x820fb000, 0xa4200, 0x00400 }, /* WF_LMAC_TOP BN1 (WF_LPON) */
+ { 0x820fc000, 0xa4600, 0x00200 }, /* WF_LMAC_TOP BN1 (WF_INT) */
+ { 0x820fd000, 0xa4800, 0x00800 }, /* WF_LMAC_TOP BN1 (WF_MIB) */
+ { 0x0, 0x0, 0x0 }, /* implies end of search */
+};
+
+static const struct mt76_connac_reg_map mt7916_reg_map[] = {
+ { 0x54000000, 0x02000, 0x01000 }, /* WFDMA_0 (PCIE0 MCU DMA0) */
+ { 0x55000000, 0x03000, 0x01000 }, /* WFDMA_1 (PCIE0 MCU DMA1) */
+ { 0x56000000, 0x04000, 0x01000 }, /* WFDMA_2 (Reserved) */
+ { 0x57000000, 0x05000, 0x01000 }, /* WFDMA_3 (MCU wrap CR) */
+ { 0x58000000, 0x06000, 0x01000 }, /* WFDMA_4 (PCIE1 MCU DMA0) */
+ { 0x59000000, 0x07000, 0x01000 }, /* WFDMA_5 (PCIE1 MCU DMA1) */
+ { 0x820c0000, 0x08000, 0x04000 }, /* WF_UMAC_TOP (PLE) */
+ { 0x820c8000, 0x0c000, 0x02000 }, /* WF_UMAC_TOP (PSE) */
+ { 0x820cc000, 0x0e000, 0x02000 }, /* WF_UMAC_TOP (PP) */
+ { 0x820e0000, 0x20000, 0x00400 }, /* WF_LMAC_TOP BN0 (WF_CFG) */
+ { 0x820e1000, 0x20400, 0x00200 }, /* WF_LMAC_TOP BN0 (WF_TRB) */
+ { 0x820e2000, 0x20800, 0x00400 }, /* WF_LMAC_TOP BN0 (WF_AGG) */
+ { 0x820e3000, 0x20c00, 0x00400 }, /* WF_LMAC_TOP BN0 (WF_ARB) */
+ { 0x820e4000, 0x21000, 0x00400 }, /* WF_LMAC_TOP BN0 (WF_TMAC) */
+ { 0x820e5000, 0x21400, 0x00800 }, /* WF_LMAC_TOP BN0 (WF_RMAC) */
+ { 0x820ce000, 0x21c00, 0x00200 }, /* WF_LMAC_TOP (WF_SEC) */
+ { 0x820e7000, 0x21e00, 0x00200 }, /* WF_LMAC_TOP BN0 (WF_DMA) */
+ { 0x820cf000, 0x22000, 0x01000 }, /* WF_LMAC_TOP (WF_PF) */
+ { 0x820e9000, 0x23400, 0x00200 }, /* WF_LMAC_TOP BN0 (WF_WTBLOFF) */
+ { 0x820ea000, 0x24000, 0x00200 }, /* WF_LMAC_TOP BN0 (WF_ETBF) */
+ { 0x820eb000, 0x24200, 0x00400 }, /* WF_LMAC_TOP BN0 (WF_LPON) */
+ { 0x820ec000, 0x24600, 0x00200 }, /* WF_LMAC_TOP BN0 (WF_INT) */
+ { 0x820ed000, 0x24800, 0x00800 }, /* WF_LMAC_TOP BN0 (WF_MIB) */
+ { 0x820ca000, 0x26000, 0x02000 }, /* WF_LMAC_TOP BN0 (WF_MUCOP) */
+ { 0x820d0000, 0x30000, 0x10000 }, /* WF_LMAC_TOP (WF_WTBLON) */
+ { 0x00400000, 0x80000, 0x10000 }, /* WF_MCU_SYSRAM */
+ { 0x00410000, 0x90000, 0x10000 }, /* WF_MCU_SYSRAM (configure cr) */
+ { 0x820f0000, 0xa0000, 0x00400 }, /* WF_LMAC_TOP BN1 (WF_CFG) */
+ { 0x820f1000, 0xa0600, 0x00200 }, /* WF_LMAC_TOP BN1 (WF_TRB) */
+ { 0x820f2000, 0xa0800, 0x00400 }, /* WF_LMAC_TOP BN1 (WF_AGG) */
+ { 0x820f3000, 0xa0c00, 0x00400 }, /* WF_LMAC_TOP BN1 (WF_ARB) */
+ { 0x820f4000, 0xa1000, 0x00400 }, /* WF_LMAC_TOP BN1 (WF_TMAC) */
+ { 0x820f5000, 0xa1400, 0x00800 }, /* WF_LMAC_TOP BN1 (WF_RMAC) */
+ { 0x820f7000, 0xa1e00, 0x00200 }, /* WF_LMAC_TOP BN1 (WF_DMA) */
+ { 0x820f9000, 0xa3400, 0x00200 }, /* WF_LMAC_TOP BN1 (WF_WTBLOFF) */
+ { 0x820fa000, 0xa4000, 0x00200 }, /* WF_LMAC_TOP BN1 (WF_ETBF) */
+ { 0x820fb000, 0xa4200, 0x00400 }, /* WF_LMAC_TOP BN1 (WF_LPON) */
+ { 0x820fc000, 0xa4600, 0x00200 }, /* WF_LMAC_TOP BN1 (WF_INT) */
+ { 0x820fd000, 0xa4800, 0x00800 }, /* WF_LMAC_TOP BN1 (WF_MIB) */
+ { 0x820c4000, 0xa8000, 0x01000 }, /* WF_LMAC_TOP (WF_UWTBL) */
+ { 0x820b0000, 0xae000, 0x01000 }, /* [APB2] WFSYS_ON */
+ { 0x80020000, 0xb0000, 0x10000 }, /* WF_TOP_MISC_OFF */
+ { 0x81020000, 0xc0000, 0x10000 }, /* WF_TOP_MISC_ON */
+ { 0x0, 0x0, 0x0 }, /* implies end of search */
+};
+
+static const struct mt76_connac_reg_map mt7986_reg_map[] = {
+ { 0x54000000, 0x402000, 0x01000 }, /* WFDMA_0 (PCIE0 MCU DMA0) */
+ { 0x55000000, 0x403000, 0x01000 }, /* WFDMA_1 (PCIE0 MCU DMA1) */
+ { 0x56000000, 0x404000, 0x01000 }, /* WFDMA_2 (Reserved) */
+ { 0x57000000, 0x405000, 0x01000 }, /* WFDMA_3 (MCU wrap CR) */
+ { 0x58000000, 0x406000, 0x01000 }, /* WFDMA_4 (PCIE1 MCU DMA0) */
+ { 0x59000000, 0x407000, 0x01000 }, /* WFDMA_5 (PCIE1 MCU DMA1) */
+ { 0x820c0000, 0x408000, 0x04000 }, /* WF_UMAC_TOP (PLE) */
+ { 0x820c8000, 0x40c000, 0x02000 }, /* WF_UMAC_TOP (PSE) */
+ { 0x820cc000, 0x40e000, 0x02000 }, /* WF_UMAC_TOP (PP) */
+ { 0x820e0000, 0x420000, 0x00400 }, /* WF_LMAC_TOP BN0 (WF_CFG) */
+ { 0x820e1000, 0x420400, 0x00200 }, /* WF_LMAC_TOP BN0 (WF_TRB) */
+ { 0x820e2000, 0x420800, 0x00400 }, /* WF_LMAC_TOP BN0 (WF_AGG) */
+ { 0x820e3000, 0x420c00, 0x00400 }, /* WF_LMAC_TOP BN0 (WF_ARB) */
+ { 0x820e4000, 0x421000, 0x00400 }, /* WF_LMAC_TOP BN0 (WF_TMAC) */
+ { 0x820e5000, 0x421400, 0x00800 }, /* WF_LMAC_TOP BN0 (WF_RMAC) */
+ { 0x820ce000, 0x421c00, 0x00200 }, /* WF_LMAC_TOP (WF_SEC) */
+ { 0x820e7000, 0x421e00, 0x00200 }, /* WF_LMAC_TOP BN0 (WF_DMA) */
+ { 0x820cf000, 0x422000, 0x01000 }, /* WF_LMAC_TOP (WF_PF) */
+ { 0x820e9000, 0x423400, 0x00200 }, /* WF_LMAC_TOP BN0 (WF_WTBLOFF) */
+ { 0x820ea000, 0x424000, 0x00200 }, /* WF_LMAC_TOP BN0 (WF_ETBF) */
+ { 0x820eb000, 0x424200, 0x00400 }, /* WF_LMAC_TOP BN0 (WF_LPON) */
+ { 0x820ec000, 0x424600, 0x00200 }, /* WF_LMAC_TOP BN0 (WF_INT) */
+ { 0x820ed000, 0x424800, 0x00800 }, /* WF_LMAC_TOP BN0 (WF_MIB) */
+ { 0x820ca000, 0x426000, 0x02000 }, /* WF_LMAC_TOP BN0 (WF_MUCOP) */
+ { 0x820d0000, 0x430000, 0x10000 }, /* WF_LMAC_TOP (WF_WTBLON) */
+ { 0x00400000, 0x480000, 0x10000 }, /* WF_MCU_SYSRAM */
+ { 0x00410000, 0x490000, 0x10000 }, /* WF_MCU_SYSRAM */
+ { 0x820f0000, 0x4a0000, 0x00400 }, /* WF_LMAC_TOP BN1 (WF_CFG) */
+ { 0x820f1000, 0x4a0600, 0x00200 }, /* WF_LMAC_TOP BN1 (WF_TRB) */
+ { 0x820f2000, 0x4a0800, 0x00400 }, /* WF_LMAC_TOP BN1 (WF_AGG) */
+ { 0x820f3000, 0x4a0c00, 0x00400 }, /* WF_LMAC_TOP BN1 (WF_ARB) */
+ { 0x820f4000, 0x4a1000, 0x00400 }, /* WF_LMAC_TOP BN1 (WF_TMAC) */
+ { 0x820f5000, 0x4a1400, 0x00800 }, /* WF_LMAC_TOP BN1 (WF_RMAC) */
+ { 0x820f7000, 0x4a1e00, 0x00200 }, /* WF_LMAC_TOP BN1 (WF_DMA) */
+ { 0x820f9000, 0x4a3400, 0x00200 }, /* WF_LMAC_TOP BN1 (WF_WTBLOFF) */
+ { 0x820fa000, 0x4a4000, 0x00200 }, /* WF_LMAC_TOP BN1 (WF_ETBF) */
+ { 0x820fb000, 0x4a4200, 0x00400 }, /* WF_LMAC_TOP BN1 (WF_LPON) */
+ { 0x820fc000, 0x4a4600, 0x00200 }, /* WF_LMAC_TOP BN1 (WF_INT) */
+ { 0x820fd000, 0x4a4800, 0x00800 }, /* WF_LMAC_TOP BN1 (WF_MIB) */
+ { 0x820c4000, 0x4a8000, 0x01000 }, /* WF_LMAC_TOP (WF_UWTBL) */
+ { 0x820b0000, 0x4ae000, 0x01000 }, /* [APB2] WFSYS_ON */
+ { 0x80020000, 0x4b0000, 0x10000 }, /* WF_TOP_MISC_OFF */
+ { 0x81020000, 0x4c0000, 0x10000 }, /* WF_TOP_MISC_ON */
+ { 0x89000000, 0x4d0000, 0x01000 }, /* WF_MCU_CFG_ON */
+ { 0x89010000, 0x4d1000, 0x01000 }, /* WF_MCU_CIRQ */
+ { 0x89020000, 0x4d2000, 0x01000 }, /* WF_MCU_GPT */
+ { 0x89030000, 0x4d3000, 0x01000 }, /* WF_MCU_WDT */
+ { 0x80010000, 0x4d4000, 0x01000 }, /* WF_AXIDMA */
+ { 0x0, 0x0, 0x0 }, /* implies end of search */
+};
+
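+/* Register accesses that fall outside the static per-chip map go through
+ * the L1/L2 remap helpers below: they program the target base into a HIF
+ * remap register (flushed with a read-back) and return the offset to use
+ * within the matching remap window. MT798x SoCs map CONN_INFRA directly
+ * and skip the L1 remap register.
+ */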
+static u32 mt7915_reg_map_l1(struct mt7915_dev *dev, u32 addr)
+{
+ u32 offset = FIELD_GET(MT_HIF_REMAP_L1_OFFSET, addr);
+ u32 base = FIELD_GET(MT_HIF_REMAP_L1_BASE, addr);
+ u32 l1_remap;
+
+ if (is_mt798x(&dev->mt76))
+ return MT_CONN_INFRA_OFFSET(addr);
+
+ l1_remap = is_mt7915(&dev->mt76) ?
+ MT_HIF_REMAP_L1 : MT_HIF_REMAP_L1_MT7916;
+
+ dev->bus_ops->rmw(&dev->mt76, l1_remap,
+ MT_HIF_REMAP_L1_MASK,
+ FIELD_PREP(MT_HIF_REMAP_L1_MASK, base));
+ /* use read to push write */
+ dev->bus_ops->rr(&dev->mt76, l1_remap);
+
+ return MT_HIF_REMAP_BASE_L1 + offset;
+}
+
+static u32 mt7915_reg_map_l2(struct mt7915_dev *dev, u32 addr)
+{
+ u32 offset, base;
+
+ if (is_mt7915(&dev->mt76)) {
+ offset = FIELD_GET(MT_HIF_REMAP_L2_OFFSET, addr);
+ base = FIELD_GET(MT_HIF_REMAP_L2_BASE, addr);
+
+ dev->bus_ops->rmw(&dev->mt76, MT_HIF_REMAP_L2,
+ MT_HIF_REMAP_L2_MASK,
+ FIELD_PREP(MT_HIF_REMAP_L2_MASK, base));
+
+ /* use read to push write */
+ dev->bus_ops->rr(&dev->mt76, MT_HIF_REMAP_L2);
+ } else {
+ u32 ofs = is_mt798x(&dev->mt76) ? 0x400000 : 0;
+
+ offset = FIELD_GET(MT_HIF_REMAP_L2_OFFSET_MT7916, addr);
+ base = FIELD_GET(MT_HIF_REMAP_L2_BASE_MT7916, addr);
+
+ dev->bus_ops->rmw(&dev->mt76, MT_HIF_REMAP_L2_MT7916 + ofs,
+ MT_HIF_REMAP_L2_MASK_MT7916,
+ FIELD_PREP(MT_HIF_REMAP_L2_MASK_MT7916, base));
+
+ /* use read to push write */
+ dev->bus_ops->rr(&dev->mt76, MT_HIF_REMAP_L2_MT7916 + ofs);
+
+ offset += (MT_HIF_REMAP_BASE_L2_MT7916 + ofs);
+ }
+
+ return offset;
+}
+
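+/* Translate a chip address into an MMIO offset: low addresses are used
+ * as-is, addresses covered by dev->reg.map are translated through the
+ * static map, and everything else falls back to the L1/L2 remap windows.
+ */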
+static u32 __mt7915_reg_addr(struct mt7915_dev *dev, u32 addr)
+{
+ int i;
+
+ if (addr < 0x100000)
+ return addr;
+
+ if (!dev->reg.map) {
+ dev_err(dev->mt76.dev, "err: reg_map is null\n");
+ return addr;
+ }
+
+ for (i = 0; i < dev->reg.map_size; i++) {
+ u32 ofs;
+
+ if (addr < dev->reg.map[i].phys)
+ continue;
+
+ ofs = addr - dev->reg.map[i].phys;
+ if (ofs > dev->reg.map[i].size)
+ continue;
+
+ return dev->reg.map[i].maps + ofs;
+ }
+
+ if ((addr >= MT_INFRA_BASE && addr < MT_WFSYS0_PHY_START) ||
+ (addr >= MT_WFSYS0_PHY_START && addr < MT_WFSYS1_PHY_START) ||
+ (addr >= MT_WFSYS1_PHY_START && addr <= MT_WFSYS1_PHY_END))
+ return mt7915_reg_map_l1(dev, addr);
+
+ if (dev_is_pci(dev->mt76.dev) &&
+ ((addr >= MT_CBTOP1_PHY_START && addr <= MT_CBTOP1_PHY_END) ||
+ addr >= MT_CBTOP2_PHY_START))
+ return mt7915_reg_map_l1(dev, addr);
+
+ /* CONN_INFRA: convert to physical addr and use layer 1 remap */
+ if (addr >= MT_INFRA_MCU_START && addr <= MT_INFRA_MCU_END) {
+ addr = addr - MT_INFRA_MCU_START + MT_INFRA_BASE;
+ return mt7915_reg_map_l1(dev, addr);
+ }
+
+ return mt7915_reg_map_l2(dev, addr);
+}
+
+void mt7915_memcpy_fromio(struct mt7915_dev *dev, void *buf, u32 offset,
+ size_t len)
+{
+ u32 addr = __mt7915_reg_addr(dev, offset);
+
+ memcpy_fromio(buf, dev->mt76.mmio.regs + addr, len);
+}
+
+static u32 mt7915_rr(struct mt76_dev *mdev, u32 offset)
+{
+ struct mt7915_dev *dev = container_of(mdev, struct mt7915_dev, mt76);
+ u32 addr = __mt7915_reg_addr(dev, offset);
+
+ return dev->bus_ops->rr(mdev, addr);
+}
+
+static void mt7915_wr(struct mt76_dev *mdev, u32 offset, u32 val)
+{
+ struct mt7915_dev *dev = container_of(mdev, struct mt7915_dev, mt76);
+ u32 addr = __mt7915_reg_addr(dev, offset);
+
+ dev->bus_ops->wr(mdev, addr, val);
+}
+
+static u32 mt7915_rmw(struct mt76_dev *mdev, u32 offset, u32 mask, u32 val)
+{
+ struct mt7915_dev *dev = container_of(mdev, struct mt7915_dev, mt76);
+ u32 addr = __mt7915_reg_addr(dev, offset);
+
+ return dev->bus_ops->rmw(mdev, addr, mask, val);
+}
+
+#ifdef CONFIG_NET_MEDIATEK_SOC_WED
+static int mt7915_mmio_wed_offload_enable(struct mtk_wed_device *wed)
+{
+ struct mt7915_dev *dev;
+
+ dev = container_of(wed, struct mt7915_dev, mt76.mmio.wed);
+
+ spin_lock_bh(&dev->mt76.token_lock);
+ dev->mt76.token_size = wed->wlan.token_start;
+ spin_unlock_bh(&dev->mt76.token_lock);
+
+ return !wait_event_timeout(dev->mt76.tx_wait,
+ !dev->mt76.wed_token_count, HZ);
+}
+
+static void mt7915_mmio_wed_offload_disable(struct mtk_wed_device *wed)
+{
+ struct mt7915_dev *dev;
+
+ dev = container_of(wed, struct mt7915_dev, mt76.mmio.wed);
+
+ spin_lock_bh(&dev->mt76.token_lock);
+ dev->mt76.token_size = MT7915_TOKEN_SIZE;
+ spin_unlock_bh(&dev->mt76.token_lock);
+}
+
+static void mt7915_mmio_wed_release_rx_buf(struct mtk_wed_device *wed)
+{
+ struct mt7915_dev *dev;
+ int i;
+
+ dev = container_of(wed, struct mt7915_dev, mt76.mmio.wed);
+ for (i = 0; i < dev->mt76.rx_token_size; i++) {
+ struct mt76_txwi_cache *t;
+
+ t = mt76_rx_token_release(&dev->mt76, i);
+ if (!t || !t->ptr)
+ continue;
+
+ mt76_put_page_pool_buf(t->ptr, false);
+ t->ptr = NULL;
+
+ mt76_put_rxwi(&dev->mt76, t);
+ }
+
+ mt76_free_pending_rxwi(&dev->mt76);
+}
+
+static u32 mt7915_mmio_wed_init_rx_buf(struct mtk_wed_device *wed, int size)
+{
+ struct mtk_rxbm_desc *desc = wed->rx_buf_ring.desc;
+ struct mt76_txwi_cache *t = NULL;
+ struct mt7915_dev *dev;
+ struct mt76_queue *q;
+ int i, len;
+
+ dev = container_of(wed, struct mt7915_dev, mt76.mmio.wed);
+ q = &dev->mt76.q_rx[MT_RXQ_MAIN];
+ len = SKB_WITH_OVERHEAD(q->buf_size);
+
+ for (i = 0; i < size; i++) {
+ enum dma_data_direction dir;
+ dma_addr_t addr;
+ u32 offset;
+ int token;
+ void *buf;
+
+ t = mt76_get_rxwi(&dev->mt76);
+ if (!t)
+ goto unmap;
+
+ buf = mt76_get_page_pool_buf(q, &offset, q->buf_size);
+ if (!buf)
+ goto unmap;
+
+ addr = page_pool_get_dma_addr(virt_to_head_page(buf)) + offset;
+ dir = page_pool_get_dma_dir(q->page_pool);
+ dma_sync_single_for_device(dev->mt76.dma_dev, addr, len, dir);
+
+ desc->buf0 = cpu_to_le32(addr);
+ token = mt76_rx_token_consume(&dev->mt76, buf, t, addr);
+ if (token < 0) {
+ mt76_put_page_pool_buf(buf, false);
+ goto unmap;
+ }
+
+ desc->token |= cpu_to_le32(FIELD_PREP(MT_DMA_CTL_TOKEN,
+ token));
+ desc++;
+ }
+
+ return 0;
+
+unmap:
+ if (t)
+ mt76_put_rxwi(&dev->mt76, t);
+ mt7915_mmio_wed_release_rx_buf(wed);
+ return -ENOMEM;
+}
+
+static void mt7915_mmio_wed_update_rx_stats(struct mtk_wed_device *wed,
+ struct mtk_wed_wo_rx_stats *stats)
+{
+ int idx = le16_to_cpu(stats->wlan_idx);
+ struct mt7915_dev *dev;
+ struct mt76_wcid *wcid;
+
+ dev = container_of(wed, struct mt7915_dev, mt76.mmio.wed);
+
+ if (idx >= mt7915_wtbl_size(dev))
+ return;
+
+ rcu_read_lock();
+
+ wcid = rcu_dereference(dev->mt76.wcid[idx]);
+ if (wcid) {
+ wcid->stats.rx_bytes += le32_to_cpu(stats->rx_byte_cnt);
+ wcid->stats.rx_packets += le32_to_cpu(stats->rx_pkt_cnt);
+ wcid->stats.rx_errors += le32_to_cpu(stats->rx_err_cnt);
+ wcid->stats.rx_drops += le32_to_cpu(stats->rx_drop_cnt);
+ }
+
+ rcu_read_unlock();
+}
+
+static int mt7915_mmio_wed_reset(struct mtk_wed_device *wed)
+{
+ struct mt76_dev *mdev = container_of(wed, struct mt76_dev, mmio.wed);
+ struct mt7915_dev *dev = container_of(mdev, struct mt7915_dev, mt76);
+ struct mt76_phy *mphy = &dev->mphy;
+ int ret;
+
+ ASSERT_RTNL();
+
+ if (test_and_set_bit(MT76_STATE_WED_RESET, &mphy->state))
+ return -EBUSY;
+
+ ret = mt7915_mcu_set_ser(dev, SER_RECOVER, SER_SET_RECOVER_L1,
+ mphy->band_idx);
+ if (ret)
+ goto out;
+
+ rtnl_unlock();
+ if (!wait_for_completion_timeout(&mdev->mmio.wed_reset, 20 * HZ)) {
+ dev_err(mdev->dev, "wed reset timeout\n");
+ ret = -ETIMEDOUT;
+ }
+ rtnl_lock();
+out:
+ clear_bit(MT76_STATE_WED_RESET, &mphy->state);
+
+ return ret;
+}
+
+static void mt7915_mmio_wed_reset_complete(struct mtk_wed_device *wed)
+{
+ struct mt76_dev *dev = container_of(wed, struct mt76_dev, mmio.wed);
+
+ complete(&dev->mmio.wed_reset_complete);
+}
+#endif
+
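+/* Returns 1 when a WED device is attached (WED then provides *irq and the
+ * DMA device), 0 when WED is disabled or unavailable and the caller must
+ * set up interrupts itself, or a negative error code.
+ */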
+int mt7915_mmio_wed_init(struct mt7915_dev *dev, void *pdev_ptr,
+ bool pci, int *irq)
+{
+#ifdef CONFIG_NET_MEDIATEK_SOC_WED
+ struct mtk_wed_device *wed = &dev->mt76.mmio.wed;
+ int ret;
+
+ if (!wed_enable)
+ return 0;
+
+ if (pci) {
+ struct pci_dev *pci_dev = pdev_ptr;
+
+ wed->wlan.pci_dev = pci_dev;
+ wed->wlan.bus_type = MTK_WED_BUS_PCIE;
+ wed->wlan.base = devm_ioremap(dev->mt76.dev,
+ pci_resource_start(pci_dev, 0),
+ pci_resource_len(pci_dev, 0));
+ wed->wlan.phy_base = pci_resource_start(pci_dev, 0);
+ wed->wlan.wpdma_int = pci_resource_start(pci_dev, 0) +
+ MT_INT_WED_SOURCE_CSR;
+ wed->wlan.wpdma_mask = pci_resource_start(pci_dev, 0) +
+ MT_INT_WED_MASK_CSR;
+ wed->wlan.wpdma_phys = pci_resource_start(pci_dev, 0) +
+ MT_WFDMA_EXT_CSR_BASE;
+ wed->wlan.wpdma_tx = pci_resource_start(pci_dev, 0) +
+ MT_TXQ_WED_RING_BASE;
+ wed->wlan.wpdma_txfree = pci_resource_start(pci_dev, 0) +
+ MT_RXQ_WED_RING_BASE;
+ wed->wlan.wpdma_rx_glo = pci_resource_start(pci_dev, 0) +
+ MT_WPDMA_GLO_CFG;
+ wed->wlan.wpdma_rx = pci_resource_start(pci_dev, 0) +
+ MT_RXQ_WED_DATA_RING_BASE;
+ } else {
+ struct platform_device *plat_dev = pdev_ptr;
+ struct resource *res;
+
+ res = platform_get_resource(plat_dev, IORESOURCE_MEM, 0);
+ if (!res)
+ return 0;
+
+ wed->wlan.platform_dev = plat_dev;
+ wed->wlan.bus_type = MTK_WED_BUS_AXI;
+ wed->wlan.base = devm_ioremap(dev->mt76.dev, res->start,
+ resource_size(res));
+ wed->wlan.phy_base = res->start;
+ wed->wlan.wpdma_int = res->start + MT_INT_SOURCE_CSR;
+ wed->wlan.wpdma_mask = res->start + MT_INT_MASK_CSR;
+ wed->wlan.wpdma_tx = res->start + MT_TXQ_WED_RING_BASE;
+ wed->wlan.wpdma_txfree = res->start + MT_RXQ_WED_RING_BASE;
+ wed->wlan.wpdma_rx_glo = res->start + MT_WPDMA_GLO_CFG;
+ wed->wlan.wpdma_rx = res->start + MT_RXQ_WED_DATA_RING_BASE;
+ }
+ wed->wlan.nbuf = MT7915_HW_TOKEN_SIZE;
+ wed->wlan.tx_tbit[0] = is_mt7915(&dev->mt76) ? 4 : 30;
+ wed->wlan.tx_tbit[1] = is_mt7915(&dev->mt76) ? 5 : 31;
+ wed->wlan.txfree_tbit = is_mt798x(&dev->mt76) ? 2 : 1;
+ wed->wlan.token_start = MT7915_TOKEN_SIZE - wed->wlan.nbuf;
+ wed->wlan.wcid_512 = !is_mt7915(&dev->mt76);
+
+ wed->wlan.rx_nbuf = 65536;
+ wed->wlan.rx_npkt = MT7915_WED_RX_TOKEN_SIZE;
+ wed->wlan.rx_size = SKB_WITH_OVERHEAD(MT_RX_BUF_SIZE);
+ if (is_mt7915(&dev->mt76)) {
+ wed->wlan.rx_tbit[0] = 16;
+ wed->wlan.rx_tbit[1] = 17;
+ } else if (is_mt798x(&dev->mt76)) {
+ wed->wlan.rx_tbit[0] = 22;
+ wed->wlan.rx_tbit[1] = 23;
+ } else {
+ wed->wlan.rx_tbit[0] = 18;
+ wed->wlan.rx_tbit[1] = 19;
+ }
+
+ wed->wlan.init_buf = mt7915_wed_init_buf;
+ wed->wlan.offload_enable = mt7915_mmio_wed_offload_enable;
+ wed->wlan.offload_disable = mt7915_mmio_wed_offload_disable;
+ wed->wlan.init_rx_buf = mt7915_mmio_wed_init_rx_buf;
+ wed->wlan.release_rx_buf = mt7915_mmio_wed_release_rx_buf;
+ wed->wlan.update_wo_rx_stats = mt7915_mmio_wed_update_rx_stats;
+ wed->wlan.reset = mt7915_mmio_wed_reset;
+ wed->wlan.reset_complete = mt7915_mmio_wed_reset_complete;
+
+ dev->mt76.rx_token_size = wed->wlan.rx_npkt;
+
+ if (mtk_wed_device_attach(wed))
+ return 0;
+
+ *irq = wed->irq;
+ dev->mt76.dma_dev = wed->dev;
+
+ ret = dma_set_mask(wed->dev, DMA_BIT_MASK(32));
+ if (ret)
+ return ret;
+
+ return 1;
+#else
+ return 0;
+#endif
+}
+
+static int mt7915_mmio_init(struct mt76_dev *mdev,
+ void __iomem *mem_base,
+ u32 device_id)
+{
+ struct mt76_bus_ops *bus_ops;
+ struct mt7915_dev *dev;
+
+ dev = container_of(mdev, struct mt7915_dev, mt76);
+ mt76_mmio_init(&dev->mt76, mem_base);
+
+ switch (device_id) {
+ case 0x7915:
+ dev->reg.reg_rev = mt7915_reg;
+ dev->reg.offs_rev = mt7915_offs;
+ dev->reg.map = mt7915_reg_map;
+ dev->reg.map_size = ARRAY_SIZE(mt7915_reg_map);
+ break;
+ case 0x7906:
+ dev->reg.reg_rev = mt7916_reg;
+ dev->reg.offs_rev = mt7916_offs;
+ dev->reg.map = mt7916_reg_map;
+ dev->reg.map_size = ARRAY_SIZE(mt7916_reg_map);
+ break;
+ case 0x7981:
+ case 0x7986:
+ dev->reg.reg_rev = mt7986_reg;
+ dev->reg.offs_rev = mt7916_offs;
+ dev->reg.map = mt7986_reg_map;
+ dev->reg.map_size = ARRAY_SIZE(mt7986_reg_map);
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ dev->bus_ops = dev->mt76.bus;
+ bus_ops = devm_kmemdup(dev->mt76.dev, dev->bus_ops, sizeof(*bus_ops),
+ GFP_KERNEL);
+ if (!bus_ops)
+ return -ENOMEM;
+
+ bus_ops->rr = mt7915_rr;
+ bus_ops->wr = mt7915_wr;
+ bus_ops->rmw = mt7915_rmw;
+ dev->mt76.bus = bus_ops;
+
+ mdev->rev = (device_id << 16) |
+ (mt76_rr(dev, MT_HW_REV) & 0xff);
+ dev_dbg(mdev->dev, "ASIC revision: %04x\n", mdev->rev);
+
+ return 0;
+}
+
+void mt7915_dual_hif_set_irq_mask(struct mt7915_dev *dev,
+ bool write_reg,
+ u32 clear, u32 set)
+{
+ struct mt76_dev *mdev = &dev->mt76;
+ unsigned long flags;
+
+ spin_lock_irqsave(&mdev->mmio.irq_lock, flags);
+
+ mdev->mmio.irqmask &= ~clear;
+ mdev->mmio.irqmask |= set;
+
+ if (write_reg) {
+ if (mtk_wed_device_active(&mdev->mmio.wed))
+ mtk_wed_device_irq_set_mask(&mdev->mmio.wed,
+ mdev->mmio.irqmask);
+ else
+ mt76_wr(dev, MT_INT_MASK_CSR, mdev->mmio.irqmask);
+ mt76_wr(dev, MT_INT1_MASK_CSR, mdev->mmio.irqmask);
+ }
+
+ spin_unlock_irqrestore(&mdev->mmio.irq_lock, flags);
+}
+
+static void mt7915_rx_poll_complete(struct mt76_dev *mdev,
+ enum mt76_rxq_id q)
+{
+ struct mt7915_dev *dev = container_of(mdev, struct mt7915_dev, mt76);
+
+ mt7915_irq_enable(dev, MT_INT_RX(q));
+}
+
+/* TODO: support 2/4/6/8 MSI-X vectors */
+static void mt7915_irq_tasklet(struct tasklet_struct *t)
+{
+ struct mt7915_dev *dev = from_tasklet(dev, t, mt76.irq_tasklet);
+ struct mtk_wed_device *wed = &dev->mt76.mmio.wed;
+ u32 intr, intr1, mask;
+
+ if (mtk_wed_device_active(wed)) {
+ mtk_wed_device_irq_set_mask(wed, 0);
+ if (dev->hif2)
+ mt76_wr(dev, MT_INT1_MASK_CSR, 0);
+ intr = mtk_wed_device_irq_get(wed, dev->mt76.mmio.irqmask);
+ } else {
+ mt76_wr(dev, MT_INT_MASK_CSR, 0);
+ if (dev->hif2)
+ mt76_wr(dev, MT_INT1_MASK_CSR, 0);
+
+ intr = mt76_rr(dev, MT_INT_SOURCE_CSR);
+ intr &= dev->mt76.mmio.irqmask;
+ mt76_wr(dev, MT_INT_SOURCE_CSR, intr);
+ }
+
+ if (dev->hif2) {
+ intr1 = mt76_rr(dev, MT_INT1_SOURCE_CSR);
+ intr1 &= dev->mt76.mmio.irqmask;
+ mt76_wr(dev, MT_INT1_SOURCE_CSR, intr1);
+
+ intr |= intr1;
+ }
+
+ trace_dev_irq(&dev->mt76, intr, dev->mt76.mmio.irqmask);
+
+ mask = intr & MT_INT_RX_DONE_ALL;
+ if (intr & MT_INT_TX_DONE_MCU)
+ mask |= MT_INT_TX_DONE_MCU;
+
+ mt7915_irq_disable(dev, mask);
+
+ if (intr & MT_INT_TX_DONE_MCU)
+ napi_schedule(&dev->mt76.tx_napi);
+
+ if (intr & MT_INT_RX(MT_RXQ_MAIN))
+ napi_schedule(&dev->mt76.napi[MT_RXQ_MAIN]);
+
+ if (intr & MT_INT_RX(MT_RXQ_BAND1))
+ napi_schedule(&dev->mt76.napi[MT_RXQ_BAND1]);
+
+ if (intr & MT_INT_RX(MT_RXQ_MCU))
+ napi_schedule(&dev->mt76.napi[MT_RXQ_MCU]);
+
+ if (intr & MT_INT_RX(MT_RXQ_MCU_WA))
+ napi_schedule(&dev->mt76.napi[MT_RXQ_MCU_WA]);
+
+ if (!is_mt7915(&dev->mt76) &&
+ (intr & MT_INT_RX(MT_RXQ_MAIN_WA)))
+ napi_schedule(&dev->mt76.napi[MT_RXQ_MAIN_WA]);
+
+ if (intr & MT_INT_RX(MT_RXQ_BAND1_WA))
+ napi_schedule(&dev->mt76.napi[MT_RXQ_BAND1_WA]);
+
+ if (intr & MT_INT_MCU_CMD) {
+ u32 val = mt76_rr(dev, MT_MCU_CMD);
+
+ mt76_wr(dev, MT_MCU_CMD, val);
+ if (val & (MT_MCU_CMD_ERROR_MASK | MT_MCU_CMD_WDT_MASK)) {
+ dev->recovery.state = val;
+ mt7915_reset(dev);
+ }
+ }
+}
+
+irqreturn_t mt7915_irq_handler(int irq, void *dev_instance)
+{
+ struct mt7915_dev *dev = dev_instance;
+ struct mtk_wed_device *wed = &dev->mt76.mmio.wed;
+
+ if (mtk_wed_device_active(wed))
+ mtk_wed_device_irq_set_mask(wed, 0);
+ else
+ mt76_wr(dev, MT_INT_MASK_CSR, 0);
+
+ if (dev->hif2)
+ mt76_wr(dev, MT_INT1_MASK_CSR, 0);
+
+ if (!test_bit(MT76_STATE_INITIALIZED, &dev->mphy.state))
+ return IRQ_NONE;
+
+ tasklet_schedule(&dev->mt76.irq_tasklet);
+
+ return IRQ_HANDLED;
+}
+
+struct mt7915_dev *mt7915_mmio_probe(struct device *pdev,
+ void __iomem *mem_base, u32 device_id)
+{
+ static const struct mt76_driver_ops drv_ops = {
+ /* txwi_size = txd size + txp size */
+ .txwi_size = MT_TXD_SIZE + sizeof(struct mt76_connac_fw_txp),
+ .drv_flags = MT_DRV_TXWI_NO_FREE | MT_DRV_HW_MGMT_TXQ |
+ MT_DRV_AMSDU_OFFLOAD,
+ .survey_flags = SURVEY_INFO_TIME_TX |
+ SURVEY_INFO_TIME_RX |
+ SURVEY_INFO_TIME_BSS_RX,
+ .token_size = MT7915_TOKEN_SIZE,
+ .tx_prepare_skb = mt7915_tx_prepare_skb,
+ .tx_complete_skb = mt76_connac_tx_complete_skb,
+ .rx_skb = mt7915_queue_rx_skb,
+ .rx_check = mt7915_rx_check,
+ .rx_poll_complete = mt7915_rx_poll_complete,
+ .sta_add = mt7915_mac_sta_add,
+ .sta_remove = mt7915_mac_sta_remove,
+ .update_survey = mt7915_update_channel,
+ };
+ struct mt7915_dev *dev;
+ struct mt76_dev *mdev;
+ int ret;
+
+ mdev = mt76_alloc_device(pdev, sizeof(*dev), &mt7915_ops, &drv_ops);
+ if (!mdev)
+ return ERR_PTR(-ENOMEM);
+
+ dev = container_of(mdev, struct mt7915_dev, mt76);
+
+ ret = mt7915_mmio_init(mdev, mem_base, device_id);
+ if (ret)
+ goto error;
+
+ tasklet_setup(&mdev->irq_tasklet, mt7915_irq_tasklet);
+
+ return dev;
+
+error:
+ mt76_free_device(&dev->mt76);
+
+ return ERR_PTR(ret);
+}
+
+static int __init mt7915_init(void)
+{
+ int ret;
+
+ ret = pci_register_driver(&mt7915_hif_driver);
+ if (ret)
+ return ret;
+
+ ret = pci_register_driver(&mt7915_pci_driver);
+ if (ret)
+ goto error_pci;
+
+ if (IS_ENABLED(CONFIG_MT798X_WMAC)) {
+ ret = platform_driver_register(&mt798x_wmac_driver);
+ if (ret)
+ goto error_wmac;
+ }
+
+ return 0;
+
+error_wmac:
+ pci_unregister_driver(&mt7915_pci_driver);
+error_pci:
+ pci_unregister_driver(&mt7915_hif_driver);
+
+ return ret;
+}
+
+static void __exit mt7915_exit(void)
+{
+ if (IS_ENABLED(CONFIG_MT798X_WMAC))
+ platform_driver_unregister(&mt798x_wmac_driver);
+
+ pci_unregister_driver(&mt7915_pci_driver);
+ pci_unregister_driver(&mt7915_hif_driver);
+}
+
+module_init(mt7915_init);
+module_exit(mt7915_exit);
+MODULE_LICENSE("Dual BSD/GPL");
diff --git a/drivers/net/wireless/mediatek/mt76/mt7915/mt7915.h b/drivers/net/wireless/mediatek/mt76/mt7915/mt7915.h
new file mode 100644
index 0000000000..21984e9723
--- /dev/null
+++ b/drivers/net/wireless/mediatek/mt76/mt7915/mt7915.h
@@ -0,0 +1,596 @@
+/* SPDX-License-Identifier: ISC */
+/* Copyright (C) 2020 MediaTek Inc. */
+
+#ifndef __MT7915_H
+#define __MT7915_H
+
+#include <linux/interrupt.h>
+#include <linux/ktime.h>
+#include "../mt76_connac.h"
+#include "regs.h"
+
+#define MT7915_MAX_INTERFACES 19
+#define MT7915_WTBL_SIZE 288
+#define MT7916_WTBL_SIZE 544
+#define MT7915_WTBL_RESERVED (mt7915_wtbl_size(dev) - 1)
+#define MT7915_WTBL_STA (MT7915_WTBL_RESERVED - \
+ MT7915_MAX_INTERFACES)
+
+#define MT7915_WATCHDOG_TIME (HZ / 10)
+#define MT7915_RESET_TIMEOUT (30 * HZ)
+
+#define MT7915_TX_RING_SIZE 2048
+#define MT7915_TX_MCU_RING_SIZE 256
+#define MT7915_TX_FWDL_RING_SIZE 128
+
+#define MT7915_RX_RING_SIZE 1536
+#define MT7915_RX_MCU_RING_SIZE 512
+
+#define MT7915_FIRMWARE_WA "mediatek/mt7915_wa.bin"
+#define MT7915_FIRMWARE_WM "mediatek/mt7915_wm.bin"
+#define MT7915_ROM_PATCH "mediatek/mt7915_rom_patch.bin"
+
+#define MT7916_FIRMWARE_WA "mediatek/mt7916_wa.bin"
+#define MT7916_FIRMWARE_WM "mediatek/mt7916_wm.bin"
+#define MT7916_ROM_PATCH "mediatek/mt7916_rom_patch.bin"
+
+#define MT7981_FIRMWARE_WA "mediatek/mt7981_wa.bin"
+#define MT7981_FIRMWARE_WM "mediatek/mt7981_wm.bin"
+#define MT7981_ROM_PATCH "mediatek/mt7981_rom_patch.bin"
+
+#define MT7986_FIRMWARE_WA "mediatek/mt7986_wa.bin"
+#define MT7986_FIRMWARE_WM "mediatek/mt7986_wm.bin"
+#define MT7986_FIRMWARE_WM_MT7975 "mediatek/mt7986_wm_mt7975.bin"
+#define MT7986_ROM_PATCH "mediatek/mt7986_rom_patch.bin"
+#define MT7986_ROM_PATCH_MT7975 "mediatek/mt7986_rom_patch_mt7975.bin"
+
+#define MT7915_EEPROM_DEFAULT "mediatek/mt7915_eeprom.bin"
+#define MT7915_EEPROM_DEFAULT_DBDC "mediatek/mt7915_eeprom_dbdc.bin"
+#define MT7916_EEPROM_DEFAULT "mediatek/mt7916_eeprom.bin"
+
+#define MT7981_EEPROM_MT7976_DEFAULT_DBDC "mediatek/mt7981_eeprom_mt7976_dbdc.bin"
+
+#define MT7986_EEPROM_MT7975_DEFAULT "mediatek/mt7986_eeprom_mt7975.bin"
+#define MT7986_EEPROM_MT7975_DUAL_DEFAULT "mediatek/mt7986_eeprom_mt7975_dual.bin"
+#define MT7986_EEPROM_MT7976_DEFAULT "mediatek/mt7986_eeprom_mt7976.bin"
+#define MT7986_EEPROM_MT7976_DEFAULT_DBDC "mediatek/mt7986_eeprom_mt7976_dbdc.bin"
+#define MT7986_EEPROM_MT7976_DUAL_DEFAULT "mediatek/mt7986_eeprom_mt7976_dual.bin"
+
+#define MT7915_EEPROM_SIZE 3584
+#define MT7916_EEPROM_SIZE 4096
+
+#define MT7915_EEPROM_BLOCK_SIZE 16
+#define MT7915_HW_TOKEN_SIZE 4096
+#define MT7915_TOKEN_SIZE 8192
+
+#define MT7915_CFEND_RATE_DEFAULT 0x49 /* OFDM 24M */
+#define MT7915_CFEND_RATE_11B 0x03 /* 11B LP, 11M */
+
+#define MT7915_THERMAL_THROTTLE_MAX 100
+#define MT7915_CDEV_THROTTLE_MAX 99
+
+#define MT7915_SKU_RATE_NUM 161
+
+#define MT7915_MAX_TWT_AGRT 16
+#define MT7915_MAX_STA_TWT_AGRT 8
+#define MT7915_MIN_TWT_DUR 64
+#define MT7915_MAX_QUEUE (MT_RXQ_BAND2 + __MT_MCUQ_MAX + 2)
+
+#define MT7915_WED_RX_TOKEN_SIZE 12288
+
+#define MT7915_CRIT_TEMP_IDX 0
+#define MT7915_MAX_TEMP_IDX 1
+#define MT7915_CRIT_TEMP 110
+#define MT7915_MAX_TEMP 120
+
+struct mt7915_vif;
+struct mt7915_sta;
+struct mt7915_dfs_pulse;
+struct mt7915_dfs_pattern;
+
+enum mt7915_txq_id {
+ MT7915_TXQ_FWDL = 16,
+ MT7915_TXQ_MCU_WM,
+ MT7915_TXQ_BAND0,
+ MT7915_TXQ_BAND1,
+ MT7915_TXQ_MCU_WA,
+};
+
+enum mt7915_rxq_id {
+ MT7915_RXQ_BAND0 = 0,
+ MT7915_RXQ_BAND1,
+ MT7915_RXQ_MCU_WM = 0,
+ MT7915_RXQ_MCU_WA,
+ MT7915_RXQ_MCU_WA_EXT,
+};
+
+enum mt7916_rxq_id {
+ MT7916_RXQ_MCU_WM = 0,
+ MT7916_RXQ_MCU_WA,
+ MT7916_RXQ_MCU_WA_MAIN,
+ MT7916_RXQ_MCU_WA_EXT,
+ MT7916_RXQ_BAND0,
+ MT7916_RXQ_BAND1,
+};
+
+struct mt7915_twt_flow {
+ struct list_head list;
+ u64 start_tsf;
+ u64 tsf;
+ u32 duration;
+ u16 wcid;
+ __le16 mantissa;
+ u8 exp;
+ u8 table_id;
+ u8 id;
+ u8 protection:1;
+ u8 flowtype:1;
+ u8 trigger:1;
+ u8 sched:1;
+};
+
+DECLARE_EWMA(avg_signal, 10, 8)
+
+struct mt7915_sta {
+ struct mt76_wcid wcid; /* must be first */
+
+ struct mt7915_vif *vif;
+
+ struct list_head rc_list;
+ u32 airtime_ac[8];
+
+ int ack_signal;
+ struct ewma_avg_signal avg_ack_signal;
+
+ unsigned long changed;
+ unsigned long jiffies;
+ struct mt76_connac_sta_key_conf bip;
+
+ struct {
+ u8 flowid_mask;
+ struct mt7915_twt_flow flow[MT7915_MAX_STA_TWT_AGRT];
+ } twt;
+};
+
+struct mt7915_vif_cap {
+ bool ht_ldpc:1;
+ bool vht_ldpc:1;
+ bool he_ldpc:1;
+ bool vht_su_ebfer:1;
+ bool vht_su_ebfee:1;
+ bool vht_mu_ebfer:1;
+ bool vht_mu_ebfee:1;
+ bool he_su_ebfer:1;
+ bool he_su_ebfee:1;
+ bool he_mu_ebfer:1;
+};
+
+struct mt7915_vif {
+ struct mt76_vif mt76; /* must be first */
+
+ struct mt7915_vif_cap cap;
+ struct mt7915_sta sta;
+ struct mt7915_phy *phy;
+
+ struct ieee80211_tx_queue_params queue_params[IEEE80211_NUM_ACS];
+ struct cfg80211_bitrate_mask bitrate_mask;
+};
+
+/* crash-dump */
+struct mt7915_crash_data {
+ guid_t guid;
+ struct timespec64 timestamp;
+
+ u8 *memdump_buf;
+ size_t memdump_buf_len;
+};
+
+struct mt7915_hif {
+ struct list_head list;
+
+ struct device *dev;
+ void __iomem *regs;
+ int irq;
+};
+
+struct mt7915_phy {
+ struct mt76_phy *mt76;
+ struct mt7915_dev *dev;
+
+ struct ieee80211_sband_iftype_data iftype[NUM_NL80211_BANDS][NUM_NL80211_IFTYPES];
+
+ struct ieee80211_vif *monitor_vif;
+
+ struct thermal_cooling_device *cdev;
+ u8 cdev_state;
+ u8 throttle_state;
+ u32 throttle_temp[2]; /* 0: critical high, 1: maximum */
+
+ u32 rxfilter;
+ u64 omac_mask;
+
+ u16 noise;
+
+ s16 coverage_class;
+ u8 slottime;
+
+ u8 rdd_state;
+
+ u32 trb_ts;
+
+ u32 rx_ampdu_ts;
+ u32 ampdu_ref;
+
+ struct mt76_mib_stats mib;
+ struct mt76_channel_state state_ts;
+
+#ifdef CONFIG_NL80211_TESTMODE
+ struct {
+ u32 *reg_backup;
+
+ s32 last_freq_offset;
+ u8 last_rcpi[4];
+ s8 last_ib_rssi[4];
+ s8 last_wb_rssi[4];
+ u8 last_snr;
+
+ u8 spe_idx;
+ } test;
+#endif
+};
+
+struct mt7915_dev {
+ union { /* must be first */
+ struct mt76_dev mt76;
+ struct mt76_phy mphy;
+ };
+
+ struct mt7915_hif *hif2;
+ struct mt7915_reg_desc reg;
+ u8 q_id[MT7915_MAX_QUEUE];
+ u32 q_int_mask[MT7915_MAX_QUEUE];
+ u32 wfdma_mask;
+
+ const struct mt76_bus_ops *bus_ops;
+ struct mt7915_phy phy;
+
+ /* monitor rx chain configured channel */
+ struct cfg80211_chan_def rdd2_chandef;
+ struct mt7915_phy *rdd2_phy;
+
+ u16 chainmask;
+ u16 chainshift;
+ u32 hif_idx;
+
+ struct work_struct init_work;
+ struct work_struct rc_work;
+ struct work_struct dump_work;
+ struct work_struct reset_work;
+ wait_queue_head_t reset_wait;
+
+ struct {
+ u32 state;
+ u32 wa_reset_count;
+ u32 wm_reset_count;
+ bool hw_full_reset:1;
+ bool hw_init_done:1;
+ bool restart:1;
+ } recovery;
+
+ /* protects coredump data */
+ struct mutex dump_mutex;
+#ifdef CONFIG_DEV_COREDUMP
+ struct {
+ struct mt7915_crash_data *crash_data;
+ } coredump;
+#endif
+
+ struct list_head sta_rc_list;
+ struct list_head twt_list;
+
+ u32 hw_pattern;
+
+ bool dbdc_support;
+ bool flash_mode;
+ bool muru_debug;
+ bool ibf;
+
+ struct dentry *debugfs_dir;
+ struct rchan *relay_fwlog;
+
+ void *cal;
+
+ struct {
+ u8 debug_wm;
+ u8 debug_wa;
+ u8 debug_bin;
+ } fw;
+
+ struct {
+ u16 table_mask;
+ u8 n_agrt;
+ } twt;
+
+ struct reset_control *rstc;
+ void __iomem *dcm;
+ void __iomem *sku;
+};
+
+enum {
+ WFDMA0 = 0x0,
+ WFDMA1,
+ WFDMA_EXT,
+ __MT_WFDMA_MAX,
+};
+
+enum {
+ MT_RX_SEL0,
+ MT_RX_SEL1,
+ MT_RX_SEL2, /* monitor chain */
+};
+
+enum mt7915_rdd_cmd {
+ RDD_STOP,
+ RDD_START,
+ RDD_DET_MODE,
+ RDD_RADAR_EMULATE,
+ RDD_START_TXQ = 20,
+ RDD_SET_WF_ANT = 30,
+ RDD_CAC_START = 50,
+ RDD_CAC_END,
+ RDD_NORMAL_START,
+ RDD_DISABLE_DFS_CAL,
+ RDD_PULSE_DBG,
+ RDD_READ_PULSE,
+ RDD_RESUME_BF,
+ RDD_IRQ_OFF,
+};
+
+static inline struct mt7915_phy *
+mt7915_hw_phy(struct ieee80211_hw *hw)
+{
+ struct mt76_phy *phy = hw->priv;
+
+ return phy->priv;
+}
+
+static inline struct mt7915_dev *
+mt7915_hw_dev(struct ieee80211_hw *hw)
+{
+ struct mt76_phy *phy = hw->priv;
+
+ return container_of(phy->dev, struct mt7915_dev, mt76);
+}
+
+static inline struct mt7915_phy *
+mt7915_ext_phy(struct mt7915_dev *dev)
+{
+ struct mt76_phy *phy = dev->mt76.phys[MT_BAND1];
+
+ if (!phy)
+ return NULL;
+
+ return phy->priv;
+}
+
+static inline u32 mt7915_check_adie(struct mt7915_dev *dev, bool sku)
+{
+ u32 mask = sku ? MT_CONNINFRA_SKU_MASK : MT_ADIE_TYPE_MASK;
+
+ if (!is_mt798x(&dev->mt76))
+ return 0;
+
+ return mt76_rr(dev, MT_CONNINFRA_SKU_DEC_ADDR) & mask;
+}
+
+extern const struct ieee80211_ops mt7915_ops;
+extern const struct mt76_testmode_ops mt7915_testmode_ops;
+extern struct pci_driver mt7915_pci_driver;
+extern struct pci_driver mt7915_hif_driver;
+extern struct platform_driver mt798x_wmac_driver;
+
+#ifdef CONFIG_MT798X_WMAC
+int mt7986_wmac_enable(struct mt7915_dev *dev);
+void mt7986_wmac_disable(struct mt7915_dev *dev);
+#else
+static inline int mt7986_wmac_enable(struct mt7915_dev *dev)
+{
+ return 0;
+}
+
+static inline void mt7986_wmac_disable(struct mt7915_dev *dev)
+{
+}
+#endif
+struct mt7915_dev *mt7915_mmio_probe(struct device *pdev,
+ void __iomem *mem_base, u32 device_id);
+void mt7915_wfsys_reset(struct mt7915_dev *dev);
+irqreturn_t mt7915_irq_handler(int irq, void *dev_instance);
+u64 __mt7915_get_tsf(struct ieee80211_hw *hw, struct mt7915_vif *mvif);
+u32 mt7915_wed_init_buf(void *ptr, dma_addr_t phys, int token_id);
+
+int mt7915_register_device(struct mt7915_dev *dev);
+void mt7915_unregister_device(struct mt7915_dev *dev);
+int mt7915_eeprom_init(struct mt7915_dev *dev);
+void mt7915_eeprom_parse_hw_cap(struct mt7915_dev *dev,
+ struct mt7915_phy *phy);
+int mt7915_eeprom_get_target_power(struct mt7915_dev *dev,
+ struct ieee80211_channel *chan,
+ u8 chain_idx);
+s8 mt7915_eeprom_get_power_delta(struct mt7915_dev *dev, int band);
+int mt7915_dma_init(struct mt7915_dev *dev, struct mt7915_phy *phy2);
+void mt7915_dma_prefetch(struct mt7915_dev *dev);
+void mt7915_dma_cleanup(struct mt7915_dev *dev);
+int mt7915_dma_reset(struct mt7915_dev *dev, bool force);
+int mt7915_dma_start(struct mt7915_dev *dev, bool reset, bool wed_reset);
+int mt7915_txbf_init(struct mt7915_dev *dev);
+void mt7915_init_txpower(struct mt7915_dev *dev,
+ struct ieee80211_supported_band *sband);
+void mt7915_reset(struct mt7915_dev *dev);
+int mt7915_run(struct ieee80211_hw *hw);
+int mt7915_mcu_init(struct mt7915_dev *dev);
+int mt7915_mcu_init_firmware(struct mt7915_dev *dev);
+int mt7915_mcu_twt_agrt_update(struct mt7915_dev *dev,
+ struct mt7915_vif *mvif,
+ struct mt7915_twt_flow *flow,
+ int cmd);
+int mt7915_mcu_add_dev_info(struct mt7915_phy *phy,
+ struct ieee80211_vif *vif, bool enable);
+int mt7915_mcu_add_bss_info(struct mt7915_phy *phy,
+ struct ieee80211_vif *vif, int enable);
+int mt7915_mcu_add_sta(struct mt7915_dev *dev, struct ieee80211_vif *vif,
+ struct ieee80211_sta *sta, bool enable);
+int mt7915_mcu_add_tx_ba(struct mt7915_dev *dev,
+ struct ieee80211_ampdu_params *params,
+ bool add);
+int mt7915_mcu_add_rx_ba(struct mt7915_dev *dev,
+ struct ieee80211_ampdu_params *params,
+ bool add);
+int mt7915_mcu_update_bss_color(struct mt7915_dev *dev, struct ieee80211_vif *vif,
+ struct cfg80211_he_bss_color *he_bss_color);
+int mt7915_mcu_add_inband_discov(struct mt7915_dev *dev, struct ieee80211_vif *vif,
+ u32 changed);
+int mt7915_mcu_add_beacon(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
+ int enable, u32 changed);
+int mt7915_mcu_add_obss_spr(struct mt7915_phy *phy, struct ieee80211_vif *vif,
+ struct ieee80211_he_obss_pd *he_obss_pd);
+int mt7915_mcu_add_rate_ctrl(struct mt7915_dev *dev, struct ieee80211_vif *vif,
+ struct ieee80211_sta *sta, bool changed);
+int mt7915_mcu_add_smps(struct mt7915_dev *dev, struct ieee80211_vif *vif,
+ struct ieee80211_sta *sta);
+int mt7915_set_channel(struct mt7915_phy *phy);
+int mt7915_mcu_set_chan_info(struct mt7915_phy *phy, int cmd);
+int mt7915_mcu_set_tx(struct mt7915_dev *dev, struct ieee80211_vif *vif);
+int mt7915_mcu_update_edca(struct mt7915_dev *dev, void *req);
+int mt7915_mcu_set_fixed_rate_ctrl(struct mt7915_dev *dev,
+ struct ieee80211_vif *vif,
+ struct ieee80211_sta *sta,
+ void *data, u32 field);
+int mt7915_mcu_set_eeprom(struct mt7915_dev *dev);
+int mt7915_mcu_get_eeprom(struct mt7915_dev *dev, u32 offset);
+int mt7915_mcu_get_eeprom_free_block(struct mt7915_dev *dev, u8 *block_num);
+int mt7915_mcu_set_mac(struct mt7915_dev *dev, int band, bool enable,
+ bool hdr_trans);
+int mt7915_mcu_set_test_param(struct mt7915_dev *dev, u8 param, bool test_mode,
+ u8 en);
+int mt7915_mcu_set_ser(struct mt7915_dev *dev, u8 action, u8 set, u8 band);
+int mt7915_mcu_set_sku_en(struct mt7915_phy *phy, bool enable);
+int mt7915_mcu_set_txpower_sku(struct mt7915_phy *phy);
+int mt7915_mcu_get_txpower_sku(struct mt7915_phy *phy, s8 *txpower, int len);
+int mt7915_mcu_set_txpower_frame_min(struct mt7915_phy *phy, s8 txpower);
+int mt7915_mcu_set_txpower_frame(struct mt7915_phy *phy,
+ struct ieee80211_vif *vif,
+ struct ieee80211_sta *sta, s8 txpower);
+int mt7915_mcu_set_txbf(struct mt7915_dev *dev, u8 action);
+int mt7915_mcu_set_fcc5_lpn(struct mt7915_dev *dev, int val);
+int mt7915_mcu_set_pulse_th(struct mt7915_dev *dev,
+ const struct mt7915_dfs_pulse *pulse);
+int mt7915_mcu_set_radar_th(struct mt7915_dev *dev, int index,
+ const struct mt7915_dfs_pattern *pattern);
+int mt7915_mcu_set_muru_ctrl(struct mt7915_dev *dev, u32 cmd, u32 val);
+int mt7915_mcu_apply_group_cal(struct mt7915_dev *dev);
+int mt7915_mcu_apply_tx_dpd(struct mt7915_phy *phy);
+int mt7915_mcu_get_chan_mib_info(struct mt7915_phy *phy, bool chan_switch);
+int mt7915_mcu_get_temperature(struct mt7915_phy *phy);
+int mt7915_mcu_set_thermal_throttling(struct mt7915_phy *phy, u8 state);
+int mt7915_mcu_set_thermal_protect(struct mt7915_phy *phy);
+int mt7915_mcu_get_rx_rate(struct mt7915_phy *phy, struct ieee80211_vif *vif,
+ struct ieee80211_sta *sta, struct rate_info *rate);
+int mt7915_mcu_rdd_background_enable(struct mt7915_phy *phy,
+ struct cfg80211_chan_def *chandef);
+int mt7915_mcu_wed_wa_tx_stats(struct mt7915_dev *dev, u16 wcid);
+int mt7915_mcu_rf_regval(struct mt7915_dev *dev, u32 regidx, u32 *val, bool set);
+int mt7915_mcu_wa_cmd(struct mt7915_dev *dev, int cmd, u32 a1, u32 a2, u32 a3);
+int mt7915_mcu_fw_log_2_host(struct mt7915_dev *dev, u8 type, u8 ctrl);
+int mt7915_mcu_fw_dbg_ctrl(struct mt7915_dev *dev, u32 module, u8 level);
+void mt7915_mcu_rx_event(struct mt7915_dev *dev, struct sk_buff *skb);
+void mt7915_mcu_exit(struct mt7915_dev *dev);
+
+static inline u16 mt7915_wtbl_size(struct mt7915_dev *dev)
+{
+ return is_mt7915(&dev->mt76) ? MT7915_WTBL_SIZE : MT7916_WTBL_SIZE;
+}
+
+static inline u16 mt7915_eeprom_size(struct mt7915_dev *dev)
+{
+ return is_mt7915(&dev->mt76) ? MT7915_EEPROM_SIZE : MT7916_EEPROM_SIZE;
+}
+
+void mt7915_dual_hif_set_irq_mask(struct mt7915_dev *dev, bool write_reg,
+ u32 clear, u32 set);
+
+static inline void mt7915_irq_enable(struct mt7915_dev *dev, u32 mask)
+{
+ if (dev->hif2)
+ mt7915_dual_hif_set_irq_mask(dev, false, 0, mask);
+ else
+ mt76_set_irq_mask(&dev->mt76, 0, 0, mask);
+
+ tasklet_schedule(&dev->mt76.irq_tasklet);
+}
+
+static inline void mt7915_irq_disable(struct mt7915_dev *dev, u32 mask)
+{
+ if (dev->hif2)
+ mt7915_dual_hif_set_irq_mask(dev, true, mask, 0);
+ else
+ mt76_set_irq_mask(&dev->mt76, MT_INT_MASK_CSR, mask, 0);
+}
+
+void mt7915_memcpy_fromio(struct mt7915_dev *dev, void *buf, u32 offset,
+ size_t len);
+
+void mt7915_mac_init(struct mt7915_dev *dev);
+u32 mt7915_mac_wtbl_lmac_addr(struct mt7915_dev *dev, u16 wcid, u8 dw);
+bool mt7915_mac_wtbl_update(struct mt7915_dev *dev, int idx, u32 mask);
+void mt7915_mac_reset_counters(struct mt7915_phy *phy);
+void mt7915_mac_cca_stats_reset(struct mt7915_phy *phy);
+void mt7915_mac_enable_nf(struct mt7915_dev *dev, bool ext_phy);
+void mt7915_mac_enable_rtscts(struct mt7915_dev *dev,
+ struct ieee80211_vif *vif, bool enable);
+void mt7915_mac_write_txwi(struct mt76_dev *dev, __le32 *txwi,
+ struct sk_buff *skb, struct mt76_wcid *wcid, int pid,
+ struct ieee80211_key_conf *key,
+ enum mt76_txq_id qid, u32 changed);
+void mt7915_mac_set_timing(struct mt7915_phy *phy);
+int mt7915_mac_sta_add(struct mt76_dev *mdev, struct ieee80211_vif *vif,
+ struct ieee80211_sta *sta);
+void mt7915_mac_sta_remove(struct mt76_dev *mdev, struct ieee80211_vif *vif,
+ struct ieee80211_sta *sta);
+void mt7915_mac_work(struct work_struct *work);
+void mt7915_mac_reset_work(struct work_struct *work);
+void mt7915_mac_dump_work(struct work_struct *work);
+void mt7915_mac_sta_rc_work(struct work_struct *work);
+void mt7915_mac_update_stats(struct mt7915_phy *phy);
+void mt7915_mac_twt_teardown_flow(struct mt7915_dev *dev,
+ struct mt7915_sta *msta,
+ u8 flowid);
+void mt7915_mac_add_twt_setup(struct ieee80211_hw *hw,
+ struct ieee80211_sta *sta,
+ struct ieee80211_twt_setup *twt);
+int mt7915_tx_prepare_skb(struct mt76_dev *mdev, void *txwi_ptr,
+ enum mt76_txq_id qid, struct mt76_wcid *wcid,
+ struct ieee80211_sta *sta,
+ struct mt76_tx_info *tx_info);
+void mt7915_queue_rx_skb(struct mt76_dev *mdev, enum mt76_rxq_id q,
+ struct sk_buff *skb, u32 *info);
+bool mt7915_rx_check(struct mt76_dev *mdev, void *data, int len);
+void mt7915_stats_work(struct work_struct *work);
+int mt76_dfs_start_rdd(struct mt7915_dev *dev, bool force);
+int mt7915_dfs_init_radar_detector(struct mt7915_phy *phy);
+void mt7915_set_stream_he_caps(struct mt7915_phy *phy);
+void mt7915_set_stream_vht_txbf_caps(struct mt7915_phy *phy);
+void mt7915_update_channel(struct mt76_phy *mphy);
+int mt7915_mcu_muru_debug_set(struct mt7915_dev *dev, bool enable);
+int mt7915_mcu_muru_debug_get(struct mt7915_phy *phy);
+int mt7915_mcu_wed_enable_rx_stats(struct mt7915_dev *dev);
+int mt7915_init_debugfs(struct mt7915_phy *phy);
+void mt7915_debugfs_rx_fw_monitor(struct mt7915_dev *dev, const void *data, int len);
+bool mt7915_debugfs_rx_log(struct mt7915_dev *dev, const void *data, int len);
+#ifdef CONFIG_MAC80211_DEBUGFS
+void mt7915_sta_add_debugfs(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
+ struct ieee80211_sta *sta, struct dentry *dir);
+#endif
+int mt7915_mmio_wed_init(struct mt7915_dev *dev, void *pdev_ptr,
+ bool pci, int *irq);
+
+#endif
diff --git a/drivers/net/wireless/mediatek/mt76/mt7915/pci.c b/drivers/net/wireless/mediatek/mt76/mt7915/pci.c
new file mode 100644
index 0000000000..39132894e8
--- /dev/null
+++ b/drivers/net/wireless/mediatek/mt76/mt7915/pci.c
@@ -0,0 +1,241 @@
+// SPDX-License-Identifier: ISC
+/* Copyright (C) 2020 MediaTek Inc.
+ *
+ * Author: Ryder Lee <ryder.lee@mediatek.com>
+ */
+
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/pci.h>
+
+#include "mt7915.h"
+#include "mac.h"
+#include "../trace.h"
+
+static LIST_HEAD(hif_list);
+static DEFINE_SPINLOCK(hif_lock);
+static u32 hif_idx;
+
+static const struct pci_device_id mt7915_pci_device_table[] = {
+ { PCI_DEVICE(PCI_VENDOR_ID_MEDIATEK, 0x7915) },
+ { PCI_DEVICE(PCI_VENDOR_ID_MEDIATEK, 0x7906) },
+ { },
+};
+
+static const struct pci_device_id mt7915_hif_device_table[] = {
+ { PCI_DEVICE(PCI_VENDOR_ID_MEDIATEK, 0x7916) },
+ { PCI_DEVICE(PCI_VENDOR_ID_MEDIATEK, 0x790a) },
+ { },
+};
+
+static struct mt7915_hif *mt7915_pci_get_hif2(u32 idx)
+{
+ struct mt7915_hif *hif;
+ u32 val;
+
+ spin_lock_bh(&hif_lock);
+
+ list_for_each_entry(hif, &hif_list, list) {
+ val = readl(hif->regs + MT_PCIE_RECOG_ID);
+ val &= MT_PCIE_RECOG_ID_MASK;
+ if (val != idx)
+ continue;
+
+ get_device(hif->dev);
+ goto out;
+ }
+ hif = NULL;
+
+out:
+ spin_unlock_bh(&hif_lock);
+
+ return hif;
+}
+
+static void mt7915_put_hif2(struct mt7915_hif *hif)
+{
+ if (!hif)
+ return;
+
+ put_device(hif->dev);
+}
+
+static struct mt7915_hif *mt7915_pci_init_hif2(struct pci_dev *pdev)
+{
+ struct pci_dev *tmp_pdev;
+
+ hif_idx++;
+
+ tmp_pdev = pci_get_device(PCI_VENDOR_ID_MEDIATEK, 0x7916, NULL);
+ if (!tmp_pdev) {
+ tmp_pdev = pci_get_device(PCI_VENDOR_ID_MEDIATEK, 0x790a, NULL);
+ if (!tmp_pdev)
+ return NULL;
+ }
+ pci_dev_put(tmp_pdev);
+
+ writel(hif_idx | MT_PCIE_RECOG_ID_SEM,
+ pcim_iomap_table(pdev)[0] + MT_PCIE_RECOG_ID);
+
+ return mt7915_pci_get_hif2(hif_idx);
+}
+
+static int mt7915_pci_hif2_probe(struct pci_dev *pdev)
+{
+ struct mt7915_hif *hif;
+
+ hif = devm_kzalloc(&pdev->dev, sizeof(*hif), GFP_KERNEL);
+ if (!hif)
+ return -ENOMEM;
+
+ hif->dev = &pdev->dev;
+ hif->regs = pcim_iomap_table(pdev)[0];
+ hif->irq = pdev->irq;
+ spin_lock_bh(&hif_lock);
+ list_add(&hif->list, &hif_list);
+ spin_unlock_bh(&hif_lock);
+ pci_set_drvdata(pdev, hif);
+
+ return 0;
+}
+
+static int mt7915_pci_probe(struct pci_dev *pdev,
+ const struct pci_device_id *id)
+{
+ struct mt7915_hif *hif2 = NULL;
+ struct mt7915_dev *dev;
+ struct mt76_dev *mdev;
+ int irq;
+ int ret;
+
+ ret = pcim_enable_device(pdev);
+ if (ret)
+ return ret;
+
+ ret = pcim_iomap_regions(pdev, BIT(0), pci_name(pdev));
+ if (ret)
+ return ret;
+
+ pci_set_master(pdev);
+
+ ret = dma_set_mask(&pdev->dev, DMA_BIT_MASK(32));
+ if (ret)
+ return ret;
+
+ mt76_pci_disable_aspm(pdev);
+
+ if (id->device == 0x7916 || id->device == 0x790a)
+ return mt7915_pci_hif2_probe(pdev);
+
+ dev = mt7915_mmio_probe(&pdev->dev, pcim_iomap_table(pdev)[0],
+ id->device);
+ if (IS_ERR(dev))
+ return PTR_ERR(dev);
+
+ mdev = &dev->mt76;
+ mt7915_wfsys_reset(dev);
+ hif2 = mt7915_pci_init_hif2(pdev);
+
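+	/* As used below, mt7915_mmio_wed_init() returns a negative errno on
+	 * failure, a positive value when WED is active and has provided the
+	 * interrupt in *irq, and 0 when WED is not used, in which case a
+	 * regular IRQ vector is allocated instead.
+	 */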
+ ret = mt7915_mmio_wed_init(dev, pdev, true, &irq);
+ if (ret < 0)
+ goto free_wed_or_irq_vector;
+
+ if (!ret) {
+ hif2 = mt7915_pci_init_hif2(pdev);
+
+ ret = pci_alloc_irq_vectors(pdev, 1, 1, PCI_IRQ_ALL_TYPES);
+ if (ret < 0)
+ goto free_device;
+
+ irq = pdev->irq;
+ }
+
+ ret = devm_request_irq(mdev->dev, irq, mt7915_irq_handler,
+ IRQF_SHARED, KBUILD_MODNAME, dev);
+ if (ret)
+ goto free_wed_or_irq_vector;
+
+ /* master switch of PCIe interrupt enable */
+ mt76_wr(dev, MT_PCIE_MAC_INT_ENABLE, 0xff);
+
+ if (hif2) {
+ dev->hif2 = hif2;
+
+ mt76_wr(dev, MT_INT1_MASK_CSR, 0);
+ /* master switch of PCIe interrupt enable */
+ if (is_mt7915(mdev))
+ mt76_wr(dev, MT_PCIE1_MAC_INT_ENABLE, 0xff);
+ else
+ mt76_wr(dev, MT_PCIE1_MAC_INT_ENABLE_MT7916, 0xff);
+
+ ret = devm_request_irq(mdev->dev, dev->hif2->irq,
+ mt7915_irq_handler, IRQF_SHARED,
+ KBUILD_MODNAME "-hif", dev);
+ if (ret)
+ goto free_hif2;
+ }
+
+ ret = mt7915_register_device(dev);
+ if (ret)
+ goto free_hif2_irq;
+
+ return 0;
+
+free_hif2_irq:
+ if (dev->hif2)
+ devm_free_irq(mdev->dev, dev->hif2->irq, dev);
+free_hif2:
+ if (dev->hif2)
+ put_device(dev->hif2->dev);
+ devm_free_irq(mdev->dev, irq, dev);
+free_wed_or_irq_vector:
+ if (mtk_wed_device_active(&mdev->mmio.wed))
+ mtk_wed_device_detach(&mdev->mmio.wed);
+ else
+ pci_free_irq_vectors(pdev);
+free_device:
+ mt76_free_device(&dev->mt76);
+
+ return ret;
+}
+
+static void mt7915_hif_remove(struct pci_dev *pdev)
+{
+ struct mt7915_hif *hif = pci_get_drvdata(pdev);
+
+ list_del(&hif->list);
+}
+
+static void mt7915_pci_remove(struct pci_dev *pdev)
+{
+ struct mt76_dev *mdev;
+ struct mt7915_dev *dev;
+
+ mdev = pci_get_drvdata(pdev);
+ dev = container_of(mdev, struct mt7915_dev, mt76);
+ mt7915_put_hif2(dev->hif2);
+ mt7915_unregister_device(dev);
+}
+
+struct pci_driver mt7915_hif_driver = {
+ .name = KBUILD_MODNAME "_hif",
+ .id_table = mt7915_hif_device_table,
+ .probe = mt7915_pci_probe,
+ .remove = mt7915_hif_remove,
+};
+
+struct pci_driver mt7915_pci_driver = {
+ .name = KBUILD_MODNAME,
+ .id_table = mt7915_pci_device_table,
+ .probe = mt7915_pci_probe,
+ .remove = mt7915_pci_remove,
+};
+
+MODULE_DEVICE_TABLE(pci, mt7915_pci_device_table);
+MODULE_DEVICE_TABLE(pci, mt7915_hif_device_table);
+MODULE_FIRMWARE(MT7915_FIRMWARE_WA);
+MODULE_FIRMWARE(MT7915_FIRMWARE_WM);
+MODULE_FIRMWARE(MT7915_ROM_PATCH);
+MODULE_FIRMWARE(MT7916_FIRMWARE_WA);
+MODULE_FIRMWARE(MT7916_FIRMWARE_WM);
+MODULE_FIRMWARE(MT7916_ROM_PATCH);
diff --git a/drivers/net/wireless/mediatek/mt76/mt7915/regs.h b/drivers/net/wireless/mediatek/mt76/mt7915/regs.h
new file mode 100644
index 0000000000..588cd87e24
--- /dev/null
+++ b/drivers/net/wireless/mediatek/mt76/mt7915/regs.h
@@ -0,0 +1,1221 @@
+/* SPDX-License-Identifier: ISC */
+/* Copyright (C) 2020 MediaTek Inc. */
+
+#ifndef __MT7915_REGS_H
+#define __MT7915_REGS_H
+
+/* used to differentiate between generations */
+struct mt7915_reg_desc {
+ const u32 *reg_rev;
+ const u32 *offs_rev;
+ const struct mt76_connac_reg_map *map;
+ u32 map_size;
+};
+
+enum reg_rev {
+ INT_SOURCE_CSR,
+ INT_MASK_CSR,
+ INT1_SOURCE_CSR,
+ INT1_MASK_CSR,
+ INT_MCU_CMD_SOURCE,
+ INT_MCU_CMD_EVENT,
+ WFDMA0_ADDR,
+ WFDMA0_PCIE1_ADDR,
+ WFDMA_EXT_CSR_ADDR,
+ CBTOP1_PHY_END,
+ INFRA_MCU_ADDR_END,
+ FW_ASSERT_STAT_ADDR,
+ FW_EXCEPT_TYPE_ADDR,
+ FW_EXCEPT_COUNT_ADDR,
+ FW_CIRQ_COUNT_ADDR,
+ FW_CIRQ_IDX_ADDR,
+ FW_CIRQ_LISR_ADDR,
+ FW_TASK_ID_ADDR,
+ FW_TASK_IDX_ADDR,
+ FW_TASK_QID1_ADDR,
+ FW_TASK_QID2_ADDR,
+ FW_TASK_START_ADDR,
+ FW_TASK_END_ADDR,
+ FW_TASK_SIZE_ADDR,
+ FW_LAST_MSG_ID_ADDR,
+ FW_EINT_INFO_ADDR,
+ FW_SCHED_INFO_ADDR,
+ SWDEF_BASE_ADDR,
+ TXQ_WED_RING_BASE,
+ RXQ_WED_RING_BASE,
+ RXQ_WED_DATA_RING_BASE,
+ __MT_REG_MAX,
+};
+
+enum offs_rev {
+ TMAC_CDTR,
+ TMAC_ODTR,
+ TMAC_ATCR,
+ TMAC_TRCR0,
+ TMAC_ICR0,
+ TMAC_ICR1,
+ TMAC_CTCR0,
+ TMAC_TFCR0,
+ MDP_BNRCFR0,
+ MDP_BNRCFR1,
+ ARB_DRNGR0,
+ ARB_SCR,
+ RMAC_MIB_AIRTIME14,
+ AGG_AWSCR0,
+ AGG_PCR0,
+ AGG_ACR0,
+ AGG_ACR4,
+ AGG_MRCR,
+ AGG_ATCR1,
+ AGG_ATCR3,
+ LPON_UTTR0,
+ LPON_UTTR1,
+ LPON_FRCR,
+ MIB_SDR3,
+ MIB_SDR4,
+ MIB_SDR5,
+ MIB_SDR7,
+ MIB_SDR8,
+ MIB_SDR9,
+ MIB_SDR10,
+ MIB_SDR11,
+ MIB_SDR12,
+ MIB_SDR13,
+ MIB_SDR14,
+ MIB_SDR15,
+ MIB_SDR16,
+ MIB_SDR17,
+ MIB_SDR18,
+ MIB_SDR19,
+ MIB_SDR20,
+ MIB_SDR21,
+ MIB_SDR22,
+ MIB_SDR23,
+ MIB_SDR24,
+ MIB_SDR25,
+ MIB_SDR27,
+ MIB_SDR28,
+ MIB_SDR29,
+ MIB_SDRVEC,
+ MIB_SDR31,
+ MIB_SDR32,
+ MIB_SDRMUBF,
+ MIB_DR8,
+ MIB_DR9,
+ MIB_DR11,
+ MIB_MB_SDR0,
+ MIB_MB_SDR1,
+ TX_AGG_CNT,
+ TX_AGG_CNT2,
+ MIB_ARNG,
+ WTBLON_TOP_WDUCR,
+ WTBL_UPDATE,
+ PLE_FL_Q_EMPTY,
+ PLE_FL_Q_CTRL,
+ PLE_AC_QEMPTY,
+ PLE_FREEPG_CNT,
+ PLE_FREEPG_HEAD_TAIL,
+ PLE_PG_HIF_GROUP,
+ PLE_HIF_PG_INFO,
+ AC_OFFSET,
+ ETBF_PAR_RPT0,
+ __MT_OFFS_MAX,
+};
+
+#define __REG(id) (dev->reg.reg_rev[(id)])
+#define __OFFS(id) (dev->reg.offs_rev[(id)])
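+/* A minimal sketch (not part of this header) of how the indirection above
+ * is expected to be consumed: the probe code fills dev->reg with tables
+ * indexed by the enums, e.g.
+ *
+ *	static const u32 example_reg[__MT_REG_MAX] = {
+ *		[INT_SOURCE_CSR] = 0xd7010,	// hypothetical address
+ *		[INT_MASK_CSR]	 = 0xd7014,	// hypothetical address
+ *	};
+ *	dev->reg.reg_rev = example_reg;
+ *
+ * so that a macro such as MT_INT_SOURCE_CSR below resolves to the right
+ * address for the detected chip generation.
+ */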
+
+/* MCU WFDMA0 */
+#define MT_MCU_WFDMA0_BASE 0x2000
+#define MT_MCU_WFDMA0(ofs) (MT_MCU_WFDMA0_BASE + (ofs))
+
+#define MT_MCU_WFDMA0_DUMMY_CR MT_MCU_WFDMA0(0x120)
+
+/* MCU WFDMA1 */
+#define MT_MCU_WFDMA1_BASE 0x3000
+#define MT_MCU_WFDMA1(ofs) (MT_MCU_WFDMA1_BASE + (ofs))
+
+#define MT_MCU_INT_EVENT __REG(INT_MCU_CMD_EVENT)
+#define MT_MCU_INT_EVENT_DMA_STOPPED BIT(0)
+#define MT_MCU_INT_EVENT_DMA_INIT BIT(1)
+#define MT_MCU_INT_EVENT_SER_TRIGGER BIT(2)
+#define MT_MCU_INT_EVENT_RESET_DONE BIT(3)
+
+/* PLE */
+#define MT_PLE_BASE 0x820c0000
+#define MT_PLE(ofs) (MT_PLE_BASE + (ofs))
+
+#define MT_PLE_HOST_RPT0 MT_PLE(0x030)
+#define MT_PLE_HOST_RPT0_TX_LATENCY BIT(3)
+
+#define MT_FL_Q_EMPTY MT_PLE(__OFFS(PLE_FL_Q_EMPTY))
+#define MT_FL_Q0_CTRL MT_PLE(__OFFS(PLE_FL_Q_CTRL))
+#define MT_FL_Q2_CTRL MT_PLE(__OFFS(PLE_FL_Q_CTRL) + 0x8)
+#define MT_FL_Q3_CTRL MT_PLE(__OFFS(PLE_FL_Q_CTRL) + 0xc)
+
+#define MT_PLE_FREEPG_CNT MT_PLE(__OFFS(PLE_FREEPG_CNT))
+#define MT_PLE_FREEPG_HEAD_TAIL MT_PLE(__OFFS(PLE_FREEPG_HEAD_TAIL))
+#define MT_PLE_PG_HIF_GROUP MT_PLE(__OFFS(PLE_PG_HIF_GROUP))
+#define MT_PLE_HIF_PG_INFO MT_PLE(__OFFS(PLE_HIF_PG_INFO))
+
+#define MT_PLE_AC_QEMPTY(ac, n) MT_PLE(__OFFS(PLE_AC_QEMPTY) + \
+ __OFFS(AC_OFFSET) * \
+ (ac) + ((n) << 2))
+#define MT_PLE_AMSDU_PACK_MSDU_CNT(n) MT_PLE(0x10e0 + ((n) << 2))
+
+#define MT_PSE_BASE 0x820c8000
+#define MT_PSE(ofs) (MT_PSE_BASE + (ofs))
+
+/* WF MDP TOP */
+#define MT_MDP_BASE 0x820cd000
+#define MT_MDP(ofs) (MT_MDP_BASE + (ofs))
+
+#define MT_MDP_DCR0 MT_MDP(0x000)
+#define MT_MDP_DCR0_DAMSDU_EN BIT(15)
+
+#define MT_MDP_DCR1 MT_MDP(0x004)
+#define MT_MDP_DCR1_MAX_RX_LEN GENMASK(15, 3)
+
+#define MT_MDP_DCR2 MT_MDP(0x0e8)
+#define MT_MDP_DCR2_RX_TRANS_SHORT BIT(2)
+
+#define MT_MDP_BNRCFR0(_band) MT_MDP(__OFFS(MDP_BNRCFR0) + \
+ ((_band) << 8))
+#define MT_MDP_RCFR0_MCU_RX_MGMT GENMASK(5, 4)
+#define MT_MDP_RCFR0_MCU_RX_CTL_NON_BAR GENMASK(7, 6)
+#define MT_MDP_RCFR0_MCU_RX_CTL_BAR GENMASK(9, 8)
+
+#define MT_MDP_BNRCFR1(_band) MT_MDP(__OFFS(MDP_BNRCFR1) + \
+ ((_band) << 8))
+#define MT_MDP_RCFR1_MCU_RX_BYPASS GENMASK(23, 22)
+#define MT_MDP_RCFR1_RX_DROPPED_UCAST GENMASK(28, 27)
+#define MT_MDP_RCFR1_RX_DROPPED_MCAST GENMASK(30, 29)
+#define MT_MDP_TO_HIF 0
+#define MT_MDP_TO_WM 1
+
+/* TRB: band 0(0x820e1000), band 1(0x820f1000) */
+#define MT_WF_TRB_BASE(_band) ((_band) ? 0x820f1000 : 0x820e1000)
+#define MT_WF_TRB(_band, ofs) (MT_WF_TRB_BASE(_band) + (ofs))
+
+#define MT_TRB_RXPSR0(_band) MT_WF_TRB(_band, 0x03c)
+#define MT_TRB_RXPSR0_RX_WTBL_PTR GENMASK(25, 16)
+#define MT_TRB_RXPSR0_RX_RMAC_PTR GENMASK(9, 0)
+
+/* TMAC: band 0(0x820e4000), band 1(0x820f4000) */
+#define MT_WF_TMAC_BASE(_band) ((_band) ? 0x820f4000 : 0x820e4000)
+#define MT_WF_TMAC(_band, ofs) (MT_WF_TMAC_BASE(_band) + (ofs))
+
+#define MT_TMAC_TCR0(_band) MT_WF_TMAC(_band, 0)
+#define MT_TMAC_TCR0_TX_BLINK GENMASK(7, 6)
+#define MT_TMAC_TCR0_TBTT_STOP_CTRL BIT(25)
+
+#define MT_TMAC_CDTR(_band) MT_WF_TMAC(_band, __OFFS(TMAC_CDTR))
+#define MT_TMAC_ODTR(_band) MT_WF_TMAC(_band, __OFFS(TMAC_ODTR))
+#define MT_TIMEOUT_VAL_PLCP GENMASK(15, 0)
+#define MT_TIMEOUT_VAL_CCA GENMASK(31, 16)
+
+#define MT_TMAC_ATCR(_band) MT_WF_TMAC(_band, __OFFS(TMAC_ATCR))
+#define MT_TMAC_ATCR_TXV_TOUT GENMASK(7, 0)
+
+#define MT_TMAC_TRCR0(_band) MT_WF_TMAC(_band, __OFFS(TMAC_TRCR0))
+#define MT_TMAC_TRCR0_TR2T_CHK GENMASK(8, 0)
+#define MT_TMAC_TRCR0_I2T_CHK GENMASK(24, 16)
+
+#define MT_TMAC_ICR0(_band) MT_WF_TMAC(_band, __OFFS(TMAC_ICR0))
+#define MT_IFS_EIFS_OFDM GENMASK(8, 0)
+#define MT_IFS_RIFS GENMASK(14, 10)
+#define MT_IFS_SIFS GENMASK(22, 16)
+#define MT_IFS_SLOT GENMASK(30, 24)
+
+#define MT_TMAC_ICR1(_band) MT_WF_TMAC(_band, __OFFS(TMAC_ICR1))
+#define MT_IFS_EIFS_CCK GENMASK(8, 0)
+
+#define MT_TMAC_CTCR0(_band) MT_WF_TMAC(_band, __OFFS(TMAC_CTCR0))
+#define MT_TMAC_CTCR0_INS_DDLMT_REFTIME GENMASK(5, 0)
+#define MT_TMAC_CTCR0_INS_DDLMT_EN BIT(17)
+#define MT_TMAC_CTCR0_INS_DDLMT_VHT_SMPDU_EN BIT(18)
+
+#define MT_TMAC_TFCR0(_band) MT_WF_TMAC(_band, __OFFS(TMAC_TFCR0))
+
+/* WF DMA TOP: band 0(0x820e7000),band 1(0x820f7000) */
+#define MT_WF_DMA_BASE(_band) ((_band) ? 0x820f7000 : 0x820e7000)
+#define MT_WF_DMA(_band, ofs) (MT_WF_DMA_BASE(_band) + (ofs))
+
+#define MT_DMA_DCR0(_band) MT_WF_DMA(_band, 0x000)
+#define MT_DMA_DCR0_MAX_RX_LEN GENMASK(15, 3)
+#define MT_DMA_DCR0_RXD_G5_EN BIT(23)
+
+/* WTBLOFF TOP: band 0(0x820e9000),band 1(0x820f9000) */
+#define MT_WTBLOFF_TOP_BASE(_band) ((_band) ? 0x820f9000 : 0x820e9000)
+#define MT_WTBLOFF_TOP(_band, ofs) (MT_WTBLOFF_TOP_BASE(_band) + (ofs))
+
+#define MT_WTBLOFF_TOP_RSCR(_band) MT_WTBLOFF_TOP(_band, 0x008)
+#define MT_WTBLOFF_TOP_RSCR_RCPI_MODE GENMASK(31, 30)
+#define MT_WTBLOFF_TOP_RSCR_RCPI_PARAM GENMASK(25, 24)
+
+/* ETBF: band 0(0x820ea000), band 1(0x820fa000) */
+#define MT_WF_ETBF_BASE(_band) ((_band) ? 0x820fa000 : 0x820ea000)
+#define MT_WF_ETBF(_band, ofs) (MT_WF_ETBF_BASE(_band) + (ofs))
+
+#define MT_ETBF_TX_NDP_BFRP(_band) MT_WF_ETBF(_band, 0x040)
+#define MT_ETBF_TX_FB_CPL GENMASK(31, 16)
+#define MT_ETBF_TX_FB_TRI GENMASK(15, 0)
+
+#define MT_ETBF_PAR_RPT0(_band) MT_WF_ETBF(_band, __OFFS(ETBF_PAR_RPT0))
+#define MT_ETBF_PAR_RPT0_FB_BW GENMASK(7, 6)
+#define MT_ETBF_PAR_RPT0_FB_NC GENMASK(5, 3)
+#define MT_ETBF_PAR_RPT0_FB_NR GENMASK(2, 0)
+
+#define MT_ETBF_TX_APP_CNT(_band) MT_WF_ETBF(_band, 0x0f0)
+#define MT_ETBF_TX_IBF_CNT GENMASK(31, 16)
+#define MT_ETBF_TX_EBF_CNT GENMASK(15, 0)
+
+#define MT_ETBF_RX_FB_CNT(_band) MT_WF_ETBF(_band, 0x0f8)
+#define MT_ETBF_RX_FB_ALL GENMASK(31, 24)
+#define MT_ETBF_RX_FB_HE GENMASK(23, 16)
+#define MT_ETBF_RX_FB_VHT GENMASK(15, 8)
+#define MT_ETBF_RX_FB_HT GENMASK(7, 0)
+
+/* LPON: band 0(0x820eb000), band 1(0x820fb000) */
+#define MT_WF_LPON_BASE(_band) ((_band) ? 0x820fb000 : 0x820eb000)
+#define MT_WF_LPON(_band, ofs) (MT_WF_LPON_BASE(_band) + (ofs))
+
+#define MT_LPON_UTTR0(_band) MT_WF_LPON(_band, __OFFS(LPON_UTTR0))
+#define MT_LPON_UTTR1(_band) MT_WF_LPON(_band, __OFFS(LPON_UTTR1))
+#define MT_LPON_FRCR(_band) MT_WF_LPON(_band, __OFFS(LPON_FRCR))
+
+#define MT_LPON_TCR(_band, n) MT_WF_LPON(_band, 0x0a8 + \
+ (((n) * 4) << 1))
+#define MT_LPON_TCR_MT7916(_band, n) MT_WF_LPON(_band, 0x0a8 + \
+ (((n) * 4) << 4))
+#define MT_LPON_TCR_SW_MODE GENMASK(1, 0)
+#define MT_LPON_TCR_SW_WRITE BIT(0)
+#define MT_LPON_TCR_SW_ADJUST BIT(1)
+#define MT_LPON_TCR_SW_READ GENMASK(1, 0)
+
+/* MIB: band 0(0x820ed000), band 1(0x820fd000) */
+/* Most of these counters are clear-on-read. Some must not be read by the
+ * driver at all because the firmware already reads (and thus clears) them;
+ * those are marked 'DNR' below and are obtained instead by querying the
+ * appropriate firmware API message. For the counters the driver does read
+ * directly, it should accumulate the values across reads.
+ */
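+/* A minimal sketch (not part of this header) of the accumulation pattern
+ * described above, using MT_MIB_SDR3 defined below; the accumulator field
+ * name "fcs_err_cnt" is hypothetical:
+ *
+ *	u32 cnt = mt76_rr(dev, MT_MIB_SDR3(band));
+ *
+ *	mib->fcs_err_cnt += is_mt7915(&dev->mt76) ?
+ *		FIELD_GET(MT_MIB_SDR3_FCS_ERR_MASK, cnt) :
+ *		FIELD_GET(MT_MIB_SDR3_FCS_ERR_MASK_MT7916, cnt);
+ *
+ * Since the register clears on read, each poll only adds the delta since
+ * the previous read.
+ */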
+#define MT_WF_MIB_BASE(_band) ((_band) ? 0x820fd000 : 0x820ed000)
+#define MT_WF_MIB(_band, ofs) (MT_WF_MIB_BASE(_band) + (ofs))
+
+#define MT_MIB_SDR0(_band) MT_WF_MIB(_band, 0x010)
+#define MT_MIB_SDR0_BERACON_TX_CNT_MASK GENMASK(15, 0)
+
+#define MT_MIB_SDR3(_band) MT_WF_MIB(_band, __OFFS(MIB_SDR3))
+#define MT_MIB_SDR3_FCS_ERR_MASK GENMASK(15, 0)
+#define MT_MIB_SDR3_FCS_ERR_MASK_MT7916 GENMASK(31, 16)
+
+#define MT_MIB_SDR4(_band) MT_WF_MIB(_band, __OFFS(MIB_SDR4))
+#define MT_MIB_SDR4_RX_FIFO_FULL_MASK GENMASK(15, 0)
+
+/* rx mpdu counter, full 32 bits */
+#define MT_MIB_SDR5(_band) MT_WF_MIB(_band, __OFFS(MIB_SDR5))
+
+#define MT_MIB_SDR6(_band) MT_WF_MIB(_band, 0x020)
+#define MT_MIB_SDR6_CHANNEL_IDL_CNT_MASK GENMASK(15, 0)
+
+#define MT_MIB_SDR7(_band) MT_WF_MIB(_band, __OFFS(MIB_SDR7))
+#define MT_MIB_SDR7_RX_VECTOR_MISMATCH_CNT_MASK GENMASK(15, 0)
+
+#define MT_MIB_SDR8(_band) MT_WF_MIB(_band, __OFFS(MIB_SDR8))
+#define MT_MIB_SDR8_RX_DELIMITER_FAIL_CNT_MASK GENMASK(15, 0)
+
+/* aka CCA_NAV_TX_TIME */
+#define MT_MIB_SDR9_DNR(_band) MT_WF_MIB(_band, __OFFS(MIB_SDR9))
+#define MT_MIB_SDR9_CCA_BUSY_TIME_MASK GENMASK(23, 0)
+
+#define MT_MIB_SDR10(_band) MT_WF_MIB(_band, __OFFS(MIB_SDR10))
+#define MT_MIB_SDR10_MRDY_COUNT_MASK GENMASK(25, 0)
+#define MT_MIB_SDR10_MRDY_COUNT_MASK_MT7916 GENMASK(31, 0)
+
+#define MT_MIB_SDR11(_band) MT_WF_MIB(_band, __OFFS(MIB_SDR11))
+#define MT_MIB_SDR11_RX_LEN_MISMATCH_CNT_MASK GENMASK(15, 0)
+
+/* tx ampdu cnt, full 32 bits */
+#define MT_MIB_SDR12(_band) MT_WF_MIB(_band, __OFFS(MIB_SDR12))
+
+#define MT_MIB_SDR13(_band) MT_WF_MIB(_band, __OFFS(MIB_SDR13))
+#define MT_MIB_SDR13_TX_STOP_Q_EMPTY_CNT_MASK GENMASK(15, 0)
+
+/* counts all mpdus in ampdu, regardless of success */
+#define MT_MIB_SDR14(_band) MT_WF_MIB(_band, __OFFS(MIB_SDR14))
+#define MT_MIB_SDR14_TX_MPDU_ATTEMPTS_CNT_MASK GENMASK(23, 0)
+#define MT_MIB_SDR14_TX_MPDU_ATTEMPTS_CNT_MASK_MT7916 GENMASK(31, 0)
+
+/* counts all successfully tx'd mpdus in ampdu */
+#define MT_MIB_SDR15(_band) MT_WF_MIB(_band, __OFFS(MIB_SDR15))
+#define MT_MIB_SDR15_TX_MPDU_SUCCESS_CNT_MASK GENMASK(23, 0)
+#define MT_MIB_SDR15_TX_MPDU_SUCCESS_CNT_MASK_MT7916 GENMASK(31, 0)
+
+/* in units of 'us' */
+#define MT_MIB_SDR16(_band) MT_WF_MIB(_band, __OFFS(MIB_SDR16))
+#define MT_MIB_SDR16_PRIMARY_CCA_BUSY_TIME_MASK GENMASK(23, 0)
+
+#define MT_MIB_SDR17(_band) MT_WF_MIB(_band, __OFFS(MIB_SDR17))
+#define MT_MIB_SDR17_SECONDARY_CCA_BUSY_TIME_MASK GENMASK(23, 0)
+
+#define MT_MIB_SDR18(_band) MT_WF_MIB(_band, __OFFS(MIB_SDR18))
+#define MT_MIB_SDR18_PRIMARY_ENERGY_DETECT_TIME_MASK GENMASK(23, 0)
+
+/* units are us */
+#define MT_MIB_SDR19(_band) MT_WF_MIB(_band, __OFFS(MIB_SDR19))
+#define MT_MIB_SDR19_CCK_MDRDY_TIME_MASK GENMASK(23, 0)
+
+#define MT_MIB_SDR20(_band) MT_WF_MIB(_band, __OFFS(MIB_SDR20))
+#define MT_MIB_SDR20_OFDM_VHT_MDRDY_TIME_MASK GENMASK(23, 0)
+
+#define MT_MIB_SDR21(_band) MT_WF_MIB(_band, __OFFS(MIB_SDR21))
+#define MT_MIB_SDR21_GREEN_MDRDY_TIME_MASK GENMASK(23, 0)
+
+/* rx ampdu count, 32-bit */
+#define MT_MIB_SDR22(_band) MT_WF_MIB(_band, __OFFS(MIB_SDR22))
+
+/* rx ampdu bytes count, 32-bit */
+#define MT_MIB_SDR23(_band) MT_WF_MIB(_band, __OFFS(MIB_SDR23))
+
+/* rx ampdu valid subframe count */
+#define MT_MIB_SDR24(_band) MT_WF_MIB(_band, __OFFS(MIB_SDR24))
+#define MT_MIB_SDR24_RX_AMPDU_SF_CNT_MASK GENMASK(23, 0)
+#define MT_MIB_SDR24_RX_AMPDU_SF_CNT_MASK_MT7916 GENMASK(31, 0)
+
+/* rx ampdu valid subframe bytes count, 32bits */
+#define MT_MIB_SDR25(_band) MT_WF_MIB(_band, __OFFS(MIB_SDR25))
+
+/* remaining windows protected stats */
+#define MT_MIB_SDR27(_band) MT_WF_MIB(_band, __OFFS(MIB_SDR27))
+#define MT_MIB_SDR27_TX_RWP_FAIL_CNT_MASK GENMASK(15, 0)
+
+#define MT_MIB_SDR28(_band) MT_WF_MIB(_band, __OFFS(MIB_SDR28))
+#define MT_MIB_SDR28_TX_RWP_NEED_CNT_MASK GENMASK(15, 0)
+
+#define MT_MIB_SDR29(_band) MT_WF_MIB(_band, __OFFS(MIB_SDR29))
+#define MT_MIB_SDR29_RX_PFDROP_CNT_MASK GENMASK(7, 0)
+#define MT_MIB_SDR29_RX_PFDROP_CNT_MASK_MT7916 GENMASK(15, 0)
+
+#define MT_MIB_SDRVEC(_band) MT_WF_MIB(_band, __OFFS(MIB_SDRVEC))
+#define MT_MIB_SDR30_RX_VEC_QUEUE_OVERFLOW_DROP_CNT_MASK GENMASK(15, 0)
+#define MT_MIB_SDR30_RX_VEC_QUEUE_OVERFLOW_DROP_CNT_MASK_MT7916 GENMASK(31, 16)
+
+/* rx blockack count, 32 bits */
+#define MT_MIB_SDR31(_band) MT_WF_MIB(_band, __OFFS(MIB_SDR31))
+
+#define MT_MIB_SDR32(_band) MT_WF_MIB(_band, __OFFS(MIB_SDR32))
+#define MT_MIB_SDR32_TX_PKT_EBF_CNT GENMASK(15, 0)
+#define MT_MIB_SDR32_TX_PKT_IBF_CNT GENMASK(31, 16)
+
+#define MT_MIB_SDR33(_band) MT_WF_MIB(_band, 0x088)
+#define MT_MIB_SDR33_TX_PKT_IBF_CNT GENMASK(15, 0)
+
+#define MT_MIB_SDRMUBF(_band) MT_WF_MIB(_band, __OFFS(MIB_SDRMUBF))
+#define MT_MIB_MU_BF_TX_CNT GENMASK(15, 0)
+
+/* 36, 37 both DNR */
+
+#define MT_MIB_DR8(_band) MT_WF_MIB(_band, __OFFS(MIB_DR8))
+#define MT_MIB_DR9(_band) MT_WF_MIB(_band, __OFFS(MIB_DR9))
+#define MT_MIB_DR11(_band) MT_WF_MIB(_band, __OFFS(MIB_DR11))
+
+#define MT_MIB_MB_SDR0(_band, n) MT_WF_MIB(_band, __OFFS(MIB_MB_SDR0) + (n))
+#define MT_MIB_RTS_RETRIES_COUNT_MASK GENMASK(31, 16)
+#define MT_MIB_RTS_COUNT_MASK GENMASK(15, 0)
+
+#define MT_MIB_MB_SDR1(_band, n) MT_WF_MIB(_band, __OFFS(MIB_MB_SDR1) + (n))
+#define MT_MIB_BA_MISS_COUNT_MASK GENMASK(15, 0)
+#define MT_MIB_ACK_FAIL_COUNT_MASK GENMASK(31, 16)
+
+#define MT_MIB_MB_SDR2(_band, n) MT_WF_MIB(_band, 0x518 + (n))
+#define MT_MIB_MB_BFTF(_band, n) MT_WF_MIB(_band, 0x510 + (n))
+
+#define MT_TX_AGG_CNT(_band, n) MT_WF_MIB(_band, __OFFS(TX_AGG_CNT) + \
+ ((n) << 2))
+#define MT_TX_AGG_CNT2(_band, n) MT_WF_MIB(_band, __OFFS(TX_AGG_CNT2) + \
+ ((n) << 2))
+#define MT_MIB_ARNG(_band, n) MT_WF_MIB(_band, __OFFS(MIB_ARNG) + \
+ ((n) << 2))
+#define MT_MIB_ARNCR_RANGE(val, n) (((val) >> ((n) << 3)) & GENMASK(7, 0))
+
+#define MT_MIB_BFCR0(_band) MT_WF_MIB(_band, 0x7b0)
+#define MT_MIB_BFCR0_RX_FB_HT GENMASK(15, 0)
+#define MT_MIB_BFCR0_RX_FB_VHT GENMASK(31, 16)
+
+#define MT_MIB_BFCR1(_band) MT_WF_MIB(_band, 0x7b4)
+#define MT_MIB_BFCR1_RX_FB_HE GENMASK(15, 0)
+
+#define MT_MIB_BFCR2(_band) MT_WF_MIB(_band, 0x7b8)
+#define MT_MIB_BFCR2_BFEE_TX_FB_TRIG GENMASK(15, 0)
+
+#define MT_MIB_BFCR7(_band) MT_WF_MIB(_band, 0x7cc)
+#define MT_MIB_BFCR7_BFEE_TX_FB_CPL GENMASK(15, 0)
+
+/* WTBLON TOP */
+#define MT_WTBLON_TOP_BASE 0x820d4000
+#define MT_WTBLON_TOP(ofs) (MT_WTBLON_TOP_BASE + (ofs))
+#define MT_WTBLON_TOP_WDUCR MT_WTBLON_TOP(__OFFS(WTBLON_TOP_WDUCR))
+#define MT_WTBLON_TOP_WDUCR_GROUP GENMASK(2, 0)
+
+#define MT_WTBL_UPDATE MT_WTBLON_TOP(__OFFS(WTBL_UPDATE))
+#define MT_WTBL_UPDATE_WLAN_IDX GENMASK(9, 0)
+#define MT_WTBL_UPDATE_ADM_COUNT_CLEAR BIT(12)
+#define MT_WTBL_UPDATE_BUSY BIT(31)
+
+/* WTBL */
+#define MT_WTBL_BASE 0x820d8000
+#define MT_WTBL_LMAC_ID GENMASK(14, 8)
+#define MT_WTBL_LMAC_DW GENMASK(7, 2)
+#define MT_WTBL_LMAC_OFFS(_id, _dw) (MT_WTBL_BASE | \
+ FIELD_PREP(MT_WTBL_LMAC_ID, _id) | \
+ FIELD_PREP(MT_WTBL_LMAC_DW, _dw))
+
+/* AGG: band 0(0x820e2000), band 1(0x820f2000) */
+#define MT_WF_AGG_BASE(_band) ((_band) ? 0x820f2000 : 0x820e2000)
+#define MT_WF_AGG(_band, ofs) (MT_WF_AGG_BASE(_band) + (ofs))
+
+#define MT_AGG_AWSCR0(_band, _n) MT_WF_AGG(_band, (__OFFS(AGG_AWSCR0) + \
+ (_n) * 4))
+#define MT_AGG_PCR0(_band, _n) MT_WF_AGG(_band, (__OFFS(AGG_PCR0) + \
+ (_n) * 4))
+#define MT_AGG_PCR0_MM_PROT BIT(0)
+#define MT_AGG_PCR0_GF_PROT BIT(1)
+#define MT_AGG_PCR0_BW20_PROT BIT(2)
+#define MT_AGG_PCR0_BW40_PROT BIT(4)
+#define MT_AGG_PCR0_BW80_PROT BIT(6)
+#define MT_AGG_PCR0_ERP_PROT GENMASK(12, 8)
+#define MT_AGG_PCR0_VHT_PROT BIT(13)
+#define MT_AGG_PCR0_PTA_WIN_DIS BIT(15)
+
+#define MT_AGG_PCR1_RTS0_NUM_THRES GENMASK(31, 23)
+#define MT_AGG_PCR1_RTS0_LEN_THRES GENMASK(19, 0)
+
+#define MT_AGG_ACR0(_band) MT_WF_AGG(_band, __OFFS(AGG_ACR0))
+#define MT_AGG_ACR_CFEND_RATE GENMASK(13, 0)
+#define MT_AGG_ACR_BAR_RATE GENMASK(29, 16)
+
+#define MT_AGG_ACR4(_band) MT_WF_AGG(_band, __OFFS(AGG_ACR4))
+#define MT_AGG_ACR_PPDU_TXS2H BIT(1)
+
+#define MT_AGG_MRCR(_band) MT_WF_AGG(_band, __OFFS(AGG_MRCR))
+#define MT_AGG_MRCR_BAR_CNT_LIMIT GENMASK(15, 12)
+#define MT_AGG_MRCR_LAST_RTS_CTS_RN BIT(6)
+#define MT_AGG_MRCR_RTS_FAIL_LIMIT GENMASK(11, 7)
+#define MT_AGG_MRCR_TXCMD_RTS_FAIL_LIMIT GENMASK(28, 24)
+
+#define MT_AGG_ATCR1(_band) MT_WF_AGG(_band, __OFFS(AGG_ATCR1))
+#define MT_AGG_ATCR3(_band) MT_WF_AGG(_band, __OFFS(AGG_ATCR3))
+
+/* ARB: band 0(0x820e3000), band 1(0x820f3000) */
+#define MT_WF_ARB_BASE(_band) ((_band) ? 0x820f3000 : 0x820e3000)
+#define MT_WF_ARB(_band, ofs) (MT_WF_ARB_BASE(_band) + (ofs))
+
+#define MT_ARB_SCR(_band) MT_WF_ARB(_band, __OFFS(ARB_SCR))
+#define MT_ARB_SCR_TX_DISABLE BIT(8)
+#define MT_ARB_SCR_RX_DISABLE BIT(9)
+
+#define MT_ARB_DRNGR0(_band, _n) MT_WF_ARB(_band, (__OFFS(ARB_DRNGR0) + \
+ (_n) * 4))
+
+/* RMAC: band 0(0x820e5000), band 1(0x820f5000) */
+#define MT_WF_RMAC_BASE(_band) ((_band) ? 0x820f5000 : 0x820e5000)
+#define MT_WF_RMAC(_band, ofs) (MT_WF_RMAC_BASE(_band) + (ofs))
+
+#define MT_WF_RFCR(_band) MT_WF_RMAC(_band, 0x000)
+#define MT_WF_RFCR_DROP_STBC_MULTI BIT(0)
+#define MT_WF_RFCR_DROP_FCSFAIL BIT(1)
+#define MT_WF_RFCR_DROP_VERSION BIT(3)
+#define MT_WF_RFCR_DROP_PROBEREQ BIT(4)
+#define MT_WF_RFCR_DROP_MCAST BIT(5)
+#define MT_WF_RFCR_DROP_BCAST BIT(6)
+#define MT_WF_RFCR_DROP_MCAST_FILTERED BIT(7)
+#define MT_WF_RFCR_DROP_A3_MAC BIT(8)
+#define MT_WF_RFCR_DROP_A3_BSSID BIT(9)
+#define MT_WF_RFCR_DROP_A2_BSSID BIT(10)
+#define MT_WF_RFCR_DROP_OTHER_BEACON BIT(11)
+#define MT_WF_RFCR_DROP_FRAME_REPORT BIT(12)
+#define MT_WF_RFCR_DROP_CTL_RSV BIT(13)
+#define MT_WF_RFCR_DROP_CTS BIT(14)
+#define MT_WF_RFCR_DROP_RTS BIT(15)
+#define MT_WF_RFCR_DROP_DUPLICATE BIT(16)
+#define MT_WF_RFCR_DROP_OTHER_BSS BIT(17)
+#define MT_WF_RFCR_DROP_OTHER_UC BIT(18)
+#define MT_WF_RFCR_DROP_OTHER_TIM BIT(19)
+#define MT_WF_RFCR_DROP_NDPA BIT(20)
+#define MT_WF_RFCR_DROP_UNWANTED_CTL BIT(21)
+
+#define MT_WF_RFCR1(_band) MT_WF_RMAC(_band, 0x004)
+#define MT_WF_RFCR1_DROP_ACK BIT(4)
+#define MT_WF_RFCR1_DROP_BF_POLL BIT(5)
+#define MT_WF_RFCR1_DROP_BA BIT(6)
+#define MT_WF_RFCR1_DROP_CFEND BIT(7)
+#define MT_WF_RFCR1_DROP_CFACK BIT(8)
+
+#define MT_WF_RMAC_RSVD0(_band) MT_WF_RMAC(_band, 0x02e0)
+#define MT_WF_RMAC_RSVD0_EIFS_CLR BIT(21)
+
+#define MT_WF_RMAC_MIB_AIRTIME0(_band) MT_WF_RMAC(_band, 0x0380)
+#define MT_WF_RMAC_MIB_RXTIME_CLR BIT(31)
+#define MT_WF_RMAC_MIB_OBSS_BACKOFF GENMASK(15, 0)
+#define MT_WF_RMAC_MIB_ED_OFFSET GENMASK(20, 16)
+
+#define MT_WF_RMAC_MIB_AIRTIME1(_band) MT_WF_RMAC(_band, 0x0384)
+#define MT_WF_RMAC_MIB_NONQOSD_BACKOFF GENMASK(31, 16)
+
+#define MT_WF_RMAC_MIB_AIRTIME3(_band) MT_WF_RMAC(_band, 0x038c)
+#define MT_WF_RMAC_MIB_QOS01_BACKOFF GENMASK(31, 0)
+
+#define MT_WF_RMAC_MIB_AIRTIME4(_band) MT_WF_RMAC(_band, 0x0390)
+#define MT_WF_RMAC_MIB_QOS23_BACKOFF GENMASK(31, 0)
+
+/* WFDMA0 */
+#define MT_WFDMA0_BASE __REG(WFDMA0_ADDR)
+#define MT_WFDMA0(ofs) (MT_WFDMA0_BASE + (ofs))
+
+#define MT_WFDMA0_RST MT_WFDMA0(0x100)
+#define MT_WFDMA0_RST_LOGIC_RST BIT(4)
+#define MT_WFDMA0_RST_DMASHDL_ALL_RST BIT(5)
+
+#define MT_WFDMA0_BUSY_ENA MT_WFDMA0(0x13c)
+#define MT_WFDMA0_BUSY_ENA_TX_FIFO0 BIT(0)
+#define MT_WFDMA0_BUSY_ENA_TX_FIFO1 BIT(1)
+#define MT_WFDMA0_BUSY_ENA_RX_FIFO BIT(2)
+
+#define MT_WFDMA0_MCU_HOST_INT_ENA MT_WFDMA0(0x1f4)
+
+#define MT_WFDMA0_GLO_CFG MT_WFDMA0(0x208)
+#define MT_WFDMA0_GLO_CFG_TX_DMA_EN BIT(0)
+#define MT_WFDMA0_GLO_CFG_RX_DMA_EN BIT(2)
+#define MT_WFDMA0_GLO_CFG_OMIT_TX_INFO BIT(28)
+#define MT_WFDMA0_GLO_CFG_OMIT_RX_INFO BIT(27)
+#define MT_WFDMA0_GLO_CFG_OMIT_RX_INFO_PFET2 BIT(21)
+
+#define MT_WFDMA0_RST_DTX_PTR MT_WFDMA0(0x20c)
+
+#define MT_WFDMA0_EXT0_CFG MT_WFDMA0(0x2b0)
+#define MT_WFDMA0_EXT0_RXWB_KEEP BIT(10)
+
+#define MT_WFDMA0_PRI_DLY_INT_CFG0 MT_WFDMA0(0x2f0)
+#define MT_WFDMA0_PRI_DLY_INT_CFG1 MT_WFDMA0(0x2f4)
+#define MT_WFDMA0_PRI_DLY_INT_CFG2 MT_WFDMA0(0x2f8)
+#define MT_WPDMA_GLO_CFG MT_WFDMA0(0x208)
+
+/* WFDMA1 */
+#define MT_WFDMA1_BASE 0xd5000
+#define MT_WFDMA1(ofs) (MT_WFDMA1_BASE + (ofs))
+
+#define MT_WFDMA1_RST MT_WFDMA1(0x100)
+#define MT_WFDMA1_RST_LOGIC_RST BIT(4)
+#define MT_WFDMA1_RST_DMASHDL_ALL_RST BIT(5)
+
+#define MT_WFDMA1_BUSY_ENA MT_WFDMA1(0x13c)
+#define MT_WFDMA1_BUSY_ENA_TX_FIFO0 BIT(0)
+#define MT_WFDMA1_BUSY_ENA_TX_FIFO1 BIT(1)
+#define MT_WFDMA1_BUSY_ENA_RX_FIFO BIT(2)
+
+#define MT_WFDMA1_GLO_CFG MT_WFDMA1(0x208)
+#define MT_WFDMA1_GLO_CFG_TX_DMA_EN BIT(0)
+#define MT_WFDMA1_GLO_CFG_RX_DMA_EN BIT(2)
+#define MT_WFDMA1_GLO_CFG_OMIT_TX_INFO BIT(28)
+#define MT_WFDMA1_GLO_CFG_OMIT_RX_INFO BIT(27)
+#define MT_WFDMA1_GLO_CFG_OMIT_RX_INFO_PFET2 BIT(21)
+
+#define MT_WFDMA1_RST_DTX_PTR MT_WFDMA1(0x20c)
+#define MT_WFDMA1_PRI_DLY_INT_CFG0 MT_WFDMA1(0x2f0)
+
+/* WFDMA CSR */
+#define MT_WFDMA_EXT_CSR_BASE __REG(WFDMA_EXT_CSR_ADDR)
+#define MT_WFDMA_EXT_CSR_PHYS_BASE 0x18027000
+#define MT_WFDMA_EXT_CSR(ofs) (MT_WFDMA_EXT_CSR_BASE + (ofs))
+#define MT_WFDMA_EXT_CSR_PHYS(ofs) (MT_WFDMA_EXT_CSR_PHYS_BASE + (ofs))
+
+#define MT_WFDMA_HOST_CONFIG MT_WFDMA_EXT_CSR_PHYS(0x30)
+#define MT_WFDMA_HOST_CONFIG_PDMA_BAND BIT(0)
+#define MT_WFDMA_HOST_CONFIG_WED BIT(1)
+
+#define MT_WFDMA_WED_RING_CONTROL MT_WFDMA_EXT_CSR_PHYS(0x34)
+#define MT_WFDMA_WED_RING_CONTROL_TX0 GENMASK(4, 0)
+#define MT_WFDMA_WED_RING_CONTROL_TX1 GENMASK(12, 8)
+#define MT_WFDMA_WED_RING_CONTROL_RX1 GENMASK(20, 16)
+
+#define MT_WFDMA_EXT_CSR_HIF_MISC MT_WFDMA_EXT_CSR_PHYS(0x44)
+#define MT_WFDMA_EXT_CSR_HIF_MISC_BUSY BIT(0)
+
+#define MT_PCIE_RECOG_ID 0xd7090
+#define MT_PCIE_RECOG_ID_MASK GENMASK(30, 0)
+#define MT_PCIE_RECOG_ID_SEM BIT(31)
+
+#define MT_INT_WED_SOURCE_CSR MT_WFDMA_EXT_CSR(0x200)
+#define MT_INT_WED_MASK_CSR MT_WFDMA_EXT_CSR(0x204)
+
+#define MT_WED_TX_RING_BASE MT_WFDMA_EXT_CSR(0x300)
+#define MT_WED_RX_RING_BASE MT_WFDMA_EXT_CSR(0x400)
+
+/* WFDMA0 PCIE1 */
+#define MT_WFDMA0_PCIE1_BASE __REG(WFDMA0_PCIE1_ADDR)
+#define MT_WFDMA0_PCIE1(ofs) (MT_WFDMA0_PCIE1_BASE + (ofs))
+
+#define MT_WFDMA0_PCIE1_BUSY_ENA MT_WFDMA0_PCIE1(0x13c)
+#define MT_WFDMA0_PCIE1_BUSY_ENA_TX_FIFO0 BIT(0)
+#define MT_WFDMA0_PCIE1_BUSY_ENA_TX_FIFO1 BIT(1)
+#define MT_WFDMA0_PCIE1_BUSY_ENA_RX_FIFO BIT(2)
+
+/* WFDMA1 PCIE1 */
+#define MT_WFDMA1_PCIE1_BASE 0xd9000
+#define MT_WFDMA1_PCIE1(ofs) (MT_WFDMA1_PCIE1_BASE + (ofs))
+
+#define MT_WFDMA1_PCIE1_BUSY_ENA MT_WFDMA1_PCIE1(0x13c)
+#define MT_WFDMA1_PCIE1_BUSY_ENA_TX_FIFO0 BIT(0)
+#define MT_WFDMA1_PCIE1_BUSY_ENA_TX_FIFO1 BIT(1)
+#define MT_WFDMA1_PCIE1_BUSY_ENA_RX_FIFO BIT(2)
+
+/* WFDMA COMMON */
+#define __RXQ(q) ((q) + __MT_MCUQ_MAX)
+#define __TXQ(q) (__RXQ(q) + MT_RXQ_BAND2)
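+/* __RXQ()/__TXQ() fold the MCU, RX and TX queue enums into one index space
+ * (MCU queues first, then RX, then TX) so that the per-generation lookup
+ * tables dev->q_id and dev->q_int_mask used below can be shared.
+ */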
+
+#define MT_Q_ID(q) (dev->q_id[(q)])
+#define MT_Q_BASE(q) ((dev->wfdma_mask >> (q)) & 0x1 ? \
+ MT_WFDMA1_BASE : MT_WFDMA0_BASE)
+
+#define MT_MCUQ_ID(q) MT_Q_ID(q)
+#define MT_TXQ_ID(q) MT_Q_ID(__TXQ(q))
+#define MT_RXQ_ID(q) MT_Q_ID(__RXQ(q))
+
+#define MT_MCUQ_RING_BASE(q) (MT_Q_BASE(q) + 0x300)
+#define MT_TXQ_RING_BASE(q) (MT_Q_BASE(__TXQ(q)) + 0x300)
+#define MT_RXQ_RING_BASE(q) (MT_Q_BASE(__RXQ(q)) + 0x500)
+
+#define MT_MCUQ_EXT_CTRL(q) (MT_Q_BASE(q) + 0x600 + \
+ MT_MCUQ_ID(q) * 0x4)
+#define MT_RXQ_BAND1_CTRL(q) (MT_Q_BASE(__RXQ(q)) + 0x680 + \
+ MT_RXQ_ID(q) * 0x4)
+#define MT_TXQ_EXT_CTRL(q) (MT_Q_BASE(__TXQ(q)) + 0x600 + \
+ MT_TXQ_ID(q) * 0x4)
+
+#define MT_TXQ_WED_RING_BASE __REG(TXQ_WED_RING_BASE)
+#define MT_RXQ_WED_RING_BASE __REG(RXQ_WED_RING_BASE)
+#define MT_RXQ_WED_DATA_RING_BASE __REG(RXQ_WED_DATA_RING_BASE)
+
+#define MT_INT_SOURCE_CSR __REG(INT_SOURCE_CSR)
+#define MT_INT_MASK_CSR __REG(INT_MASK_CSR)
+
+#define MT_INT1_SOURCE_CSR __REG(INT1_SOURCE_CSR)
+#define MT_INT1_MASK_CSR __REG(INT1_MASK_CSR)
+
+#define MT_INT_RX_DONE_BAND0 BIT(16)
+#define MT_INT_RX_DONE_BAND1 BIT(17)
+#define MT_INT_RX_DONE_WM BIT(0)
+#define MT_INT_RX_DONE_WA BIT(1)
+#define MT_INT_RX_DONE_WA_MAIN BIT(1)
+#define MT_INT_RX_DONE_WA_EXT BIT(2)
+#define MT_INT_MCU_CMD BIT(29)
+#define MT_INT_RX_DONE_BAND0_MT7916 BIT(22)
+#define MT_INT_RX_DONE_BAND1_MT7916 BIT(23)
+#define MT_INT_RX_DONE_WA_MAIN_MT7916 BIT(2)
+#define MT_INT_RX_DONE_WA_EXT_MT7916 BIT(3)
+
+#define MT_INT_WED_RX_DONE_BAND0_MT7916 BIT(18)
+#define MT_INT_WED_RX_DONE_BAND1_MT7916 BIT(19)
+#define MT_INT_WED_RX_DONE_WA_MAIN_MT7916 BIT(1)
+#define MT_INT_WED_RX_DONE_WA_MT7916 BIT(17)
+
+#define MT_INT_RX(q) (dev->q_int_mask[__RXQ(q)])
+#define MT_INT_TX_MCU(q) (dev->q_int_mask[(q)])
+
+#define MT_INT_RX_DONE_MCU (MT_INT_RX(MT_RXQ_MCU) | \
+ MT_INT_RX(MT_RXQ_MCU_WA))
+
+#define MT_INT_BAND0_RX_DONE (MT_INT_RX(MT_RXQ_MAIN) | \
+ MT_INT_RX(MT_RXQ_MAIN_WA))
+
+#define MT_INT_BAND1_RX_DONE (MT_INT_RX(MT_RXQ_BAND1) | \
+ MT_INT_RX(MT_RXQ_BAND1_WA) | \
+ MT_INT_RX(MT_RXQ_MAIN_WA))
+
+#define MT_INT_RX_DONE_ALL (MT_INT_RX_DONE_MCU | \
+ MT_INT_BAND0_RX_DONE | \
+ MT_INT_BAND1_RX_DONE)
+
+#define MT_INT_TX_DONE_FWDL BIT(26)
+#define MT_INT_TX_DONE_MCU_WM BIT(27)
+#define MT_INT_TX_DONE_MCU_WA BIT(15)
+#define MT_INT_TX_DONE_BAND0 BIT(30)
+#define MT_INT_TX_DONE_BAND1 BIT(31)
+#define MT_INT_TX_DONE_MCU_WA_MT7916 BIT(25)
+#define MT_INT_WED_TX_DONE_BAND0 BIT(4)
+#define MT_INT_WED_TX_DONE_BAND1 BIT(5)
+
+#define MT_INT_TX_DONE_MCU (MT_INT_TX_MCU(MT_MCUQ_WA) | \
+ MT_INT_TX_MCU(MT_MCUQ_WM) | \
+ MT_INT_TX_MCU(MT_MCUQ_FWDL))
+
+#define MT_MCU_CMD __REG(INT_MCU_CMD_SOURCE)
+#define MT_MCU_CMD_STOP_DMA_FW_RELOAD BIT(1)
+#define MT_MCU_CMD_STOP_DMA BIT(2)
+#define MT_MCU_CMD_RESET_DONE BIT(3)
+#define MT_MCU_CMD_RECOVERY_DONE BIT(4)
+#define MT_MCU_CMD_NORMAL_STATE BIT(5)
+#define MT_MCU_CMD_ERROR_MASK GENMASK(5, 1)
+
+#define MT_MCU_CMD_WA_WDT BIT(31)
+#define MT_MCU_CMD_WM_WDT BIT(30)
+#define MT_MCU_CMD_WDT_MASK GENMASK(31, 30)
+
+/* TOP RGU */
+#define MT_TOP_RGU_BASE 0x18000000
+#define MT_TOP_PWR_CTRL (MT_TOP_RGU_BASE + (0x0))
+#define MT_TOP_PWR_KEY (0x5746 << 16)
+#define MT_TOP_PWR_SW_RST BIT(0)
+#define MT_TOP_PWR_SW_PWR_ON GENMASK(3, 2)
+#define MT_TOP_PWR_HW_CTRL BIT(4)
+#define MT_TOP_PWR_PWR_ON BIT(7)
+
+#define MT_TOP_RGU_SYSRAM_PDN (MT_TOP_RGU_BASE + 0x050)
+#define MT_TOP_RGU_SYSRAM_SLP (MT_TOP_RGU_BASE + 0x054)
+#define MT_TOP_WFSYS_PWR (MT_TOP_RGU_BASE + 0x010)
+#define MT_TOP_PWR_EN_MASK BIT(7)
+#define MT_TOP_PWR_ACK_MASK BIT(6)
+#define MT_TOP_PWR_KEY_MASK GENMASK(31, 16)
+
+#define MT7986_TOP_WM_RESET (MT_TOP_RGU_BASE + 0x120)
+#define MT7986_TOP_WM_RESET_MASK BIT(0)
+
+/* l1/l2 remap */
+#define MT_HIF_REMAP_L1 0xf11ac
+#define MT_HIF_REMAP_L1_MT7916 0xfe260
+#define MT_HIF_REMAP_L1_MASK GENMASK(15, 0)
+#define MT_HIF_REMAP_L1_OFFSET GENMASK(15, 0)
+#define MT_HIF_REMAP_L1_BASE GENMASK(31, 16)
+#define MT_HIF_REMAP_BASE_L1 0xe0000
+
+#define MT_HIF_REMAP_L2 0xf11b0
+#define MT_HIF_REMAP_L2_MASK GENMASK(19, 0)
+#define MT_HIF_REMAP_L2_OFFSET GENMASK(11, 0)
+#define MT_HIF_REMAP_L2_BASE GENMASK(31, 12)
+#define MT_HIF_REMAP_L2_MT7916 0x1b8
+#define MT_HIF_REMAP_L2_MASK_MT7916 GENMASK(31, 16)
+#define MT_HIF_REMAP_L2_OFFSET_MT7916 GENMASK(15, 0)
+#define MT_HIF_REMAP_L2_BASE_MT7916 GENMASK(31, 16)
+#define MT_HIF_REMAP_BASE_L2_MT7916 0x40000
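+/* A minimal sketch (not part of this header) of how the L1 remap window is
+ * typically used to reach an address outside the fixed mappings (MT7915
+ * register layout shown; the flow is an assumption, not an API defined in
+ * this file):
+ *
+ *	u32 offset = FIELD_GET(MT_HIF_REMAP_L1_OFFSET, addr);
+ *	u32 base = FIELD_GET(MT_HIF_REMAP_L1_BASE, addr);
+ *
+ *	mt76_rmw_field(dev, MT_HIF_REMAP_L1, MT_HIF_REMAP_L1_MASK, base);
+ *	val = mt76_rr(dev, MT_HIF_REMAP_BASE_L1 + offset);
+ */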
+
+#define MT_INFRA_BASE 0x18000000
+#define MT_WFSYS0_PHY_START 0x18400000
+#define MT_WFSYS1_PHY_START 0x18800000
+#define MT_WFSYS1_PHY_END 0x18bfffff
+#define MT_CBTOP1_PHY_START 0x70000000
+#define MT_CBTOP1_PHY_END __REG(CBTOP1_PHY_END)
+#define MT_CBTOP2_PHY_START 0xf0000000
+#define MT_INFRA_MCU_START 0x7c000000
+#define MT_INFRA_MCU_END __REG(INFRA_MCU_ADDR_END)
+#define MT_CONN_INFRA_OFFSET(p) ((p) - MT_INFRA_BASE)
+
+/* CONN INFRA CFG */
+#define MT_CONN_INFRA_BASE 0x18001000
+#define MT_CONN_INFRA(ofs) (MT_CONN_INFRA_BASE + (ofs))
+
+#define MT_CONN_INFRA_EFUSE MT_CONN_INFRA(0x020)
+
+#define MT_CONN_INFRA_ADIE_RESET MT_CONN_INFRA(0x030)
+#define MT_CONN_INFRA_ADIE1_RESET_MASK BIT(0)
+#define MT_CONN_INFRA_ADIE2_RESET_MASK BIT(2)
+
+#define MT_CONN_INFRA_OSC_RC_EN MT_CONN_INFRA(0x380)
+
+#define MT_CONN_INFRA_OSC_CTRL MT_CONN_INFRA(0x300)
+#define MT_CONN_INFRA_OSC_RC_EN_MASK BIT(7)
+#define MT_CONN_INFRA_OSC_STB_TIME_MASK GENMASK(23, 0)
+
+#define MT_CONN_INFRA_HW_CTRL MT_CONN_INFRA(0x200)
+#define MT_CONN_INFRA_HW_CTRL_MASK BIT(0)
+
+#define MT_CONN_INFRA_WF_SLP_PROT MT_CONN_INFRA(0x540)
+#define MT_CONN_INFRA_WF_SLP_PROT_MASK BIT(0)
+
+#define MT_CONN_INFRA_WF_SLP_PROT_RDY MT_CONN_INFRA(0x544)
+#define MT_CONN_INFRA_CONN_WF_MASK (BIT(29) | BIT(31))
+#define MT_CONN_INFRA_CONN (BIT(25) | BIT(29) | BIT(31))
+
+#define MT_CONN_INFRA_EMI_REQ MT_CONN_INFRA(0x414)
+#define MT_CONN_INFRA_EMI_REQ_MASK BIT(0)
+#define MT_CONN_INFRA_INFRA_REQ_MASK BIT(5)
+
+/* AFE */
+#define MT_AFE_CTRL_BASE(_band) (0x18003000 + ((_band) << 19))
+#define MT_AFE_CTRL(_band, ofs) (MT_AFE_CTRL_BASE(_band) + (ofs))
+
+#define MT_AFE_DIG_EN_01(_band) MT_AFE_CTRL(_band, 0x00)
+#define MT_AFE_DIG_EN_02(_band) MT_AFE_CTRL(_band, 0x04)
+#define MT_AFE_DIG_EN_03(_band) MT_AFE_CTRL(_band, 0x08)
+#define MT_AFE_DIG_TOP_01(_band) MT_AFE_CTRL(_band, 0x0c)
+
+#define MT_AFE_PLL_STB_TIME(_band) MT_AFE_CTRL(_band, 0xf4)
+#define MT_AFE_PLL_STB_TIME_MASK (GENMASK(30, 16) | GENMASK(14, 0))
+#define MT_AFE_PLL_STB_TIME_VAL (FIELD_PREP(GENMASK(30, 16), 0x4bc) | \
+ FIELD_PREP(GENMASK(14, 0), 0x7e4))
+#define MT_AFE_BPLL_CFG_MASK GENMASK(7, 6)
+#define MT_AFE_WPLL_CFG_MASK GENMASK(1, 0)
+#define MT_AFE_MCU_WPLL_CFG_MASK GENMASK(3, 2)
+#define MT_AFE_MCU_BPLL_CFG_MASK GENMASK(17, 16)
+#define MT_AFE_PLL_CFG_MASK (MT_AFE_BPLL_CFG_MASK | \
+ MT_AFE_WPLL_CFG_MASK | \
+ MT_AFE_MCU_WPLL_CFG_MASK | \
+ MT_AFE_MCU_BPLL_CFG_MASK)
+#define MT_AFE_PLL_CFG_VAL (FIELD_PREP(MT_AFE_BPLL_CFG_MASK, 0x1) | \
+ FIELD_PREP(MT_AFE_WPLL_CFG_MASK, 0x2) | \
+ FIELD_PREP(MT_AFE_MCU_WPLL_CFG_MASK, 0x1) | \
+ FIELD_PREP(MT_AFE_MCU_BPLL_CFG_MASK, 0x2))
+
+#define MT_AFE_DIG_TOP_01_MASK GENMASK(18, 15)
+#define MT_AFE_DIG_TOP_01_VAL FIELD_PREP(MT_AFE_DIG_TOP_01_MASK, 0x9)
+
+#define MT_AFE_RG_WBG_EN_RCK_MASK BIT(0)
+#define MT_AFE_RG_WBG_EN_BPLL_UP_MASK BIT(21)
+#define MT_AFE_RG_WBG_EN_WPLL_UP_MASK BIT(20)
+#define MT_AFE_RG_WBG_EN_PLL_UP_MASK (MT_AFE_RG_WBG_EN_BPLL_UP_MASK | \
+ MT_AFE_RG_WBG_EN_WPLL_UP_MASK)
+#define MT_AFE_RG_WBG_EN_TXCAL_WF4 BIT(29)
+#define MT_AFE_RG_WBG_EN_TXCAL_BT BIT(21)
+#define MT_AFE_RG_WBG_EN_TXCAL_WF3 BIT(20)
+#define MT_AFE_RG_WBG_EN_TXCAL_WF2 BIT(19)
+#define MT_AFE_RG_WBG_EN_TXCAL_WF1 BIT(18)
+#define MT_AFE_RG_WBG_EN_TXCAL_WF0 BIT(17)
+
+#define MT_ADIE_SLP_CTRL_BASE(_band) (0x18005000 + ((_band) << 19))
+#define MT_ADIE_SLP_CTRL(_band, ofs) (MT_ADIE_SLP_CTRL_BASE(_band) + (ofs))
+
+#define MT_ADIE_SLP_CTRL_CK0(_band) MT_ADIE_SLP_CTRL(_band, 0x120)
+
+/* ADIE */
+#define MT_ADIE_CHIP_ID 0x02c
+#define MT_ADIE_VERSION_MASK GENMASK(15, 0)
+#define MT_ADIE_CHIP_ID_MASK GENMASK(31, 16)
+#define MT_ADIE_IDX0 GENMASK(15, 0)
+#define MT_ADIE_IDX1 GENMASK(31, 16)
+
+#define MT_ADIE_RG_TOP_THADC_BG 0x034
+#define MT_ADIE_VRPI_SEL_CR_MASK GENMASK(15, 12)
+#define MT_ADIE_VRPI_SEL_EFUSE_MASK GENMASK(6, 3)
+
+#define MT_ADIE_RG_TOP_THADC 0x038
+#define MT_ADIE_PGA_GAIN_MASK GENMASK(25, 23)
+#define MT_ADIE_PGA_GAIN_EFUSE_MASK GENMASK(2, 0)
+#define MT_ADIE_LDO_CTRL_MASK GENMASK(27, 26)
+#define MT_ADIE_LDO_CTRL_EFUSE_MASK GENMASK(6, 5)
+
+#define MT_AFE_RG_ENCAL_WBTAC_IF_SW 0x070
+#define MT_ADIE_EFUSE_RDATA0 0x130
+
+#define MT_ADIE_EFUSE2_CTRL 0x148
+#define MT_ADIE_EFUSE_CTRL_MASK BIT(1)
+
+#define MT_ADIE_EFUSE_CFG 0x144
+#define MT_ADIE_EFUSE_MODE_MASK GENMASK(7, 6)
+#define MT_ADIE_EFUSE_ADDR_MASK GENMASK(25, 16)
+#define MT_ADIE_EFUSE_VALID_MASK BIT(29)
+#define MT_ADIE_EFUSE_KICK_MASK BIT(30)
+
+#define MT_ADIE_THADC_ANALOG 0x3a6
+
+#define MT_ADIE_THADC_SLOP 0x3a7
+#define MT_ADIE_ANA_EN_MASK BIT(7)
+
+#define MT_ADIE_7975_XTAL_CAL 0x3a1
+#define MT_ADIE_TRIM_MASK GENMASK(6, 0)
+#define MT_ADIE_EFUSE_TRIM_MASK GENMASK(5, 0)
+#define MT_ADIE_XO_TRIM_EN_MASK BIT(7)
+#define MT_ADIE_XTAL_DECREASE_MASK BIT(6)
+
+#define MT_ADIE_7975_XO_TRIM2 0x3a2
+#define MT_ADIE_7975_XO_TRIM3 0x3a3
+#define MT_ADIE_7975_XO_TRIM4 0x3a4
+#define MT_ADIE_7975_XTAL_EN 0x3a5
+
+#define MT_ADIE_XO_TRIM_FLOW 0x3ac
+#define MT_ADIE_XTAL_AXM_80M_OSC 0x390
+#define MT_ADIE_XTAL_AXM_40M_OSC 0x391
+#define MT_ADIE_XTAL_TRIM1_80M_OSC 0x398
+#define MT_ADIE_XTAL_TRIM1_40M_OSC 0x399
+#define MT_ADIE_WRI_CK_SEL 0x4ac
+#define MT_ADIE_RG_STRAP_PIN_IN 0x4fc
+#define MT_ADIE_XTAL_C1 0x654
+#define MT_ADIE_XTAL_C2 0x658
+#define MT_ADIE_RG_XO_01 0x65c
+#define MT_ADIE_RG_XO_03 0x664
+
+#define MT_ADIE_CLK_EN 0xa00
+
+#define MT_ADIE_7975_XTAL 0xa18
+#define MT_ADIE_7975_XTAL_EN_MASK BIT(29)
+
+#define MT_ADIE_7975_COCLK 0xa1c
+#define MT_ADIE_7975_XO_2 0xa84
+#define MT_ADIE_7975_XO_2_FIX_EN BIT(31)
+
+#define MT_ADIE_7975_XO_CTRL2 0xa94
+#define MT_ADIE_7975_XO_CTRL2_C1_MASK GENMASK(26, 20)
+#define MT_ADIE_7975_XO_CTRL2_C2_MASK GENMASK(18, 12)
+#define MT_ADIE_7975_XO_CTRL2_MASK (MT_ADIE_7975_XO_CTRL2_C1_MASK | \
+ MT_ADIE_7975_XO_CTRL2_C2_MASK)
+
+#define MT_ADIE_7975_XO_CTRL6 0xaa4
+#define MT_ADIE_7975_XO_CTRL6_MASK BIT(16)
+
+/* TOP SPI */
+#define MT_TOP_SPI_ADIE_BASE(_band) (0x18004000 + ((_band) << 19))
+#define MT_TOP_SPI_ADIE(_band, ofs) (MT_TOP_SPI_ADIE_BASE(_band) + (ofs))
+
+#define MT_TOP_SPI_BUSY_CR(_band) MT_TOP_SPI_ADIE(_band, 0)
+#define MT_TOP_SPI_POLLING_BIT BIT(5)
+
+#define MT_TOP_SPI_ADDR_CR(_band) MT_TOP_SPI_ADIE(_band, 0x50)
+#define MT_TOP_SPI_READ_ADDR_FORMAT (BIT(12) | BIT(13) | BIT(15))
+#define MT_TOP_SPI_WRITE_ADDR_FORMAT (BIT(13) | BIT(15))
+
+#define MT_TOP_SPI_WRITE_DATA_CR(_band) MT_TOP_SPI_ADIE(_band, 0x54)
+#define MT_TOP_SPI_READ_DATA_CR(_band) MT_TOP_SPI_ADIE(_band, 0x58)
+
+/* CONN INFRA CKGEN */
+#define MT_INFRA_CKGEN_BASE 0x18009000
+#define MT_INFRA_CKGEN(ofs) (MT_INFRA_CKGEN_BASE + (ofs))
+
+#define MT_INFRA_CKGEN_BUS MT_INFRA_CKGEN(0xa00)
+#define MT_INFRA_CKGEN_BUS_CLK_SEL_MASK BIT(23)
+#define MT_INFRA_CKGEN_BUS_RDY_SEL_MASK BIT(29)
+
+#define MT_INFRA_CKGEN_BUS_WPLL_DIV_1 MT_INFRA_CKGEN(0x008)
+#define MT_INFRA_CKGEN_BUS_WPLL_DIV_2 MT_INFRA_CKGEN(0x00c)
+
+#define MT_INFRA_CKGEN_RFSPI_WPLL_DIV MT_INFRA_CKGEN(0x040)
+#define MT_INFRA_CKGEN_DIV_SEL_MASK GENMASK(7, 2)
+#define MT_INFRA_CKGEN_DIV_EN_MASK BIT(0)
+
+/* CONN INFRA BUS */
+#define MT_INFRA_BUS_BASE 0x1800e000
+#define MT_INFRA_BUS(ofs) (MT_INFRA_BUS_BASE + (ofs))
+
+#define MT_INFRA_BUS_OFF_TIMEOUT MT_INFRA_BUS(0x300)
+#define MT_INFRA_BUS_TIMEOUT_LIMIT_MASK GENMASK(14, 7)
+#define MT_INFRA_BUS_TIMEOUT_EN_MASK GENMASK(3, 0)
+
+#define MT_INFRA_BUS_ON_TIMEOUT MT_INFRA_BUS(0x31c)
+#define MT_INFRA_BUS_EMI_START MT_INFRA_BUS(0x360)
+#define MT_INFRA_BUS_EMI_END MT_INFRA_BUS(0x364)
+
+/* CONN_INFRA_SKU */
+#define MT_CONNINFRA_SKU_DEC_ADDR 0x18050000
+#define MT_CONNINFRA_SKU_MASK GENMASK(15, 0)
+#define MT_ADIE_TYPE_MASK BIT(1)
+
+/* FW MODE SYNC */
+#define MT_FW_ASSERT_STAT __REG(FW_ASSERT_STAT_ADDR)
+#define MT_FW_EXCEPT_TYPE __REG(FW_EXCEPT_TYPE_ADDR)
+#define MT_FW_EXCEPT_COUNT __REG(FW_EXCEPT_COUNT_ADDR)
+#define MT_FW_CIRQ_COUNT __REG(FW_CIRQ_COUNT_ADDR)
+#define MT_FW_CIRQ_IDX __REG(FW_CIRQ_IDX_ADDR)
+#define MT_FW_CIRQ_LISR __REG(FW_CIRQ_LISR_ADDR)
+#define MT_FW_TASK_ID __REG(FW_TASK_ID_ADDR)
+#define MT_FW_TASK_IDX __REG(FW_TASK_IDX_ADDR)
+#define MT_FW_TASK_QID1 __REG(FW_TASK_QID1_ADDR)
+#define MT_FW_TASK_QID2 __REG(FW_TASK_QID2_ADDR)
+#define MT_FW_TASK_START __REG(FW_TASK_START_ADDR)
+#define MT_FW_TASK_END __REG(FW_TASK_END_ADDR)
+#define MT_FW_TASK_SIZE __REG(FW_TASK_SIZE_ADDR)
+#define MT_FW_LAST_MSG_ID __REG(FW_LAST_MSG_ID_ADDR)
+#define MT_FW_EINT_INFO __REG(FW_EINT_INFO_ADDR)
+#define MT_FW_SCHED_INFO __REG(FW_SCHED_INFO_ADDR)
+
+#define MT_SWDEF_BASE __REG(SWDEF_BASE_ADDR)
+
+#define MT_SWDEF(ofs) (MT_SWDEF_BASE + (ofs))
+#define MT_SWDEF_MODE MT_SWDEF(0x3c)
+#define MT_SWDEF_NORMAL_MODE 0
+#define MT_SWDEF_ICAP_MODE 1
+#define MT_SWDEF_SPECTRUM_MODE 2
+
+#define MT_SWDEF_SER_STATS MT_SWDEF(0x040)
+#define MT_SWDEF_PLE_STATS MT_SWDEF(0x044)
+#define MT_SWDEF_PLE1_STATS MT_SWDEF(0x048)
+#define MT_SWDEF_PLE_AMSDU_STATS MT_SWDEF(0x04C)
+#define MT_SWDEF_PSE_STATS MT_SWDEF(0x050)
+#define MT_SWDEF_PSE1_STATS MT_SWDEF(0x054)
+#define MT_SWDEF_LAMC_WISR6_BN0_STATS MT_SWDEF(0x058)
+#define MT_SWDEF_LAMC_WISR6_BN1_STATS MT_SWDEF(0x05C)
+#define MT_SWDEF_LAMC_WISR7_BN0_STATS MT_SWDEF(0x060)
+#define MT_SWDEF_LAMC_WISR7_BN1_STATS MT_SWDEF(0x064)
+
+#define MT_DIC_CMD_REG_BASE 0x41f000
+#define MT_DIC_CMD_REG(ofs) (MT_DIC_CMD_REG_BASE + (ofs))
+#define MT_DIC_CMD_REG_CMD MT_DIC_CMD_REG(0x10)
+
+#define MT_CPU_UTIL_BASE 0x41f030
+#define MT_CPU_UTIL(ofs) (MT_CPU_UTIL_BASE + (ofs))
+#define MT_CPU_UTIL_BUSY_PCT MT_CPU_UTIL(0x00)
+#define MT_CPU_UTIL_PEAK_BUSY_PCT MT_CPU_UTIL(0x04)
+#define MT_CPU_UTIL_IDLE_CNT MT_CPU_UTIL(0x08)
+#define MT_CPU_UTIL_PEAK_IDLE_CNT MT_CPU_UTIL(0x0c)
+#define MT_CPU_UTIL_CTRL MT_CPU_UTIL(0x1c)
+
+/* LED */
+#define MT_LED_TOP_BASE 0x18013000
+#define MT_LED_PHYS(_n) (MT_LED_TOP_BASE + (_n))
+
+#define MT_LED_CTRL(_n) MT_LED_PHYS(0x00 + ((_n) * 4))
+#define MT_LED_CTRL_KICK BIT(7)
+#define MT_LED_CTRL_BAND BIT(4)
+#define MT_LED_CTRL_BLINK_MODE BIT(2)
+#define MT_LED_CTRL_POLARITY BIT(1)
+
+#define MT_LED_TX_BLINK(_n) MT_LED_PHYS(0x10 + ((_n) * 4))
+#define MT_LED_TX_BLINK_ON_MASK GENMASK(7, 0)
+#define MT_LED_TX_BLINK_OFF_MASK GENMASK(15, 8)
+
+#define MT_LED_STATUS_0(_n) MT_LED_PHYS(0x20 + ((_n) * 8))
+#define MT_LED_STATUS_1(_n) MT_LED_PHYS(0x24 + ((_n) * 8))
+#define MT_LED_STATUS_OFF GENMASK(31, 24)
+#define MT_LED_STATUS_ON GENMASK(23, 16)
+#define MT_LED_STATUS_DURATION GENMASK(15, 0)
+
+#define MT_LED_EN(_n) MT_LED_PHYS(0x40 + ((_n) * 4))
+
+#define MT_LED_GPIO_MUX0 0x70005050 /* GPIO 1 and GPIO 2 */
+#define MT_LED_GPIO_MUX1 0x70005054 /* GPIO 14 and 15 */
+#define MT_LED_GPIO_MUX2 0x70005058 /* GPIO 18 */
+#define MT_LED_GPIO_MUX3 0x7000505c /* GPIO 26 */
+
+/* MT TOP */
+#define MT_TOP_BASE 0x18060000
+#define MT_TOP(ofs) (MT_TOP_BASE + (ofs))
+
+#define MT_TOP_LPCR_HOST_BAND(_band) MT_TOP(0x10 + ((_band) * 0x10))
+#define MT_TOP_LPCR_HOST_FW_OWN BIT(0)
+#define MT_TOP_LPCR_HOST_DRV_OWN BIT(1)
+#define MT_TOP_LPCR_HOST_FW_OWN_STAT BIT(2)
+
+#define MT_TOP_LPCR_HOST_BAND_IRQ_STAT(_band) MT_TOP(0x14 + ((_band) * 0x10))
+#define MT_TOP_LPCR_HOST_BAND_STAT BIT(0)
+
+#define MT_TOP_MISC MT_TOP(0xf0)
+#define MT_TOP_MISC_FW_STATE GENMASK(2, 0)
+
+#define MT_TOP_WFSYS_WAKEUP MT_TOP(0x1a4)
+#define MT_TOP_WFSYS_WAKEUP_MASK BIT(0)
+
+#define MT_TOP_MCU_EMI_BASE MT_TOP(0x1c4)
+#define MT_TOP_MCU_EMI_BASE_MASK GENMASK(19, 0)
+
+#define MT_TOP_WF_AP_PERI_BASE MT_TOP(0x1c8)
+#define MT_TOP_WF_AP_PERI_BASE_MASK GENMASK(19, 0)
+
+#define MT_TOP_EFUSE_BASE MT_TOP(0x1cc)
+#define MT_TOP_EFUSE_BASE_MASK GENMASK(19, 0)
+
+#define MT_TOP_CONN_INFRA_WAKEUP MT_TOP(0x1a0)
+#define MT_TOP_CONN_INFRA_WAKEUP_MASK BIT(0)
+
+#define MT_TOP_WFSYS_RESET_STATUS MT_TOP(0x2cc)
+#define MT_TOP_WFSYS_RESET_STATUS_MASK BIT(30)
+
+/* SEMA */
+#define MT_SEMA_BASE 0x18070000
+#define MT_SEMA(ofs) (MT_SEMA_BASE + (ofs))
+
+#define MT_SEMA_RFSPI_STATUS (MT_SEMA(0x2000) + (11 * 4))
+#define MT_SEMA_RFSPI_RELEASE (MT_SEMA(0x2200) + (11 * 4))
+#define MT_SEMA_RFSPI_STATUS_MASK BIT(1)
+
+/* MCU BUS */
+#define MT_MCU_BUS_BASE 0x18400000
+#define MT_MCU_BUS(ofs) (MT_MCU_BUS_BASE + (ofs))
+
+#define MT_MCU_BUS_TIMEOUT MT_MCU_BUS(0xf0440)
+#define MT_MCU_BUS_TIMEOUT_SET_MASK GENMASK(7, 0)
+#define MT_MCU_BUS_TIMEOUT_CG_EN_MASK BIT(28)
+#define MT_MCU_BUS_TIMEOUT_EN_MASK BIT(31)
+
+#define MT_MCU_BUS_REMAP MT_MCU_BUS(0x120)
+
+/* TOP CFG */
+#define MT_TOP_CFG_BASE 0x184b0000
+#define MT_TOP_CFG(ofs) (MT_TOP_CFG_BASE + (ofs))
+
+#define MT_TOP_CFG_IP_VERSION_ADDR MT_TOP_CFG(0x010)
+
+/* TOP CFG ON */
+#define MT_TOP_CFG_ON_BASE 0x184c1000
+#define MT_TOP_CFG_ON(ofs) (MT_TOP_CFG_ON_BASE + (ofs))
+
+#define MT_TOP_CFG_ON_ROM_IDX MT_TOP_CFG_ON(0x604)
+
+/* SLP CTRL */
+#define MT_SLP_BASE 0x184c3000
+#define MT_SLP(ofs) (MT_SLP_BASE + (ofs))
+
+#define MT_SLP_STATUS MT_SLP(0x00c)
+#define MT_SLP_WFDMA2CONN_MASK (BIT(21) | BIT(23))
+#define MT_SLP_CTRL_EN_MASK BIT(0)
+#define MT_SLP_CTRL_BSY_MASK BIT(1)
+
+/* MCU BUS DBG */
+#define MT_MCU_BUS_DBG_BASE 0x18500000
+#define MT_MCU_BUS_DBG(ofs) (MT_MCU_BUS_DBG_BASE + (ofs))
+
+#define MT_MCU_BUS_DBG_TIMEOUT MT_MCU_BUS_DBG(0x0)
+#define MT_MCU_BUS_DBG_TIMEOUT_SET_MASK GENMASK(31, 16)
+#define MT_MCU_BUS_DBG_TIMEOUT_CK_EN_MASK BIT(3)
+#define MT_MCU_BUS_DBG_TIMEOUT_EN_MASK BIT(2)
+
+#define MT_HW_BOUND 0x70010020
+#define MT_HW_REV 0x70010204
+#define MT_WF_SUBSYS_RST 0x70002600
+
+/* PCIE MAC */
+#define MT_PCIE_MAC_BASE 0x74030000
+#define MT_PCIE_MAC(ofs) (MT_PCIE_MAC_BASE + (ofs))
+#define MT_PCIE_MAC_INT_ENABLE MT_PCIE_MAC(0x188)
+
+#define MT_PCIE1_MAC_INT_ENABLE 0x74020188
+#define MT_PCIE1_MAC_INT_ENABLE_MT7916 0x74090188
+
+#define MT_WM_MCU_PC 0x7c060204
+#define MT_WA_MCU_PC 0x7c06020c
+
+/* PP TOP */
+#define MT_WF_PP_TOP_BASE 0x820cc000
+#define MT_WF_PP_TOP(ofs) (MT_WF_PP_TOP_BASE + (ofs))
+
+#define MT_WF_PP_TOP_RXQ_WFDMA_CF_5 MT_WF_PP_TOP(0x0e8)
+#define MT_WF_PP_TOP_RXQ_QID6_WFDMA_HIF_SEL_MASK BIT(6)
+
+#define MT_WF_IRPI_BASE 0x83000000
+#define MT_WF_IRPI(ofs) (MT_WF_IRPI_BASE + (ofs))
+
+#define MT_WF_IRPI_NSS(phy, nss) MT_WF_IRPI(0x6000 + ((phy) << 20) + ((nss) << 16))
+#define MT_WF_IRPI_NSS_MT7916(phy, nss) MT_WF_IRPI(0x1000 + ((phy) << 20) + ((nss) << 16))
+
+/* PHY */
+#define MT_WF_PHY_BASE 0x83080000
+#define MT_WF_PHY(ofs) (MT_WF_PHY_BASE + (ofs))
+
+#define MT_WF_PHY_RX_CTRL1(_phy) MT_WF_PHY(0x2004 + ((_phy) << 16))
+#define MT_WF_PHY_RX_CTRL1_MT7916(_phy) MT_WF_PHY(0x2004 + ((_phy) << 20))
+#define MT_WF_PHY_RX_CTRL1_IPI_EN GENMASK(2, 0)
+#define MT_WF_PHY_RX_CTRL1_STSCNT_EN GENMASK(11, 9)
+
+#define MT_WF_PHY_RXTD12(_phy) MT_WF_PHY(0x8230 + ((_phy) << 16))
+#define MT_WF_PHY_RXTD12_MT7916(_phy) MT_WF_PHY(0x8230 + ((_phy) << 20))
+#define MT_WF_PHY_RXTD12_IRPI_SW_CLR_ONLY BIT(18)
+#define MT_WF_PHY_RXTD12_IRPI_SW_CLR BIT(29)
+
+#define MT_WF_PHY_TPC_CTRL_STAT(_phy) MT_WF_PHY(0xe7a0 + ((_phy) << 16))
+#define MT_WF_PHY_TPC_CTRL_STAT_MT7916(_phy) MT_WF_PHY(0xe7a0 + ((_phy) << 20))
+#define MT_WF_PHY_TPC_POWER GENMASK(15, 8)
+
+#define MT_MCU_WM_CIRQ_BASE 0x89010000
+#define MT_MCU_WM_CIRQ(ofs) (MT_MCU_WM_CIRQ_BASE + (ofs))
+#define MT_MCU_WM_CIRQ_IRQ_MASK_CLR_ADDR MT_MCU_WM_CIRQ(0x80)
+#define MT_MCU_WM_CIRQ_IRQ_SOFT_ADDR MT_MCU_WM_CIRQ(0xc0)
+#define MT_MCU_WM_CIRQ_EINT_MASK_CLR_ADDR MT_MCU_WM_CIRQ(0x108)
+#define MT_MCU_WM_CIRQ_EINT_SOFT_ADDR MT_MCU_WM_CIRQ(0x118)
+
+#endif
diff --git a/drivers/net/wireless/mediatek/mt76/mt7915/soc.c b/drivers/net/wireless/mediatek/mt76/mt7915/soc.c
new file mode 100644
index 0000000000..37348b2087
--- /dev/null
+++ b/drivers/net/wireless/mediatek/mt76/mt7915/soc.c
@@ -0,0 +1,1322 @@
+// SPDX-License-Identifier: ISC
+/* Copyright (C) 2022 MediaTek Inc. */
+
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/platform_device.h>
+#include <linux/pinctrl/consumer.h>
+#include <linux/of.h>
+#include <linux/of_reserved_mem.h>
+#include <linux/of_gpio.h>
+#include <linux/iopoll.h>
+#include <linux/reset.h>
+#include <linux/of_net.h>
+#include <linux/clk.h>
+
+#include "mt7915.h"
+
+#define MT7981_CON_INFRA_VERSION 0x02090000
+#define MT7986_CON_INFRA_VERSION 0x02070000
+
+/* INFRACFG */
+#define MT_INFRACFG_CONN2AP_SLPPROT 0x0d0
+#define MT_INFRACFG_AP2CONN_SLPPROT 0x0d4
+
+#define MT_INFRACFG_RX_EN_MASK BIT(16)
+#define MT_INFRACFG_TX_RDY_MASK BIT(4)
+#define MT_INFRACFG_TX_EN_MASK BIT(0)
+
+/* TOP POS */
+#define MT_TOP_POS_FAST_CTRL 0x114
+#define MT_TOP_POS_FAST_EN_MASK BIT(3)
+
+#define MT_TOP_POS_SKU 0x21c
+#define MT_TOP_POS_SKU_MASK GENMASK(31, 28)
+#define MT_TOP_POS_SKU_ADIE_DBDC_MASK BIT(2)
+
+enum {
+ ADIE_SB,
+ ADIE_DBDC
+};
+
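+/* Indirect A-die register access goes through the TOP SPI bridge: wait for
+ * MT_TOP_SPI_BUSY_CR to clear, write the target address tagged with the
+ * read or write format bits, then poll busy again before the data register
+ * is valid. The helpers below implement this sequence.
+ */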
+static int
+mt76_wmac_spi_read(struct mt7915_dev *dev, u8 adie, u32 addr, u32 *val)
+{
+ int ret;
+ u32 cur;
+
+ ret = read_poll_timeout(mt76_rr, cur, !(cur & MT_TOP_SPI_POLLING_BIT),
+ USEC_PER_MSEC, 50 * USEC_PER_MSEC, false,
+ dev, MT_TOP_SPI_BUSY_CR(adie));
+ if (ret)
+ return ret;
+
+ mt76_wr(dev, MT_TOP_SPI_ADDR_CR(adie),
+ MT_TOP_SPI_READ_ADDR_FORMAT | addr);
+ mt76_wr(dev, MT_TOP_SPI_WRITE_DATA_CR(adie), 0);
+
+ ret = read_poll_timeout(mt76_rr, cur, !(cur & MT_TOP_SPI_POLLING_BIT),
+ USEC_PER_MSEC, 50 * USEC_PER_MSEC, false,
+ dev, MT_TOP_SPI_BUSY_CR(adie));
+ if (ret)
+ return ret;
+
+ *val = mt76_rr(dev, MT_TOP_SPI_READ_DATA_CR(adie));
+
+ return 0;
+}
+
+static int
+mt76_wmac_spi_write(struct mt7915_dev *dev, u8 adie, u32 addr, u32 val)
+{
+ int ret;
+ u32 cur;
+
+ ret = read_poll_timeout(mt76_rr, cur, !(cur & MT_TOP_SPI_POLLING_BIT),
+ USEC_PER_MSEC, 50 * USEC_PER_MSEC, false,
+ dev, MT_TOP_SPI_BUSY_CR(adie));
+ if (ret)
+ return ret;
+
+ mt76_wr(dev, MT_TOP_SPI_ADDR_CR(adie),
+ MT_TOP_SPI_WRITE_ADDR_FORMAT | addr);
+ mt76_wr(dev, MT_TOP_SPI_WRITE_DATA_CR(adie), val);
+
+ return read_poll_timeout(mt76_rr, cur, !(cur & MT_TOP_SPI_POLLING_BIT),
+ USEC_PER_MSEC, 50 * USEC_PER_MSEC, false,
+ dev, MT_TOP_SPI_BUSY_CR(adie));
+}
+
+static int
+mt76_wmac_spi_rmw(struct mt7915_dev *dev, u8 adie,
+ u32 addr, u32 mask, u32 val)
+{
+ int ret;
+ u32 cur;
+
+ ret = mt76_wmac_spi_read(dev, adie, addr, &cur);
+ if (ret)
+ return ret;
+
+ cur &= ~mask;
+ cur |= val;
+
+ return mt76_wmac_spi_write(dev, adie, addr, cur);
+}
+
+static int
+mt7986_wmac_adie_efuse_read(struct mt7915_dev *dev, u8 adie,
+ u32 addr, u32 *data)
+{
+ int ret, temp;
+ u32 val, mask;
+
+ ret = mt76_wmac_spi_write(dev, adie, MT_ADIE_EFUSE_CFG,
+ MT_ADIE_EFUSE_CTRL_MASK);
+ if (ret)
+ return ret;
+
+ ret = mt76_wmac_spi_rmw(dev, adie, MT_ADIE_EFUSE2_CTRL, BIT(30), 0x0);
+ if (ret)
+ return ret;
+
+ mask = (MT_ADIE_EFUSE_MODE_MASK | MT_ADIE_EFUSE_ADDR_MASK |
+ MT_ADIE_EFUSE_KICK_MASK);
+ val = FIELD_PREP(MT_ADIE_EFUSE_MODE_MASK, 0) |
+ FIELD_PREP(MT_ADIE_EFUSE_ADDR_MASK, addr) |
+ FIELD_PREP(MT_ADIE_EFUSE_KICK_MASK, 1);
+ ret = mt76_wmac_spi_rmw(dev, adie, MT_ADIE_EFUSE2_CTRL, mask, val);
+ if (ret)
+ return ret;
+
+ ret = read_poll_timeout(mt76_wmac_spi_read, temp,
+ !temp && !FIELD_GET(MT_ADIE_EFUSE_KICK_MASK, val),
+ USEC_PER_MSEC, 50 * USEC_PER_MSEC, false,
+ dev, adie, MT_ADIE_EFUSE2_CTRL, &val);
+ if (ret)
+ return ret;
+
+ ret = mt76_wmac_spi_read(dev, adie, MT_ADIE_EFUSE2_CTRL, &val);
+ if (ret)
+ return ret;
+
+ if (FIELD_GET(MT_ADIE_EFUSE_VALID_MASK, val) == 1)
+ ret = mt76_wmac_spi_read(dev, adie, MT_ADIE_EFUSE_RDATA0,
+ data);
+
+ return ret;
+}
+
+static inline void mt76_wmac_spi_lock(struct mt7915_dev *dev)
+{
+ u32 cur;
+
+ read_poll_timeout(mt76_rr, cur,
+ FIELD_GET(MT_SEMA_RFSPI_STATUS_MASK, cur),
+ 1000, 1000 * MSEC_PER_SEC, false, dev,
+ MT_SEMA_RFSPI_STATUS);
+}
+
+static inline void mt76_wmac_spi_unlock(struct mt7915_dev *dev)
+{
+ mt76_wr(dev, MT_SEMA_RFSPI_RELEASE, 1);
+}
+
+static u32 mt76_wmac_rmw(void __iomem *base, u32 offset, u32 mask, u32 val)
+{
+ val |= readl(base + offset) & ~mask;
+ writel(val, base + offset);
+
+ return val;
+}
+
+static u8 mt798x_wmac_check_adie_type(struct mt7915_dev *dev)
+{
+ u32 val;
+
+ /* Only DBDC A-die is used with MT7981 */
+ if (is_mt7981(&dev->mt76))
+ return ADIE_DBDC;
+
+ val = readl(dev->sku + MT_TOP_POS_SKU);
+
+ return FIELD_GET(MT_TOP_POS_SKU_ADIE_DBDC_MASK, val);
+}
+
+static int mt7986_wmac_consys_reset(struct mt7915_dev *dev, bool enable)
+{
+ if (!enable)
+ return reset_control_assert(dev->rstc);
+
+ mt76_wmac_rmw(dev->sku, MT_TOP_POS_FAST_CTRL,
+ MT_TOP_POS_FAST_EN_MASK,
+ FIELD_PREP(MT_TOP_POS_FAST_EN_MASK, 0x1));
+
+ return reset_control_deassert(dev->rstc);
+}
+
+static int mt7986_wmac_gpio_setup(struct mt7915_dev *dev)
+{
+ struct pinctrl_state *state;
+ struct pinctrl *pinctrl;
+ int ret;
+ u8 type;
+
+ type = mt798x_wmac_check_adie_type(dev);
+ pinctrl = devm_pinctrl_get(dev->mt76.dev);
+ if (IS_ERR(pinctrl))
+ return PTR_ERR(pinctrl);
+
+ switch (type) {
+ case ADIE_SB:
+ state = pinctrl_lookup_state(pinctrl, "default");
+ if (IS_ERR_OR_NULL(state))
+ return -EINVAL;
+ break;
+ case ADIE_DBDC:
+ state = pinctrl_lookup_state(pinctrl, "dbdc");
+ if (IS_ERR_OR_NULL(state))
+ return -EINVAL;
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ ret = pinctrl_select_state(pinctrl, state);
+ if (ret)
+ return ret;
+
+ usleep_range(500, 1000);
+
+ return 0;
+}
+
+static int mt7986_wmac_consys_lockup(struct mt7915_dev *dev, bool enable)
+{
+ int ret;
+ u32 cur;
+
+ mt76_wmac_rmw(dev->dcm, MT_INFRACFG_AP2CONN_SLPPROT,
+ MT_INFRACFG_RX_EN_MASK,
+ FIELD_PREP(MT_INFRACFG_RX_EN_MASK, enable));
+ ret = read_poll_timeout(readl, cur, !(cur & MT_INFRACFG_RX_EN_MASK),
+ USEC_PER_MSEC, 50 * USEC_PER_MSEC, false,
+ dev->dcm + MT_INFRACFG_AP2CONN_SLPPROT);
+ if (ret)
+ return ret;
+
+ mt76_wmac_rmw(dev->dcm, MT_INFRACFG_AP2CONN_SLPPROT,
+ MT_INFRACFG_TX_EN_MASK,
+ FIELD_PREP(MT_INFRACFG_TX_EN_MASK, enable));
+ ret = read_poll_timeout(readl, cur, !(cur & MT_INFRACFG_TX_RDY_MASK),
+ USEC_PER_MSEC, 50 * USEC_PER_MSEC, false,
+ dev->dcm + MT_INFRACFG_AP2CONN_SLPPROT);
+ if (ret)
+ return ret;
+
+ mt76_wmac_rmw(dev->dcm, MT_INFRACFG_CONN2AP_SLPPROT,
+ MT_INFRACFG_RX_EN_MASK,
+ FIELD_PREP(MT_INFRACFG_RX_EN_MASK, enable));
+ mt76_wmac_rmw(dev->dcm, MT_INFRACFG_CONN2AP_SLPPROT,
+ MT_INFRACFG_TX_EN_MASK,
+ FIELD_PREP(MT_INFRACFG_TX_EN_MASK, enable));
+
+ return 0;
+}
+
+static int mt798x_wmac_coninfra_check(struct mt7915_dev *dev)
+{
+ u32 cur;
+ u32 con_infra_version;
+
+ if (is_mt7981(&dev->mt76)) {
+ con_infra_version = MT7981_CON_INFRA_VERSION;
+ } else if (is_mt7986(&dev->mt76)) {
+ con_infra_version = MT7986_CON_INFRA_VERSION;
+ } else {
+ WARN_ON(1);
+ return -EINVAL;
+ }
+
+ return read_poll_timeout(mt76_rr, cur, (cur == con_infra_version),
+ USEC_PER_MSEC, 50 * USEC_PER_MSEC,
+ false, dev, MT_CONN_INFRA_BASE);
+}
+
+static int mt798x_wmac_coninfra_setup(struct mt7915_dev *dev)
+{
+ struct device *pdev = dev->mt76.dev;
+ struct reserved_mem *rmem;
+ struct device_node *np;
+ u32 val;
+
+ np = of_parse_phandle(pdev->of_node, "memory-region", 0);
+ if (!np)
+ return -EINVAL;
+
+ rmem = of_reserved_mem_lookup(np);
+ of_node_put(np);
+ if (!rmem)
+ return -EINVAL;
+
+ val = (rmem->base >> 16) & MT_TOP_MCU_EMI_BASE_MASK;
+
+ if (is_mt7986(&dev->mt76)) {
+ /* Set conninfra subsys PLL check */
+ mt76_rmw_field(dev, MT_INFRA_CKGEN_BUS,
+ MT_INFRA_CKGEN_BUS_RDY_SEL_MASK, 0x1);
+ mt76_rmw_field(dev, MT_INFRA_CKGEN_BUS,
+ MT_INFRA_CKGEN_BUS_RDY_SEL_MASK, 0x1);
+ }
+
+ mt76_rmw_field(dev, MT_TOP_MCU_EMI_BASE,
+ MT_TOP_MCU_EMI_BASE_MASK, val);
+
+ if (is_mt7981(&dev->mt76)) {
+ mt76_rmw_field(dev, MT_TOP_WF_AP_PERI_BASE,
+ MT_TOP_WF_AP_PERI_BASE_MASK, 0x300d0000 >> 16);
+
+ mt76_rmw_field(dev, MT_TOP_EFUSE_BASE,
+ MT_TOP_EFUSE_BASE_MASK, 0x11f20000 >> 16);
+ }
+
+ mt76_wr(dev, MT_INFRA_BUS_EMI_START, rmem->base);
+ mt76_wr(dev, MT_INFRA_BUS_EMI_END, rmem->size);
+
+ mt76_rr(dev, MT_CONN_INFRA_EFUSE);
+
+ /* Set conninfra sysram */
+ mt76_wr(dev, MT_TOP_RGU_SYSRAM_PDN, 0);
+ mt76_wr(dev, MT_TOP_RGU_SYSRAM_SLP, 1);
+
+ return 0;
+}
+
+static int mt798x_wmac_sku_setup(struct mt7915_dev *dev, u32 *adie_type)
+{
+ int ret;
+ u32 adie_main = 0, adie_ext = 0;
+
+ mt76_rmw_field(dev, MT_CONN_INFRA_ADIE_RESET,
+ MT_CONN_INFRA_ADIE1_RESET_MASK, 0x1);
+
+ if (is_mt7986(&dev->mt76)) {
+ mt76_rmw_field(dev, MT_CONN_INFRA_ADIE_RESET,
+ MT_CONN_INFRA_ADIE2_RESET_MASK, 0x1);
+ }
+
+ mt76_wmac_spi_lock(dev);
+
+ ret = mt76_wmac_spi_read(dev, 0, MT_ADIE_CHIP_ID, &adie_main);
+ if (ret)
+ goto out;
+
+ if (is_mt7986(&dev->mt76)) {
+ ret = mt76_wmac_spi_read(dev, 1, MT_ADIE_CHIP_ID, &adie_ext);
+ if (ret)
+ goto out;
+ }
+
+ *adie_type = FIELD_GET(MT_ADIE_CHIP_ID_MASK, adie_main) |
+ (MT_ADIE_CHIP_ID_MASK & adie_ext);
+
+out:
+ mt76_wmac_spi_unlock(dev);
+
+ return 0;
+}
+
+static inline u16 mt7986_adie_idx(u8 adie, u32 adie_type)
+{
+ if (adie == 0)
+ return u32_get_bits(adie_type, MT_ADIE_IDX0);
+ else
+ return u32_get_bits(adie_type, MT_ADIE_IDX1);
+}
+
+static inline bool is_7975(struct mt7915_dev *dev, u8 adie, u32 adie_type)
+{
+ return mt7986_adie_idx(adie, adie_type) == 0x7975;
+}
+
+static inline bool is_7976(struct mt7915_dev *dev, u8 adie, u32 adie_type)
+{
+ return mt7986_adie_idx(adie, adie_type) == 0x7976;
+}
+
+static int mt7986_wmac_adie_thermal_cal(struct mt7915_dev *dev, u8 adie)
+{
+ int ret;
+ u32 data, val;
+
+ ret = mt7986_wmac_adie_efuse_read(dev, adie, MT_ADIE_THADC_ANALOG,
+ &data);
+ if (ret || FIELD_GET(MT_ADIE_ANA_EN_MASK, data)) {
+ val = FIELD_GET(MT_ADIE_VRPI_SEL_EFUSE_MASK, data);
+ ret = mt76_wmac_spi_rmw(dev, adie, MT_ADIE_RG_TOP_THADC_BG,
+ MT_ADIE_VRPI_SEL_CR_MASK,
+ FIELD_PREP(MT_ADIE_VRPI_SEL_CR_MASK, val));
+ if (ret)
+ return ret;
+
+ val = FIELD_GET(MT_ADIE_PGA_GAIN_EFUSE_MASK, data);
+ ret = mt76_wmac_spi_rmw(dev, adie, MT_ADIE_RG_TOP_THADC,
+ MT_ADIE_PGA_GAIN_MASK,
+ FIELD_PREP(MT_ADIE_PGA_GAIN_MASK, val));
+ if (ret)
+ return ret;
+ }
+
+ ret = mt7986_wmac_adie_efuse_read(dev, adie, MT_ADIE_THADC_SLOP,
+ &data);
+ if (ret || FIELD_GET(MT_ADIE_ANA_EN_MASK, data)) {
+ val = FIELD_GET(MT_ADIE_LDO_CTRL_EFUSE_MASK, data);
+
+ return mt76_wmac_spi_rmw(dev, adie, MT_ADIE_RG_TOP_THADC,
+ MT_ADIE_LDO_CTRL_MASK,
+ FIELD_PREP(MT_ADIE_LDO_CTRL_MASK, val));
+ }
+
+ return 0;
+}
+
+static int
+mt7986_read_efuse_xo_trim_7976(struct mt7915_dev *dev, u8 adie,
+ bool is_40m, int *result)
+{
+ int ret;
+ u32 data, addr;
+
+ addr = is_40m ? MT_ADIE_XTAL_AXM_40M_OSC : MT_ADIE_XTAL_AXM_80M_OSC;
+ ret = mt7986_wmac_adie_efuse_read(dev, adie, addr, &data);
+ if (ret)
+ return ret;
+
+ if (!FIELD_GET(MT_ADIE_XO_TRIM_EN_MASK, data)) {
+ *result = 64;
+ } else {
+ *result = FIELD_GET(MT_ADIE_TRIM_MASK, data);
+ addr = is_40m ? MT_ADIE_XTAL_TRIM1_40M_OSC :
+ MT_ADIE_XTAL_TRIM1_80M_OSC;
+ ret = mt7986_wmac_adie_efuse_read(dev, adie, addr, &data);
+ if (ret)
+ return ret;
+
+ if (FIELD_GET(MT_ADIE_XO_TRIM_EN_MASK, data) &&
+ FIELD_GET(MT_ADIE_XTAL_DECREASE_MASK, data))
+ *result -= FIELD_GET(MT_ADIE_EFUSE_TRIM_MASK, data);
+ else if (FIELD_GET(MT_ADIE_XO_TRIM_EN_MASK, data))
+ *result += FIELD_GET(MT_ADIE_EFUSE_TRIM_MASK, data);
+
+ *result = max(0, min(127, *result));
+ }
+
+ return 0;
+}
+
+static int mt7986_wmac_adie_xtal_trim_7976(struct mt7915_dev *dev, u8 adie)
+{
+ int ret, trim_80m, trim_40m;
+ u32 data, val, mode;
+
+ ret = mt7986_wmac_adie_efuse_read(dev, adie, MT_ADIE_XO_TRIM_FLOW,
+ &data);
+ if (ret || !FIELD_GET(BIT(1), data))
+ return 0;
+
+ ret = mt7986_read_efuse_xo_trim_7976(dev, adie, false, &trim_80m);
+ if (ret)
+ return ret;
+
+ ret = mt7986_read_efuse_xo_trim_7976(dev, adie, true, &trim_40m);
+ if (ret)
+ return ret;
+
+ ret = mt76_wmac_spi_read(dev, adie, MT_ADIE_RG_STRAP_PIN_IN, &val);
+ if (ret)
+ return ret;
+
+ mode = FIELD_PREP(GENMASK(6, 4), val);
+ if (!mode || mode == 0x2) {
+ ret = mt76_wmac_spi_rmw(dev, adie, MT_ADIE_XTAL_C1,
+ GENMASK(31, 24),
+ FIELD_PREP(GENMASK(31, 24), trim_80m));
+ if (ret)
+ return ret;
+
+ ret = mt76_wmac_spi_rmw(dev, adie, MT_ADIE_XTAL_C2,
+ GENMASK(31, 24),
+ FIELD_PREP(GENMASK(31, 24), trim_80m));
+ } else if (mode == 0x3 || mode == 0x4 || mode == 0x6) {
+ ret = mt76_wmac_spi_rmw(dev, adie, MT_ADIE_XTAL_C1,
+ GENMASK(23, 16),
+ FIELD_PREP(GENMASK(23, 16), trim_40m));
+ if (ret)
+ return ret;
+
+ ret = mt76_wmac_spi_rmw(dev, adie, MT_ADIE_XTAL_C2,
+ GENMASK(23, 16),
+ FIELD_PREP(GENMASK(23, 16), trim_40m));
+ }
+
+ return ret;
+}
+
+static int mt798x_wmac_adie_patch_7976(struct mt7915_dev *dev, u8 adie)
+{
+ u32 id, version, rg_xo_01, rg_xo_03;
+ int ret;
+
+ ret = mt76_wmac_spi_read(dev, adie, MT_ADIE_CHIP_ID, &id);
+ if (ret)
+ return ret;
+
+ version = FIELD_GET(MT_ADIE_VERSION_MASK, id);
+
+ ret = mt76_wmac_spi_write(dev, adie, MT_ADIE_RG_TOP_THADC, 0x4a563b00);
+ if (ret)
+ return ret;
+
+ if (version == 0x8a00 || version == 0x8a10 || version == 0x8b00) {
+ rg_xo_01 = 0x1d59080f;
+ rg_xo_03 = 0x34c00fe0;
+ } else {
+ if (is_mt7981(&dev->mt76)) {
+ rg_xo_01 = 0x1959c80f;
+ } else if (is_mt7986(&dev->mt76)) {
+ rg_xo_01 = 0x1959f80f;
+ } else {
+ WARN_ON(1);
+ return -EINVAL;
+ }
+ rg_xo_03 = 0x34d00fe0;
+ }
+
+ ret = mt76_wmac_spi_write(dev, adie, MT_ADIE_RG_XO_01, rg_xo_01);
+ if (ret)
+ return ret;
+
+ return mt76_wmac_spi_write(dev, adie, MT_ADIE_RG_XO_03, rg_xo_03);
+}
+
+static int
+mt7986_read_efuse_xo_trim_7975(struct mt7915_dev *dev, u8 adie,
+ u32 addr, u32 *result)
+{
+ int ret;
+ u32 data;
+
+ ret = mt7986_wmac_adie_efuse_read(dev, adie, addr, &data);
+ if (ret)
+ return ret;
+
+ if ((data & MT_ADIE_XO_TRIM_EN_MASK)) {
+ if ((data & MT_ADIE_XTAL_DECREASE_MASK))
+ *result -= (data & MT_ADIE_EFUSE_TRIM_MASK);
+ else
+ *result += (data & MT_ADIE_EFUSE_TRIM_MASK);
+
+ *result = (*result & MT_ADIE_TRIM_MASK);
+ }
+
+ return 0;
+}
+
+static int mt7986_wmac_adie_xtal_trim_7975(struct mt7915_dev *dev, u8 adie)
+{
+ int ret;
+ u32 data, result = 0, value;
+
+ ret = mt7986_wmac_adie_efuse_read(dev, adie, MT_ADIE_7975_XTAL_EN,
+ &data);
+ if (ret || !(data & BIT(1)))
+ return 0;
+
+ ret = mt7986_wmac_adie_efuse_read(dev, adie, MT_ADIE_7975_XTAL_CAL,
+ &data);
+ if (ret)
+ return ret;
+
+ if (data & MT_ADIE_XO_TRIM_EN_MASK)
+ result = (data & MT_ADIE_TRIM_MASK);
+
+ ret = mt7986_read_efuse_xo_trim_7975(dev, adie, MT_ADIE_7975_XO_TRIM2,
+ &result);
+ if (ret)
+ return ret;
+
+ ret = mt7986_read_efuse_xo_trim_7975(dev, adie, MT_ADIE_7975_XO_TRIM3,
+ &result);
+ if (ret)
+ return ret;
+
+ ret = mt7986_read_efuse_xo_trim_7975(dev, adie, MT_ADIE_7975_XO_TRIM4,
+ &result);
+ if (ret)
+ return ret;
+
+	/* Update trim value to C1 and C2 */
+ value = FIELD_GET(MT_ADIE_7975_XO_CTRL2_C1_MASK, result) |
+ FIELD_GET(MT_ADIE_7975_XO_CTRL2_C2_MASK, result);
+ ret = mt76_wmac_spi_rmw(dev, adie, MT_ADIE_7975_XO_CTRL2,
+ MT_ADIE_7975_XO_CTRL2_MASK, value);
+ if (ret)
+ return ret;
+
+ ret = mt76_wmac_spi_read(dev, adie, MT_ADIE_7975_XTAL, &value);
+ if (ret)
+ return ret;
+
+ if (value & MT_ADIE_7975_XTAL_EN_MASK) {
+ ret = mt76_wmac_spi_rmw(dev, adie, MT_ADIE_7975_XO_2,
+ MT_ADIE_7975_XO_2_FIX_EN, 0x0);
+ if (ret)
+ return ret;
+ }
+
+ return mt76_wmac_spi_rmw(dev, adie, MT_ADIE_7975_XO_CTRL6,
+ MT_ADIE_7975_XO_CTRL6_MASK, 0x1);
+}
+
+static int mt7986_wmac_adie_patch_7975(struct mt7915_dev *dev, u8 adie)
+{
+ int ret;
+
+ /* disable CAL LDO and fine tune RFDIG LDO */
+ ret = mt76_wmac_spi_write(dev, adie, 0x348, 0x00000002);
+ if (ret)
+ return ret;
+
+ ret = mt76_wmac_spi_write(dev, adie, 0x378, 0x00000002);
+ if (ret)
+ return ret;
+
+ ret = mt76_wmac_spi_write(dev, adie, 0x3a8, 0x00000002);
+ if (ret)
+ return ret;
+
+ ret = mt76_wmac_spi_write(dev, adie, 0x3d8, 0x00000002);
+ if (ret)
+ return ret;
+
+ /* set CKA driving and filter */
+ ret = mt76_wmac_spi_write(dev, adie, 0xa1c, 0x30000aaa);
+ if (ret)
+ return ret;
+
+ /* set CKB LDO to 1.4V */
+ ret = mt76_wmac_spi_write(dev, adie, 0xa84, 0x8470008a);
+ if (ret)
+ return ret;
+
+ /* turn on SX0 LTBUF */
+ if (is_mt7981(&dev->mt76)) {
+ ret = mt76_wmac_spi_write(dev, adie, 0x074, 0x00000007);
+ } else if (is_mt7986(&dev->mt76)) {
+ ret = mt76_wmac_spi_write(dev, adie, 0x074, 0x00000002);
+ } else {
+ WARN_ON(1);
+ return -EINVAL;
+ }
+
+ if (ret)
+ return ret;
+
+	/* CK_BUF_SW_EN = 1 (all buffers in manual mode) */
+ ret = mt76_wmac_spi_write(dev, adie, 0xaa4, 0x01001fc0);
+ if (ret)
+ return ret;
+
+ /* BT mode/WF normal mode 00000005 */
+ ret = mt76_wmac_spi_write(dev, adie, 0x070, 0x00000005);
+ if (ret)
+ return ret;
+
+ /* BG thermal sensor offset update */
+ ret = mt76_wmac_spi_write(dev, adie, 0x344, 0x00000088);
+ if (ret)
+ return ret;
+
+ ret = mt76_wmac_spi_write(dev, adie, 0x374, 0x00000088);
+ if (ret)
+ return ret;
+
+ ret = mt76_wmac_spi_write(dev, adie, 0x3a4, 0x00000088);
+ if (ret)
+ return ret;
+
+ ret = mt76_wmac_spi_write(dev, adie, 0x3d4, 0x00000088);
+ if (ret)
+ return ret;
+
+ /* set WCON VDD IPTAT to "0000" */
+ ret = mt76_wmac_spi_write(dev, adie, 0xa80, 0x44d07000);
+ if (ret)
+ return ret;
+
+	/* change back LTBUF SX3 driving to default value */
+ ret = mt76_wmac_spi_write(dev, adie, 0xa88, 0x3900aaaa);
+ if (ret)
+ return ret;
+
+ /* SM input cap off */
+ ret = mt76_wmac_spi_write(dev, adie, 0x2c4, 0x00000000);
+ if (ret)
+ return ret;
+
+ /* set CKB driving and filter */
+ if (is_mt7986(&dev->mt76))
+ return mt76_wmac_spi_write(dev, adie, 0x2c8, 0x00000072);
+
+ return ret;
+}
+
+static int mt7986_wmac_adie_cfg(struct mt7915_dev *dev, u8 adie, u32 adie_type)
+{
+ int ret;
+
+ mt76_wmac_spi_lock(dev);
+ ret = mt76_wmac_spi_write(dev, adie, MT_ADIE_CLK_EN, ~0);
+ if (ret)
+ goto out;
+
+ if (is_7975(dev, adie, adie_type)) {
+ ret = mt76_wmac_spi_rmw(dev, adie, MT_ADIE_7975_COCLK,
+ BIT(1), 0x1);
+ if (ret)
+ goto out;
+
+ ret = mt7986_wmac_adie_thermal_cal(dev, adie);
+ if (ret)
+ goto out;
+
+ ret = mt7986_wmac_adie_xtal_trim_7975(dev, adie);
+ if (ret)
+ goto out;
+
+ ret = mt7986_wmac_adie_patch_7975(dev, adie);
+ } else if (is_7976(dev, adie, adie_type)) {
+ if (mt798x_wmac_check_adie_type(dev) == ADIE_DBDC) {
+ ret = mt76_wmac_spi_write(dev, adie,
+ MT_ADIE_WRI_CK_SEL, 0x1c);
+ if (ret)
+ goto out;
+ }
+
+ ret = mt7986_wmac_adie_thermal_cal(dev, adie);
+ if (ret)
+ goto out;
+
+ ret = mt7986_wmac_adie_xtal_trim_7976(dev, adie);
+ if (ret)
+ goto out;
+
+ ret = mt798x_wmac_adie_patch_7976(dev, adie);
+ }
+out:
+ mt76_wmac_spi_unlock(dev);
+
+ return ret;
+}
+
+static int
+mt7986_wmac_afe_cal(struct mt7915_dev *dev, u8 adie, bool dbdc, u32 adie_type)
+{
+ int ret;
+ u8 idx;
+ u32 txcal;
+
+ mt76_wmac_spi_lock(dev);
+ if (is_7975(dev, adie, adie_type))
+ ret = mt76_wmac_spi_write(dev, adie,
+ MT_AFE_RG_ENCAL_WBTAC_IF_SW,
+ 0x80000000);
+ else
+ ret = mt76_wmac_spi_write(dev, adie,
+ MT_AFE_RG_ENCAL_WBTAC_IF_SW,
+ 0x88888005);
+ if (ret)
+ goto out;
+
+ idx = dbdc ? ADIE_DBDC : adie;
+
+ mt76_rmw_field(dev, MT_AFE_DIG_EN_01(idx),
+ MT_AFE_RG_WBG_EN_RCK_MASK, 0x1);
+ usleep_range(60, 100);
+
+ mt76_rmw(dev, MT_AFE_DIG_EN_01(idx),
+ MT_AFE_RG_WBG_EN_RCK_MASK, 0x0);
+
+ mt76_rmw_field(dev, MT_AFE_DIG_EN_03(idx),
+ MT_AFE_RG_WBG_EN_BPLL_UP_MASK, 0x1);
+ usleep_range(30, 100);
+
+ mt76_rmw_field(dev, MT_AFE_DIG_EN_03(idx),
+ MT_AFE_RG_WBG_EN_WPLL_UP_MASK, 0x1);
+ usleep_range(60, 100);
+
+ txcal = (MT_AFE_RG_WBG_EN_TXCAL_BT |
+ MT_AFE_RG_WBG_EN_TXCAL_WF0 |
+ MT_AFE_RG_WBG_EN_TXCAL_WF1 |
+ MT_AFE_RG_WBG_EN_TXCAL_WF2 |
+ MT_AFE_RG_WBG_EN_TXCAL_WF3);
+ if (is_mt7981(&dev->mt76))
+ txcal |= MT_AFE_RG_WBG_EN_TXCAL_WF4;
+
+ mt76_set(dev, MT_AFE_DIG_EN_01(idx), txcal);
+ usleep_range(800, 1000);
+
+ mt76_clear(dev, MT_AFE_DIG_EN_01(idx), txcal);
+ mt76_rmw(dev, MT_AFE_DIG_EN_03(idx),
+ MT_AFE_RG_WBG_EN_PLL_UP_MASK, 0x0);
+
+ ret = mt76_wmac_spi_write(dev, adie, MT_AFE_RG_ENCAL_WBTAC_IF_SW,
+ 0x5);
+
+out:
+ mt76_wmac_spi_unlock(dev);
+
+ return ret;
+}
+
+static void mt7986_wmac_subsys_pll_initial(struct mt7915_dev *dev, u8 band)
+{
+ mt76_rmw(dev, MT_AFE_PLL_STB_TIME(band),
+ MT_AFE_PLL_STB_TIME_MASK, MT_AFE_PLL_STB_TIME_VAL);
+
+ mt76_rmw(dev, MT_AFE_DIG_EN_02(band),
+ MT_AFE_PLL_CFG_MASK, MT_AFE_PLL_CFG_VAL);
+
+ mt76_rmw(dev, MT_AFE_DIG_TOP_01(band),
+ MT_AFE_DIG_TOP_01_MASK, MT_AFE_DIG_TOP_01_VAL);
+}
+
+static void mt7986_wmac_subsys_setting(struct mt7915_dev *dev)
+{
+ /* Subsys pll init */
+ mt7986_wmac_subsys_pll_initial(dev, 0);
+ mt7986_wmac_subsys_pll_initial(dev, 1);
+
+	/* Set legacy OSC control stable time */
+ mt76_rmw(dev, MT_CONN_INFRA_OSC_RC_EN,
+ MT_CONN_INFRA_OSC_RC_EN_MASK, 0x0);
+ mt76_rmw(dev, MT_CONN_INFRA_OSC_CTRL,
+ MT_CONN_INFRA_OSC_STB_TIME_MASK, 0x80706);
+
+	/* prevent subsys from powering on/off in a short time interval */
+ mt76_rmw(dev, MT_TOP_WFSYS_PWR,
+ MT_TOP_PWR_ACK_MASK | MT_TOP_PWR_KEY_MASK,
+ MT_TOP_PWR_KEY);
+}
+
+static int mt7986_wmac_bus_timeout(struct mt7915_dev *dev)
+{
+ mt76_rmw_field(dev, MT_INFRA_BUS_OFF_TIMEOUT,
+ MT_INFRA_BUS_TIMEOUT_LIMIT_MASK, 0x2);
+
+ mt76_rmw_field(dev, MT_INFRA_BUS_OFF_TIMEOUT,
+ MT_INFRA_BUS_TIMEOUT_EN_MASK, 0xf);
+
+ mt76_rmw_field(dev, MT_INFRA_BUS_ON_TIMEOUT,
+ MT_INFRA_BUS_TIMEOUT_LIMIT_MASK, 0xc);
+
+ mt76_rmw_field(dev, MT_INFRA_BUS_ON_TIMEOUT,
+ MT_INFRA_BUS_TIMEOUT_EN_MASK, 0xf);
+
+ return mt798x_wmac_coninfra_check(dev);
+}
+
+static void mt7986_wmac_clock_enable(struct mt7915_dev *dev, u32 adie_type)
+{
+ u32 cur;
+
+ mt76_rmw_field(dev, MT_INFRA_CKGEN_BUS_WPLL_DIV_1,
+ MT_INFRA_CKGEN_DIV_SEL_MASK, 0x1);
+
+ mt76_rmw_field(dev, MT_INFRA_CKGEN_BUS_WPLL_DIV_2,
+ MT_INFRA_CKGEN_DIV_SEL_MASK, 0x1);
+
+ mt76_rmw_field(dev, MT_INFRA_CKGEN_BUS_WPLL_DIV_1,
+ MT_INFRA_CKGEN_DIV_EN_MASK, 0x1);
+
+ mt76_rmw_field(dev, MT_INFRA_CKGEN_BUS_WPLL_DIV_2,
+ MT_INFRA_CKGEN_DIV_EN_MASK, 0x1);
+
+ mt76_rmw_field(dev, MT_INFRA_CKGEN_RFSPI_WPLL_DIV,
+ MT_INFRA_CKGEN_DIV_SEL_MASK, 0x8);
+
+ mt76_rmw_field(dev, MT_INFRA_CKGEN_RFSPI_WPLL_DIV,
+ MT_INFRA_CKGEN_DIV_EN_MASK, 0x1);
+
+ mt76_rmw_field(dev, MT_INFRA_CKGEN_BUS,
+ MT_INFRA_CKGEN_BUS_CLK_SEL_MASK, 0x0);
+
+ mt76_rmw_field(dev, MT_CONN_INFRA_HW_CTRL,
+ MT_CONN_INFRA_HW_CTRL_MASK, 0x1);
+
+ mt76_rmw(dev, MT_TOP_CONN_INFRA_WAKEUP,
+ MT_TOP_CONN_INFRA_WAKEUP_MASK, 0x1);
+
+ usleep_range(900, 1000);
+
+ mt76_wmac_spi_lock(dev);
+ if (is_7975(dev, 0, adie_type) || is_7976(dev, 0, adie_type)) {
+ mt76_rmw_field(dev, MT_ADIE_SLP_CTRL_CK0(0),
+ MT_SLP_CTRL_EN_MASK, 0x1);
+
+ read_poll_timeout(mt76_rr, cur, !(cur & MT_SLP_CTRL_BSY_MASK),
+ USEC_PER_MSEC, 50 * USEC_PER_MSEC, false,
+ dev, MT_ADIE_SLP_CTRL_CK0(0));
+ }
+ if (is_7975(dev, 1, adie_type) || is_7976(dev, 1, adie_type)) {
+ mt76_rmw_field(dev, MT_ADIE_SLP_CTRL_CK0(1),
+ MT_SLP_CTRL_EN_MASK, 0x1);
+
+ read_poll_timeout(mt76_rr, cur, !(cur & MT_SLP_CTRL_BSY_MASK),
+ USEC_PER_MSEC, 50 * USEC_PER_MSEC, false,
+ dev, MT_ADIE_SLP_CTRL_CK0(0));
+ }
+ mt76_wmac_spi_unlock(dev);
+
+ mt76_rmw(dev, MT_TOP_CONN_INFRA_WAKEUP,
+ MT_TOP_CONN_INFRA_WAKEUP_MASK, 0x0);
+ usleep_range(900, 1000);
+}
+
+static int mt7986_wmac_top_wfsys_wakeup(struct mt7915_dev *dev, bool enable)
+{
+ mt76_rmw_field(dev, MT_TOP_WFSYS_WAKEUP,
+ MT_TOP_WFSYS_WAKEUP_MASK, enable);
+
+ usleep_range(900, 1000);
+
+ if (!enable)
+ return 0;
+
+ return mt798x_wmac_coninfra_check(dev);
+}
+
+static int mt7986_wmac_wm_enable(struct mt7915_dev *dev, bool enable)
+{
+ u32 cur;
+
+ if (is_mt7986(&dev->mt76))
+ mt76_wr(dev, MT_CONNINFRA_SKU_DEC_ADDR, 0);
+
+ mt76_rmw_field(dev, MT7986_TOP_WM_RESET,
+ MT7986_TOP_WM_RESET_MASK, enable);
+ if (!enable)
+ return 0;
+
+ return read_poll_timeout(mt76_rr, cur, (cur == 0x1d1e),
+ USEC_PER_MSEC, 5000 * USEC_PER_MSEC, false,
+ dev, MT_TOP_CFG_ON_ROM_IDX);
+}
+
+static int mt7986_wmac_wfsys_poweron(struct mt7915_dev *dev, bool enable)
+{
+ u32 mask = MT_TOP_PWR_EN_MASK | MT_TOP_PWR_KEY_MASK;
+ u32 cur;
+
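+	/* Write the power key together with the requested enable state,
+	 * then wait for the wfsys reset status to reflect it.
+	 */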
+ mt76_rmw(dev, MT_TOP_WFSYS_PWR, mask,
+ MT_TOP_PWR_KEY | FIELD_PREP(MT_TOP_PWR_EN_MASK, enable));
+
+ return read_poll_timeout(mt76_rr, cur,
+ (FIELD_GET(MT_TOP_WFSYS_RESET_STATUS_MASK, cur) == enable),
+ USEC_PER_MSEC, 50 * USEC_PER_MSEC, false,
+ dev, MT_TOP_WFSYS_RESET_STATUS);
+}
+
+static int mt7986_wmac_wfsys_setting(struct mt7915_dev *dev)
+{
+ int ret;
+ u32 cur;
+
+ /* Turn off wfsys2conn bus sleep protect */
+ mt76_rmw(dev, MT_CONN_INFRA_WF_SLP_PROT,
+ MT_CONN_INFRA_WF_SLP_PROT_MASK, 0x0);
+
+ ret = mt7986_wmac_wfsys_poweron(dev, true);
+ if (ret)
+ return ret;
+
+ /* Check bus sleep protect */
+
+ ret = read_poll_timeout(mt76_rr, cur,
+ !(cur & MT_CONN_INFRA_CONN_WF_MASK),
+ USEC_PER_MSEC, 50 * USEC_PER_MSEC, false,
+ dev, MT_CONN_INFRA_WF_SLP_PROT_RDY);
+ if (ret)
+ return ret;
+
+ ret = read_poll_timeout(mt76_rr, cur, !(cur & MT_SLP_WFDMA2CONN_MASK),
+ USEC_PER_MSEC, 50 * USEC_PER_MSEC, false,
+ dev, MT_SLP_STATUS);
+ if (ret)
+ return ret;
+
+ return read_poll_timeout(mt76_rr, cur, (cur == 0x02060000),
+ USEC_PER_MSEC, 50 * USEC_PER_MSEC, false,
+ dev, MT_TOP_CFG_IP_VERSION_ADDR);
+}
+
+static void mt7986_wmac_wfsys_set_timeout(struct mt7915_dev *dev)
+{
+ u32 mask = MT_MCU_BUS_TIMEOUT_SET_MASK |
+ MT_MCU_BUS_TIMEOUT_CG_EN_MASK |
+ MT_MCU_BUS_TIMEOUT_EN_MASK;
+ u32 val = FIELD_PREP(MT_MCU_BUS_TIMEOUT_SET_MASK, 1) |
+ FIELD_PREP(MT_MCU_BUS_TIMEOUT_CG_EN_MASK, 1) |
+ FIELD_PREP(MT_MCU_BUS_TIMEOUT_EN_MASK, 1);
+
+ mt76_rmw(dev, MT_MCU_BUS_TIMEOUT, mask, val);
+
+ mt76_wr(dev, MT_MCU_BUS_REMAP, 0x810f0000);
+
+ mask = MT_MCU_BUS_DBG_TIMEOUT_SET_MASK |
+ MT_MCU_BUS_DBG_TIMEOUT_CK_EN_MASK |
+ MT_MCU_BUS_DBG_TIMEOUT_EN_MASK;
+ val = FIELD_PREP(MT_MCU_BUS_DBG_TIMEOUT_SET_MASK, 0x3aa) |
+ FIELD_PREP(MT_MCU_BUS_DBG_TIMEOUT_CK_EN_MASK, 1) |
+ FIELD_PREP(MT_MCU_BUS_DBG_TIMEOUT_EN_MASK, 1);
+
+ mt76_rmw(dev, MT_MCU_BUS_DBG_TIMEOUT, mask, val);
+}
+
+static int mt7986_wmac_sku_update(struct mt7915_dev *dev, u32 adie_type)
+{
+ u32 val;
+
+ if (is_7976(dev, 0, adie_type) && is_7976(dev, 1, adie_type))
+ val = 0xf;
+ else if (is_7975(dev, 0, adie_type) && is_7975(dev, 1, adie_type))
+ val = 0xd;
+ else if (is_7976(dev, 0, adie_type))
+ val = 0x7;
+ else if (is_7975(dev, 1, adie_type))
+ val = 0x8;
+ else if (is_7976(dev, 1, adie_type))
+ val = 0xa;
+ else
+ return -EINVAL;
+
+ mt76_wmac_rmw(dev->sku, MT_TOP_POS_SKU, MT_TOP_POS_SKU_MASK,
+ FIELD_PREP(MT_TOP_POS_SKU_MASK, val));
+
+ mt76_wr(dev, MT_CONNINFRA_SKU_DEC_ADDR, val);
+
+ return 0;
+}
+
+static int
+mt7986_wmac_adie_setup(struct mt7915_dev *dev, u8 adie, u32 adie_type)
+{
+ int ret;
+
+ if (!(is_7975(dev, adie, adie_type) || is_7976(dev, adie, adie_type)))
+ return 0;
+
+ ret = mt7986_wmac_adie_cfg(dev, adie, adie_type);
+ if (ret)
+ return ret;
+
+ ret = mt7986_wmac_afe_cal(dev, adie, false, adie_type);
+ if (ret)
+ return ret;
+
+ if (!adie && (mt798x_wmac_check_adie_type(dev) == ADIE_DBDC))
+ ret = mt7986_wmac_afe_cal(dev, adie, true, adie_type);
+
+ return ret;
+}
+
+static int mt7986_wmac_subsys_powerup(struct mt7915_dev *dev, u32 adie_type)
+{
+ int ret;
+
+ mt7986_wmac_subsys_setting(dev);
+
+ ret = mt7986_wmac_bus_timeout(dev);
+ if (ret)
+ return ret;
+
+ mt7986_wmac_clock_enable(dev, adie_type);
+
+ return 0;
+}
+
+static int mt7986_wmac_wfsys_powerup(struct mt7915_dev *dev)
+{
+ int ret;
+
+ ret = mt7986_wmac_wm_enable(dev, false);
+ if (ret)
+ return ret;
+
+ ret = mt7986_wmac_wfsys_setting(dev);
+ if (ret)
+ return ret;
+
+ mt7986_wmac_wfsys_set_timeout(dev);
+
+ return mt7986_wmac_wm_enable(dev, true);
+}
+
+int mt7986_wmac_enable(struct mt7915_dev *dev)
+{
+ int ret;
+ u32 adie_type;
+
+ ret = mt7986_wmac_consys_reset(dev, true);
+ if (ret)
+ return ret;
+
+ ret = mt7986_wmac_gpio_setup(dev);
+ if (ret)
+ return ret;
+
+ ret = mt7986_wmac_consys_lockup(dev, false);
+ if (ret)
+ return ret;
+
+ ret = mt798x_wmac_coninfra_check(dev);
+ if (ret)
+ return ret;
+
+ ret = mt798x_wmac_coninfra_setup(dev);
+ if (ret)
+ return ret;
+
+ ret = mt798x_wmac_sku_setup(dev, &adie_type);
+ if (ret)
+ return ret;
+
+ ret = mt7986_wmac_adie_setup(dev, 0, adie_type);
+ if (ret)
+ return ret;
+
+ /* mt7981 doesn't support a second a-die */
+ if (is_mt7986(&dev->mt76)) {
+ ret = mt7986_wmac_adie_setup(dev, 1, adie_type);
+ if (ret)
+ return ret;
+ }
+
+ ret = mt7986_wmac_subsys_powerup(dev, adie_type);
+ if (ret)
+ return ret;
+
+ ret = mt7986_wmac_top_wfsys_wakeup(dev, true);
+ if (ret)
+ return ret;
+
+ ret = mt7986_wmac_wfsys_powerup(dev);
+ if (ret)
+ return ret;
+
+ return mt7986_wmac_sku_update(dev, adie_type);
+}
+
+void mt7986_wmac_disable(struct mt7915_dev *dev)
+{
+ u32 cur;
+
+ mt7986_wmac_top_wfsys_wakeup(dev, true);
+
+ /* Turn on wfsys2conn bus sleep protect */
+ mt76_rmw_field(dev, MT_CONN_INFRA_WF_SLP_PROT,
+ MT_CONN_INFRA_WF_SLP_PROT_MASK, 0x1);
+
+ /* Check wfsys2conn bus sleep protect */
+ read_poll_timeout(mt76_rr, cur, !(cur ^ MT_CONN_INFRA_CONN),
+ USEC_PER_MSEC, 50 * USEC_PER_MSEC, false,
+ dev, MT_CONN_INFRA_WF_SLP_PROT_RDY);
+
+ mt7986_wmac_wfsys_poweron(dev, false);
+
+	/* Restore the wpll setting */
+ mt76_rmw_field(dev, MT_AFE_DIG_EN_02(0), MT_AFE_MCU_BPLL_CFG_MASK, 0x2);
+ mt76_rmw_field(dev, MT_AFE_DIG_EN_02(0), MT_AFE_WPLL_CFG_MASK, 0x2);
+
+ /* Reset EMI */
+ mt76_rmw_field(dev, MT_CONN_INFRA_EMI_REQ,
+ MT_CONN_INFRA_EMI_REQ_MASK, 0x1);
+ mt76_rmw_field(dev, MT_CONN_INFRA_EMI_REQ,
+ MT_CONN_INFRA_EMI_REQ_MASK, 0x0);
+ mt76_rmw_field(dev, MT_CONN_INFRA_EMI_REQ,
+ MT_CONN_INFRA_INFRA_REQ_MASK, 0x1);
+ mt76_rmw_field(dev, MT_CONN_INFRA_EMI_REQ,
+ MT_CONN_INFRA_INFRA_REQ_MASK, 0x0);
+
+ mt7986_wmac_top_wfsys_wakeup(dev, false);
+ mt7986_wmac_consys_lockup(dev, true);
+ mt7986_wmac_consys_reset(dev, false);
+}
+
+static int mt798x_wmac_init(struct mt7915_dev *dev)
+{
+ struct device *pdev = dev->mt76.dev;
+ struct platform_device *pfdev = to_platform_device(pdev);
+ struct clk *mcu_clk, *ap_conn_clk;
+
+ mcu_clk = devm_clk_get(pdev, "mcu");
+ if (IS_ERR(mcu_clk))
+ dev_err(pdev, "mcu clock not found\n");
+ else if (clk_prepare_enable(mcu_clk))
+ dev_err(pdev, "mcu clock configuration failed\n");
+
+ ap_conn_clk = devm_clk_get(pdev, "ap2conn");
+ if (IS_ERR(ap_conn_clk))
+ dev_err(pdev, "ap2conn clock not found\n");
+ else if (clk_prepare_enable(ap_conn_clk))
+ dev_err(pdev, "ap2conn clock configuration failed\n");
+
+ dev->dcm = devm_platform_ioremap_resource(pfdev, 1);
+ if (IS_ERR(dev->dcm))
+ return PTR_ERR(dev->dcm);
+
+ dev->sku = devm_platform_ioremap_resource(pfdev, 2);
+ if (IS_ERR(dev->sku))
+ return PTR_ERR(dev->sku);
+
+ dev->rstc = devm_reset_control_get(pdev, "consys");
+ if (IS_ERR(dev->rstc))
+ return PTR_ERR(dev->rstc);
+
+ return 0;
+}
+
+static int mt798x_wmac_probe(struct platform_device *pdev)
+{
+ void __iomem *mem_base;
+ struct mt7915_dev *dev;
+ struct mt76_dev *mdev;
+ int irq, ret;
+ u32 chip_id;
+
+ chip_id = (uintptr_t)of_device_get_match_data(&pdev->dev);
+
+ mem_base = devm_platform_ioremap_resource(pdev, 0);
+ if (IS_ERR(mem_base)) {
+ dev_err(&pdev->dev, "Failed to get memory resource\n");
+ return PTR_ERR(mem_base);
+ }
+
+ dev = mt7915_mmio_probe(&pdev->dev, mem_base, chip_id);
+ if (IS_ERR(dev))
+ return PTR_ERR(dev);
+
+ mdev = &dev->mt76;
+ ret = mt7915_mmio_wed_init(dev, pdev, false, &irq);
+ if (ret < 0)
+ goto free_device;
+
+ if (!ret) {
+ irq = platform_get_irq(pdev, 0);
+ if (irq < 0) {
+ ret = irq;
+ goto free_device;
+ }
+ }
+
+ ret = devm_request_irq(mdev->dev, irq, mt7915_irq_handler,
+ IRQF_SHARED, KBUILD_MODNAME, dev);
+ if (ret)
+ goto free_device;
+
+ ret = mt798x_wmac_init(dev);
+ if (ret)
+ goto free_irq;
+
+ mt7915_wfsys_reset(dev);
+
+ ret = mt7915_register_device(dev);
+ if (ret)
+ goto free_irq;
+
+ return 0;
+
+free_irq:
+ devm_free_irq(mdev->dev, irq, dev);
+free_device:
+ if (mtk_wed_device_active(&mdev->mmio.wed))
+ mtk_wed_device_detach(&mdev->mmio.wed);
+ mt76_free_device(mdev);
+
+ return ret;
+}
+
+static int mt798x_wmac_remove(struct platform_device *pdev)
+{
+ struct mt7915_dev *dev = platform_get_drvdata(pdev);
+
+ mt7915_unregister_device(dev);
+
+ return 0;
+}
+
+static const struct of_device_id mt798x_wmac_of_match[] = {
+ { .compatible = "mediatek,mt7981-wmac", .data = (u32 *)0x7981 },
+ { .compatible = "mediatek,mt7986-wmac", .data = (u32 *)0x7986 },
+ {},
+};
+
+MODULE_DEVICE_TABLE(of, mt798x_wmac_of_match);
+
+struct platform_driver mt798x_wmac_driver = {
+ .driver = {
+ .name = "mt798x-wmac",
+ .of_match_table = mt798x_wmac_of_match,
+ },
+ .probe = mt798x_wmac_probe,
+ .remove = mt798x_wmac_remove,
+};
+
+MODULE_FIRMWARE(MT7986_FIRMWARE_WA);
+MODULE_FIRMWARE(MT7986_FIRMWARE_WM);
+MODULE_FIRMWARE(MT7986_FIRMWARE_WM_MT7975);
+MODULE_FIRMWARE(MT7986_ROM_PATCH);
+MODULE_FIRMWARE(MT7986_ROM_PATCH_MT7975);
+
+MODULE_FIRMWARE(MT7981_FIRMWARE_WA);
+MODULE_FIRMWARE(MT7981_FIRMWARE_WM);
+MODULE_FIRMWARE(MT7981_ROM_PATCH);
diff --git a/drivers/net/wireless/mediatek/mt76/mt7915/testmode.c b/drivers/net/wireless/mediatek/mt76/mt7915/testmode.c
new file mode 100644
index 0000000000..0d76ae31b3
--- /dev/null
+++ b/drivers/net/wireless/mediatek/mt76/mt7915/testmode.c
@@ -0,0 +1,788 @@
+// SPDX-License-Identifier: ISC
+/* Copyright (C) 2020 MediaTek Inc. */
+
+#include "mt7915.h"
+#include "mac.h"
+#include "mcu.h"
+#include "testmode.h"
+
+enum {
+ TM_CHANGED_TXPOWER,
+ TM_CHANGED_FREQ_OFFSET,
+
+ /* must be last */
+ NUM_TM_CHANGED
+};
+
+static const u8 tm_change_map[] = {
+ [TM_CHANGED_TXPOWER] = MT76_TM_ATTR_TX_POWER,
+ [TM_CHANGED_FREQ_OFFSET] = MT76_TM_ATTR_FREQ_OFFSET,
+};
+
+struct reg_band {
+ u32 band[2];
+};
+
+#define REG_BAND(_list, _reg) \
+ { _list.band[0] = MT_##_reg(0); \
+ _list.band[1] = MT_##_reg(1); }
+#define REG_BAND_IDX(_list, _reg, _idx) \
+ { _list.band[0] = MT_##_reg(0, _idx); \
+ _list.band[1] = MT_##_reg(1, _idx); }
+
+#define TM_REG_MAX_ID 17
+static struct reg_band reg_backup_list[TM_REG_MAX_ID];
+
+
+static int
+mt7915_tm_set_tx_power(struct mt7915_phy *phy)
+{
+ struct mt7915_dev *dev = phy->dev;
+ struct mt76_phy *mphy = phy->mt76;
+ struct cfg80211_chan_def *chandef = &mphy->chandef;
+ int freq = chandef->center_freq1;
+ int ret;
+ struct {
+ u8 format_id;
+ u8 band_idx;
+ s8 tx_power;
+ u8 ant_idx; /* Only 0 is valid */
+ u8 center_chan;
+ u8 rsv[3];
+ } __packed req = {
+ .format_id = 0xf,
+ .band_idx = phy->mt76->band_idx,
+ .center_chan = ieee80211_frequency_to_channel(freq),
+ };
+ u8 *tx_power = NULL;
+
+ if (phy->mt76->test.state != MT76_TM_STATE_OFF)
+ tx_power = phy->mt76->test.tx_power;
+
+	/* Tx power of the other antennas is the same as antenna 0 */
+ if (tx_power && tx_power[0])
+ req.tx_power = tx_power[0];
+
+ ret = mt76_mcu_send_msg(&dev->mt76,
+ MCU_EXT_CMD(TX_POWER_FEATURE_CTRL),
+ &req, sizeof(req), false);
+
+ return ret;
+}
+
+static int
+mt7915_tm_set_freq_offset(struct mt7915_phy *phy, bool en, u32 val)
+{
+ struct mt7915_dev *dev = phy->dev;
+ struct mt7915_tm_cmd req = {
+ .testmode_en = en,
+ .param_idx = MCU_ATE_SET_FREQ_OFFSET,
+ .param.freq.band = phy->mt76->band_idx,
+ .param.freq.freq_offset = cpu_to_le32(val),
+ };
+
+ return mt76_mcu_send_msg(&dev->mt76, MCU_EXT_CMD(ATE_CTRL), &req,
+ sizeof(req), false);
+}
+
+static int
+mt7915_tm_mode_ctrl(struct mt7915_dev *dev, bool enable)
+{
+ struct {
+ u8 format_id;
+ bool enable;
+ u8 rsv[2];
+ } __packed req = {
+ .format_id = 0x6,
+ .enable = enable,
+ };
+
+ return mt76_mcu_send_msg(&dev->mt76,
+ MCU_EXT_CMD(TX_POWER_FEATURE_CTRL),
+ &req, sizeof(req), false);
+}
+
+static int
+mt7915_tm_set_trx(struct mt7915_phy *phy, int type, bool en)
+{
+ struct mt7915_dev *dev = phy->dev;
+ struct mt7915_tm_cmd req = {
+ .testmode_en = 1,
+ .param_idx = MCU_ATE_SET_TRX,
+ .param.trx.type = type,
+ .param.trx.enable = en,
+ .param.trx.band = phy->mt76->band_idx,
+ };
+
+ return mt76_mcu_send_msg(&dev->mt76, MCU_EXT_CMD(ATE_CTRL), &req,
+ sizeof(req), false);
+}
+
+static int
+mt7915_tm_clean_hwq(struct mt7915_phy *phy, u8 wcid)
+{
+ struct mt7915_dev *dev = phy->dev;
+ struct mt7915_tm_cmd req = {
+ .testmode_en = 1,
+ .param_idx = MCU_ATE_CLEAN_TXQUEUE,
+ .param.clean.wcid = wcid,
+ .param.clean.band = phy->mt76->band_idx,
+ };
+
+ return mt76_mcu_send_msg(&dev->mt76, MCU_EXT_CMD(ATE_CTRL), &req,
+ sizeof(req), false);
+}
+
+static int
+mt7915_tm_set_slot_time(struct mt7915_phy *phy, u8 slot_time, u8 sifs)
+{
+ struct mt7915_dev *dev = phy->dev;
+ struct mt7915_tm_cmd req = {
+ .testmode_en = !(phy->mt76->test.state == MT76_TM_STATE_OFF),
+ .param_idx = MCU_ATE_SET_SLOT_TIME,
+ .param.slot.slot_time = slot_time,
+ .param.slot.sifs = sifs,
+ .param.slot.rifs = 2,
+ .param.slot.eifs = cpu_to_le16(60),
+ .param.slot.band = phy->mt76->band_idx,
+ };
+
+ return mt76_mcu_send_msg(&dev->mt76, MCU_EXT_CMD(ATE_CTRL), &req,
+ sizeof(req), false);
+}
+
+static int
+mt7915_tm_set_tam_arb(struct mt7915_phy *phy, bool enable, bool mu)
+{
+ struct mt7915_dev *dev = phy->dev;
+ u32 op_mode;
+
+ if (!enable)
+ op_mode = TAM_ARB_OP_MODE_NORMAL;
+ else if (mu)
+ op_mode = TAM_ARB_OP_MODE_TEST;
+ else
+ op_mode = TAM_ARB_OP_MODE_FORCE_SU;
+
+ return mt7915_mcu_set_muru_ctrl(dev, MURU_SET_ARB_OP_MODE, op_mode);
+}
+
+static int
+mt7915_tm_set_wmm_qid(struct mt7915_phy *phy, u8 qid, u8 aifs, u8 cw_min,
+ u16 cw_max, u16 txop)
+{
+ struct mt7915_vif *mvif = (struct mt7915_vif *)phy->monitor_vif->drv_priv;
+ struct mt7915_mcu_tx req = { .total = 1 };
+ struct edca *e = &req.edca[0];
+
+ e->queue = qid + mvif->mt76.wmm_idx * MT76_CONNAC_MAX_WMM_SETS;
+ e->set = WMM_PARAM_SET;
+
+ e->aifs = aifs;
+ e->cw_min = cw_min;
+ e->cw_max = cpu_to_le16(cw_max);
+ e->txop = cpu_to_le16(txop);
+
+ return mt7915_mcu_update_edca(phy->dev, &req);
+}
+
+static int
+mt7915_tm_set_ipg_params(struct mt7915_phy *phy, u32 ipg, u8 mode)
+{
+#define TM_DEFAULT_SIFS 10
+#define TM_MAX_SIFS 127
+#define TM_MAX_AIFSN 0xf
+#define TM_MIN_AIFSN 0x1
+#define BBP_PROC_TIME 1500
+ struct mt7915_dev *dev = phy->dev;
+ u8 sig_ext = (mode == MT76_TM_TX_MODE_CCK) ? 0 : 6;
+ u8 slot_time = 9, sifs = TM_DEFAULT_SIFS;
+ u8 aifsn = TM_MIN_AIFSN;
+ u8 band = phy->mt76->band_idx;
+ u32 i2t_time, tr2t_time, txv_time;
+ u16 cw = 0;
+
+ if (ipg < sig_ext + slot_time + sifs)
+ ipg = 0;
+
+ if (!ipg)
+ goto done;
+
+ ipg -= sig_ext;
+
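+	/* Split the remaining gap into SIFS plus one slot when it is short
+	 * enough; otherwise spread it across a contention window, AIFSN
+	 * slots and a trailing SIFS, each clamped to its limit (cw <= 16,
+	 * aifsn <= TM_MAX_AIFSN, sifs <= TM_MAX_SIFS).
+	 */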
+ if (ipg <= (TM_MAX_SIFS + slot_time)) {
+ sifs = ipg - slot_time;
+ } else {
+ u32 val = (ipg + slot_time) / slot_time;
+
+ while (val >>= 1)
+ cw++;
+
+ if (cw > 16)
+ cw = 16;
+
+ ipg -= ((1 << cw) - 1) * slot_time;
+
+ aifsn = ipg / slot_time;
+ if (aifsn > TM_MAX_AIFSN)
+ aifsn = TM_MAX_AIFSN;
+
+ ipg -= aifsn * slot_time;
+
+ if (ipg > TM_DEFAULT_SIFS)
+ sifs = min_t(u32, ipg, TM_MAX_SIFS);
+ }
+done:
+ txv_time = mt76_get_field(dev, MT_TMAC_ATCR(band),
+ MT_TMAC_ATCR_TXV_TOUT);
+ txv_time *= 50; /* normal clock time */
+
+ i2t_time = (slot_time * 1000 - txv_time - BBP_PROC_TIME) / 50;
+ tr2t_time = (sifs * 1000 - txv_time - BBP_PROC_TIME) / 50;
+
+ mt76_set(dev, MT_TMAC_TRCR0(band),
+ FIELD_PREP(MT_TMAC_TRCR0_TR2T_CHK, tr2t_time) |
+ FIELD_PREP(MT_TMAC_TRCR0_I2T_CHK, i2t_time));
+
+ mt7915_tm_set_slot_time(phy, slot_time, sifs);
+
+ return mt7915_tm_set_wmm_qid(phy,
+ mt76_connac_lmac_mapping(IEEE80211_AC_BE),
+ aifsn, cw, cw, 0);
+}
+
+static int
+mt7915_tm_set_tx_len(struct mt7915_phy *phy, u32 tx_time)
+{
+ struct mt76_phy *mphy = phy->mt76;
+ struct mt76_testmode_data *td = &mphy->test;
+ struct ieee80211_supported_band *sband;
+ struct rate_info rate = {};
+ u16 flags = 0, tx_len;
+ u32 bitrate;
+ int ret;
+
+ if (!tx_time)
+ return 0;
+
+ rate.mcs = td->tx_rate_idx;
+ rate.nss = td->tx_rate_nss;
+
+ switch (td->tx_rate_mode) {
+ case MT76_TM_TX_MODE_CCK:
+ case MT76_TM_TX_MODE_OFDM:
+ if (mphy->chandef.chan->band == NL80211_BAND_5GHZ)
+ sband = &mphy->sband_5g.sband;
+ else if (mphy->chandef.chan->band == NL80211_BAND_6GHZ)
+ sband = &mphy->sband_6g.sband;
+ else
+ sband = &mphy->sband_2g.sband;
+
+ rate.legacy = sband->bitrates[rate.mcs].bitrate;
+ break;
+ case MT76_TM_TX_MODE_HT:
+ rate.mcs += rate.nss * 8;
+ flags |= RATE_INFO_FLAGS_MCS;
+
+ if (td->tx_rate_sgi)
+ flags |= RATE_INFO_FLAGS_SHORT_GI;
+ break;
+ case MT76_TM_TX_MODE_VHT:
+ flags |= RATE_INFO_FLAGS_VHT_MCS;
+
+ if (td->tx_rate_sgi)
+ flags |= RATE_INFO_FLAGS_SHORT_GI;
+ break;
+ case MT76_TM_TX_MODE_HE_SU:
+ case MT76_TM_TX_MODE_HE_EXT_SU:
+ case MT76_TM_TX_MODE_HE_TB:
+ case MT76_TM_TX_MODE_HE_MU:
+ rate.he_gi = td->tx_rate_sgi;
+ flags |= RATE_INFO_FLAGS_HE_MCS;
+ break;
+ default:
+ break;
+ }
+ rate.flags = flags;
+
+ switch (mphy->chandef.width) {
+ case NL80211_CHAN_WIDTH_160:
+ case NL80211_CHAN_WIDTH_80P80:
+ rate.bw = RATE_INFO_BW_160;
+ break;
+ case NL80211_CHAN_WIDTH_80:
+ rate.bw = RATE_INFO_BW_80;
+ break;
+ case NL80211_CHAN_WIDTH_40:
+ rate.bw = RATE_INFO_BW_40;
+ break;
+ default:
+ rate.bw = RATE_INFO_BW_20;
+ break;
+ }
+
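+	/* cfg80211_calculate_bitrate() returns the rate in 100 kbit/s units;
+	 * rate * tx_time (us) / 10 gives bits, / 8 gives the frame length
+	 * in bytes.
+	 */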
+ bitrate = cfg80211_calculate_bitrate(&rate);
+ tx_len = bitrate * tx_time / 10 / 8;
+
+ ret = mt76_testmode_alloc_skb(phy->mt76, tx_len);
+ if (ret)
+ return ret;
+
+ return 0;
+}
+
+static void
+mt7915_tm_reg_backup_restore(struct mt7915_phy *phy)
+{
+ int n_regs = ARRAY_SIZE(reg_backup_list);
+ struct mt7915_dev *dev = phy->dev;
+ u32 *b = phy->test.reg_backup;
+ u8 band = phy->mt76->band_idx;
+ int i;
+
+ REG_BAND_IDX(reg_backup_list[0], AGG_PCR0, 0);
+ REG_BAND_IDX(reg_backup_list[1], AGG_PCR0, 1);
+ REG_BAND_IDX(reg_backup_list[2], AGG_AWSCR0, 0);
+ REG_BAND_IDX(reg_backup_list[3], AGG_AWSCR0, 1);
+ REG_BAND_IDX(reg_backup_list[4], AGG_AWSCR0, 2);
+ REG_BAND_IDX(reg_backup_list[5], AGG_AWSCR0, 3);
+ REG_BAND(reg_backup_list[6], AGG_MRCR);
+ REG_BAND(reg_backup_list[7], TMAC_TFCR0);
+ REG_BAND(reg_backup_list[8], TMAC_TCR0);
+ REG_BAND(reg_backup_list[9], AGG_ATCR1);
+ REG_BAND(reg_backup_list[10], AGG_ATCR3);
+ REG_BAND(reg_backup_list[11], TMAC_TRCR0);
+ REG_BAND(reg_backup_list[12], TMAC_ICR0);
+ REG_BAND_IDX(reg_backup_list[13], ARB_DRNGR0, 0);
+ REG_BAND_IDX(reg_backup_list[14], ARB_DRNGR0, 1);
+ REG_BAND(reg_backup_list[15], WF_RFCR);
+ REG_BAND(reg_backup_list[16], WF_RFCR1);
+
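+	/* Leaving testmode restores the saved per-band registers; entering
+	 * it takes a one-time backup and then applies the overrides below.
+	 */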
+ if (phy->mt76->test.state == MT76_TM_STATE_OFF) {
+ for (i = 0; i < n_regs; i++)
+ mt76_wr(dev, reg_backup_list[i].band[band], b[i]);
+ return;
+ }
+
+ if (!b) {
+ b = devm_kzalloc(dev->mt76.dev, 4 * n_regs, GFP_KERNEL);
+ if (!b)
+ return;
+
+ phy->test.reg_backup = b;
+ for (i = 0; i < n_regs; i++)
+ b[i] = mt76_rr(dev, reg_backup_list[i].band[band]);
+ }
+
+ mt76_clear(dev, MT_AGG_PCR0(band, 0), MT_AGG_PCR0_MM_PROT |
+ MT_AGG_PCR0_GF_PROT | MT_AGG_PCR0_ERP_PROT |
+ MT_AGG_PCR0_VHT_PROT | MT_AGG_PCR0_BW20_PROT |
+ MT_AGG_PCR0_BW40_PROT | MT_AGG_PCR0_BW80_PROT);
+ mt76_set(dev, MT_AGG_PCR0(band, 0), MT_AGG_PCR0_PTA_WIN_DIS);
+
+ mt76_wr(dev, MT_AGG_PCR0(band, 1), MT_AGG_PCR1_RTS0_NUM_THRES |
+ MT_AGG_PCR1_RTS0_LEN_THRES);
+
+ mt76_clear(dev, MT_AGG_MRCR(band), MT_AGG_MRCR_BAR_CNT_LIMIT |
+ MT_AGG_MRCR_LAST_RTS_CTS_RN | MT_AGG_MRCR_RTS_FAIL_LIMIT |
+ MT_AGG_MRCR_TXCMD_RTS_FAIL_LIMIT);
+
+ mt76_rmw(dev, MT_AGG_MRCR(band), MT_AGG_MRCR_RTS_FAIL_LIMIT |
+ MT_AGG_MRCR_TXCMD_RTS_FAIL_LIMIT,
+ FIELD_PREP(MT_AGG_MRCR_RTS_FAIL_LIMIT, 1) |
+ FIELD_PREP(MT_AGG_MRCR_TXCMD_RTS_FAIL_LIMIT, 1));
+
+ mt76_wr(dev, MT_TMAC_TFCR0(band), 0);
+ mt76_clear(dev, MT_TMAC_TCR0(band), MT_TMAC_TCR0_TBTT_STOP_CTRL);
+
+ /* config rx filter for testmode rx */
+ mt76_wr(dev, MT_WF_RFCR(band), 0xcf70a);
+ mt76_wr(dev, MT_WF_RFCR1(band), 0);
+}
+
+static void
+mt7915_tm_init(struct mt7915_phy *phy, bool en)
+{
+ struct mt7915_dev *dev = phy->dev;
+
+ if (!test_bit(MT76_STATE_RUNNING, &phy->mt76->state))
+ return;
+
+ mt7915_mcu_set_sku_en(phy, !en);
+
+ mt7915_tm_mode_ctrl(dev, en);
+ mt7915_tm_reg_backup_restore(phy);
+ mt7915_tm_set_trx(phy, TM_MAC_TXRX, !en);
+
+ mt7915_mcu_add_bss_info(phy, phy->monitor_vif, en);
+ mt7915_mcu_add_sta(dev, phy->monitor_vif, NULL, en);
+
+ if (!en)
+ mt7915_tm_set_tam_arb(phy, en, 0);
+}
+
+static void
+mt7915_tm_update_channel(struct mt7915_phy *phy)
+{
+ mutex_unlock(&phy->dev->mt76.mutex);
+ mt7915_set_channel(phy);
+ mutex_lock(&phy->dev->mt76.mutex);
+
+ mt7915_mcu_set_chan_info(phy, MCU_EXT_CMD(SET_RX_PATH));
+}
+
+static void
+mt7915_tm_set_tx_frames(struct mt7915_phy *phy, bool en)
+{
+ struct mt76_testmode_data *td = &phy->mt76->test;
+ struct mt7915_dev *dev = phy->dev;
+ struct ieee80211_tx_info *info;
+ u8 duty_cycle = td->tx_duty_cycle;
+ u32 tx_time = td->tx_time;
+ u32 ipg = td->tx_ipg;
+
+ mt7915_tm_set_trx(phy, TM_MAC_RX_RXV, false);
+ mt7915_tm_clean_hwq(phy, dev->mt76.global_wcid.idx);
+
+ if (en) {
+ mt7915_tm_update_channel(phy);
+
+ if (td->tx_spe_idx)
+ phy->test.spe_idx = td->tx_spe_idx;
+ else
+ phy->test.spe_idx = mt76_connac_spe_idx(td->tx_antenna_mask);
+ }
+
+ mt7915_tm_set_tam_arb(phy, en,
+ td->tx_rate_mode == MT76_TM_TX_MODE_HE_MU);
+
+ /* if all three params are set, duty_cycle will be ignored */
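+	/* duty_cycle (%) = tx_time / (tx_time + ipg) * 100, so derive
+	 * whichever of ipg/tx_time was left unset from the other two.
+	 */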
+ if (duty_cycle && tx_time && !ipg) {
+ ipg = tx_time * 100 / duty_cycle - tx_time;
+ } else if (duty_cycle && !tx_time && ipg) {
+ if (duty_cycle < 100)
+ tx_time = duty_cycle * ipg / (100 - duty_cycle);
+ }
+
+ mt7915_tm_set_ipg_params(phy, ipg, td->tx_rate_mode);
+ mt7915_tm_set_tx_len(phy, tx_time);
+
+ if (ipg)
+ td->tx_queued_limit = MT76_TM_TIMEOUT * 1000000 / ipg / 2;
+
+ if (!en || !td->tx_skb)
+ return;
+
+ info = IEEE80211_SKB_CB(td->tx_skb);
+ info->control.vif = phy->monitor_vif;
+
+ mt7915_tm_set_trx(phy, TM_MAC_TX, en);
+}
+
+static void
+mt7915_tm_set_rx_frames(struct mt7915_phy *phy, bool en)
+{
+ mt7915_tm_set_trx(phy, TM_MAC_RX_RXV, false);
+
+ if (en) {
+ struct mt7915_dev *dev = phy->dev;
+
+ mt7915_tm_update_channel(phy);
+
+ /* read-clear */
+ mt76_rr(dev, MT_MIB_SDR3(phy->mt76->band_idx));
+ mt7915_tm_set_trx(phy, TM_MAC_RX_RXV, en);
+ }
+}
+
+static int
+mt7915_tm_rf_switch_mode(struct mt7915_dev *dev, u32 oper)
+{
+ struct mt7915_tm_rf_test req = {
+ .op.op_mode = cpu_to_le32(oper),
+ };
+
+ return mt76_mcu_send_msg(&dev->mt76, MCU_EXT_CMD(RF_TEST), &req,
+ sizeof(req), true);
+}
+
+static int
+mt7915_tm_set_tx_cont(struct mt7915_phy *phy, bool en)
+{
+#define TX_CONT_START 0x05
+#define TX_CONT_STOP 0x06
+ struct mt7915_dev *dev = phy->dev;
+ struct cfg80211_chan_def *chandef = &phy->mt76->chandef;
+ int freq1 = ieee80211_frequency_to_channel(chandef->center_freq1);
+ struct mt76_testmode_data *td = &phy->mt76->test;
+ u32 func_idx = en ? TX_CONT_START : TX_CONT_STOP;
+ u8 rate_idx = td->tx_rate_idx, mode;
+ u8 band = phy->mt76->band_idx;
+ u16 rateval;
+ struct mt7915_tm_rf_test req = {
+ .action = 1,
+ .icap_len = 120,
+ .op.rf.func_idx = cpu_to_le32(func_idx),
+ };
+ struct tm_tx_cont *tx_cont = &req.op.rf.param.tx_cont;
+
+ tx_cont->control_ch = chandef->chan->hw_value;
+ tx_cont->center_ch = freq1;
+ tx_cont->tx_ant = td->tx_antenna_mask;
+ tx_cont->band = band;
+
+ switch (chandef->width) {
+ case NL80211_CHAN_WIDTH_40:
+ tx_cont->bw = CMD_CBW_40MHZ;
+ break;
+ case NL80211_CHAN_WIDTH_80:
+ tx_cont->bw = CMD_CBW_80MHZ;
+ break;
+ case NL80211_CHAN_WIDTH_80P80:
+ tx_cont->bw = CMD_CBW_8080MHZ;
+ break;
+ case NL80211_CHAN_WIDTH_160:
+ tx_cont->bw = CMD_CBW_160MHZ;
+ break;
+ case NL80211_CHAN_WIDTH_5:
+ tx_cont->bw = CMD_CBW_5MHZ;
+ break;
+ case NL80211_CHAN_WIDTH_10:
+ tx_cont->bw = CMD_CBW_10MHZ;
+ break;
+ case NL80211_CHAN_WIDTH_20:
+ tx_cont->bw = CMD_CBW_20MHZ;
+ break;
+ case NL80211_CHAN_WIDTH_20_NOHT:
+ tx_cont->bw = CMD_CBW_20MHZ;
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ if (!en) {
+ req.op.rf.param.func_data = cpu_to_le32(band);
+ goto out;
+ }
+
+ if (td->tx_rate_mode <= MT76_TM_TX_MODE_OFDM) {
+ struct ieee80211_supported_band *sband;
+ u8 idx = rate_idx;
+
+ if (chandef->chan->band == NL80211_BAND_5GHZ)
+ sband = &phy->mt76->sband_5g.sband;
+ else if (chandef->chan->band == NL80211_BAND_6GHZ)
+ sband = &phy->mt76->sband_6g.sband;
+ else
+ sband = &phy->mt76->sband_2g.sband;
+
+ if (td->tx_rate_mode == MT76_TM_TX_MODE_OFDM)
+ idx += 4;
+ rate_idx = sband->bitrates[idx].hw_value & 0xff;
+ }
+
+ switch (td->tx_rate_mode) {
+ case MT76_TM_TX_MODE_CCK:
+ mode = MT_PHY_TYPE_CCK;
+ break;
+ case MT76_TM_TX_MODE_OFDM:
+ mode = MT_PHY_TYPE_OFDM;
+ break;
+ case MT76_TM_TX_MODE_HT:
+ mode = MT_PHY_TYPE_HT;
+ break;
+ case MT76_TM_TX_MODE_VHT:
+ mode = MT_PHY_TYPE_VHT;
+ break;
+ case MT76_TM_TX_MODE_HE_SU:
+ mode = MT_PHY_TYPE_HE_SU;
+ break;
+ case MT76_TM_TX_MODE_HE_EXT_SU:
+ mode = MT_PHY_TYPE_HE_EXT_SU;
+ break;
+ case MT76_TM_TX_MODE_HE_TB:
+ mode = MT_PHY_TYPE_HE_TB;
+ break;
+ case MT76_TM_TX_MODE_HE_MU:
+ mode = MT_PHY_TYPE_HE_MU;
+ break;
+ default:
+ return -EINVAL;
+ }
+
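+	/* MCU rate value: PHY mode in bits 6 and above, rate index in bits 5:0 */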
+ rateval = mode << 6 | rate_idx;
+ tx_cont->rateval = cpu_to_le16(rateval);
+
+out:
+ if (!en) {
+ int ret;
+
+ ret = mt76_mcu_send_msg(&dev->mt76, MCU_EXT_CMD(RF_TEST), &req,
+ sizeof(req), true);
+ if (ret)
+ return ret;
+
+ return mt7915_tm_rf_switch_mode(dev, RF_OPER_NORMAL);
+ }
+
+ mt7915_tm_rf_switch_mode(dev, RF_OPER_RF_TEST);
+ mt7915_tm_update_channel(phy);
+
+ return mt76_mcu_send_msg(&dev->mt76, MCU_EXT_CMD(RF_TEST), &req,
+ sizeof(req), true);
+}
+
+static void
+mt7915_tm_update_params(struct mt7915_phy *phy, u32 changed)
+{
+ struct mt76_testmode_data *td = &phy->mt76->test;
+ bool en = phy->mt76->test.state != MT76_TM_STATE_OFF;
+
+ if (changed & BIT(TM_CHANGED_FREQ_OFFSET))
+ mt7915_tm_set_freq_offset(phy, en, en ? td->freq_offset : 0);
+ if (changed & BIT(TM_CHANGED_TXPOWER))
+ mt7915_tm_set_tx_power(phy);
+}
+
+static int
+mt7915_tm_set_state(struct mt76_phy *mphy, enum mt76_testmode_state state)
+{
+ struct mt76_testmode_data *td = &mphy->test;
+ struct mt7915_phy *phy = mphy->priv;
+ enum mt76_testmode_state prev_state = td->state;
+
+ mphy->test.state = state;
+
+ if (prev_state == MT76_TM_STATE_TX_FRAMES ||
+ state == MT76_TM_STATE_TX_FRAMES)
+ mt7915_tm_set_tx_frames(phy, state == MT76_TM_STATE_TX_FRAMES);
+ else if (prev_state == MT76_TM_STATE_RX_FRAMES ||
+ state == MT76_TM_STATE_RX_FRAMES)
+ mt7915_tm_set_rx_frames(phy, state == MT76_TM_STATE_RX_FRAMES);
+ else if (prev_state == MT76_TM_STATE_TX_CONT ||
+ state == MT76_TM_STATE_TX_CONT)
+ mt7915_tm_set_tx_cont(phy, state == MT76_TM_STATE_TX_CONT);
+ else if (prev_state == MT76_TM_STATE_OFF ||
+ state == MT76_TM_STATE_OFF)
+ mt7915_tm_init(phy, !(state == MT76_TM_STATE_OFF));
+
+ if ((state == MT76_TM_STATE_IDLE &&
+ prev_state == MT76_TM_STATE_OFF) ||
+ (state == MT76_TM_STATE_OFF &&
+ prev_state == MT76_TM_STATE_IDLE)) {
+ u32 changed = 0;
+ int i;
+
+ for (i = 0; i < ARRAY_SIZE(tm_change_map); i++) {
+ u16 cur = tm_change_map[i];
+
+ if (td->param_set[cur / 32] & BIT(cur % 32))
+ changed |= BIT(i);
+ }
+
+ mt7915_tm_update_params(phy, changed);
+ }
+
+ return 0;
+}
+
+static int
+mt7915_tm_set_params(struct mt76_phy *mphy, struct nlattr **tb,
+ enum mt76_testmode_state new_state)
+{
+ struct mt76_testmode_data *td = &mphy->test;
+ struct mt7915_phy *phy = mphy->priv;
+ struct mt7915_dev *dev = phy->dev;
+ u32 chainmask = mphy->chainmask, changed = 0;
+ bool ext_phy = phy != &dev->phy;
+ int i;
+
+ BUILD_BUG_ON(NUM_TM_CHANGED >= 32);
+
+ if (new_state == MT76_TM_STATE_OFF ||
+ td->state == MT76_TM_STATE_OFF)
+ return 0;
+
+ chainmask = ext_phy ? chainmask >> dev->chainshift : chainmask;
+ if (td->tx_antenna_mask > chainmask)
+ return -EINVAL;
+
+ for (i = 0; i < ARRAY_SIZE(tm_change_map); i++) {
+ if (tb[tm_change_map[i]])
+ changed |= BIT(i);
+ }
+
+ mt7915_tm_update_params(phy, changed);
+
+ return 0;
+}
+
+static int
+mt7915_tm_dump_stats(struct mt76_phy *mphy, struct sk_buff *msg)
+{
+ struct mt7915_phy *phy = mphy->priv;
+ struct mt7915_dev *dev = phy->dev;
+ enum mt76_rxq_id q;
+ void *rx, *rssi;
+ u16 fcs_err;
+ int i;
+ u32 cnt;
+
+ rx = nla_nest_start(msg, MT76_TM_STATS_ATTR_LAST_RX);
+ if (!rx)
+ return -ENOMEM;
+
+ if (nla_put_s32(msg, MT76_TM_RX_ATTR_FREQ_OFFSET, phy->test.last_freq_offset))
+ return -ENOMEM;
+
+ rssi = nla_nest_start(msg, MT76_TM_RX_ATTR_RCPI);
+ if (!rssi)
+ return -ENOMEM;
+
+ for (i = 0; i < ARRAY_SIZE(phy->test.last_rcpi); i++)
+ if (nla_put_u8(msg, i, phy->test.last_rcpi[i]))
+ return -ENOMEM;
+
+ nla_nest_end(msg, rssi);
+
+ rssi = nla_nest_start(msg, MT76_TM_RX_ATTR_IB_RSSI);
+ if (!rssi)
+ return -ENOMEM;
+
+ for (i = 0; i < ARRAY_SIZE(phy->test.last_ib_rssi); i++)
+ if (nla_put_s8(msg, i, phy->test.last_ib_rssi[i]))
+ return -ENOMEM;
+
+ nla_nest_end(msg, rssi);
+
+ rssi = nla_nest_start(msg, MT76_TM_RX_ATTR_WB_RSSI);
+ if (!rssi)
+ return -ENOMEM;
+
+ for (i = 0; i < ARRAY_SIZE(phy->test.last_wb_rssi); i++)
+ if (nla_put_s8(msg, i, phy->test.last_wb_rssi[i]))
+ return -ENOMEM;
+
+ nla_nest_end(msg, rssi);
+
+ if (nla_put_u8(msg, MT76_TM_RX_ATTR_SNR, phy->test.last_snr))
+ return -ENOMEM;
+
+ nla_nest_end(msg, rx);
+
+ cnt = mt76_rr(dev, MT_MIB_SDR3(phy->mt76->band_idx));
+ fcs_err = is_mt7915(&dev->mt76) ? FIELD_GET(MT_MIB_SDR3_FCS_ERR_MASK, cnt) :
+ FIELD_GET(MT_MIB_SDR3_FCS_ERR_MASK_MT7916, cnt);
+
+ q = phy->mt76->band_idx ? MT_RXQ_BAND1 : MT_RXQ_MAIN;
+ mphy->test.rx_stats.packets[q] += fcs_err;
+ mphy->test.rx_stats.fcs_error[q] += fcs_err;
+
+ return 0;
+}
+
+const struct mt76_testmode_ops mt7915_testmode_ops = {
+ .set_state = mt7915_tm_set_state,
+ .set_params = mt7915_tm_set_params,
+ .dump_stats = mt7915_tm_dump_stats,
+};
diff --git a/drivers/net/wireless/mediatek/mt76/mt7915/testmode.h b/drivers/net/wireless/mediatek/mt76/mt7915/testmode.h
new file mode 100644
index 0000000000..5573ac3093
--- /dev/null
+++ b/drivers/net/wireless/mediatek/mt76/mt7915/testmode.h
@@ -0,0 +1,105 @@
+/* SPDX-License-Identifier: ISC */
+/* Copyright (C) 2020 MediaTek Inc. */
+
+#ifndef __MT7915_TESTMODE_H
+#define __MT7915_TESTMODE_H
+
+struct mt7915_tm_trx {
+ u8 type;
+ u8 enable;
+ u8 band;
+ u8 rsv;
+};
+
+struct mt7915_tm_freq_offset {
+ u8 band;
+ __le32 freq_offset;
+};
+
+struct mt7915_tm_slot_time {
+ u8 slot_time;
+ u8 sifs;
+ u8 rifs;
+ u8 _rsv;
+ __le16 eifs;
+ u8 band;
+ u8 _rsv1[5];
+};
+
+struct mt7915_tm_clean_txq {
+ bool sta_pause;
+ u8 wcid; /* 256 sta */
+ u8 band;
+ u8 rsv;
+};
+
+struct mt7915_tm_cmd {
+ u8 testmode_en;
+ u8 param_idx;
+ u8 _rsv[2];
+ union {
+ __le32 data;
+ struct mt7915_tm_trx trx;
+ struct mt7915_tm_freq_offset freq;
+ struct mt7915_tm_slot_time slot;
+ struct mt7915_tm_clean_txq clean;
+ u8 test[72];
+ } param;
+} __packed;
+
+enum {
+ TM_MAC_TX = 1,
+ TM_MAC_RX,
+ TM_MAC_TXRX,
+ TM_MAC_TXRX_RXV,
+ TM_MAC_RXV,
+ TM_MAC_RX_RXV,
+};
+
+struct tm_tx_cont {
+ u8 control_ch;
+ u8 center_ch;
+ u8 bw;
+ u8 tx_ant;
+ __le16 rateval;
+ u8 band;
+ u8 txfd_mode;
+};
+
+struct mt7915_tm_rf_test {
+ u8 action;
+ u8 icap_len;
+ u8 _rsv[2];
+ union {
+ __le32 op_mode;
+ __le32 freq;
+
+ struct {
+ __le32 func_idx;
+ union {
+ __le32 func_data;
+ __le32 cal_dump;
+
+ struct tm_tx_cont tx_cont;
+
+ u8 _pad[80];
+ } param;
+ } rf;
+ } op;
+} __packed;
+
+enum {
+ RF_OPER_NORMAL,
+ RF_OPER_RF_TEST,
+ RF_OPER_ICAP,
+ RF_OPER_ICAP_OVERLAP,
+ RF_OPER_WIFI_SPECTRUM,
+};
+
+enum {
+ TAM_ARB_OP_MODE_NORMAL = 1,
+ TAM_ARB_OP_MODE_TEST,
+ TAM_ARB_OP_MODE_FORCE_SU = 5,
+};
+
+#endif